├── .gitignore ├── Makefile ├── README.md ├── dkms.conf ├── intel-iommu.patch └── src ├── Kbuild ├── Kconfig ├── ithc-debug.c ├── ithc-debug.h ├── ithc-dma.c ├── ithc-dma.h ├── ithc-hid.c ├── ithc-hid.h ├── ithc-legacy.c ├── ithc-legacy.h ├── ithc-main.c ├── ithc-quickspi.c ├── ithc-quickspi.h ├── ithc-regs.c ├── ithc-regs.h └── ithc.h /.gitignore: -------------------------------------------------------------------------------- 1 | /build/ 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | KDIR ?= /lib/modules/`uname -r`/build 2 | DEST ?= build 3 | 4 | default: 5 | mkdir -p $(DEST) 6 | find $(DEST)/ -type l -exec rm {} + 7 | ln -sr src/* $(DEST)/ 8 | $(MAKE) -C $(KDIR) M=$(abspath $(DEST)) CONFIG_HID_ITHC=m 9 | 10 | install: 11 | $(MAKE) -C $(KDIR) M=$(abspath $(DEST)) CONFIG_HID_ITHC=m modules_install 12 | depmod -a 13 | sync 14 | 15 | PKG_NAME := `sed -n '/^PACKAGE_NAME="\(.*\)"$$/s//\1/p' dkms.conf` 16 | PKG_VER := `sed -n '/^PACKAGE_VERSION="\(.*\)"$$/s//\1/p' dkms.conf` 17 | DKMS_DIR = /usr/src/$(PKG_NAME)-$(PKG_VER) 18 | DKMS_PKG = $(PKG_NAME)/$(PKG_VER) 19 | 20 | dkms-install: 21 | -test -e $(DKMS_DIR) && $(MAKE) dkms-uninstall 22 | mkdir -p $(DKMS_DIR) 23 | cp -r dkms.conf Makefile src $(DKMS_DIR) 24 | dkms add $(DKMS_PKG) 25 | dkms build $(DKMS_PKG) 26 | dkms install $(DKMS_PKG) 27 | sync 28 | 29 | dkms-uninstall: 30 | -modprobe -r ithc 31 | -dkms uninstall $(DKMS_PKG) --all 32 | -dkms remove $(DKMS_PKG) --all 33 | -rm -rf $(DKMS_DIR) 34 | sync 35 | 36 | set-nosid: 37 | echo 'GRUB_CMDLINE_LINUX_DEFAULT="$$GRUB_CMDLINE_LINUX_DEFAULT intremap=nosid"' > /etc/default/grub.d/intremap-nosid.cfg 38 | update-grub 39 | sync 40 | 41 | clear-nosid: 42 | rm /etc/default/grub.d/intremap-nosid.cfg 43 | update-grub 44 | sync 45 | 46 | clean: 47 | -rm -r $(DEST) 48 | 49 | run: 50 | $(MAKE) 51 | -sudo modprobe -r ithc 52 | sudo $(MAKE) install 
53 | sudo modprobe ithc 54 | 55 | checkpatch: 56 | $(KDIR)/scripts/checkpatch.pl -f -q --no-tree --show-types --ignore TABSTOP,BLOCK_COMMENT_STYLE,LINE_SPACING src/* 57 | 58 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Linux driver for Intel Touch Host Controller 2 | ============================================ 3 | 4 | NOTE: This driver is included in the [Linux Surface](https://github.com/linux-surface/linux-surface) kernel. 5 | 6 | The ithc kernel module provides support for the Intel Touch Host Controller, 7 | which is used for the touchscreens in some newer Intel-based devices, such 8 | as the Surface Pro 7+, Surface Pro 8, X1 Fold, etc. 9 | 10 | The module works as an HID transport driver. For Surface devices, you will 11 | also need to install [IPTSD](https://github.com/linux-surface/iptsd) to 12 | enable multi-touch and pen support. Without IPTSD, only single touch will 13 | work. Non-Microsoft devices use standard HID data, and don't need IPTSD. 14 | 15 | Installation with DKMS 16 | ---------------------- 17 | 18 | - Install prerequisites (e.g. Debian packages: `dkms` and `linux-headers-amd64`) 19 | - `sudo make dkms-install` 20 | - `sudo modprobe ithc` (or reboot) 21 | - Check dmesg for ithc messages/errors. 22 | 23 | Installation without DKMS 24 | ------------------------- 25 | 26 | - Install prerequisites (e.g. Debian packages: `build-essential` and `linux-headers-amd64`) 27 | - `make && sudo make install` 28 | - `sudo modprobe ithc` (or reboot) 29 | - Check dmesg for ithc messages/errors. 30 | 31 | Known issues 32 | ------------ 33 | 34 | On Lakefield and Tiger Lake devices (SP7+/SP8/SLS/SL4/X1Fold) the driver 35 | may fail to start correctly, and you will see the following error in dmesg: 36 | "Blocked an interrupt request due to source-id verification failure". 
37 | 38 | To fix this, apply one of the following workarounds: 39 | 1. Add the kernel parameter `intremap=nosid` and reboot. 40 | If you're using GRUB on Debian/Ubuntu, you can do this with 41 | `sudo make set-nosid`. 42 | 2. Apply `intel-iommu.patch` to your kernel. 43 | 3. Use the driver in polling mode by setting the `poll` module parameter. 44 | Run `echo options ithc poll | sudo tee /etc/modprobe.d/ithc-poll.conf` 45 | and reload the module or reboot. 46 | 47 | License 48 | ------- 49 | 50 | Public domain/CC0. 51 | (Files are marked GPL/BSD since that is preferred for the kernel.) 52 | 53 | -------------------------------------------------------------------------------- /dkms.conf: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="ithc" 2 | PACKAGE_VERSION="0.1" 3 | AUTOINSTALL="yes" 4 | MAKE[0]="make KDIR=$kernel_source_dir" 5 | CLEAN="make clean" 6 | BUILT_MODULE_NAME[0]="ithc" 7 | BUILT_MODULE_LOCATION[0]="build/" 8 | DEST_MODULE_LOCATION[0]="/kernel/drivers/misc/" 9 | 10 | -------------------------------------------------------------------------------- /intel-iommu.patch: -------------------------------------------------------------------------------- 1 | diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c 2 | index 08f56326e2f8..e0c20ff84d21 100644 3 | --- a/drivers/iommu/intel/irq_remapping.c 4 | +++ b/drivers/iommu/intel/irq_remapping.c 5 | @@ -386,6 +386,23 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev) 6 | data.busmatch_count = 0; 7 | pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); 8 | 9 | + /* 10 | + * The Intel Touch Host Controller is at 00:10.6, but for some reason 11 | + * the MSI interrupts have request id 01:05.0 on LKF/TGL. 12 | + * Possibly a hardware bug, which seems to have been fixed with ADL. 13 | + * Disable id verification for affected devices to work around this. 14 | + * FIXME Find proper fix or turn this into a quirk. 
15 | + */ 16 | + if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) { 17 | + switch(dev->device) { 18 | + case 0x98d0: case 0x98d1: // LKF 19 | + case 0xa0d0: case 0xa0d1: // TGL LP 20 | + case 0x43d0: case 0x43d1: // TGL H 21 | + set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0); 22 | + return 0; 23 | + } 24 | + } 25 | + 26 | /* 27 | * DMA alias provides us with a PCI device and alias. The only case 28 | * where the it will return an alias on a different bus than the 29 | -------------------------------------------------------------------------------- /src/Kbuild: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_HID_ITHC) := ithc.o 2 | 3 | ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-hid.o ithc-legacy.o ithc-quickspi.o ithc-debug.o 4 | 5 | ccflags-y := -std=gnu11 -Wno-declaration-after-statement 6 | 7 | -------------------------------------------------------------------------------- /src/Kconfig: -------------------------------------------------------------------------------- 1 | config HID_ITHC 2 | tristate "Intel Touch Host Controller" 3 | depends on PCI 4 | depends on HID 5 | help 6 | Say Y here if your system has a touchscreen using Intels 7 | Touch Host Controller (ITHC / IPTS) technology. 8 | 9 | If unsure say N. 10 | 11 | To compile this driver as a module, choose M here: the 12 | module will be called ithc. 
13 | -------------------------------------------------------------------------------- /src/ithc-debug.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 | 3 | #include "ithc.h" 4 | 5 | void ithc_log_regs(struct ithc *ithc) 6 | { 7 | if (!ithc->prev_regs) 8 | return; 9 | u32 __iomem *cur = (__iomem void *)ithc->regs; 10 | u32 *prev = (void *)ithc->prev_regs; 11 | for (int i = 1024; i < sizeof(*ithc->regs) / 4; i++) { 12 | u32 x = readl(cur + i); 13 | if (x != prev[i]) { 14 | pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x); 15 | prev[i] = x; 16 | } 17 | } 18 | } 19 | 20 | static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, 21 | loff_t *offset) 22 | { 23 | // Debug commands consist of a single letter followed by a list of numbers (decimal or 24 | // hexadecimal, space-separated). 25 | struct ithc *ithc = file_inode(f)->i_private; 26 | char cmd[256]; 27 | if (!ithc || !ithc->pci) 28 | return -ENODEV; 29 | if (!len) 30 | return -EINVAL; 31 | if (len >= sizeof(cmd)) 32 | return -EINVAL; 33 | if (copy_from_user(cmd, buf, len)) 34 | return -EFAULT; 35 | cmd[len] = 0; 36 | if (cmd[len-1] == '\n') 37 | cmd[len-1] = 0; 38 | pci_info(ithc->pci, "debug command: %s\n", cmd); 39 | 40 | // Parse the list of arguments into a u32 array. 41 | u32 n = 0; 42 | const char *s = cmd + 1; 43 | u32 a[32]; 44 | while (*s && *s != '\n') { 45 | if (n >= ARRAY_SIZE(a)) 46 | return -EINVAL; 47 | if (*s++ != ' ') 48 | return -EINVAL; 49 | char *e; 50 | a[n++] = simple_strtoul(s, &e, 0); 51 | if (e == s) 52 | return -EINVAL; 53 | s = e; 54 | } 55 | ithc_log_regs(ithc); 56 | 57 | // Execute the command. 
58 | switch (cmd[0]) { 59 | case 'x': // reset 60 | ithc_reset(ithc); 61 | break; 62 | case 'w': // write register: offset mask value 63 | if (n != 3 || (a[0] & 3)) 64 | return -EINVAL; 65 | pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", 66 | a[0], a[2], a[1]); 67 | bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]); 68 | break; 69 | case 'r': // read register: offset 70 | if (n != 1 || (a[0] & 3)) 71 | return -EINVAL; 72 | pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], 73 | readl(((__iomem u32 *)ithc->regs) + a[0] / 4)); 74 | break; 75 | case 's': // spi command: cmd offset len data... 76 | // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 77 | // set touch cfg: s 6 12 4 XX 78 | if (n < 3 || a[2] > (n - 3) * 4) 79 | return -EINVAL; 80 | pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]); 81 | if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3)) 82 | for (u32 i = 0; i < (a[2] + 3) / 4; i++) 83 | pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]); 84 | break; 85 | case 'd': // dma command: cmd len data... 
86 | // get report descriptor: d 7 8 0 0 87 | // enable multitouch: d 3 2 0x0105 88 | if (n < 1) 89 | return -EINVAL; 90 | pci_info(ithc->pci, "debug dma command with %u bytes of data\n", n * 4); 91 | struct ithc_data data = { .type = ITHC_DATA_RAW, .size = n * 4, .data = a }; 92 | if (ithc_dma_tx(ithc, &data)) 93 | pci_err(ithc->pci, "dma tx failed\n"); 94 | break; 95 | default: 96 | return -EINVAL; 97 | } 98 | ithc_log_regs(ithc); 99 | return len; 100 | } 101 | 102 | static struct dentry *dbg_dir; 103 | 104 | void __init ithc_debug_init_module(void) 105 | { 106 | struct dentry *d = debugfs_create_dir(DEVNAME, NULL); 107 | if (IS_ERR(d)) 108 | pr_warn("failed to create debugfs dir (%li)\n", PTR_ERR(d)); 109 | else 110 | dbg_dir = d; 111 | } 112 | 113 | void __exit ithc_debug_exit_module(void) 114 | { 115 | debugfs_remove_recursive(dbg_dir); 116 | dbg_dir = NULL; 117 | } 118 | 119 | static const struct file_operations ithc_debugfops_cmd = { 120 | .owner = THIS_MODULE, 121 | .write = ithc_debugfs_cmd_write, 122 | }; 123 | 124 | static void ithc_debugfs_devres_release(struct device *dev, void *res) 125 | { 126 | struct dentry **dbgm = res; 127 | debugfs_remove_recursive(*dbgm); 128 | } 129 | 130 | int ithc_debug_init_device(struct ithc *ithc) 131 | { 132 | if (!dbg_dir) 133 | return -ENOENT; 134 | struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL); 135 | if (!dbgm) 136 | return -ENOMEM; 137 | devres_add(&ithc->pci->dev, dbgm); 138 | struct dentry *dbg = debugfs_create_dir(pci_name(ithc->pci), dbg_dir); 139 | if (IS_ERR(dbg)) 140 | return PTR_ERR(dbg); 141 | *dbgm = dbg; 142 | 143 | struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd); 144 | if (IS_ERR(cmd)) 145 | return PTR_ERR(cmd); 146 | 147 | return 0; 148 | } 149 | 150 | -------------------------------------------------------------------------------- /src/ithc-debug.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 | 3 | void ithc_debug_init_module(void); 4 | void ithc_debug_exit_module(void); 5 | int ithc_debug_init_device(struct ithc *ithc); 6 | void ithc_log_regs(struct ithc *ithc); 7 | 8 | -------------------------------------------------------------------------------- /src/ithc-dma.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 | 3 | #include "ithc.h" 4 | 5 | // The THC uses tables of PRDs (physical region descriptors) to describe the TX and RX data buffers. 6 | // Each PRD contains the DMA address and size of a block of DMA memory, and some status flags. 7 | // This allows each data buffer to consist of multiple non-contiguous blocks of memory. 8 | 9 | static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, 10 | unsigned int num_buffers, unsigned int num_pages, enum dma_data_direction dir) 11 | { 12 | p->num_pages = num_pages; 13 | p->dir = dir; 14 | // We allocate enough space to have one PRD per data buffer page, however if the data 15 | // buffer pages happen to be contiguous, we can describe the buffer using fewer PRDs, so 16 | // some will remain unused (which is fine). 17 | p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE); 18 | p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL); 19 | if (!p->addr) 20 | return -ENOMEM; 21 | if (p->dma_addr & (PAGE_SIZE - 1)) 22 | return -EFAULT; 23 | return 0; 24 | } 25 | 26 | // Devres managed sg_table wrapper. 
27 | struct ithc_sg_table { 28 | void *addr; 29 | struct sg_table sgt; 30 | enum dma_data_direction dir; 31 | }; 32 | static void ithc_dma_sgtable_free(struct sg_table *sgt) 33 | { 34 | struct scatterlist *sg; 35 | int i; 36 | for_each_sgtable_sg(sgt, sg, i) { 37 | struct page *p = sg_page(sg); 38 | if (p) 39 | __free_page(p); 40 | } 41 | sg_free_table(sgt); 42 | } 43 | static void ithc_dma_data_devres_release(struct device *dev, void *res) 44 | { 45 | struct ithc_sg_table *sgt = res; 46 | if (sgt->addr) 47 | vunmap(sgt->addr); 48 | dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0); 49 | ithc_dma_sgtable_free(&sgt->sgt); 50 | } 51 | 52 | static int ithc_dma_data_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, 53 | struct ithc_dma_data_buffer *b) 54 | { 55 | // We don't use dma_alloc_coherent() for data buffers, because they don't have to be 56 | // coherent (they are unidirectional) or contiguous (we can use one PRD per page). 57 | // We could use dma_alloc_noncontiguous(), however this still always allocates a single 58 | // DMA mapped segment, which is more restrictive than what we need. 59 | // Instead we use an sg_table of individually allocated pages. 
60 | struct page *pages[16]; 61 | if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) 62 | return -EINVAL; 63 | b->active_idx = -1; 64 | struct ithc_sg_table *sgt = devres_alloc( 65 | ithc_dma_data_devres_release, sizeof(*sgt), GFP_KERNEL); 66 | if (!sgt) 67 | return -ENOMEM; 68 | sgt->dir = prds->dir; 69 | 70 | if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) { 71 | struct scatterlist *sg; 72 | int i; 73 | bool ok = true; 74 | for_each_sgtable_sg(&sgt->sgt, sg, i) { 75 | // NOTE: don't need __GFP_DMA for PCI DMA 76 | struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); 77 | if (!p) { 78 | ok = false; 79 | break; 80 | } 81 | sg_set_page(sg, p, PAGE_SIZE, 0); 82 | } 83 | if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) { 84 | devres_add(&ithc->pci->dev, sgt); 85 | b->sgt = &sgt->sgt; 86 | b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL); 87 | if (!b->addr) 88 | return -ENOMEM; 89 | return 0; 90 | } 91 | ithc_dma_sgtable_free(&sgt->sgt); 92 | } 93 | devres_free(sgt); 94 | return -ENOMEM; 95 | } 96 | 97 | static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, 98 | struct ithc_dma_data_buffer *b, unsigned int idx) 99 | { 100 | // Give a buffer to the THC. 101 | struct ithc_phys_region_desc *prd = prds->addr; 102 | prd += idx * prds->num_pages; 103 | if (b->active_idx >= 0) { 104 | pci_err(ithc->pci, "buffer already active\n"); 105 | return -EINVAL; 106 | } 107 | b->active_idx = idx; 108 | if (prds->dir == DMA_TO_DEVICE) { 109 | // TX buffer: Caller should have already filled the data buffer, so just fill 110 | // the PRD and flush. 111 | // (TODO: Support multi-page TX buffers. So far no device seems to use or need 112 | // these though.) 
113 | if (b->data_size > PAGE_SIZE) 114 | return -EINVAL; 115 | prd->addr = sg_dma_address(b->sgt->sgl) >> 10; 116 | prd->size = b->data_size | PRD_FLAG_END; 117 | flush_kernel_vmap_range(b->addr, b->data_size); 118 | } else if (prds->dir == DMA_FROM_DEVICE) { 119 | // RX buffer: Reset PRDs. 120 | struct scatterlist *sg; 121 | int i; 122 | for_each_sgtable_dma_sg(b->sgt, sg, i) { 123 | prd->addr = sg_dma_address(sg) >> 10; 124 | prd->size = sg_dma_len(sg); 125 | prd++; 126 | } 127 | prd[-1].size |= PRD_FLAG_END; 128 | } 129 | dma_wmb(); // for the prds 130 | dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir); 131 | return 0; 132 | } 133 | 134 | static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, 135 | struct ithc_dma_data_buffer *b, unsigned int idx) 136 | { 137 | // Take a buffer from the THC. 138 | struct ithc_phys_region_desc *prd = prds->addr; 139 | prd += idx * prds->num_pages; 140 | // This is purely a sanity check. We don't strictly need the idx parameter for this 141 | // function, because it should always be the same as active_idx, unless we have a bug. 142 | if (b->active_idx != idx) { 143 | pci_err(ithc->pci, "wrong buffer index\n"); 144 | return -EINVAL; 145 | } 146 | b->active_idx = -1; 147 | if (prds->dir == DMA_FROM_DEVICE) { 148 | // RX buffer: Calculate actual received data size from PRDs. 
149 | dma_rmb(); // for the prds 150 | b->data_size = 0; 151 | struct scatterlist *sg; 152 | int i; 153 | for_each_sgtable_dma_sg(b->sgt, sg, i) { 154 | unsigned int size = prd->size; 155 | b->data_size += size & PRD_SIZE_MASK; 156 | if (size & PRD_FLAG_END) 157 | break; 158 | if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { 159 | pci_err(ithc->pci, "truncated prd\n"); 160 | break; 161 | } 162 | prd++; 163 | } 164 | invalidate_kernel_vmap_range(b->addr, b->data_size); 165 | } 166 | dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir); 167 | return 0; 168 | } 169 | 170 | int ithc_dma_rx_init(struct ithc *ithc, u8 channel) 171 | { 172 | struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; 173 | mutex_init(&rx->mutex); 174 | 175 | // Allocate buffers. 176 | unsigned int num_pages = (ithc->max_rx_size + PAGE_SIZE - 1) / PAGE_SIZE; 177 | pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", 178 | NUM_RX_BUF, ithc->max_rx_size, num_pages); 179 | CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE); 180 | for (unsigned int i = 0; i < NUM_RX_BUF; i++) 181 | CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]); 182 | 183 | // Init registers. 184 | writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2); 185 | lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr); 186 | writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs); 187 | writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds); 188 | u8 head = readb(&ithc->regs->dma_rx[channel].head); 189 | if (head) { 190 | pci_err(ithc->pci, "head is nonzero (%u)\n", head); 191 | return -EIO; 192 | } 193 | 194 | // Init buffers. 
195 | for (unsigned int i = 0; i < NUM_RX_BUF; i++) 196 | CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i); 197 | 198 | writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail); 199 | return 0; 200 | } 201 | 202 | void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) 203 | { 204 | bitsb_set(&ithc->regs->dma_rx[channel].control, 205 | DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA); 206 | CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status, 207 | DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED); 208 | } 209 | 210 | int ithc_dma_tx_init(struct ithc *ithc) 211 | { 212 | struct ithc_dma_tx *tx = &ithc->dma_tx; 213 | mutex_init(&tx->mutex); 214 | 215 | // Allocate buffers. 216 | unsigned int num_pages = (ithc->max_tx_size + PAGE_SIZE - 1) / PAGE_SIZE; 217 | pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", 218 | ithc->max_tx_size, num_pages); 219 | CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE); 220 | CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf); 221 | 222 | // Init registers. 223 | lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr); 224 | writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds); 225 | 226 | // Init buffers. 227 | CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); 228 | return 0; 229 | } 230 | 231 | static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) 232 | { 233 | // Process all filled RX buffers from the ringbuffer. 234 | struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; 235 | unsigned int n = rx->num_received; 236 | u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head); 237 | while (1) { 238 | u8 tail = n % NUM_RX_BUF; 239 | u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 
0 : DMA_RX_WRAP_FLAG); 240 | writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail); 241 | // ringbuffer is full if tail_wrap == head_wrap 242 | // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG 243 | if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) 244 | return 0; 245 | 246 | // take the buffer that the device just filled 247 | struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF]; 248 | CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail); 249 | rx->num_received = ++n; 250 | 251 | // process data 252 | struct ithc_data d; 253 | if ((ithc->use_quickspi ? ithc_quickspi_decode_rx : ithc_legacy_decode_rx) 254 | (ithc, b->addr, b->data_size, &d) < 0) { 255 | pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u: %*ph\n", 256 | channel, tail, b->data_size, min((int)b->data_size, 64), b->addr); 257 | print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, 258 | b->addr, min(b->data_size, 0x400u), 0); 259 | } else { 260 | ithc_hid_process_data(ithc, &d); 261 | } 262 | 263 | // give the buffer back to the device 264 | CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail); 265 | } 266 | } 267 | int ithc_dma_rx(struct ithc *ithc, u8 channel) 268 | { 269 | struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; 270 | mutex_lock(&rx->mutex); 271 | int ret = ithc_dma_rx_unlocked(ithc, channel); 272 | mutex_unlock(&rx->mutex); 273 | return ret; 274 | } 275 | 276 | static int ithc_dma_tx_unlocked(struct ithc *ithc, const struct ithc_data *data) 277 | { 278 | // Send a single TX buffer to the THC. 279 | pci_dbg(ithc->pci, "dma tx data type %u, size %u\n", data->type, data->size); 280 | CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); 281 | 282 | // Fill the TX buffer with header and data. 283 | ssize_t sz; 284 | if (data->type == ITHC_DATA_RAW) { 285 | sz = min(data->size, ithc->max_tx_size); 286 | memcpy(ithc->dma_tx.buf.addr, data->data, sz); 287 | } else { 288 | sz = (ithc->use_quickspi ? 
ithc_quickspi_encode_tx : ithc_legacy_encode_tx) 289 | (ithc, data, ithc->dma_tx.buf.addr, ithc->max_tx_size); 290 | } 291 | ithc->dma_tx.buf.data_size = sz < 0 ? 0 : sz; 292 | CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); 293 | if (sz < 0) { 294 | pci_err(ithc->pci, "failed to encode tx data type %i, size %u, error %i\n", 295 | data->type, data->size, (int)sz); 296 | return -EINVAL; 297 | } 298 | 299 | // Let the THC process the buffer. 300 | bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND); 301 | CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); 302 | writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status); 303 | return 0; 304 | } 305 | int ithc_dma_tx(struct ithc *ithc, const struct ithc_data *data) 306 | { 307 | mutex_lock(&ithc->dma_tx.mutex); 308 | int ret = ithc_dma_tx_unlocked(ithc, data); 309 | mutex_unlock(&ithc->dma_tx.mutex); 310 | return ret; 311 | } 312 | 313 | -------------------------------------------------------------------------------- /src/ithc-dma.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 | 3 | #define PRD_SIZE_MASK 0xffffff 4 | #define PRD_FLAG_END 0x1000000 5 | #define PRD_FLAG_SUCCESS 0x2000000 6 | #define PRD_FLAG_ERROR 0x4000000 7 | 8 | struct ithc_phys_region_desc { 9 | u64 addr; // physical addr/1024 10 | u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds 11 | u32 unused; 12 | }; 13 | 14 | struct ithc_dma_prd_buffer { 15 | void *addr; 16 | dma_addr_t dma_addr; 17 | u32 size; 18 | u32 num_pages; // per data buffer 19 | enum dma_data_direction dir; 20 | }; 21 | 22 | struct ithc_dma_data_buffer { 23 | void *addr; 24 | struct sg_table *sgt; 25 | int active_idx; 26 | u32 data_size; 27 | }; 28 | 29 | struct ithc_dma_tx { 30 | struct mutex mutex; 31 | struct ithc_dma_prd_buffer prds; 32 | struct ithc_dma_data_buffer buf; 33 | }; 34 | 
35 | struct ithc_dma_rx { 36 | struct mutex mutex; 37 | u32 num_received; 38 | struct ithc_dma_prd_buffer prds; 39 | struct ithc_dma_data_buffer bufs[NUM_RX_BUF]; 40 | }; 41 | 42 | int ithc_dma_rx_init(struct ithc *ithc, u8 channel); 43 | void ithc_dma_rx_enable(struct ithc *ithc, u8 channel); 44 | int ithc_dma_tx_init(struct ithc *ithc); 45 | int ithc_dma_rx(struct ithc *ithc, u8 channel); 46 | int ithc_dma_tx(struct ithc *ithc, const struct ithc_data *data); 47 | 48 | -------------------------------------------------------------------------------- /src/ithc-hid.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 | 3 | #include "ithc.h" 4 | 5 | static int ithc_hid_start(struct hid_device *hdev) { return 0; } 6 | static void ithc_hid_stop(struct hid_device *hdev) { } 7 | static int ithc_hid_open(struct hid_device *hdev) { return 0; } 8 | static void ithc_hid_close(struct hid_device *hdev) { } 9 | 10 | static int ithc_hid_parse(struct hid_device *hdev) 11 | { 12 | struct ithc *ithc = hdev->driver_data; 13 | const struct ithc_data get_report_desc = { .type = ITHC_DATA_REPORT_DESCRIPTOR }; 14 | WRITE_ONCE(ithc->hid.parse_done, false); 15 | for (int retries = 0; ; retries++) { 16 | ithc_log_regs(ithc); 17 | CHECK_RET(ithc_dma_tx, ithc, &get_report_desc); 18 | if (wait_event_timeout(ithc->hid.wait_parse, READ_ONCE(ithc->hid.parse_done), 19 | msecs_to_jiffies(200))) { 20 | ithc_log_regs(ithc); 21 | return 0; 22 | } 23 | if (retries > 5) { 24 | ithc_log_regs(ithc); 25 | pci_err(ithc->pci, "failed to read report descriptor\n"); 26 | return -ETIMEDOUT; 27 | } 28 | pci_warn(ithc->pci, "failed to read report descriptor, retrying\n"); 29 | } 30 | } 31 | 32 | static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, 33 | size_t len, unsigned char rtype, int reqtype) 34 | { 35 | struct ithc *ithc = hdev->driver_data; 36 | if (!buf || !len) 37 | return 
-EINVAL; 38 | 39 | struct ithc_data d = { .size = len, .data = buf }; 40 | buf[0] = reportnum; 41 | 42 | if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) { 43 | d.type = ITHC_DATA_OUTPUT_REPORT; 44 | CHECK_RET(ithc_dma_tx, ithc, &d); 45 | return 0; 46 | } 47 | 48 | if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) { 49 | d.type = ITHC_DATA_SET_FEATURE; 50 | CHECK_RET(ithc_dma_tx, ithc, &d); 51 | return 0; 52 | } 53 | 54 | if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) { 55 | d.type = ITHC_DATA_GET_FEATURE; 56 | d.data = &reportnum; 57 | d.size = 1; 58 | 59 | // Prepare for response. 60 | mutex_lock(&ithc->hid.get_feature_mutex); 61 | ithc->hid.get_feature_buf = buf; 62 | ithc->hid.get_feature_size = len; 63 | mutex_unlock(&ithc->hid.get_feature_mutex); 64 | 65 | // Transmit 'get feature' request. 66 | int r = CHECK(ithc_dma_tx, ithc, &d); 67 | if (!r) { 68 | r = wait_event_interruptible_timeout(ithc->hid.wait_get_feature, 69 | !ithc->hid.get_feature_buf, msecs_to_jiffies(1000)); 70 | if (!r) 71 | r = -ETIMEDOUT; 72 | else if (r < 0) 73 | r = -EINTR; 74 | else 75 | r = 0; 76 | } 77 | 78 | // If everything went ok, the buffer has been filled with the response data. 79 | // Return the response size. 80 | mutex_lock(&ithc->hid.get_feature_mutex); 81 | ithc->hid.get_feature_buf = NULL; 82 | if (!r) 83 | r = ithc->hid.get_feature_size; 84 | mutex_unlock(&ithc->hid.get_feature_mutex); 85 | return r; 86 | } 87 | 88 | pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", 89 | rtype, reqtype, reportnum); 90 | return -EINVAL; 91 | } 92 | 93 | // FIXME hid_input_report()/hid_parse_report() currently don't take const buffers, so we have to 94 | // cast away the const to avoid a compiler warning... 
95 | #define NOCONST(x) ((void *)x) 96 | 97 | void ithc_hid_process_data(struct ithc *ithc, struct ithc_data *d) 98 | { 99 | WARN_ON(!ithc->hid.dev); 100 | if (!ithc->hid.dev) 101 | return; 102 | 103 | switch (d->type) { 104 | 105 | case ITHC_DATA_IGNORE: 106 | return; 107 | 108 | case ITHC_DATA_ERROR: 109 | CHECK(ithc_reset, ithc); 110 | return; 111 | 112 | case ITHC_DATA_REPORT_DESCRIPTOR: 113 | // Response to the report descriptor request sent by ithc_hid_parse(). 114 | CHECK(hid_parse_report, ithc->hid.dev, NOCONST(d->data), d->size); 115 | WRITE_ONCE(ithc->hid.parse_done, true); 116 | wake_up(&ithc->hid.wait_parse); 117 | return; 118 | 119 | case ITHC_DATA_INPUT_REPORT: 120 | { 121 | // Standard HID input report. 122 | int r = hid_input_report(ithc->hid.dev, HID_INPUT_REPORT, NOCONST(d->data), d->size, 1); 123 | if (r < 0) { 124 | pci_warn(ithc->pci, "hid_input_report failed with %i (size %u, report ID 0x%02x)\n", 125 | r, d->size, d->size ? *(u8 *)d->data : 0); 126 | print_hex_dump_debug(DEVNAME " report: ", DUMP_PREFIX_OFFSET, 32, 1, 127 | d->data, min(d->size, 0x400u), 0); 128 | } 129 | return; 130 | } 131 | 132 | case ITHC_DATA_GET_FEATURE: 133 | { 134 | // Response to a 'get feature' request sent by ithc_hid_raw_request(). 135 | bool done = false; 136 | mutex_lock(&ithc->hid.get_feature_mutex); 137 | if (ithc->hid.get_feature_buf) { 138 | if (d->size < ithc->hid.get_feature_size) 139 | ithc->hid.get_feature_size = d->size; 140 | memcpy(ithc->hid.get_feature_buf, d->data, ithc->hid.get_feature_size); 141 | ithc->hid.get_feature_buf = NULL; 142 | done = true; 143 | } 144 | mutex_unlock(&ithc->hid.get_feature_mutex); 145 | if (done) { 146 | wake_up(&ithc->hid.wait_get_feature); 147 | } else { 148 | // Received data without a matching request, or the request already 149 | // timed out. (XXX What's the correct thing to do here?) 
150 | CHECK(hid_input_report, ithc->hid.dev, HID_FEATURE_REPORT, 151 | NOCONST(d->data), d->size, 1); 152 | } 153 | return; 154 | } 155 | 156 | default: 157 | pci_err(ithc->pci, "unhandled data type %i\n", d->type); 158 | return; 159 | } 160 | } 161 | 162 | static struct hid_ll_driver ithc_ll_driver = { 163 | .start = ithc_hid_start, 164 | .stop = ithc_hid_stop, 165 | .open = ithc_hid_open, 166 | .close = ithc_hid_close, 167 | .parse = ithc_hid_parse, 168 | .raw_request = ithc_hid_raw_request, 169 | }; 170 | 171 | static void ithc_hid_devres_release(struct device *dev, void *res) 172 | { 173 | struct hid_device **hidm = res; 174 | if (*hidm) 175 | hid_destroy_device(*hidm); 176 | } 177 | 178 | int ithc_hid_init(struct ithc *ithc) 179 | { 180 | struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL); 181 | if (!hidm) 182 | return -ENOMEM; 183 | devres_add(&ithc->pci->dev, hidm); 184 | struct hid_device *hid = hid_allocate_device(); 185 | if (IS_ERR(hid)) 186 | return PTR_ERR(hid); 187 | *hidm = hid; 188 | 189 | strscpy(hid->name, DEVFULLNAME, sizeof(hid->name)); 190 | strscpy(hid->phys, ithc->phys, sizeof(hid->phys)); 191 | hid->ll_driver = &ithc_ll_driver; 192 | hid->bus = BUS_PCI; 193 | hid->vendor = ithc->vendor_id; 194 | hid->product = ithc->product_id; 195 | hid->version = 0x100; 196 | hid->dev.parent = &ithc->pci->dev; 197 | hid->driver_data = ithc; 198 | 199 | ithc->hid.dev = hid; 200 | 201 | init_waitqueue_head(&ithc->hid.wait_parse); 202 | init_waitqueue_head(&ithc->hid.wait_get_feature); 203 | mutex_init(&ithc->hid.get_feature_mutex); 204 | 205 | return 0; 206 | } 207 | 208 | -------------------------------------------------------------------------------- /src/ithc-hid.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 | 3 | enum ithc_data_type { 4 | ITHC_DATA_IGNORE, 5 | ITHC_DATA_RAW, 6 | ITHC_DATA_ERROR, 7 | 
	ITHC_DATA_REPORT_DESCRIPTOR,
	ITHC_DATA_INPUT_REPORT,
	ITHC_DATA_OUTPUT_REPORT,
	ITHC_DATA_GET_FEATURE,
	ITHC_DATA_SET_FEATURE,
};

/* One decoded packet: a type tag plus a borrowed pointer into the DMA
 * buffer it was decoded from (not owned by this struct). */
struct ithc_data {
	enum ithc_data_type type;
	u32 size;
	const void *data;
};

/* Per-device HID transport state. */
struct ithc_hid {
	struct hid_device *dev;
	bool parse_done;                 // set once the report descriptor response arrived
	wait_queue_head_t wait_parse;
	wait_queue_head_t wait_get_feature;
	struct mutex get_feature_mutex;  // guards get_feature_buf/get_feature_size
	void *get_feature_buf;           // destination of a pending 'get feature' response
	size_t get_feature_size;
};

int ithc_hid_init(struct ithc *ithc);
void ithc_hid_process_data(struct ithc *ithc, struct ithc_data *d);

--------------------------------------------------------------------------------
/src/ithc-legacy.c:
--------------------------------------------------------------------------------
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause

#include "ithc.h"

// Field extractors for the dma_buf_sizes config field (sizes in bytes).
#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)

// Bits of the touch_cfg/touch_state config fields.
#define DEVCFG_TOUCH_MASK 0x3f
#define DEVCFG_TOUCH_ENABLE BIT(0)
#define DEVCFG_TOUCH_PROP_DATA_ENABLE BIT(1)
#define DEVCFG_TOUCH_HID_REPORT_ENABLE BIT(2)
#define DEVCFG_TOUCH_POWER_STATE(x) (((x) & 7) << 3)
#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)

#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"

// Bits/fields of the spi_config config field.
#define DEVCFG_SPI_CLKDIV(x) (((x) >> 1) & 7)
#define DEVCFG_SPI_CLKDIV_8 BIT(4)
#define DEVCFG_SPI_SUPPORTS_SINGLE BIT(5)
#define DEVCFG_SPI_SUPPORTS_DUAL BIT(6)
#define DEVCFG_SPI_SUPPORTS_QUAD BIT(7)
#define DEVCFG_SPI_MAX_TOUCH_POINTS(x) (((x) >> 8) & 0x3f)
#define DEVCFG_SPI_MIN_RESET_TIME(x) (((x) >> 16) & 0xf)
#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?

// Device configuration block read over SPI at offset 0 in legacy mode.
struct ithc_device_config { // (Example values are from an SP7+.)
	u32 irq_cause;    // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
	u32 error;        // 04 = 0x00000000
	u32 dma_buf_sizes; // 08 = 0x000a00ff
	u32 touch_cfg;    // 0c = 0x0000001c
	u32 touch_state;  // 10 = 0x0000001c
	u32 device_id;    // 14 = 0x43495424 = "$TIC"
	u32 spi_config;   // 18 = 0xfda00a2e
	u16 vendor_id;    // 1c = 0x045e = Microsoft Corp.
	u16 product_id;   // 1e = 0x0c1a
	u32 revision;     // 20 = 0x00000001
	u32 fw_version;   // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
	u32 command;      // 28 = 0x00000000
	u32 fw_mode;      // 2c = 0x00000000 (for fw update?)
	u32 _unknown_30;  // 30 = 0x00000000
	u8 eds_minor_ver; // 34 = 0x5e
	u8 eds_major_ver; // 35 = 0x03
	u8 interface_rev; // 36 = 0x04
	u8 eu_kernel_ver; // 37 = 0x04
	u32 _unknown_38;  // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
	u32 _unknown_3c;  // 3c = 0x00000002
};
static_assert(sizeof(struct ithc_device_config) == 64);

// Packet type codes carried in the legacy RX/TX headers
// (see ithc_legacy_decode_rx()/ithc_legacy_encode_tx()).
#define RX_CODE_INPUT_REPORT 3
#define RX_CODE_FEATURE_REPORT 4
#define RX_CODE_REPORT_DESCRIPTOR 5
#define RX_CODE_RESET 7

#define TX_CODE_SET_FEATURE 3
#define TX_CODE_GET_FEATURE 4
#define TX_CODE_OUTPUT_REPORT 5
#define TX_CODE_GET_REPORT_DESCRIPTOR 7

// Enable or disable the touch device by rewriting the touch_cfg config field
// (HID report mode is always kept enabled; power state 3 when enabling).
static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
{
	u32 x = ithc->legacy_touch_cfg =
		(ithc->legacy_touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) |
		DEVCFG_TOUCH_HID_REPORT_ENABLE |
		(enable ? 
		DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_POWER_STATE(3) : 0);
	return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
		offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
}

/* Legacy-mode initialization: reset the touch device, read its config block
 * over SPI (with retries), then apply the SPI settings the device advertises
 * and enable it. Returns 0 or a negative error code. */
int ithc_legacy_init(struct ithc *ithc)
{
	// Since we don't yet know which SPI config the device wants, use default speed and mode
	// initially for reading config data.
	CHECK(ithc_set_spi_config, ithc, 2, true, SPI_MODE_SINGLE, SPI_MODE_SINGLE);

	// Setting the following bit seems to make reading the config more reliable.
	bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);

	// Setting this bit may be necessary on ADL devices.
	switch (ithc->pci->device) {
	case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2:
	case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2:
	case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2:
		bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_5);
		break;
	}

	// Take the touch device out of reset.
	bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
	CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
	for (int retries = 0; ; retries++) {
		ithc_log_regs(ithc);
		bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
		// A valid irq_cause within the timeout means the reset succeeded.
		if (!waitl(ithc, &ithc->regs->irq_cause, 0xf, 2))
			break;
		if (retries > 5) {
			pci_err(ithc->pci, "failed to reset device, irq_cause = 0x%08x\n",
				readl(&ithc->regs->irq_cause));
			return -ETIMEDOUT;
		}
		pci_warn(ithc->pci, "invalid irq_cause, retrying reset\n");
		bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
		if (msleep_interruptible(1000))
			return -EINTR;
	}
	ithc_log_regs(ithc);

	CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_READY, DMA_RX_STATUS_READY);

	// Read configuration data.
	u32 spi_cfg;
	for (int retries = 0; ; retries++) {
		ithc_log_regs(ithc);
		struct ithc_device_config config = { 0 };
		CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(config), &config);
		u32 *p = (void *)&config;
		pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
		// The "$TIC" device ID marks a successfully read config block.
		if (config.device_id == DEVCFG_DEVICE_ID_TIC) {
			spi_cfg = config.spi_config;
			ithc->vendor_id = config.vendor_id;
			ithc->product_id = config.product_id;
			ithc->product_rev = config.revision;
			ithc->max_rx_size = DEVCFG_DMA_RX_SIZE(config.dma_buf_sizes);
			ithc->max_tx_size = DEVCFG_DMA_TX_SIZE(config.dma_buf_sizes);
			ithc->legacy_touch_cfg = config.touch_cfg;
			ithc->have_config = true;
			break;
		}
		if (retries > 10) {
			pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
				config.device_id);
			return -EIO;
		}
		pci_warn(ithc->pci, "failed to read config, retrying\n");
		if (msleep_interruptible(100))
			return -EINTR;
	}
	ithc_log_regs(ithc);

	// Apply SPI config and enable touch device.
	// Prefer the fastest SPI mode the device advertises (quad > dual > single).
	CHECK_RET(ithc_set_spi_config, ithc,
		DEVCFG_SPI_CLKDIV(spi_cfg), (spi_cfg & DEVCFG_SPI_CLKDIV_8) != 0,
		spi_cfg & DEVCFG_SPI_SUPPORTS_QUAD ? SPI_MODE_QUAD :
		spi_cfg & DEVCFG_SPI_SUPPORTS_DUAL ? SPI_MODE_DUAL :
		SPI_MODE_SINGLE,
		SPI_MODE_SINGLE);
	CHECK_RET(ithc_set_device_enabled, ithc, true);
	ithc_log_regs(ithc);
	return 0;
}

void ithc_legacy_exit(struct ithc *ithc)
{
	CHECK(ithc_set_device_enabled, ithc, false);
}

/* Decode a raw legacy-mode RX packet (header + payload) into an ithc_data.
 * dest->data points into src (borrowed). Returns 0 on success, negative
 * errno on malformed input. */
int ithc_legacy_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest)
{
	const struct {
		u32 code;
		u32 data_size;
		u32 _unknown[14];
	} *hdr = src;

	if (len < sizeof(*hdr))
		return -ENODATA;
	// Note: RX data is not padded, even though TX data must be padded.
	if (len != sizeof(*hdr) + hdr->data_size)
		return -EMSGSIZE;

	dest->data = hdr + 1;
	dest->size = hdr->data_size;

	switch (hdr->code) {
	case RX_CODE_RESET:
		// The THC sends a reset request when we need to reinitialize the device.
		// This usually only happens if we send an invalid command or put the device
		// in a bad state.
		dest->type = ITHC_DATA_ERROR;
		return 0;
	case RX_CODE_REPORT_DESCRIPTOR:
		// The descriptor is preceded by 8 nul bytes.
		if (hdr->data_size < 8)
			return -ENODATA;
		dest->type = ITHC_DATA_REPORT_DESCRIPTOR;
		dest->data = (char *)(hdr + 1) + 8;
		dest->size = hdr->data_size - 8;
		return 0;
	case RX_CODE_INPUT_REPORT:
		dest->type = ITHC_DATA_INPUT_REPORT;
		return 0;
	case RX_CODE_FEATURE_REPORT:
		dest->type = ITHC_DATA_GET_FEATURE;
		return 0;
	default:
		return -EINVAL;
	}
}

/* Encode an ithc_data into a legacy-mode TX packet in dest (padded to a
 * 4-byte boundary). Returns the number of bytes written, or negative errno. */
ssize_t ithc_legacy_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
	size_t maxlen)
{
	struct {
		u32 code;
		u32 data_size;
	} *hdr = dest;

	size_t src_size = src->size;
	const void *src_data = src->data;
	const u64 get_report_desc_data = 0;
	u32 code;

	switch (src->type) {
	case ITHC_DATA_SET_FEATURE:
		code = TX_CODE_SET_FEATURE;
		break;
	case ITHC_DATA_GET_FEATURE:
		code = TX_CODE_GET_FEATURE;
		break;
	case ITHC_DATA_OUTPUT_REPORT:
		code = TX_CODE_OUTPUT_REPORT;
		break;
	case ITHC_DATA_REPORT_DESCRIPTOR:
		// A descriptor request carries a fixed 8-byte zero payload.
		code = TX_CODE_GET_REPORT_DESCRIPTOR;
		src_size = sizeof(get_report_desc_data);
		src_data = &get_report_desc_data;
		break;
	default:
		return -EINVAL;
	}

	// Data must be padded to next 4-byte boundary.
	size_t padded = round_up(src_size, 4);
	if (sizeof(*hdr) + padded > maxlen)
		return -EOVERFLOW;

	// Fill the TX buffer with header and data.
	hdr->code = code;
	hdr->data_size = src_size;
	memcpy_and_pad(hdr + 1, padded, src_data, src_size, 0);

	return sizeof(*hdr) + padded;
}

--------------------------------------------------------------------------------
/src/ithc-legacy.h:
--------------------------------------------------------------------------------
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */

int ithc_legacy_init(struct ithc *ithc);
void ithc_legacy_exit(struct ithc *ithc);
int ithc_legacy_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest);
ssize_t ithc_legacy_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
	size_t maxlen);

--------------------------------------------------------------------------------
/src/ithc-main.c:
--------------------------------------------------------------------------------
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause

#include "ithc.h"

MODULE_DESCRIPTION("Intel Touch Host Controller driver");
MODULE_LICENSE("Dual BSD/GPL");

// PCI IDs of the supported THC generations (LKF, TGL, ADL, RPL).
static const struct pci_device_id ithc_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) }, 20 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) }, 21 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) }, 22 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) }, 23 | // MTL and up are handled by drivers/hid/intel-thc-hid 24 | {} 25 | }; 26 | MODULE_DEVICE_TABLE(pci, ithc_pci_tbl); 27 | 28 | // Module parameters 29 | 30 | static bool ithc_use_polling = false; 31 | module_param_named(poll, ithc_use_polling, bool, 0); 32 | MODULE_PARM_DESC(poll, "Use polling instead of interrupts"); 33 | 34 | // Since all known devices seem to use only channel 1, by default we disable channel 0. 35 | static bool ithc_use_rx0 = false; 36 | module_param_named(rx0, ithc_use_rx0, bool, 0); 37 | MODULE_PARM_DESC(rx0, "Use DMA RX channel 0"); 38 | 39 | static bool ithc_use_rx1 = true; 40 | module_param_named(rx1, ithc_use_rx1, bool, 0); 41 | MODULE_PARM_DESC(rx1, "Use DMA RX channel 1"); 42 | 43 | static int ithc_active_ltr_us = -1; 44 | module_param_named(activeltr, ithc_active_ltr_us, int, 0); 45 | MODULE_PARM_DESC(activeltr, "Active LTR value override (in microseconds)"); 46 | 47 | static int ithc_idle_ltr_us = -1; 48 | module_param_named(idleltr, ithc_idle_ltr_us, int, 0); 49 | MODULE_PARM_DESC(idleltr, "Idle LTR value override (in microseconds)"); 50 | 51 | static unsigned int ithc_idle_delay_ms = 1000; 52 | module_param_named(idledelay, ithc_idle_delay_ms, uint, 0); 53 | MODULE_PARM_DESC(idleltr, "Minimum idle time before applying idle LTR value (in milliseconds)"); 54 | 55 | static bool ithc_log_regs_enabled = false; 56 | module_param_named(logregs, ithc_log_regs_enabled, bool, 0); 57 | MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)"); 58 | 59 | // Interrupts/polling 60 | 61 | static void ithc_disable_interrupts(struct ithc *ithc) 62 | { 63 | writel(0, &ithc->regs->error_control); 64 | bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 
		0);
	bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_READY | DMA_RX_CONTROL_IRQ_DATA, 0);
	bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_READY | DMA_RX_CONTROL_IRQ_DATA, 0);
	bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
}

// Acknowledge (write-to-clear) all RX status bits for the given DMA channel.
static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
{
	writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_READY | DMA_RX_STATUS_HAVE_DATA,
		&ithc->regs->dma_rx[channel].status);
}

// Acknowledge all pending interrupt sources (errors, SPI command, RX, TX).
static void ithc_clear_interrupts(struct ithc *ithc)
{
	writel(0xffffffff, &ithc->regs->error_flags);
	writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
	writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
	ithc_clear_dma_rx_interrupts(ithc, 0);
	ithc_clear_dma_rx_interrupts(ithc, 1);
	writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2,
		&ithc->regs->dma_tx.status);
}

// Timer callback: switch LTR back to the idle value after a period of inactivity.
static void ithc_idle_timer_callback(struct timer_list *t)
{
	struct ithc *ithc = container_of(t, struct ithc, idle_timer);
	ithc_set_ltr_idle(ithc);
}

// Main event handler, invoked from the threaded IRQ handler or the polling thread:
// re-arms the idle-LTR timer, handles error flags, and drains the RX channels.
static void ithc_process(struct ithc *ithc)
{
	ithc_log_regs(ithc);

	// The THC automatically transitions from LTR idle to active at the start of a DMA transfer.
	// It does not appear to automatically go back to idle, so we switch it back after a delay.
	mod_timer(&ithc->idle_timer, jiffies + msecs_to_jiffies(ithc_idle_delay_ms));

	bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
	bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;

	// Read and clear error bits
	u32 err = readl(&ithc->regs->error_flags);
	if (err) {
		writel(err, &ithc->regs->error_flags);
		if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
			pci_err(ithc->pci, "error flags: 0x%08x\n", err);
		if (err & ERROR_FLAG_DMA_RX_TIMEOUT)
			pci_err(ithc->pci, "DMA RX timeout/error (try decreasing activeltr/idleltr if this happens frequently)\n");
	}

	// Process DMA rx
	if (ithc_use_rx0) {
		ithc_clear_dma_rx_interrupts(ithc, 0);
		if (rx0)
			ithc_dma_rx(ithc, 0);
	}
	if (ithc_use_rx1) {
		ithc_clear_dma_rx_interrupts(ithc, 1);
		if (rx1)
			ithc_dma_rx(ithc, 1);
	}

	ithc_log_regs(ithc);
}

// Threaded IRQ handler (registered with IRQF_ONESHOT; no hard-IRQ half).
static irqreturn_t ithc_interrupt_thread(int irq, void *arg)
{
	struct ithc *ithc = arg;
	pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
		readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
		readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
		readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
		readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
		readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
	ithc_process(ithc);
	return IRQ_HANDLED;
}

// Polling fallback used when the 'poll' module parameter is set (no MSI/MSI-X).
static int ithc_poll_thread(void *arg)
{
	struct ithc *ithc = arg;
	unsigned int sleep = 100;
	while (!kthread_should_stop()) {
		u32 n = ithc->dma_rx[1].num_received;
		ithc_process(ithc);
		// Decrease polling interval to 20ms if we received data, otherwise slowly
		// increase it up to 200ms.
		sleep = n != ithc->dma_rx[1].num_received ? 20
			: min(200u, sleep + (sleep >> 4) + 1);
		msleep_interruptible(sleep);
	}
	return 0;
}

// Device initialization and shutdown

// Quiesce and reset the THC, stop SPI/DMA activity, mask and clear interrupts.
static void ithc_disable(struct ithc *ithc)
{
	bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
	CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
	bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
	bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
	bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
	bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
	bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
	ithc_disable_interrupts(ithc);
	ithc_clear_interrupts(ithc);
}

// Bring up the THC and the touch device: pick QuickSPI vs legacy mode from
// ACPI, reset the controller, configure LTR, then run the mode-specific init.
static int ithc_init_device(struct ithc *ithc)
{
	// Read ACPI config for QuickSPI mode
	struct ithc_acpi_config cfg = { 0 };
	CHECK_RET(ithc_read_acpi_config, ithc, &cfg);
	if (!cfg.has_config)
		pci_info(ithc->pci, "no ACPI config, using legacy mode\n");
	else
		ithc_print_acpi_config(ithc, &cfg);
	ithc->use_quickspi = cfg.has_config;

	// Shut down device
	ithc_log_regs(ithc);
	bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
	ithc_disable(ithc);
	CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
	ithc_log_regs(ithc);

	// If the device was previously enabled, wait a bit to make sure it's fully shut down.
	if (was_enabled)
		if (msleep_interruptible(100))
			return -EINTR;

	// Set Latency Tolerance Reporting config. The device will automatically
	// apply these values depending on whether it is active or idle.
	// If active value is too high, DMA buffer data can become truncated.
	// By default, we set the active LTR value to 50us, and idle to 100ms.
	// Precedence: module parameter > ACPI value (in 1024ns units) > default.
	u64 active_ltr_ns = ithc_active_ltr_us >= 0 ? (u64)ithc_active_ltr_us * 1000
		: cfg.has_config && cfg.has_active_ltr ? (u64)cfg.active_ltr << 10
		: 50 * 1000;
	u64 idle_ltr_ns = ithc_idle_ltr_us >= 0 ? (u64)ithc_idle_ltr_us * 1000
		: cfg.has_config && cfg.has_idle_ltr ? (u64)cfg.idle_ltr << 10
		: 100 * 1000 * 1000;
	ithc_set_ltr_config(ithc, active_ltr_ns, idle_ltr_ns);

	if (ithc->use_quickspi)
		CHECK_RET(ithc_quickspi_init, ithc, &cfg);
	else
		CHECK_RET(ithc_legacy_init, ithc);

	return 0;
}

// Reinitialize the device in place (called from the DMA processing path when
// the device requests a reset).
int ithc_reset(struct ithc *ithc)
{
	// FIXME This should probably do devres_release_group()+ithc_start().
	// But because this is called during DMA processing, that would have to be done
	// asynchronously (schedule_work()?). And with extra locking?
	pci_err(ithc->pci, "reset\n");
	CHECK(ithc_init_device, ithc);
	if (ithc_use_rx0)
		ithc_dma_rx_enable(ithc, 0);
	if (ithc_use_rx1)
		ithc_dma_rx_enable(ithc, 1);
	ithc_log_regs(ithc);
	pci_dbg(ithc->pci, "reset completed\n");
	return 0;
}

// devres action: stop polling/IRQ, shut down the device and clear the DMA
// registers; runs BEFORE the DMA buffers are freed (see ithc_start()).
static void ithc_stop(void *res)
{
	struct ithc *ithc = res;
	pci_dbg(ithc->pci, "stopping\n");
	ithc_log_regs(ithc);

	if (ithc->poll_thread)
		CHECK(kthread_stop, ithc->poll_thread);
	if (ithc->irq >= 0)
		disable_irq(ithc->irq);
	if (ithc->use_quickspi)
		ithc_quickspi_exit(ithc);
	else
		ithc_legacy_exit(ithc);
	ithc_disable(ithc);
	timer_delete_sync(&ithc->idle_timer);

	// Clear DMA config.
	for (unsigned int i = 0; i < 2; i++) {
		CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
		lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
		writeb(0, &ithc->regs->dma_rx[i].num_bufs);
		writeb(0, &ithc->regs->dma_rx[i].num_prds);
	}
	lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
	writeb(0, &ithc->regs->dma_tx.num_prds);

	ithc_log_regs(ithc);
	pci_dbg(ithc->pci, "stopped\n");
}

static void ithc_clear_drvdata(void *res)
{
	struct pci_dev *pci = res;
	pci_set_drvdata(pci, NULL);
}

// Full device bring-up. All resources are tied to a devres group keyed by
// this function, so suspend/remove can tear everything down with
// devres_release_group(dev, ithc_start).
static int ithc_start(struct pci_dev *pci)
{
	pci_dbg(pci, "starting\n");
	if (pci_get_drvdata(pci)) {
		pci_err(pci, "device already initialized\n");
		return -EINVAL;
	}
	if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL))
		return -ENOMEM;

	// Allocate/init main driver struct.
	struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof(*ithc), GFP_KERNEL);
	if (!ithc)
		return -ENOMEM;
	ithc->irq = -1;
	ithc->pci = pci;
	snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
	pci_set_drvdata(pci, ithc);
	CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
	if (ithc_log_regs_enabled)
		ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof(*ithc->prev_regs), GFP_KERNEL);

	// PCI initialization.
	CHECK_RET(pcim_enable_device, pci);
	pci_set_master(pci);
	CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
	CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
	CHECK_RET(pci_set_power_state, pci, PCI_D0);
	ithc->regs = pcim_iomap_table(pci)[0];

	// Allocate IRQ.
	if (!ithc_use_polling) {
		CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
		ithc->irq = CHECK(pci_irq_vector, pci, 0);
		if (ithc->irq < 0)
			return ithc->irq;
	}

	// Initialize THC and touch device.
	CHECK_RET(ithc_init_device, ithc);

	// Initialize HID and DMA.
	CHECK_RET(ithc_hid_init, ithc);
	if (ithc_use_rx0)
		CHECK_RET(ithc_dma_rx_init, ithc, 0);
	if (ithc_use_rx1)
		CHECK_RET(ithc_dma_rx_init, ithc, 1);
	CHECK_RET(ithc_dma_tx_init, ithc);

	timer_setup(&ithc->idle_timer, ithc_idle_timer_callback, 0);

	// Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
	// disabled BEFORE the buffers are freed.
	CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);

	// Start polling/IRQ.
	if (ithc_use_polling) {
		pci_info(pci, "using polling instead of irq\n");
		// Use a thread instead of simple timer because we want to be able to sleep.
		ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
		if (IS_ERR(ithc->poll_thread)) {
			int err = PTR_ERR(ithc->poll_thread);
			ithc->poll_thread = NULL;
			return err;
		}
	} else {
		CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL,
			ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
	}

	if (ithc_use_rx0)
		ithc_dma_rx_enable(ithc, 0);
	if (ithc_use_rx1)
		ithc_dma_rx_enable(ithc, 1);

	// hid_add_device() can only be called after irq/polling is started and DMA is enabled,
	// because it calls ithc_hid_parse() which reads the report descriptor via DMA.
	CHECK_RET(hid_add_device, ithc->hid.dev);

	CHECK(ithc_debug_init_device, ithc);

	ithc_set_ltr_idle(ithc);

	pci_dbg(pci, "started\n");
	return 0;
}

static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	pci_dbg(pci, "device probe\n");
	return ithc_start(pci);
}

static void ithc_remove(struct pci_dev *pci)
{
	pci_dbg(pci, "device remove\n");
	// all cleanup is handled by devres
}

// For suspend/resume, we just deinitialize and reinitialize everything.
// TODO It might be cleaner to keep the HID device around, however we would then have to signal
// to userspace that the touch device has lost state and userspace needs to e.g. resend 'set
// feature' requests. Hidraw does not seem to have a facility to do that.
static int ithc_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	pci_dbg(pci, "pm suspend\n");
	// Tear down everything acquired by ithc_start().
	devres_release_group(dev, ithc_start);
	return 0;
}

static int ithc_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	pci_dbg(pci, "pm resume\n");
	return ithc_start(pci);
}

static int ithc_freeze(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	pci_dbg(pci, "pm freeze\n");
	devres_release_group(dev, ithc_start);
	return 0;
}

static int ithc_thaw(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	pci_dbg(pci, "pm thaw\n");
	return ithc_start(pci);
}

static int ithc_restore(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	pci_dbg(pci, "pm restore\n");
	return ithc_start(pci);
}

static struct pci_driver ithc_driver = {
	.name = DEVNAME,
	.id_table = ithc_pci_tbl,
	.probe = ithc_probe,
	.remove = ithc_remove,
	.driver.pm = &(const struct dev_pm_ops) {
		.suspend = ithc_suspend,
		.resume = ithc_resume,
		.freeze = ithc_freeze,
		.thaw = ithc_thaw,
		.restore = ithc_restore,
	},
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

static int __init ithc_init(void)
{
	ithc_debug_init_module();
	return pci_register_driver(&ithc_driver);
}

static void __exit ithc_exit(void)
{
	pci_unregister_driver(&ithc_driver);
	ithc_debug_exit_module();
}

module_init(ithc_init);
module_exit(ithc_exit);

--------------------------------------------------------------------------------
/src/ithc-quickspi.c:
--------------------------------------------------------------------------------
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause

// Some public THC/QuickSPI documentation can be found in:
// - Intel Firmware Support Package repo: https://github.com/intel/FSP
// - HID over SPI (HIDSPI) spec: https://www.microsoft.com/en-us/download/details.aspx?id=103325

#include "ithc.h"

// ACPI _DSM GUIDs used to read the HIDSPI/QuickSPI/LTR configuration.
static const guid_t guid_hidspi =
	GUID_INIT(0x6e2ac436, 0x0fcf, 0x41af, 0xa2, 0x65, 0xb3, 0x2a, 0x22, 0x0d, 0xcf, 0xab);
static const guid_t guid_thc_quickspi =
	GUID_INIT(0x300d35b7, 0xac20, 0x413e, 0x8e, 0x9c, 0x92, 0xe4, 0xda, 0xfd, 0x0a, 0xfe);
static const guid_t guid_thc_ltr =
	GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);

// TODO The HIDSPI spec says revision should be 3. Should we try both?
#define DSM_REV 2

struct hidspi_header {
	u8 type;
	u16 len;
	u8 id;
} __packed;
static_assert(sizeof(struct hidspi_header) == 4);

#define HIDSPI_INPUT_TYPE_DATA 1
#define HIDSPI_INPUT_TYPE_RESET_RESPONSE 3
#define HIDSPI_INPUT_TYPE_COMMAND_RESPONSE 4
#define HIDSPI_INPUT_TYPE_GET_FEATURE_RESPONSE 5
#define HIDSPI_INPUT_TYPE_DEVICE_DESCRIPTOR 7
#define HIDSPI_INPUT_TYPE_REPORT_DESCRIPTOR 8
#define HIDSPI_INPUT_TYPE_SET_FEATURE_RESPONSE 9
#define HIDSPI_INPUT_TYPE_OUTPUT_REPORT_RESPONSE 10
#define HIDSPI_INPUT_TYPE_GET_INPUT_REPORT_RESPONSE 11

#define HIDSPI_OUTPUT_TYPE_DEVICE_DESCRIPTOR_REQUEST 1
#define HIDSPI_OUTPUT_TYPE_REPORT_DESCRIPTOR_REQUEST 2
#define HIDSPI_OUTPUT_TYPE_SET_FEATURE 3
#define HIDSPI_OUTPUT_TYPE_GET_FEATURE 4
#define HIDSPI_OUTPUT_TYPE_OUTPUT_REPORT 5
#define HIDSPI_OUTPUT_TYPE_INPUT_REPORT_REQUEST 6
#define HIDSPI_OUTPUT_TYPE_COMMAND 7

// HIDSPI device descriptor (field names follow the HIDSPI spec).
struct hidspi_device_descriptor {
	u16 wDeviceDescLength;
	u16 bcdVersion;
	u16 wReportDescLength;
	u16 wMaxInputLength;
	u16 wMaxOutputLength;
	u16 wMaxFragmentLength;
	u16 wVendorID;
	u16 wProductID;
	u16 wVersionID;
	u16 wFlags;
	u32 dwReserved;
};
static_assert(sizeof(struct hidspi_device_descriptor) == 24);

/* Evaluate a _DSM function that returns an integer.
 * Returns 1 on success (value stored in *dest), 0 if the function does not
 * exist, -1 if it returned a non-integer object. */
static int read_acpi_u32(struct ithc *ithc, const guid_t *guid, u32 func, u32 *dest)
{
	acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);
	union acpi_object *o = acpi_evaluate_dsm(handle, guid, DSM_REV, func, NULL);
	if (!o)
		return 0;
	if (o->type != ACPI_TYPE_INTEGER) {
		pci_err(ithc->pci, "DSM %pUl %u returned type %i instead of integer\n",
			guid, func, o->type);
		ACPI_FREE(o);
		return -1;
	}
	pci_dbg(ithc->pci, "DSM %pUl %u = 0x%08x\n", guid, func, (u32)o->integer.value);
	*dest = (u32)o->integer.value;
	ACPI_FREE(o);
	return 1;
}

/* Evaluate a _DSM function that returns a buffer of exactly len bytes.
 * Same return convention as read_acpi_u32(); -1 also on length mismatch. */
static int read_acpi_buf(struct ithc *ithc, const guid_t *guid, u32 func, size_t len, u8 *dest)
{
	acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);
	union acpi_object *o = acpi_evaluate_dsm(handle, guid, DSM_REV, func, NULL);
	if (!o)
		return 0;
	if (o->type != ACPI_TYPE_BUFFER) {
		pci_err(ithc->pci, "DSM %pUl %u returned type %i instead of buffer\n",
			guid, func, o->type);
		ACPI_FREE(o);
		return -1;
	}
	if (o->buffer.length != len) {
		pci_err(ithc->pci, "DSM %pUl %u returned len %u instead of %zu\n",
			guid, func, o->buffer.length, len);
		ACPI_FREE(o);
		return -1;
	}
	memcpy(dest, o->buffer.pointer, len);
	pci_dbg(ithc->pci, "DSM %pUl %u = 0x%02x\n", guid, func, dest[0]);
	ACPI_FREE(o);
	return 1;
}

/* Read the full HIDSPI/QuickSPI/LTR configuration from ACPI into *cfg.
 * Each optional value gets a corresponding has_* flag; invalid LTR values
 * are replaced with safe defaults. Returns 0 on success, negative on error. */
int ithc_read_acpi_config(struct ithc *ithc, struct ithc_acpi_config *cfg)
{
	int r;
	acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);

	cfg->has_config = acpi_check_dsm(handle, &guid_hidspi, DSM_REV, BIT(0));
	if (!cfg->has_config)
		return 0;

	// HIDSPI settings

	r = read_acpi_u32(ithc, &guid_hidspi, 1, &cfg->input_report_header_address);
	if (r < 0)
		return r;
	cfg->has_input_report_header_address = r > 0;
	if (r > 0 && cfg->input_report_header_address > 0xffffff) {
		pci_err(ithc->pci, "Invalid input report header address 0x%x\n",
			cfg->input_report_header_address);
		return -1;
	}

	r = read_acpi_u32(ithc, &guid_hidspi, 2, &cfg->input_report_body_address);
	if (r < 0)
		return r;
	cfg->has_input_report_body_address = r > 0;
	if (r > 0 && cfg->input_report_body_address > 0xffffff) {
		pci_err(ithc->pci, "Invalid input report body address 0x%x\n",
			cfg->input_report_body_address);
		return -1;
	}

	r = read_acpi_u32(ithc, &guid_hidspi, 3, &cfg->output_report_body_address);
	if (r < 0)
		return r;
	cfg->has_output_report_body_address = r > 0;
	if (r > 0 && cfg->output_report_body_address > 0xffffff) {
		pci_err(ithc->pci, "Invalid output report body address 0x%x\n",
			cfg->output_report_body_address);
		return -1;
	}

	r = read_acpi_buf(ithc, &guid_hidspi, 4, sizeof(cfg->read_opcode), &cfg->read_opcode);
	if (r < 0)
		return r;
	cfg->has_read_opcode = r > 0;

	r = read_acpi_buf(ithc, &guid_hidspi, 5, sizeof(cfg->write_opcode), &cfg->write_opcode);
	if (r < 0)
		return r;
	cfg->has_write_opcode = r > 0;

	u32 flags;
	r = read_acpi_u32(ithc, &guid_hidspi, 6, &flags);
	if (r < 0)
		return r;
	cfg->has_read_mode = cfg->has_write_mode = r > 0;
	if (r > 0) {
		cfg->read_mode = (flags >> 14) & 3;
		cfg->write_mode = flags & BIT(13) ? cfg->read_mode : SPI_MODE_SINGLE;
	}

	// Quick SPI settings

	r = read_acpi_u32(ithc, &guid_thc_quickspi, 1, &cfg->spi_frequency);
	if (r < 0)
		return r;
	cfg->has_spi_frequency = r > 0;

	r = read_acpi_u32(ithc, &guid_thc_quickspi, 2, &cfg->limit_packet_size);
	if (r < 0)
		return r;
	cfg->has_limit_packet_size = r > 0;

	r = read_acpi_u32(ithc, &guid_thc_quickspi, 3, &cfg->tx_delay);
	if (r < 0)
		return r;
	cfg->has_tx_delay = r > 0;
	if (r > 0)
		cfg->tx_delay &= 0xffff;

	// LTR settings

	r = read_acpi_u32(ithc, &guid_thc_ltr, 1, &cfg->active_ltr);
	if (r < 0)
		return r;
	cfg->has_active_ltr = r > 0;
	if (r > 0 && (!cfg->active_ltr || cfg->active_ltr > 0x3ff)) {
		if (cfg->active_ltr != 0xffffffff)
			pci_warn(ithc->pci, "Ignoring invalid active LTR value 0x%x\n",
				cfg->active_ltr);
		cfg->active_ltr = 500;
	}

	r = read_acpi_u32(ithc, &guid_thc_ltr, 2, &cfg->idle_ltr);
	if (r < 0)
		return r;
	cfg->has_idle_ltr = r > 0;
	if (r > 0 && (!cfg->idle_ltr || cfg->idle_ltr > 0x3ff)) {
		if (cfg->idle_ltr != 0xffffffff)
			pci_warn(ithc->pci, "Ignoring invalid idle LTR value 0x%x\n",
				cfg->idle_ltr);
		cfg->idle_ltr = 500;
		// Keep idle LTR at least as large as the active LTR.
		if (cfg->has_active_ltr && cfg->active_ltr > cfg->idle_ltr)
			cfg->idle_ltr = cfg->active_ltr;
	}

	return 0;
}

/* Log the ACPI config in a single line; missing optional values print as "-". */
void ithc_print_acpi_config(struct ithc *ithc, const struct ithc_acpi_config *cfg)
{
	if (!cfg->has_config) {
		pci_info(ithc->pci, "No ACPI config");
		return;
	}

	char input_report_header_address[16] = "-";
	if (cfg->has_input_report_header_address)
		sprintf(input_report_header_address, "0x%x", cfg->input_report_header_address);
	char input_report_body_address[16] = "-";
	if (cfg->has_input_report_body_address)
sprintf(input_report_body_address, "0x%x", cfg->input_report_body_address); 223 | char output_report_body_address[16] = "-"; 224 | if (cfg->has_output_report_body_address) 225 | sprintf(output_report_body_address, "0x%x", cfg->output_report_body_address); 226 | char read_opcode[16] = "-"; 227 | if (cfg->has_read_opcode) 228 | sprintf(read_opcode, "0x%02x", cfg->read_opcode); 229 | char write_opcode[16] = "-"; 230 | if (cfg->has_write_opcode) 231 | sprintf(write_opcode, "0x%02x", cfg->write_opcode); 232 | char read_mode[16] = "-"; 233 | if (cfg->has_read_mode) 234 | sprintf(read_mode, "%i", cfg->read_mode); 235 | char write_mode[16] = "-"; 236 | if (cfg->has_write_mode) 237 | sprintf(write_mode, "%i", cfg->write_mode); 238 | char spi_frequency[16] = "-"; 239 | if (cfg->has_spi_frequency) 240 | sprintf(spi_frequency, "%u", cfg->spi_frequency); 241 | char limit_packet_size[16] = "-"; 242 | if (cfg->has_limit_packet_size) 243 | sprintf(limit_packet_size, "%u", cfg->limit_packet_size); 244 | char tx_delay[16] = "-"; 245 | if (cfg->has_tx_delay) 246 | sprintf(tx_delay, "%u", cfg->tx_delay); 247 | char active_ltr[16] = "-"; 248 | if (cfg->has_active_ltr) 249 | sprintf(active_ltr, "%u", cfg->active_ltr); 250 | char idle_ltr[16] = "-"; 251 | if (cfg->has_idle_ltr) 252 | sprintf(idle_ltr, "%u", cfg->idle_ltr); 253 | 254 | pci_info(ithc->pci, "ACPI config: InputHeaderAddr=%s InputBodyAddr=%s OutputBodyAddr=%s ReadOpcode=%s WriteOpcode=%s ReadMode=%s WriteMode=%s Frequency=%s LimitPacketSize=%s TxDelay=%s ActiveLTR=%s IdleLTR=%s\n", 255 | input_report_header_address, input_report_body_address, output_report_body_address, 256 | read_opcode, write_opcode, read_mode, write_mode, 257 | spi_frequency, limit_packet_size, tx_delay, active_ltr, idle_ltr); 258 | } 259 | 260 | static void set_opcode(struct ithc *ithc, size_t i, u8 opcode) 261 | { 262 | writeb(opcode, &ithc->regs->opcode[i].header); 263 | writeb(opcode, &ithc->regs->opcode[i].single); 264 | writeb(opcode, 
&ithc->regs->opcode[i].dual); 265 | writeb(opcode, &ithc->regs->opcode[i].quad); 266 | } 267 | 268 | static int ithc_quickspi_init_regs(struct ithc *ithc, const struct ithc_acpi_config *cfg) 269 | { 270 | pci_dbg(ithc->pci, "initializing QuickSPI registers\n"); 271 | 272 | // SPI frequency and mode 273 | if (!cfg->has_spi_frequency || !cfg->spi_frequency) { 274 | pci_err(ithc->pci, "Missing SPI frequency in configuration\n"); 275 | return -EINVAL; 276 | } 277 | unsigned int clkdiv = DIV_ROUND_UP(SPI_CLK_FREQ_BASE, cfg->spi_frequency); 278 | bool clkdiv8 = clkdiv > 7; 279 | if (clkdiv8) 280 | clkdiv = min(7u, DIV_ROUND_UP(clkdiv, 8u)); 281 | if (!clkdiv) 282 | clkdiv = 1; 283 | CHECK_RET(ithc_set_spi_config, ithc, clkdiv, clkdiv8, 284 | cfg->has_read_mode ? cfg->read_mode : SPI_MODE_SINGLE, 285 | cfg->has_write_mode ? cfg->write_mode : SPI_MODE_SINGLE); 286 | 287 | // SPI addresses and opcodes 288 | if (cfg->has_input_report_header_address) 289 | writel(cfg->input_report_header_address, &ithc->regs->spi_header_addr); 290 | if (cfg->has_input_report_body_address) { 291 | writel(cfg->input_report_body_address, &ithc->regs->dma_rx[0].spi_addr); 292 | writel(cfg->input_report_body_address, &ithc->regs->dma_rx[1].spi_addr); 293 | } 294 | if (cfg->has_output_report_body_address) 295 | writel(cfg->output_report_body_address, &ithc->regs->dma_tx.spi_addr); 296 | 297 | switch (ithc->pci->device) { 298 | // LKF/TGL don't support QuickSPI. 299 | // For ADL, opcode layout is RX/TX/unused. 
	case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2:
	case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2:
	case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1:
	case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2:
		// ADL: slot 0 = read opcode, slot 1 = write opcode.
		if (cfg->has_read_opcode) {
			set_opcode(ithc, 0, cfg->read_opcode);
		}
		if (cfg->has_write_opcode) {
			set_opcode(ithc, 1, cfg->write_opcode);
		}
		break;
	// For MTL, opcode layout was changed to RX/RX/TX.
	// (RPL layout is unknown.)
	default:
		// MTL and assumed-newer: slots 0 and 1 = read opcode, slot 2 = write.
		if (cfg->has_read_opcode) {
			set_opcode(ithc, 0, cfg->read_opcode);
			set_opcode(ithc, 1, cfg->read_opcode);
		}
		if (cfg->has_write_opcode) {
			set_opcode(ithc, 2, cfg->write_opcode);
		}
		break;
	}

	ithc_log_regs(ithc);

	// The rest...
	// NOTE(review): the following values were presumably captured from the
	// Windows driver; the individual bit meanings are unknown.
	bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);

	bitsl(&ithc->regs->quickspi_config1,
		QUICKSPI_CONFIG1_UNKNOWN_0(0xff) | QUICKSPI_CONFIG1_UNKNOWN_5(0xff) |
		QUICKSPI_CONFIG1_UNKNOWN_10(0xff) | QUICKSPI_CONFIG1_UNKNOWN_16(0xffff),
		QUICKSPI_CONFIG1_UNKNOWN_0(4) | QUICKSPI_CONFIG1_UNKNOWN_5(4) |
		QUICKSPI_CONFIG1_UNKNOWN_10(22) | QUICKSPI_CONFIG1_UNKNOWN_16(2));

	bitsl(&ithc->regs->quickspi_config2,
		QUICKSPI_CONFIG2_UNKNOWN_0(0xff) | QUICKSPI_CONFIG2_UNKNOWN_5(0xff) |
		QUICKSPI_CONFIG2_UNKNOWN_12(0xff),
		QUICKSPI_CONFIG2_UNKNOWN_0(8) | QUICKSPI_CONFIG2_UNKNOWN_5(14) |
		QUICKSPI_CONFIG2_UNKNOWN_12(2));

	// Packet size is in 4-byte units; LimitPacketSize==1 restricts transfers
	// to 16 bytes, otherwise use 512 bytes.
	u32 pktsize = cfg->has_limit_packet_size && cfg->limit_packet_size == 1 ? 4 : 0x80;
	bitsl(&ithc->regs->spi_config,
		SPI_CONFIG_READ_PACKET_SIZE(0xfff) | SPI_CONFIG_WRITE_PACKET_SIZE(0xfff),
		SPI_CONFIG_READ_PACKET_SIZE(pktsize) | SPI_CONFIG_WRITE_PACKET_SIZE(pktsize));

	bitsl_set(&ithc->regs->quickspi_config2,
		QUICKSPI_CONFIG2_UNKNOWN_16 | QUICKSPI_CONFIG2_UNKNOWN_17);
	// Keep address auto-increment and write streaming disabled-bits cleared.
	bitsl(&ithc->regs->quickspi_config2,
		QUICKSPI_CONFIG2_DISABLE_READ_ADDRESS_INCREMENT |
		QUICKSPI_CONFIG2_DISABLE_WRITE_ADDRESS_INCREMENT |
		QUICKSPI_CONFIG2_ENABLE_WRITE_STREAMING_MODE, 0);

	return 0;
}

// Wait for the device to signal an available input report, validate the
// HIDSPI input report frame header, and return the report length in bytes
// (header says length in 4-byte words), or a negative errno.
static int wait_for_report(struct ithc *ithc)
{
	CHECK_RET(waitl, ithc, &ithc->regs->dma_rx[0].status,
		DMA_RX_STATUS_READY, DMA_RX_STATUS_READY);
	// Acknowledge the ready flag (write-to-clear).
	writel(DMA_RX_STATUS_READY, &ithc->regs->dma_rx[0].status);

	u32 h = readl(&ithc->regs->input_header);
	ithc_log_regs(ithc);
	if (INPUT_HEADER_SYNC(h) != INPUT_HEADER_SYNC_VALUE
			|| INPUT_HEADER_VERSION(h) != INPUT_HEADER_VERSION_VALUE) {
		pci_err(ithc->pci, "invalid input report frame header 0x%08x\n", h);
		return -ENODATA;
	}
	return INPUT_HEADER_REPORT_LENGTH(h) * 4;
}

// Run the HIDSPI initialization handshake (reset + device descriptor fetch),
// following the numbered steps of the HIDSPI specification.
static int ithc_quickspi_init_hidspi(struct ithc *ithc, const struct ithc_acpi_config *cfg)
{
	pci_dbg(ithc->pci, "initializing HIDSPI\n");

	// HIDSPI initialization sequence:
	// "1. The host shall invoke the ACPI reset method to clear the device state."
	acpi_status s = acpi_evaluate_object(ACPI_HANDLE(&ithc->pci->dev), "_RST", NULL, NULL);
	if (ACPI_FAILURE(s)) {
		pci_err(ithc->pci, "ACPI reset failed\n");
		return -EIO;
	}

	// Un-quiesce the controller so it can deliver the reset response.
	bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);

	// "2. Within 1 second, the device shall signal an interrupt and make available to the host
	// an input report containing a device reset response."
390 | int size = wait_for_report(ithc); 391 | if (size < 0) 392 | return size; 393 | if (size < sizeof(struct hidspi_header)) { 394 | pci_err(ithc->pci, "SPI data size too small for reset response (%u)\n", size); 395 | return -EMSGSIZE; 396 | } 397 | 398 | // "3. The host shall read the reset response from the device at the Input Report addresses 399 | // specified in ACPI." 400 | u32 in_addr = cfg->has_input_report_body_address ? cfg->input_report_body_address : 0x1000; 401 | struct { 402 | struct hidspi_header header; 403 | union { 404 | struct hidspi_device_descriptor device_desc; 405 | u32 data[16]; 406 | }; 407 | } resp = { 0 }; 408 | if (size > sizeof(resp)) { 409 | pci_err(ithc->pci, "SPI data size for reset response too big (%u)\n", size); 410 | return -EMSGSIZE; 411 | } 412 | CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, in_addr, size, &resp); 413 | if (resp.header.type != HIDSPI_INPUT_TYPE_RESET_RESPONSE) { 414 | pci_err(ithc->pci, "received type %i instead of reset response\n", resp.header.type); 415 | return -ENOMSG; 416 | } 417 | 418 | // "4. The host shall then write an Output Report to the device at the Output Report Address 419 | // specified in ACPI, requesting the Device Descriptor from the device." 420 | u32 out_addr = cfg->has_output_report_body_address ? cfg->output_report_body_address : 0x1000; 421 | struct hidspi_header req = { .type = HIDSPI_OUTPUT_TYPE_DEVICE_DESCRIPTOR_REQUEST }; 422 | CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_WRITE, out_addr, sizeof(req), &req); 423 | 424 | // "5. Within 1 second, the device shall signal an interrupt and make available to the host 425 | // an input report containing the Device Descriptor." 426 | size = wait_for_report(ithc); 427 | if (size < 0) 428 | return size; 429 | if (size < sizeof(resp.header) + sizeof(resp.device_desc)) { 430 | pci_err(ithc->pci, "SPI data size too small for device descriptor (%u)\n", size); 431 | return -EMSGSIZE; 432 | } 433 | 434 | // "6. 
The host shall read the Device Descriptor from the Input Report addresses specified 435 | // in ACPI." 436 | if (size > sizeof(resp)) { 437 | pci_err(ithc->pci, "SPI data size for device descriptor too big (%u)\n", size); 438 | return -EMSGSIZE; 439 | } 440 | memset(&resp, 0, sizeof(resp)); 441 | CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, in_addr, size, &resp); 442 | if (resp.header.type != HIDSPI_INPUT_TYPE_DEVICE_DESCRIPTOR) { 443 | pci_err(ithc->pci, "received type %i instead of device descriptor\n", 444 | resp.header.type); 445 | return -ENOMSG; 446 | } 447 | struct hidspi_device_descriptor *d = &resp.device_desc; 448 | if (resp.header.len < sizeof(*d)) { 449 | pci_err(ithc->pci, "response too small for device descriptor (%u)\n", 450 | resp.header.len); 451 | return -EMSGSIZE; 452 | } 453 | if (d->wDeviceDescLength != sizeof(*d)) { 454 | pci_err(ithc->pci, "invalid device descriptor length (%u)\n", 455 | d->wDeviceDescLength); 456 | return -EMSGSIZE; 457 | } 458 | 459 | pci_info(ithc->pci, "Device descriptor: bcdVersion=0x%04x wReportDescLength=%u wMaxInputLength=%u wMaxOutputLength=%u wMaxFragmentLength=%u wVendorID=0x%04x wProductID=0x%04x wVersionID=0x%04x wFlags=0x%04x dwReserved=0x%08x\n", 460 | d->bcdVersion, d->wReportDescLength, 461 | d->wMaxInputLength, d->wMaxOutputLength, d->wMaxFragmentLength, 462 | d->wVendorID, d->wProductID, d->wVersionID, 463 | d->wFlags, d->dwReserved); 464 | 465 | ithc->vendor_id = d->wVendorID; 466 | ithc->product_id = d->wProductID; 467 | ithc->product_rev = d->wVersionID; 468 | ithc->max_rx_size = max_t(u32, d->wMaxInputLength, 469 | d->wReportDescLength + sizeof(struct hidspi_header)); 470 | ithc->max_tx_size = d->wMaxOutputLength; 471 | ithc->have_config = true; 472 | 473 | // "7. The device and host shall then enter their "Ready" states - where the device may 474 | // begin sending Input Reports, and the device shall be prepared for Output Reports from 475 | // the host." 
476 | 477 | return 0; 478 | } 479 | 480 | int ithc_quickspi_init(struct ithc *ithc, const struct ithc_acpi_config *cfg) 481 | { 482 | bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE); 483 | CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED); 484 | 485 | ithc_log_regs(ithc); 486 | CHECK_RET(ithc_quickspi_init_regs, ithc, cfg); 487 | ithc_log_regs(ithc); 488 | CHECK_RET(ithc_quickspi_init_hidspi, ithc, cfg); 489 | ithc_log_regs(ithc); 490 | 491 | // This value is set to 2 in ithc_quickspi_init_regs(). It needs to be set to 1 here, 492 | // otherwise DMA will not work. Maybe selects between DMA and PIO mode? 493 | bitsl(&ithc->regs->quickspi_config1, 494 | QUICKSPI_CONFIG1_UNKNOWN_16(0xffff), QUICKSPI_CONFIG1_UNKNOWN_16(1)); 495 | 496 | // TODO Do we need to set any of the following bits here? 497 | //bitsb_set(&ithc->regs->dma_rx[1].control2, DMA_RX_CONTROL2_UNKNOWN_4); 498 | //bitsb_set(&ithc->regs->dma_rx[0].control2, DMA_RX_CONTROL2_UNKNOWN_5); 499 | //bitsb_set(&ithc->regs->dma_rx[1].control2, DMA_RX_CONTROL2_UNKNOWN_5); 500 | //bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_3); 501 | //bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31); 502 | 503 | ithc_log_regs(ithc); 504 | 505 | return 0; 506 | } 507 | 508 | void ithc_quickspi_exit(struct ithc *ithc) 509 | { 510 | // TODO Should we send HIDSPI 'power off' command? 511 | //struct hidspi_header h = { .type = HIDSPI_OUTPUT_TYPE_COMMAND, .id = 3, }; 512 | //struct ithc_data d = { .type = ITHC_DATA_RAW, .data = &h, .size = sizeof(h) }; 513 | //CHECK(ithc_dma_tx, ithc, &d); // or ithc_spi_command() 514 | } 515 | 516 | int ithc_quickspi_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest) 517 | { 518 | const struct hidspi_header *hdr = src; 519 | 520 | if (len < sizeof(*hdr)) 521 | return -ENODATA; 522 | // TODO Do we need to handle HIDSPI packet fragmentation? 
	// The payload may be padded to a 4-byte boundary, but no further.
	if (len < sizeof(*hdr) + hdr->len)
		return -EMSGSIZE;
	if (len > round_up(sizeof(*hdr) + hdr->len, 4))
		return -EMSGSIZE;

	switch (hdr->type) {
	case HIDSPI_INPUT_TYPE_RESET_RESPONSE:
		// TODO "When the device detects an error condition, it may interrupt and make
		// available to the host an Input Report containing an unsolicited Reset Response.
		// After receiving an unsolicited Reset Response, the host shall initiate the
		// request procedure from step (4) in the [HIDSPI initialization] process."
		dest->type = ITHC_DATA_ERROR;
		return 0;
	case HIDSPI_INPUT_TYPE_REPORT_DESCRIPTOR:
		dest->type = ITHC_DATA_REPORT_DESCRIPTOR;
		dest->data = hdr + 1;
		dest->size = hdr->len;
		return 0;
	case HIDSPI_INPUT_TYPE_DATA:
	case HIDSPI_INPUT_TYPE_GET_INPUT_REPORT_RESPONSE:
		// Report ID byte is prepended to the payload by pointing at hdr->id,
		// which immediately precedes the data in the frame layout.
		dest->type = ITHC_DATA_INPUT_REPORT;
		dest->data = &hdr->id;
		dest->size = hdr->len + 1;
		return 0;
	case HIDSPI_INPUT_TYPE_GET_FEATURE_RESPONSE:
		dest->type = ITHC_DATA_GET_FEATURE;
		dest->data = &hdr->id;
		dest->size = hdr->len + 1;
		return 0;
	case HIDSPI_INPUT_TYPE_SET_FEATURE_RESPONSE:
	case HIDSPI_INPUT_TYPE_OUTPUT_REPORT_RESPONSE:
		// Acknowledgements carry no payload of interest.
		dest->type = ITHC_DATA_IGNORE;
		return 0;
	default:
		return -EINVAL;
	}
}

// Encode an outgoing request into a HIDSPI frame in dest (header + id +
// 4-byte-padded payload). Returns the number of bytes written, or a negative
// errno if the type is unsupported or dest is too small.
ssize_t ithc_quickspi_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
	size_t maxlen)
{
	struct hidspi_header *hdr = dest;

	size_t src_size = src->size;
	const u8 *src_data = src->data;
	u8 type;

	switch (src->type) {
	case ITHC_DATA_SET_FEATURE:
		type = HIDSPI_OUTPUT_TYPE_SET_FEATURE;
		break;
	case ITHC_DATA_GET_FEATURE:
		type = HIDSPI_OUTPUT_TYPE_GET_FEATURE;
		break;
	case ITHC_DATA_OUTPUT_REPORT:
		type = HIDSPI_OUTPUT_TYPE_OUTPUT_REPORT;
		break;
	case ITHC_DATA_REPORT_DESCRIPTOR:
		// A descriptor request carries no payload.
		type = HIDSPI_OUTPUT_TYPE_REPORT_DESCRIPTOR_REQUEST;
		src_size = 0;
		break;
	default:
		return -EINVAL;
	}

	// First byte of the source data, if any, is the HID report ID; it goes
	// into the header rather than the payload.
	u8 id = 0;
	if (src_size) {
		id = *src_data++;
		src_size--;
	}

	// Data must be padded to next 4-byte boundary.
	size_t padded = round_up(src_size, 4);
	if (sizeof(*hdr) + padded > maxlen)
		return -EOVERFLOW;

	// Fill the TX buffer with header and data.
	hdr->type = type;
	hdr->len = (u16)src_size;
	hdr->id = id;
	memcpy_and_pad(hdr + 1, padded, src_data, src_size, 0);

	return sizeof(*hdr) + padded;
}

--------------------------------------------------------------------------------
/src/ithc-quickspi.h:
--------------------------------------------------------------------------------
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */

// Configuration values read from the HIDSPI / THC QuickSPI / THC LTR ACPI
// DSMs. Each value is only valid when its has_* flag is set.
struct ithc_acpi_config {
	bool has_config: 1;
	bool has_input_report_header_address: 1;
	bool has_input_report_body_address: 1;
	bool has_output_report_body_address: 1;
	bool has_read_opcode: 1;
	bool has_write_opcode: 1;
	bool has_read_mode: 1;
	bool has_write_mode: 1;
	bool has_spi_frequency: 1;
	bool has_limit_packet_size: 1;
	bool has_tx_delay: 1;
	bool has_active_ltr: 1;
	bool has_idle_ltr: 1;
	u32 input_report_header_address;
	u32 input_report_body_address;
	u32 output_report_body_address;
	u8 read_opcode;
	u8 write_opcode;
	u8 read_mode;   // SPI_MODE_SINGLE/DUAL/QUAD
	u8 write_mode;  // SPI_MODE_SINGLE/DUAL/QUAD
	u32 spi_frequency;
	u32 limit_packet_size;
	u32 tx_delay; // us/10 // TODO use?
27 | u32 active_ltr; // ns/1024 28 | u32 idle_ltr; // ns/1024 29 | }; 30 | 31 | int ithc_read_acpi_config(struct ithc *ithc, struct ithc_acpi_config *cfg); 32 | void ithc_print_acpi_config(struct ithc *ithc, const struct ithc_acpi_config *cfg); 33 | 34 | int ithc_quickspi_init(struct ithc *ithc, const struct ithc_acpi_config *cfg); 35 | void ithc_quickspi_exit(struct ithc *ithc); 36 | int ithc_quickspi_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest); 37 | ssize_t ithc_quickspi_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest, 38 | size_t maxlen); 39 | 40 | -------------------------------------------------------------------------------- /src/ithc-regs.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 | 3 | #include "ithc.h" 4 | 5 | #define reg_num(r) (0x1fff & (u16)(__force u64)(r)) 6 | 7 | void bitsl(__iomem u32 *reg, u32 mask, u32 val) 8 | { 9 | if (val & ~mask) 10 | pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", 11 | reg_num(reg), val, mask); 12 | writel((readl(reg) & ~mask) | (val & mask), reg); 13 | } 14 | 15 | void bitsb(__iomem u8 *reg, u8 mask, u8 val) 16 | { 17 | if (val & ~mask) 18 | pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", 19 | reg_num(reg), val, mask); 20 | writeb((readb(reg) & ~mask) | (val & mask), reg); 21 | } 22 | 23 | int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) 24 | { 25 | ithc_log_regs(ithc); 26 | pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", 27 | reg_num(reg), mask, val); 28 | u32 x; 29 | if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { 30 | ithc_log_regs(ithc); 31 | pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", 32 | reg_num(reg), mask, val); 33 | return -ETIMEDOUT; 34 | } 35 | ithc_log_regs(ithc); 36 | pci_dbg(ithc->pci, "done waiting\n"); 37 | return 0; 38 | } 39 | 
// Poll an 8-bit register until (value & mask) == val, or time out after 1 s.
// Returns 0 on success or -ETIMEDOUT.
int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
{
	ithc_log_regs(ithc);
	pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
		reg_num(reg), mask, val);
	u8 x;
	if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
		ithc_log_regs(ithc);
		pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
			reg_num(reg), mask, val);
		return -ETIMEDOUT;
	}
	ithc_log_regs(ithc);
	pci_dbg(ithc->pci, "done waiting\n");
	return 0;
}

// Encode an LTR latency in nanoseconds into the hardware's 10-bit value +
// 3-bit scale representation (each scale step multiplies the value by 32).
// Clamps to the maximum encodable latency and writes the actually-encoded
// number of nanoseconds back into *ns.
static void calc_ltr(u64 *ns, unsigned int *val, unsigned int *scale)
{
	unsigned int s = 0;
	u64 v = *ns;
	while (v > 0x3ff) {
		s++;
		v >>= 5;
	}
	if (s > 5) {
		s = 5;
		v = 0x3ff;
	}
	*val = v;
	*scale = s;
	*ns = v << (5 * s);
}

// Program and apply the active and idle LTR (latency tolerance) values.
void ithc_set_ltr_config(struct ithc *ithc, u64 active_ltr_ns, u64 idle_ltr_ns)
{
	unsigned int active_val, active_scale, idle_val, idle_scale;
	calc_ltr(&active_ltr_ns, &active_val, &active_scale);
	calc_ltr(&idle_ltr_ns, &idle_val, &idle_scale);
	pci_dbg(ithc->pci, "setting active LTR value to %llu ns, idle LTR value to %llu ns\n",
		active_ltr_ns, idle_ltr_ns);
	writel(LTR_CONFIG_ENABLE_ACTIVE | LTR_CONFIG_ENABLE_IDLE | LTR_CONFIG_APPLY |
		LTR_CONFIG_ACTIVE_LTR_SCALE(active_scale) | LTR_CONFIG_ACTIVE_LTR_VALUE(active_val) |
		LTR_CONFIG_IDLE_LTR_SCALE(idle_scale) | LTR_CONFIG_IDLE_LTR_VALUE(idle_val),
		&ithc->regs->ltr_config);
}

// Switch LTR to the idle value if it is currently in the active state.
// (The hardware toggles between the two programmed values.)
void ithc_set_ltr_idle(struct ithc *ithc)
{
	u32 ltr = readl(&ithc->regs->ltr_config);
	switch (ltr & (LTR_CONFIG_STATUS_ACTIVE | LTR_CONFIG_STATUS_IDLE)) {
	case LTR_CONFIG_STATUS_IDLE:
		// Already idle, nothing to do.
		break;
	case LTR_CONFIG_STATUS_ACTIVE:
		writel(ltr | LTR_CONFIG_TOGGLE | LTR_CONFIG_APPLY, &ithc->regs->ltr_config);
		break;
	default:
		pci_err(ithc->pci, "invalid LTR state 0x%08x\n", ltr);
		break;
	}
}

// Program the SPI clock divider (1..7, optionally x8 prescaled) and the
// read/write transfer modes. Returns -EINVAL for out-of-range arguments.
int ithc_set_spi_config(struct ithc *ithc, u8 clkdiv, bool clkdiv8, u8 read_mode, u8 write_mode)
{
	if (clkdiv == 0 || clkdiv > 7 || read_mode > SPI_MODE_QUAD || write_mode > SPI_MODE_QUAD)
		return -EINVAL;
	static const char * const modes[] = { "single", "dual", "quad" };
	pci_dbg(ithc->pci, "setting SPI frequency to %i Hz, %s read, %s write\n",
		SPI_CLK_FREQ_BASE / (clkdiv * (clkdiv8 ? 8 : 1)),
		modes[read_mode], modes[write_mode]);
	bitsl(&ithc->regs->spi_config,
		SPI_CONFIG_READ_MODE(0xff) | SPI_CONFIG_READ_CLKDIV(0xff) |
		SPI_CONFIG_WRITE_MODE(0xff) | SPI_CONFIG_WRITE_CLKDIV(0xff) |
		SPI_CONFIG_CLKDIV_8,
		SPI_CONFIG_READ_MODE(read_mode) | SPI_CONFIG_READ_CLKDIV(clkdiv) |
		SPI_CONFIG_WRITE_MODE(write_mode) | SPI_CONFIG_WRITE_CLKDIV(clkdiv) |
		(clkdiv8 ? SPI_CONFIG_CLKDIV_8 : 0));
	return 0;
}

// Execute a synchronous PIO SPI read or write through the spi_cmd register
// block. For reads the data is copied back into *data; for writes *data is
// also read back (harmless, same buffer). Max size is the 64-byte data window.
int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
{
	pci_dbg(ithc->pci, "SPI command %u, size %u, offset 0x%x\n", command, size, offset);
	if (size > sizeof(ithc->regs->spi_cmd.data))
		return -EINVAL;

	// Wait if the device is still busy.
	CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
	// Clear result flags.
	writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);

	// Init SPI command data.
	writeb(command, &ithc->regs->spi_cmd.code);
	writew(size, &ithc->regs->spi_cmd.size);
	writel(offset, &ithc->regs->spi_cmd.offset);
	// NOTE(review): data is copied in 4-byte words, so up to 3 bytes past
	// `size` may be read from the caller's buffer — callers appear to pass
	// 4-byte-aligned structs.
	u32 *p = data, n = (size + 3) / 4;
	for (u32 i = 0; i < n; i++)
		writel(p[i], &ithc->regs->spi_cmd.data[i]);

	// Start transmission.
	bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
	CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);

	// Read response.
144 | if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) 145 | return -EIO; 146 | if (readw(&ithc->regs->spi_cmd.size) != size) 147 | return -EMSGSIZE; 148 | for (u32 i = 0; i < n; i++) 149 | p[i] = readl(&ithc->regs->spi_cmd.data[i]); 150 | 151 | writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); 152 | return 0; 153 | } 154 | 155 | -------------------------------------------------------------------------------- /src/ithc-regs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 | 3 | #define LTR_CONFIG_ENABLE_ACTIVE BIT(0) 4 | #define LTR_CONFIG_TOGGLE BIT(1) 5 | #define LTR_CONFIG_ENABLE_IDLE BIT(2) 6 | #define LTR_CONFIG_APPLY BIT(3) 7 | #define LTR_CONFIG_IDLE_LTR_SCALE(x) (((x) & 7) << 4) 8 | #define LTR_CONFIG_IDLE_LTR_VALUE(x) (((x) & 0x3ff) << 7) 9 | #define LTR_CONFIG_ACTIVE_LTR_SCALE(x) (((x) & 7) << 17) 10 | #define LTR_CONFIG_ACTIVE_LTR_VALUE(x) (((x) & 0x3ff) << 20) 11 | #define LTR_CONFIG_STATUS_ACTIVE BIT(30) 12 | #define LTR_CONFIG_STATUS_IDLE BIT(31) 13 | 14 | #define CONTROL_QUIESCE BIT(1) 15 | #define CONTROL_IS_QUIESCED BIT(2) 16 | #define CONTROL_NRESET BIT(3) 17 | #define CONTROL_UNKNOWN_24(x) (((x) & 3) << 24) 18 | #define CONTROL_READY BIT(29) 19 | 20 | #define SPI_CONFIG_READ_MODE(x) (((x) & 3) << 2) 21 | #define SPI_CONFIG_READ_CLKDIV(x) (((x) & 7) << 4) 22 | #define SPI_CONFIG_READ_PACKET_SIZE(x) (((x) & 0x1ff) << 7) 23 | #define SPI_CONFIG_WRITE_MODE(x) (((x) & 3) << 18) 24 | #define SPI_CONFIG_WRITE_CLKDIV(x) (((x) & 7) << 20) 25 | #define SPI_CONFIG_CLKDIV_8 BIT(23) // additionally divide clk by 8, for both read and write 26 | #define SPI_CONFIG_WRITE_PACKET_SIZE(x) (((x) & 0xff) << 24) 27 | 28 | #define SPI_CLK_FREQ_BASE 125000000 29 | #define SPI_MODE_SINGLE 0 30 | #define SPI_MODE_DUAL 1 31 | #define SPI_MODE_QUAD 2 32 | 33 | #define 
ERROR_CONTROL_UNKNOWN_0 BIT(0) 34 | #define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs 35 | #define ERROR_CONTROL_UNKNOWN_2 BIT(2) 36 | #define ERROR_CONTROL_UNKNOWN_3 BIT(3) 37 | #define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9) 38 | #define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10) 39 | #define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12) 40 | #define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13) 41 | #define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq? 42 | #define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs 43 | 44 | #define ERROR_STATUS_DMA BIT(28) 45 | #define ERROR_STATUS_SPI BIT(30) 46 | 47 | #define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9) 48 | #define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10) 49 | #define ERROR_FLAG_DMA_RX_TIMEOUT BIT(12) // set when we receive a truncated DMA message 50 | #define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13) 51 | #define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16) 52 | #define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17) 53 | #define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18) 54 | #define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19) 55 | #define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20) 56 | #define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21) 57 | 58 | #define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete 59 | #define SPI_CMD_CONTROL_IRQ BIT(1) 60 | 61 | #define SPI_CMD_CODE_READ 4 62 | #define SPI_CMD_CODE_WRITE 6 63 | 64 | #define SPI_CMD_STATUS_DONE BIT(0) 65 | #define SPI_CMD_STATUS_ERROR BIT(1) 66 | #define SPI_CMD_STATUS_BUSY BIT(3) 67 | 68 | #define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete 69 | #define DMA_TX_CONTROL_IRQ BIT(3) 70 | 71 | #define DMA_TX_STATUS_DONE BIT(0) 72 | #define DMA_TX_STATUS_ERROR BIT(1) 73 | #define DMA_TX_STATUS_UNKNOWN_2 BIT(2) 74 | #define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy? 
75 | 76 | #define INPUT_HEADER_VERSION(x) ((x) & 0xf) 77 | #define INPUT_HEADER_REPORT_LENGTH(x) (((x) >> 8) & 0x3fff) 78 | #define INPUT_HEADER_SYNC(x) ((x) >> 24) 79 | #define INPUT_HEADER_VERSION_VALUE 3 80 | #define INPUT_HEADER_SYNC_VALUE 0x5a 81 | 82 | #define QUICKSPI_CONFIG1_UNKNOWN_0(x) (((x) & 0x1f) << 0) 83 | #define QUICKSPI_CONFIG1_UNKNOWN_5(x) (((x) & 0x1f) << 5) 84 | #define QUICKSPI_CONFIG1_UNKNOWN_10(x) (((x) & 0x1f) << 10) 85 | #define QUICKSPI_CONFIG1_UNKNOWN_16(x) (((x) & 0xffff) << 16) 86 | 87 | #define QUICKSPI_CONFIG2_UNKNOWN_0(x) (((x) & 0x1f) << 0) 88 | #define QUICKSPI_CONFIG2_UNKNOWN_5(x) (((x) & 0x1f) << 5) 89 | #define QUICKSPI_CONFIG2_UNKNOWN_12(x) (((x) & 0xf) << 12) 90 | #define QUICKSPI_CONFIG2_UNKNOWN_16 BIT(16) 91 | #define QUICKSPI_CONFIG2_UNKNOWN_17 BIT(17) 92 | #define QUICKSPI_CONFIG2_DISABLE_READ_ADDRESS_INCREMENT BIT(24) 93 | #define QUICKSPI_CONFIG2_DISABLE_WRITE_ADDRESS_INCREMENT BIT(25) 94 | #define QUICKSPI_CONFIG2_ENABLE_WRITE_STREAMING_MODE BIT(27) 95 | #define QUICKSPI_CONFIG2_IRQ_POLARITY BIT(28) 96 | 97 | #define DMA_RX_CONTROL_ENABLE BIT(0) 98 | #define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only? 99 | #define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only? 100 | #define DMA_RX_CONTROL_IRQ_READY BIT(4) // rx0 only 101 | #define DMA_RX_CONTROL_IRQ_DATA BIT(5) 102 | 103 | #define DMA_RX_CONTROL2_UNKNOWN_4 BIT(4) // rx1 only? 104 | #define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only? 
105 | #define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices 106 | 107 | #define DMA_RX_WRAP_FLAG BIT(7) 108 | 109 | #define DMA_RX_STATUS_ERROR BIT(3) 110 | #define DMA_RX_STATUS_READY BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms) 111 | #define DMA_RX_STATUS_HAVE_DATA BIT(5) 112 | #define DMA_RX_STATUS_ENABLED BIT(8) 113 | 114 | #define INIT_UNKNOWN_GUC_2 BIT(2) 115 | #define INIT_UNKNOWN_3 BIT(3) 116 | #define INIT_UNKNOWN_GUC_4 BIT(4) 117 | #define INIT_UNKNOWN_5 BIT(5) 118 | #define INIT_UNKNOWN_31 BIT(31) 119 | 120 | // COUNTER_RESET can be written to counter registers to reset them to zero. However, in some cases this can mess up the THC. 121 | #define COUNTER_RESET BIT(31) 122 | 123 | struct ithc_registers { 124 | /* 0000 */ u32 _unknown_0000[5]; 125 | /* 0014 */ u32 ltr_config; 126 | /* 0018 */ u32 _unknown_0018[1018]; 127 | /* 1000 */ u32 _unknown_1000; 128 | /* 1004 */ u32 _unknown_1004; 129 | /* 1008 */ u32 control_bits; 130 | /* 100c */ u32 _unknown_100c; 131 | /* 1010 */ u32 spi_config; 132 | struct { 133 | /* 1014/1018/101c */ u8 header; 134 | /* 1015/1019/101d */ u8 quad; 135 | /* 1016/101a/101e */ u8 dual; 136 | /* 1017/101b/101f */ u8 single; 137 | } opcode[3]; 138 | /* 1020 */ u32 error_control; 139 | /* 1024 */ u32 error_status; // write to clear 140 | /* 1028 */ u32 error_flags; // write to clear 141 | /* 102c */ u32 _unknown_102c[5]; 142 | struct { 143 | /* 1040 */ u8 control; 144 | /* 1041 */ u8 code; 145 | /* 1042 */ u16 size; 146 | /* 1044 */ u32 status; // write to clear 147 | /* 1048 */ u32 offset; 148 | /* 104c */ u32 data[16]; 149 | /* 108c */ u32 _unknown_108c; 150 | } spi_cmd; 151 | struct { 152 | /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() 153 | /* 1098 */ u8 control; 154 | /* 1099 */ u8 _unknown_1099; 155 | /* 109a */ u8 _unknown_109a; 156 | /* 109b */ u8 num_prds; 157 | /* 109c */ u32 status; // write to clear 158 | /* 10a0 */ 
u32 _unknown_10a0[5]; 159 | /* 10b4 */ u32 spi_addr; 160 | } dma_tx; 161 | /* 10b8 */ u32 spi_header_addr; 162 | union { 163 | /* 10bc */ u32 irq_cause; // in legacy THC mode 164 | /* 10bc */ u32 input_header; // in QuickSPI mode (see HIDSPI spec) 165 | }; 166 | /* 10c0 */ u32 _unknown_10c0[8]; 167 | /* 10e0 */ u32 _unknown_10e0_counters[3]; 168 | /* 10ec */ u32 quickspi_config1; 169 | /* 10f0 */ u32 quickspi_config2; 170 | /* 10f4 */ u32 _unknown_10f4[3]; 171 | struct { 172 | /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() 173 | /* 1108/1208 */ u8 num_bufs; 174 | /* 1109/1209 */ u8 num_prds; 175 | /* 110a/120a */ u16 _unknown_110a; 176 | /* 110c/120c */ u8 control; 177 | /* 110d/120d */ u8 head; 178 | /* 110e/120e */ u8 tail; 179 | /* 110f/120f */ u8 control2; 180 | /* 1110/1210 */ u32 status; // write to clear 181 | /* 1114/1214 */ u32 _unknown_1114; 182 | /* 1118/1218 */ u64 _unknown_1118_guc_addr; 183 | /* 1120/1220 */ u32 _unknown_1120_guc; 184 | /* 1124/1224 */ u32 _unknown_1124_guc; 185 | /* 1128/1228 */ u32 init_unknown; 186 | /* 112c/122c */ u32 _unknown_112c; 187 | /* 1130/1230 */ u64 _unknown_1130_guc_addr; 188 | /* 1138/1238 */ u32 _unknown_1138_guc; 189 | /* 113c/123c */ u32 _unknown_113c; 190 | /* 1140/1240 */ u32 _unknown_1140_guc; 191 | /* 1144/1244 */ u32 _unknown_1144[11]; 192 | /* 1170/1270 */ u32 spi_addr; 193 | /* 1174/1274 */ u32 _unknown_1174[11]; 194 | /* 11a0/12a0 */ u32 _unknown_11a0_counters[6]; 195 | /* 11b8/12b8 */ u32 _unknown_11b8[18]; 196 | } dma_rx[2]; 197 | }; 198 | static_assert(sizeof(struct ithc_registers) == 0x1300); 199 | 200 | void bitsl(__iomem u32 *reg, u32 mask, u32 val); 201 | void bitsb(__iomem u8 *reg, u8 mask, u8 val); 202 | #define bitsl_set(reg, x) bitsl(reg, x, x) 203 | #define bitsb_set(reg, x) bitsb(reg, x, x) 204 | int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val); 205 | int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val); 206 | 207 | void 
ithc_set_ltr_config(struct ithc *ithc, u64 active_ltr_ns, u64 idle_ltr_ns); 208 | void ithc_set_ltr_idle(struct ithc *ithc); 209 | int ithc_set_spi_config(struct ithc *ithc, u8 clkdiv, bool clkdiv8, u8 read_mode, u8 write_mode); 210 | int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data); 211 | 212 | -------------------------------------------------------------------------------- /src/ithc.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #define DEVNAME "ithc" 21 | #define DEVFULLNAME "Intel Touch Host Controller" 22 | 23 | #undef pr_fmt 24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25 | 26 | #define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; }) 27 | #define CHECK_RET(...) 
do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while (0) 28 | 29 | #define NUM_RX_BUF 16 30 | 31 | // PCI device IDs: 32 | // Lakefield 33 | #define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0 34 | #define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1 35 | // Tiger Lake 36 | #define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0 37 | #define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1 38 | #define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0 39 | #define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1 40 | // Alder Lake 41 | #define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8 42 | #define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9 43 | #define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0 44 | #define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1 45 | #define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0 46 | #define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1 47 | // Raptor Lake 48 | #define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58 49 | #define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59 50 | // Meteor Lake 51 | #define PCI_DEVICE_ID_INTEL_THC_MTL_S_PORT1 0x7f59 52 | #define PCI_DEVICE_ID_INTEL_THC_MTL_S_PORT2 0x7f5b 53 | #define PCI_DEVICE_ID_INTEL_THC_MTL_MP_PORT1 0x7e49 54 | #define PCI_DEVICE_ID_INTEL_THC_MTL_MP_PORT2 0x7e4b 55 | 56 | struct ithc; 57 | 58 | #include "ithc-regs.h" 59 | #include "ithc-hid.h" 60 | #include "ithc-dma.h" 61 | #include "ithc-legacy.h" 62 | #include "ithc-quickspi.h" 63 | #include "ithc-debug.h" 64 | 65 | struct ithc { 66 | char phys[32]; 67 | struct pci_dev *pci; 68 | int irq; 69 | struct task_struct *poll_thread; 70 | struct timer_list idle_timer; 71 | 72 | struct ithc_registers __iomem *regs; 73 | struct ithc_registers *prev_regs; // for debugging 74 | struct ithc_dma_rx dma_rx[2]; 75 | struct ithc_dma_tx dma_tx; 76 | struct ithc_hid hid; 77 | 78 | bool use_quickspi; 79 | bool have_config; 80 | u16 vendor_id; 81 | u16 product_id; 82 | u32 product_rev; 83 | u32 max_rx_size; 84 | u32 max_tx_size; 85 | u32 legacy_touch_cfg; 86 | }; 87 | 88 | int 
ithc_reset(struct ithc *ithc); 89 | 90 | --------------------------------------------------------------------------------