├── Documentation └── virtual │ └── kvm │ ├── 00-INDEX │ ├── api.txt │ ├── arm │ └── vgic-mapped-irqs.txt │ ├── cpuid.txt │ ├── devices │ ├── README │ ├── arm-vgic.txt │ ├── mpic.txt │ ├── s390_flic.txt │ ├── vcpu.txt │ ├── vfio.txt │ ├── vm.txt │ └── xics.txt │ ├── hypercalls.txt │ ├── locking.txt │ ├── mmu.txt │ ├── msr.txt │ ├── nested-vmx.txt │ ├── ppc-pv.txt │ ├── review-checklist.txt │ ├── s390-diag.txt │ └── timekeeping.txt ├── README.md ├── arch └── x86 │ ├── include │ ├── asm │ │ ├── Kbuild │ │ ├── a.out-core.h │ │ ├── acenv.h │ │ ├── acpi.h │ │ ├── agp.h │ │ ├── alternative-asm.h │ │ ├── alternative.h │ │ ├── amd_nb.h │ │ ├── apb_timer.h │ │ ├── apic.h │ │ ├── apic_flat_64.h │ │ ├── apicdef.h │ │ ├── apm.h │ │ ├── arch_hweight.h │ │ ├── archrandom.h │ │ ├── asm-offsets.h │ │ ├── asm.h │ │ ├── atomic.h │ │ ├── atomic64_32.h │ │ ├── atomic64_64.h │ │ ├── barrier.h │ │ ├── bios_ebda.h │ │ ├── bitops.h │ │ ├── boot.h │ │ ├── bootparam_utils.h │ │ ├── bug.h │ │ ├── bugs.h │ │ ├── cache.h │ │ ├── cacheflush.h │ │ ├── calgary.h │ │ ├── ce4100.h │ │ ├── checksum.h │ │ ├── checksum_32.h │ │ ├── checksum_64.h │ │ ├── clocksource.h │ │ ├── cmdline.h │ │ ├── cmpxchg.h │ │ ├── cmpxchg_32.h │ │ ├── cmpxchg_64.h │ │ ├── compat.h │ │ ├── cpu.h │ │ ├── cpu_device_id.h │ │ ├── cpufeature.h │ │ ├── cpufeatures.h │ │ ├── cpumask.h │ │ ├── crash.h │ │ ├── crypto │ │ │ ├── aes.h │ │ │ ├── camellia.h │ │ │ ├── glue_helper.h │ │ │ ├── serpent-avx.h │ │ │ ├── serpent-sse2.h │ │ │ └── twofish.h │ │ ├── current.h │ │ ├── debugreg.h │ │ ├── delay.h │ │ ├── desc.h │ │ ├── desc_defs.h │ │ ├── device.h │ │ ├── disabled-features.h │ │ ├── div64.h │ │ ├── dma-mapping.h │ │ ├── dma.h │ │ ├── dmi.h │ │ ├── dwarf2.h │ │ ├── e820.h │ │ ├── edac.h │ │ ├── efi.h │ │ ├── elf.h │ │ ├── emergency-restart.h │ │ ├── entry_arch.h │ │ ├── espfix.h │ │ ├── exec.h │ │ ├── fb.h │ │ ├── fixmap.h │ │ ├── floppy.h │ │ ├── fpu │ │ │ ├── api.h │ │ │ ├── internal.h │ │ │ ├── regset.h │ │ │ ├── signal.h │ │ │ ├── types.h │ │ │ └── xstate.h │ │ ├── frame.h │ │ ├── ftrace.h │ │ ├── futex.h │ │ ├── gart.h │ │ ├── genapic.h │ │ ├── geode.h │ │ ├── hardirq.h │ │ ├── highmem.h │ │ ├── hpet.h │ │ ├── hugetlb.h │ │ ├── hw_breakpoint.h │ │ ├── hw_irq.h │ │ ├── hypertransport.h │ │ ├── hypervisor.h │ │ ├── i8259.h │ │ ├── ia32.h │ │ ├── ia32_unistd.h │ │ ├── idle.h │ │ ├── imr.h │ │ ├── inat.h │ │ ├── inat_types.h │ │ ├── init.h │ │ ├── insn.h │ │ ├── inst.h │ │ ├── intel-family.h │ │ ├── intel-mid.h │ │ ├── intel_mid_vrtc.h │ │ ├── intel_pmc_ipc.h │ │ ├── intel_pt.h │ │ ├── intel_punit_ipc.h │ │ ├── intel_scu_ipc.h │ │ ├── intel_telemetry.h │ │ ├── io.h │ │ ├── io_apic.h │ │ ├── iomap.h │ │ ├── iommu.h │ │ ├── iommu_table.h │ │ ├── iosf_mbi.h │ │ ├── ipi.h │ │ ├── irq.h │ │ ├── irq_regs.h │ │ ├── irq_remapping.h │ │ ├── irq_vectors.h │ │ ├── irq_work.h │ │ ├── irqdomain.h │ │ ├── irqflags.h │ │ ├── ist.h │ │ ├── jump_label.h │ │ ├── kasan.h │ │ ├── kaslr.h │ │ ├── kbdleds.h │ │ ├── kdebug.h │ │ ├── kexec-bzimage64.h │ │ ├── kexec.h │ │ ├── kgdb.h │ │ ├── kmap_types.h │ │ ├── kmemcheck.h │ │ ├── kprobes.h │ │ ├── kvm_emulate.h │ │ ├── kvm_guest.h │ │ ├── kvm_host.h │ │ ├── kvm_page_track.h │ │ ├── kvm_para.h │ │ ├── lguest.h │ │ ├── lguest_hcall.h │ │ ├── linkage.h │ │ ├── livepatch.h │ │ ├── local.h │ │ ├── local64.h │ │ ├── mach_timer.h │ │ ├── mach_traps.h │ │ ├── math_emu.h │ │ ├── mc146818rtc.h │ │ ├── mce.h │ │ ├── microcode.h │ │ ├── microcode_amd.h │ │ ├── microcode_intel.h │ │ ├── misc.h │ │ ├── mmconfig.h │ │ ├── mmu.h │ │ ├── 
mmu_context.h │ │ ├── mmx.h │ │ ├── mmzone.h │ │ ├── mmzone_32.h │ │ ├── mmzone_64.h │ │ ├── module.h │ │ ├── mpspec.h │ │ ├── mpspec_def.h │ │ ├── mpx.h │ │ ├── mshyperv.h │ │ ├── msi.h │ │ ├── msidef.h │ │ ├── msr-index.h │ │ ├── msr-trace.h │ │ ├── msr.h │ │ ├── mtrr.h │ │ ├── mutex.h │ │ ├── mutex_32.h │ │ ├── mutex_64.h │ │ ├── mwait.h │ │ ├── nmi.h │ │ ├── nops.h │ │ ├── numa.h │ │ ├── numa_32.h │ │ ├── numachip │ │ │ ├── numachip.h │ │ │ └── numachip_csr.h │ │ ├── olpc.h │ │ ├── olpc_ofw.h │ │ ├── page.h │ │ ├── page_32.h │ │ ├── page_32_types.h │ │ ├── page_64.h │ │ ├── page_64_types.h │ │ ├── page_types.h │ │ ├── paravirt.h │ │ ├── paravirt_types.h │ │ ├── parport.h │ │ ├── pat.h │ │ ├── pci-direct.h │ │ ├── pci-functions.h │ │ ├── pci.h │ │ ├── pci_64.h │ │ ├── pci_x86.h │ │ ├── percpu.h │ │ ├── perf_event.h │ │ ├── perf_event_p4.h │ │ ├── pgalloc.h │ │ ├── pgtable-2level.h │ │ ├── pgtable-2level_types.h │ │ ├── pgtable-3level.h │ │ ├── pgtable-3level_types.h │ │ ├── pgtable.h │ │ ├── pgtable_32.h │ │ ├── pgtable_32_types.h │ │ ├── pgtable_64.h │ │ ├── pgtable_64_types.h │ │ ├── pgtable_types.h │ │ ├── pkeys.h │ │ ├── platform_sst_audio.h │ │ ├── pm-trace.h │ │ ├── pmc_atom.h │ │ ├── pmc_core.h │ │ ├── pmem.h │ │ ├── posix_types.h │ │ ├── preempt.h │ │ ├── probe_roms.h │ │ ├── processor-cyrix.h │ │ ├── processor-flags.h │ │ ├── processor.h │ │ ├── prom.h │ │ ├── proto.h │ │ ├── ptrace.h │ │ ├── pvclock-abi.h │ │ ├── pvclock.h │ │ ├── qrwlock.h │ │ ├── qspinlock.h │ │ ├── qspinlock_paravirt.h │ │ ├── realmode.h │ │ ├── reboot.h │ │ ├── reboot_fixups.h │ │ ├── required-features.h │ │ ├── rio.h │ │ ├── rmwcc.h │ │ ├── rwsem.h │ │ ├── seccomp.h │ │ ├── sections.h │ │ ├── segment.h │ │ ├── serial.h │ │ ├── setup.h │ │ ├── setup_arch.h │ │ ├── shmparam.h │ │ ├── sigcontext.h │ │ ├── sigframe.h │ │ ├── sighandling.h │ │ ├── signal.h │ │ ├── simd.h │ │ ├── smap.h │ │ ├── smp.h │ │ ├── sparsemem.h │ │ ├── special_insns.h │ │ ├── spinlock.h │ │ ├── spinlock_types.h │ │ ├── sta2x11.h │ │ ├── stackprotector.h │ │ ├── stacktrace.h │ │ ├── string.h │ │ ├── string_32.h │ │ ├── string_64.h │ │ ├── suspend.h │ │ ├── suspend_32.h │ │ ├── suspend_64.h │ │ ├── svm.h │ │ ├── swiotlb.h │ │ ├── switch_to.h │ │ ├── sync_bitops.h │ │ ├── sys_ia32.h │ │ ├── syscall.h │ │ ├── syscalls.h │ │ ├── sysfb.h │ │ ├── tce.h │ │ ├── text-patching.h │ │ ├── thread_info.h │ │ ├── time.h │ │ ├── timer.h │ │ ├── timex.h │ │ ├── tlb.h │ │ ├── tlbflush.h │ │ ├── topology.h │ │ ├── trace │ │ │ ├── exceptions.h │ │ │ ├── fpu.h │ │ │ ├── irq_vectors.h │ │ │ └── mpx.h │ │ ├── trace_clock.h │ │ ├── traps.h │ │ ├── tsc.h │ │ ├── uaccess.h │ │ ├── uaccess_32.h │ │ ├── uaccess_64.h │ │ ├── unaligned.h │ │ ├── unistd.h │ │ ├── uprobes.h │ │ ├── user.h │ │ ├── user32.h │ │ ├── user_32.h │ │ ├── user_64.h │ │ ├── uv │ │ │ ├── bios.h │ │ │ ├── uv.h │ │ │ ├── uv_bau.h │ │ │ ├── uv_hub.h │ │ │ ├── uv_irq.h │ │ │ └── uv_mmrs.h │ │ ├── vdso.h │ │ ├── vga.h │ │ ├── vgtod.h │ │ ├── virtext.h │ │ ├── vm86.h │ │ ├── vmx.h │ │ ├── vsyscall.h │ │ ├── vvar.h │ │ ├── word-at-a-time.h │ │ ├── x2apic.h │ │ ├── x86_init.h │ │ ├── xen │ │ │ ├── cpuid.h │ │ │ ├── events.h │ │ │ ├── hypercall.h │ │ │ ├── hypervisor.h │ │ │ ├── interface.h │ │ │ ├── interface_32.h │ │ │ ├── interface_64.h │ │ │ ├── page-coherent.h │ │ │ ├── page.h │ │ │ ├── pci.h │ │ │ ├── swiotlb-xen.h │ │ │ └── trace_types.h │ │ ├── xor.h │ │ ├── xor_32.h │ │ ├── xor_64.h │ │ └── xor_avx.h │ └── uapi │ │ └── asm │ │ ├── Kbuild │ │ ├── a.out.h │ │ ├── auxvec.h │ │ ├── bitsperlong.h │ 
│ ├── boot.h │ │ ├── bootparam.h │ │ ├── byteorder.h │ │ ├── debugreg.h │ │ ├── e820.h │ │ ├── errno.h │ │ ├── fcntl.h │ │ ├── hw_breakpoint.h │ │ ├── hyperv.h │ │ ├── ioctl.h │ │ ├── ioctls.h │ │ ├── ipcbuf.h │ │ ├── ist.h │ │ ├── kvm.h │ │ ├── kvm_para.h │ │ ├── kvm_perf.h │ │ ├── ldt.h │ │ ├── mce.h │ │ ├── mman.h │ │ ├── msgbuf.h │ │ ├── msr.h │ │ ├── mtrr.h │ │ ├── param.h │ │ ├── perf_regs.h │ │ ├── poll.h │ │ ├── posix_types.h │ │ ├── posix_types_32.h │ │ ├── posix_types_64.h │ │ ├── posix_types_x32.h │ │ ├── prctl.h │ │ ├── processor-flags.h │ │ ├── ptrace-abi.h │ │ ├── ptrace.h │ │ ├── resource.h │ │ ├── sembuf.h │ │ ├── setup.h │ │ ├── shmbuf.h │ │ ├── sigcontext.h │ │ ├── sigcontext32.h │ │ ├── siginfo.h │ │ ├── signal.h │ │ ├── socket.h │ │ ├── sockios.h │ │ ├── stat.h │ │ ├── statfs.h │ │ ├── svm.h │ │ ├── swab.h │ │ ├── termbits.h │ │ ├── termios.h │ │ ├── types.h │ │ ├── ucontext.h │ │ ├── unistd.h │ │ ├── vm86.h │ │ ├── vmx.h │ │ └── vsyscall.h │ ├── kernel │ ├── kvm.c │ └── kvmclock.c │ └── kvm │ ├── Kconfig │ ├── Makefile │ ├── assigned-dev.c │ ├── assigned-dev.h │ ├── cpuid.c │ ├── cpuid.h │ ├── emulate.c │ ├── hyperv.c │ ├── hyperv.h │ ├── i8254.c │ ├── i8254.h │ ├── i8259.c │ ├── ioapic.c │ ├── ioapic.h │ ├── iommu.c │ ├── irq.c │ ├── irq.h │ ├── irq_comm.c │ ├── kvm_cache_regs.h │ ├── lapic.c │ ├── lapic.h │ ├── mmu.c │ ├── mmu.h │ ├── mmu_audit.c │ ├── mmutrace.h │ ├── mtrr.c │ ├── page_track.c │ ├── paging_tmpl.h │ ├── pmu.c │ ├── pmu.h │ ├── pmu_amd.c │ ├── pmu_intel.c │ ├── svm.c │ ├── trace.h │ ├── tss.h │ ├── vmx.c │ ├── x86.c │ └── x86.h ├── include ├── kvm │ ├── arm_arch_timer.h │ ├── arm_pmu.h │ ├── arm_vgic.h │ └── iodev.h ├── linux │ ├── kobject.h │ ├── kvm_host.h │ ├── kvm_irqfd.h │ ├── kvm_para.h │ └── kvm_types.h └── uapi │ └── linux │ ├── kvm.h │ └── kvm_para.h ├── lib └── kobject.c └── virt ├── Makefile ├── kvm ├── Kconfig ├── arm │ ├── arch_timer.c │ ├── hyp │ │ ├── timer-sr.c │ │ └── vgic-v2-sr.c │ ├── pmu.c │ ├── trace.h │ └── vgic │ │ ├── vgic-init.c │ │ ├── vgic-irqfd.c │ │ ├── vgic-its.c │ │ ├── vgic-kvm-device.c │ │ ├── vgic-mmio-v2.c │ │ ├── vgic-mmio-v3.c │ │ ├── vgic-mmio.c │ │ ├── vgic-mmio.h │ │ ├── vgic-v2.c │ │ ├── vgic-v3.c │ │ ├── vgic.c │ │ └── vgic.h ├── async_pf.c ├── async_pf.h ├── coalesced_mmio.c ├── coalesced_mmio.h ├── eventfd.c ├── irqchip.c ├── kvm_main.c ├── vfio.c └── vfio.h └── lib ├── Kconfig ├── Makefile └── irqbypass.c /Documentation/virtual/kvm/00-INDEX: -------------------------------------------------------------------------------- 1 | 00-INDEX 2 | - this file. 3 | api.txt 4 | - KVM userspace API. 5 | cpuid.txt 6 | - KVM-specific cpuid leaves (x86). 7 | devices/ 8 | - KVM_CAP_DEVICE_CTRL userspace API. 9 | hypercalls.txt 10 | - KVM hypercalls. 11 | locking.txt 12 | - notes on KVM locks. 13 | mmu.txt 14 | - the x86 kvm shadow mmu. 15 | msr.txt 16 | - KVM-specific MSRs (x86). 17 | nested-vmx.txt 18 | - notes on nested virtualization for Intel x86 processors. 19 | ppc-pv.txt 20 | - the paravirtualization interface on PowerPC. 21 | review-checklist.txt 22 | - review checklist for KVM patches. 23 | s390-diag.txt 24 | - Diagnose hypercall description (for IBM S/390) 25 | timekeeping.txt 26 | - timekeeping virtualization for x86-based architectures. 
27 | -------------------------------------------------------------------------------- /Documentation/virtual/kvm/devices/README: -------------------------------------------------------------------------------- 1 | This directory contains specific device bindings for KVM_CAP_DEVICE_CTRL. 2 | -------------------------------------------------------------------------------- /Documentation/virtual/kvm/devices/vcpu.txt: -------------------------------------------------------------------------------- 1 | Generic vcpu interface 2 | ==================================== 3 | 4 | The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR, 5 | KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct 6 | kvm_device_attr as other devices, but targets VCPU-wide settings and controls. 7 | 8 | The groups and attributes per virtual cpu, if any, are architecture specific. 9 | 10 | 1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL 11 | Architectures: ARM64 12 | 13 | 1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ 14 | Parameters: kvm_device_attr.addr points to an int holding the PMU overflow 15 | interrupt number 16 | Returns: -EBUSY: The PMU overflow interrupt is already set 17 | -ENXIO: The overflow interrupt is not set when attempting to get it 18 | -ENODEV: PMUv3 not supported 19 | -EINVAL: Invalid PMU overflow interrupt number supplied 20 | 21 | A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt 22 | number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt 23 | type must be the same for each vcpu. As a PPI, the interrupt number is the same 24 | for all vcpus, while as an SPI it must be a separate number per vcpu. 25 | 26 | 1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT 27 | Parameters: no additional parameter in kvm_device_attr.addr 28 | Returns: -ENODEV: PMUv3 not supported 29 | -ENXIO: PMUv3 not properly configured as required prior to calling this 30 | attribute 31 | -EBUSY: PMUv3 already initialized 32 | 33 | Request the initialization of the PMUv3. This must be done after creating the 34 | in-kernel irqchip. Creating a PMU with a userspace irqchip is currently not 35 | supported. 36 | -------------------------------------------------------------------------------- /Documentation/virtual/kvm/devices/vfio.txt: -------------------------------------------------------------------------------- 1 | VFIO virtual device 2 | =================== 3 | 4 | Device types supported: 5 | KVM_DEV_TYPE_VFIO 6 | 7 | Only one VFIO instance may be created per VM. The created device 8 | tracks VFIO groups in use by the VM and features of those groups 9 | important to the correctness and acceleration of the VM. As groups 10 | are enabled and disabled for use by the VM, KVM should be updated 11 | about their presence. When registered with KVM, a reference to the 12 | VFIO group is held by KVM. 13 | 14 | Groups: 15 | KVM_DEV_VFIO_GROUP 16 | 17 | KVM_DEV_VFIO_GROUP attributes: 18 | KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking 19 | KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking 20 | 21 | For each, kvm_device_attr.addr points to an int32_t file descriptor 22 | for the VFIO group. 23 | -------------------------------------------------------------------------------- /Documentation/virtual/kvm/review-checklist.txt: -------------------------------------------------------------------------------- 1 | Review checklist for kvm patches 2 | ================================ 3 | 4 | 1. 
The patch must follow Documentation/CodingStyle and 5 | Documentation/SubmittingPatches. 6 | 7 | 2. Patches should be against kvm.git master branch. 8 | 9 | 3. If the patch introduces or modifies a new userspace API: 10 | - the API must be documented in Documentation/virtual/kvm/api.txt 11 | - the API must be discoverable using KVM_CHECK_EXTENSION 12 | 13 | 4. New state must include support for save/restore. 14 | 15 | 5. New features must default to off (userspace should explicitly request them). 16 | Performance improvements can and should default to on. 17 | 18 | 6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2 19 | 20 | 7. Emulator changes should be accompanied by unit tests for the qemu-kvm.git 21 | kvm/test directory. 22 | 23 | 8. Changes should be vendor neutral when possible. Changes to common code 24 | are better than duplicating changes to vendor code. 25 | 26 | 9. Similarly, prefer changes to arch independent code rather than to arch 27 | dependent code. 28 | 29 | 10. User/kernel interfaces and guest/host interfaces must be 64-bit clean 30 | (all variables and sizes naturally aligned on 64-bit; use specific types 31 | only - u64 rather than ulong). 32 | 33 | 11. New guest visible features must either be documented in a hardware manual 34 | or be accompanied by documentation. 35 | 36 | 12. Features must be robust against reset and kexec - for example, shared 37 | host/guest memory must be unshared to prevent the host from writing to 38 | guest memory that the guest has not reserved for this purpose. 39 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## KVM source code reading and annotation project 2 | 3 | The KVM code has been pulled out of the kernel tree so that it is easier to annotate. 4 | 5 | If you notice that a file is missing, please add it. 6 | 7 | Everyone forks a copy, annotates the code with their own understanding and opens a pull request; we discuss at the weekly meeting and then merge. 8 | -------------------------------------------------------------------------------- /arch/x86/include/asm/Kbuild: -------------------------------------------------------------------------------- 1 | 2 | 3 | generated-y += syscalls_32.h 4 | generated-y += syscalls_64.h 5 | generated-y += unistd_32_ia32.h 6 | generated-y += unistd_64_x32.h 7 | generated-y += xen-hypercalls.h 8 | 9 | genhdr-y += unistd_32.h 10 | genhdr-y += unistd_64.h 11 | genhdr-y += unistd_x32.h 12 | 13 | generic-y += clkdev.h 14 | generic-y += cputime.h 15 | generic-y += dma-contiguous.h 16 | generic-y += early_ioremap.h 17 | generic-y += mcs_spinlock.h 18 | generic-y += mm-arch-hooks.h 19 | -------------------------------------------------------------------------------- /arch/x86/include/asm/a.out-core.h: -------------------------------------------------------------------------------- 1 | /* a.out coredump register dumper 2 | * 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 | * Written by David Howells (dhowells@redhat.com) 5 | * 6 | * This program is free software; you can redistribute it and/or 7 | * modify it under the terms of the GNU General Public Licence 8 | * as published by the Free Software Foundation; either version 9 | * 2 of the Licence, or (at your option) any later version. 
10 | */ 11 | 12 | #ifndef _ASM_X86_A_OUT_CORE_H 13 | #define _ASM_X86_A_OUT_CORE_H 14 | 15 | #ifdef __KERNEL__ 16 | #ifdef CONFIG_X86_32 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | /* 23 | * fill in the user structure for an a.out core dump 24 | */ 25 | static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) 26 | { 27 | /* changed the size calculations - should hopefully work better. lbt */ 28 | dump->magic = CMAGIC; 29 | dump->start_code = 0; 30 | dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); 31 | dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; 32 | dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) 33 | >> PAGE_SHIFT; 34 | dump->u_dsize -= dump->u_tsize; 35 | dump->u_ssize = 0; 36 | aout_dump_debugregs(dump); 37 | 38 | if (dump->start_stack < TASK_SIZE) 39 | dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) 40 | >> PAGE_SHIFT; 41 | 42 | dump->regs.bx = regs->bx; 43 | dump->regs.cx = regs->cx; 44 | dump->regs.dx = regs->dx; 45 | dump->regs.si = regs->si; 46 | dump->regs.di = regs->di; 47 | dump->regs.bp = regs->bp; 48 | dump->regs.ax = regs->ax; 49 | dump->regs.ds = (u16)regs->ds; 50 | dump->regs.es = (u16)regs->es; 51 | dump->regs.fs = (u16)regs->fs; 52 | dump->regs.gs = get_user_gs(regs); 53 | dump->regs.orig_ax = regs->orig_ax; 54 | dump->regs.ip = regs->ip; 55 | dump->regs.cs = (u16)regs->cs; 56 | dump->regs.flags = regs->flags; 57 | dump->regs.sp = regs->sp; 58 | dump->regs.ss = (u16)regs->ss; 59 | 60 | dump->u_fpvalid = dump_fpu(regs, &dump->i387); 61 | } 62 | 63 | #endif /* CONFIG_X86_32 */ 64 | #endif /* __KERNEL__ */ 65 | #endif /* _ASM_X86_A_OUT_CORE_H */ 66 | -------------------------------------------------------------------------------- /arch/x86/include/asm/acenv.h: -------------------------------------------------------------------------------- 1 | /* 2 | * X86 specific ACPICA environments and implementation 3 | * 4 | * Copyright (C) 2014, Intel Corporation 5 | * Author: Lv Zheng 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License version 2 as 9 | * published by the Free Software Foundation. 10 | */ 11 | 12 | #ifndef _ASM_X86_ACENV_H 13 | #define _ASM_X86_ACENV_H 14 | 15 | #include 16 | 17 | /* Asm macros */ 18 | 19 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() 20 | 21 | int __acpi_acquire_global_lock(unsigned int *lock); 22 | int __acpi_release_global_lock(unsigned int *lock); 23 | 24 | #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ 25 | ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) 26 | 27 | #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ 28 | ((Acq) = __acpi_release_global_lock(&facs->global_lock)) 29 | 30 | /* 31 | * Math helper asm macros 32 | */ 33 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ 34 | asm("divl %2;" \ 35 | : "=a"(q32), "=d"(r32) \ 36 | : "r"(d32), \ 37 | "0"(n_lo), "1"(n_hi)) 38 | 39 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ 40 | asm("shrl $1,%2 ;" \ 41 | "rcrl $1,%3;" \ 42 | : "=r"(n_hi), "=r"(n_lo) \ 43 | : "0"(n_hi), "1"(n_lo)) 44 | 45 | #endif /* _ASM_X86_ACENV_H */ 46 | -------------------------------------------------------------------------------- /arch/x86/include/asm/agp.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_AGP_H 2 | #define _ASM_X86_AGP_H 3 | 4 | #include 5 | #include 6 | 7 | /* 8 | * Functions to keep the agpgart mappings coherent with the MMU. 
The 9 | * GART gives the CPU a physical alias of pages in memory. The alias 10 | * region is mapped uncacheable. Make sure there are no conflicting 11 | * mappings with different cachability attributes for the same 12 | * page. This avoids data corruption on some CPUs. 13 | */ 14 | 15 | #define map_page_into_agp(page) set_pages_uc(page, 1) 16 | #define unmap_page_from_agp(page) set_pages_wb(page, 1) 17 | 18 | /* 19 | * Could use CLFLUSH here if the cpu supports it. But then it would 20 | * need to be called for each cacheline of the whole page so it may 21 | * not be worth it. Would need a page for it. 22 | */ 23 | #define flush_agp_cache() wbinvd() 24 | 25 | /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 26 | #define alloc_gatt_pages(order) \ 27 | ((char *)__get_free_pages(GFP_KERNEL, (order))) 28 | #define free_gatt_pages(table, order) \ 29 | free_pages((unsigned long)(table), (order)) 30 | 31 | #endif /* _ASM_X86_AGP_H */ 32 | -------------------------------------------------------------------------------- /arch/x86/include/asm/apb_timer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare 3 | * 4 | * (C) Copyright 2009 Intel Corporation 5 | * Author: Jacob Pan (jacob.jun.pan@intel.com) 6 | * 7 | * This program is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU General Public License 9 | * as published by the Free Software Foundation; version 2 10 | * of the License. 11 | * 12 | * Note: 13 | */ 14 | 15 | #ifndef ASM_X86_APBT_H 16 | #define ASM_X86_APBT_H 17 | #include 18 | 19 | #ifdef CONFIG_APB_TIMER 20 | 21 | /* default memory mapped register base */ 22 | #define LNW_SCU_ADDR 0xFF100000 23 | #define LNW_EXT_TIMER_OFFSET 0x1B800 24 | #define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET) 25 | #define LNW_EXT_TIMER_PGOFFSET 0x800 26 | 27 | /* APBT clock speed range from PCLK to fabric base, 25-100MHz */ 28 | #define APBT_MAX_FREQ 50000000 29 | #define APBT_MIN_FREQ 1000000 30 | #define APBT_MMAP_SIZE 1024 31 | 32 | #define APBT_DEV_USED 1 33 | 34 | extern void apbt_time_init(void); 35 | extern unsigned long apbt_quick_calibrate(void); 36 | extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); 37 | extern void apbt_setup_secondary_clock(void); 38 | 39 | extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); 40 | extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); 41 | extern int sfi_mtimer_num; 42 | 43 | #else /* CONFIG_APB_TIMER */ 44 | 45 | static inline unsigned long apbt_quick_calibrate(void) {return 0; } 46 | static inline void apbt_time_init(void) { } 47 | 48 | #endif 49 | #endif /* ASM_X86_APBT_H */ 50 | -------------------------------------------------------------------------------- /arch/x86/include/asm/apic_flat_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_APIC_FLAT_64_H 2 | #define _ASM_X86_APIC_FLAT_64_H 3 | 4 | extern void flat_init_apic_ldr(void); 5 | 6 | #endif 7 | 8 | -------------------------------------------------------------------------------- /arch/x86/include/asm/apm.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Machine specific APM BIOS functions for generic. 
3 | * Split out from apm.c by Osamu Tomita 4 | */ 5 | 6 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H 7 | #define _ASM_X86_MACH_DEFAULT_APM_H 8 | 9 | #ifdef APM_ZERO_SEGS 10 | # define APM_DO_ZERO_SEGS \ 11 | "pushl %%ds\n\t" \ 12 | "pushl %%es\n\t" \ 13 | "xorl %%edx, %%edx\n\t" \ 14 | "mov %%dx, %%ds\n\t" \ 15 | "mov %%dx, %%es\n\t" \ 16 | "mov %%dx, %%fs\n\t" \ 17 | "mov %%dx, %%gs\n\t" 18 | # define APM_DO_POP_SEGS \ 19 | "popl %%es\n\t" \ 20 | "popl %%ds\n\t" 21 | #else 22 | # define APM_DO_ZERO_SEGS 23 | # define APM_DO_POP_SEGS 24 | #endif 25 | 26 | static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, 27 | u32 *eax, u32 *ebx, u32 *ecx, 28 | u32 *edx, u32 *esi) 29 | { 30 | /* 31 | * N.B. We do NOT need a cld after the BIOS call 32 | * because we always save and restore the flags. 33 | */ 34 | __asm__ __volatile__(APM_DO_ZERO_SEGS 35 | "pushl %%edi\n\t" 36 | "pushl %%ebp\n\t" 37 | "lcall *%%cs:apm_bios_entry\n\t" 38 | "setc %%al\n\t" 39 | "popl %%ebp\n\t" 40 | "popl %%edi\n\t" 41 | APM_DO_POP_SEGS 42 | : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), 43 | "=S" (*esi) 44 | : "a" (func), "b" (ebx_in), "c" (ecx_in) 45 | : "memory", "cc"); 46 | } 47 | 48 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, 49 | u32 ecx_in, u32 *eax) 50 | { 51 | int cx, dx, si; 52 | bool error; 53 | 54 | /* 55 | * N.B. We do NOT need a cld after the BIOS call 56 | * because we always save and restore the flags. 57 | */ 58 | __asm__ __volatile__(APM_DO_ZERO_SEGS 59 | "pushl %%edi\n\t" 60 | "pushl %%ebp\n\t" 61 | "lcall *%%cs:apm_bios_entry\n\t" 62 | "setc %%bl\n\t" 63 | "popl %%ebp\n\t" 64 | "popl %%edi\n\t" 65 | APM_DO_POP_SEGS 66 | : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), 67 | "=S" (si) 68 | : "a" (func), "b" (ebx_in), "c" (ecx_in) 69 | : "memory", "cc"); 70 | return error; 71 | } 72 | 73 | #endif /* _ASM_X86_MACH_DEFAULT_APM_H */ 74 | -------------------------------------------------------------------------------- /arch/x86/include/asm/arch_hweight.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_HWEIGHT_H 2 | #define _ASM_X86_HWEIGHT_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_64BIT 7 | /* popcnt %edi, %eax */ 8 | #define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7" 9 | /* popcnt %rdi, %rax */ 10 | #define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" 11 | #define REG_IN "D" 12 | #define REG_OUT "a" 13 | #else 14 | /* popcnt %eax, %eax */ 15 | #define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" 16 | #define REG_IN "a" 17 | #define REG_OUT "a" 18 | #endif 19 | 20 | #define __HAVE_ARCH_SW_HWEIGHT 21 | 22 | static __always_inline unsigned int __arch_hweight32(unsigned int w) 23 | { 24 | unsigned int res; 25 | 26 | asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) 27 | : "="REG_OUT (res) 28 | : REG_IN (w)); 29 | 30 | return res; 31 | } 32 | 33 | static inline unsigned int __arch_hweight16(unsigned int w) 34 | { 35 | return __arch_hweight32(w & 0xffff); 36 | } 37 | 38 | static inline unsigned int __arch_hweight8(unsigned int w) 39 | { 40 | return __arch_hweight32(w & 0xff); 41 | } 42 | 43 | #ifdef CONFIG_X86_32 44 | static inline unsigned long __arch_hweight64(__u64 w) 45 | { 46 | return __arch_hweight32((u32)w) + 47 | __arch_hweight32((u32)(w >> 32)); 48 | } 49 | #else 50 | static __always_inline unsigned long __arch_hweight64(__u64 w) 51 | { 52 | unsigned long res; 53 | 54 | asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) 55 | : "="REG_OUT (res) 56 | : REG_IN (w)); 57 | 58 | return res; 59 | 
} 60 | #endif /* CONFIG_X86_32 */ 61 | 62 | #endif 63 | -------------------------------------------------------------------------------- /arch/x86/include/asm/asm-offsets.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/asm/bios_ebda.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_BIOS_EBDA_H 2 | #define _ASM_X86_BIOS_EBDA_H 3 | 4 | #include 5 | 6 | /* 7 | * Returns physical address of EBDA. Returns 0 if there is no EBDA. 8 | */ 9 | static inline unsigned int get_bios_ebda(void) 10 | { 11 | /* 12 | * There is a real-mode segmented pointer pointing to the 13 | * 4K EBDA area at 0x40E. 14 | */ 15 | unsigned int address = *(unsigned short *)phys_to_virt(0x40E); 16 | address <<= 4; 17 | return address; /* 0 means none */ 18 | } 19 | 20 | void reserve_bios_regions(void); 21 | 22 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION 23 | /* 24 | * This is obviously not a great place for this, but we want to be 25 | * able to scatter it around anywhere in the kernel. 26 | */ 27 | void check_for_bios_corruption(void); 28 | void start_periodic_check_for_corruption(void); 29 | #else 30 | static inline void check_for_bios_corruption(void) 31 | { 32 | } 33 | 34 | static inline void start_periodic_check_for_corruption(void) 35 | { 36 | } 37 | #endif 38 | 39 | #endif /* _ASM_X86_BIOS_EBDA_H */ 40 | -------------------------------------------------------------------------------- /arch/x86/include/asm/boot.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_BOOT_H 2 | #define _ASM_X86_BOOT_H 3 | 4 | 5 | #include 6 | #include 7 | 8 | /* Physical address where kernel should be loaded. */ 9 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 10 | + (CONFIG_PHYSICAL_ALIGN - 1)) \ 11 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) 12 | 13 | /* Minimum kernel alignment, as a power of two */ 14 | #ifdef CONFIG_X86_64 15 | # define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT 16 | #else 17 | # define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER) 18 | #endif 19 | #define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) 20 | 21 | #if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \ 22 | (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN) 23 | # error "Invalid value for CONFIG_PHYSICAL_ALIGN" 24 | #endif 25 | 26 | #ifdef CONFIG_KERNEL_BZIP2 27 | # define BOOT_HEAP_SIZE 0x400000 28 | #else /* !CONFIG_KERNEL_BZIP2 */ 29 | # define BOOT_HEAP_SIZE 0x10000 30 | #endif 31 | 32 | #ifdef CONFIG_X86_64 33 | # define BOOT_STACK_SIZE 0x4000 34 | 35 | # define BOOT_INIT_PGT_SIZE (6*4096) 36 | # ifdef CONFIG_RANDOMIZE_BASE 37 | /* 38 | * Assuming all cross the 512GB boundary: 39 | * 1 page for level4 40 | * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel 41 | * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP). 42 | * Total is 19 pages. 
43 | */ 44 | # ifdef CONFIG_X86_VERBOSE_BOOTUP 45 | # define BOOT_PGT_SIZE (19*4096) 46 | # else /* !CONFIG_X86_VERBOSE_BOOTUP */ 47 | # define BOOT_PGT_SIZE (17*4096) 48 | # endif 49 | # else /* !CONFIG_RANDOMIZE_BASE */ 50 | # define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE 51 | # endif 52 | 53 | #else /* !CONFIG_X86_64 */ 54 | # define BOOT_STACK_SIZE 0x1000 55 | #endif 56 | 57 | #endif /* _ASM_X86_BOOT_H */ 58 | -------------------------------------------------------------------------------- /arch/x86/include/asm/bug.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_BUG_H 2 | #define _ASM_X86_BUG_H 3 | 4 | #define HAVE_ARCH_BUG 5 | 6 | #ifdef CONFIG_DEBUG_BUGVERBOSE 7 | 8 | #ifdef CONFIG_X86_32 9 | # define __BUG_C0 "2:\t.long 1b, %c0\n" 10 | #else 11 | # define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n" 12 | #endif 13 | 14 | #define BUG() \ 15 | do { \ 16 | asm volatile("1:\tud2\n" \ 17 | ".pushsection __bug_table,\"a\"\n" \ 18 | __BUG_C0 \ 19 | "\t.word %c1, 0\n" \ 20 | "\t.org 2b+%c2\n" \ 21 | ".popsection" \ 22 | : : "i" (__FILE__), "i" (__LINE__), \ 23 | "i" (sizeof(struct bug_entry))); \ 24 | unreachable(); \ 25 | } while (0) 26 | 27 | #else 28 | #define BUG() \ 29 | do { \ 30 | asm volatile("ud2"); \ 31 | unreachable(); \ 32 | } while (0) 33 | #endif 34 | 35 | #include 36 | 37 | #endif /* _ASM_X86_BUG_H */ 38 | -------------------------------------------------------------------------------- /arch/x86/include/asm/bugs.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_BUGS_H 2 | #define _ASM_X86_BUGS_H 3 | 4 | #include 5 | 6 | extern void check_bugs(void); 7 | 8 | #if defined(CONFIG_CPU_SUP_INTEL) 9 | void check_mpx_erratum(struct cpuinfo_x86 *c); 10 | #else 11 | static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {} 12 | #endif 13 | 14 | #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) 15 | int ppro_with_ram_bug(void); 16 | #else 17 | static inline int ppro_with_ram_bug(void) { return 0; } 18 | #endif 19 | 20 | #endif /* _ASM_X86_BUGS_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/asm/cache.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CACHE_H 2 | #define _ASM_X86_CACHE_H 3 | 4 | #include 5 | 6 | /* L1 cache line size */ 7 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 8 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 9 | 10 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 11 | 12 | #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT 13 | #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) 14 | 15 | #ifdef CONFIG_X86_VSMP 16 | #ifdef CONFIG_SMP 17 | #define __cacheline_aligned_in_smp \ 18 | __attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \ 19 | __page_aligned_data 20 | #endif 21 | #endif 22 | 23 | #endif /* _ASM_X86_CACHE_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/ce4100.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_CE4100_H_ 2 | #define _ASM_CE4100_H_ 3 | 4 | int ce4100_pci_init(void); 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/checksum.h: -------------------------------------------------------------------------------- 1 | #ifdef CONFIG_X86_32 2 | # include 3 | #else 4 | # include 5 | #endif 
6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/clocksource.h: -------------------------------------------------------------------------------- 1 | /* x86-specific clocksource additions */ 2 | 3 | #ifndef _ASM_X86_CLOCKSOURCE_H 4 | #define _ASM_X86_CLOCKSOURCE_H 5 | 6 | #define VCLOCK_NONE 0 /* No vDSO clock available. */ 7 | #define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ 8 | #define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */ 9 | #define VCLOCK_MAX 2 10 | 11 | struct arch_clocksource_data { 12 | int vclock_mode; 13 | }; 14 | 15 | #endif /* _ASM_X86_CLOCKSOURCE_H */ 16 | -------------------------------------------------------------------------------- /arch/x86/include/asm/cmdline.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CMDLINE_H 2 | #define _ASM_X86_CMDLINE_H 3 | 4 | int cmdline_find_option_bool(const char *cmdline_ptr, const char *option); 5 | 6 | #endif /* _ASM_X86_CMDLINE_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/cmpxchg_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CMPXCHG_64_H 2 | #define _ASM_X86_CMPXCHG_64_H 3 | 4 | static inline void set_64bit(volatile u64 *ptr, u64 val) 5 | { 6 | *ptr = val; 7 | } 8 | 9 | #define cmpxchg64(ptr, o, n) \ 10 | ({ \ 11 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 12 | cmpxchg((ptr), (o), (n)); \ 13 | }) 14 | 15 | #define cmpxchg64_local(ptr, o, n) \ 16 | ({ \ 17 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 18 | cmpxchg_local((ptr), (o), (n)); \ 19 | }) 20 | 21 | #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16) 22 | 23 | #endif /* _ASM_X86_CMPXCHG_64_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/cpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CPU_H 2 | #define _ASM_X86_CPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #ifdef CONFIG_SMP 11 | 12 | extern void prefill_possible_map(void); 13 | 14 | #else /* CONFIG_SMP */ 15 | 16 | static inline void prefill_possible_map(void) {} 17 | 18 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid 19 | #define cpu_acpi_id(cpu) 0 20 | #define safe_smp_processor_id() 0 21 | 22 | #endif /* CONFIG_SMP */ 23 | 24 | struct x86_cpu { 25 | struct cpu cpu; 26 | }; 27 | 28 | #ifdef CONFIG_HOTPLUG_CPU 29 | extern int arch_register_cpu(int num); 30 | extern void arch_unregister_cpu(int); 31 | extern void start_cpu0(void); 32 | #ifdef CONFIG_DEBUG_HOTPLUG_CPU0 33 | extern int _debug_hotplug_cpu(int cpu, int action); 34 | #endif 35 | #endif 36 | 37 | int mwait_usable(const struct cpuinfo_x86 *); 38 | 39 | unsigned int x86_family(unsigned int sig); 40 | unsigned int x86_model(unsigned int sig); 41 | unsigned int x86_stepping(unsigned int sig); 42 | #endif /* _ASM_X86_CPU_H */ 43 | -------------------------------------------------------------------------------- /arch/x86/include/asm/cpu_device_id.h: -------------------------------------------------------------------------------- 1 | #ifndef _CPU_DEVICE_ID 2 | #define _CPU_DEVICE_ID 1 3 | 4 | /* 5 | * Declare drivers belonging to specific x86 CPUs 6 | * Similar in spirit to pci_device_id and related PCI functions 7 | */ 8 | 9 | #include 10 | 11 | extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); 12 | 13 | #endif 14 | 
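The cpu_device_id.h header above only declares x86_match_cpu(); the struct x86_cpu_id table it matches against is defined in linux/mod_devicetable.h. As a minimal sketch of how a driver typically uses it — the table name, module name, and the choice of X86_FEATURE_AES are illustrative assumptions, not taken from this tree:

```c
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeatures.h>

/* Hypothetical module: load only on CPUs advertising the AES-NI feature bit. */
static const struct x86_cpu_id example_cpu_ids[] = {
	{ .vendor = X86_VENDOR_ANY, .family = X86_FAMILY_ANY,
	  .model = X86_MODEL_ANY, .feature = X86_FEATURE_AES },
	{ }	/* terminating empty entry */
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	/* x86_match_cpu() returns the matching table entry, or NULL if none matches. */
	if (!x86_match_cpu(example_cpu_ids))
		return -ENODEV;
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
```

The MODULE_DEVICE_TABLE(x86cpu, ...) line is what allows such a module to be autoloaded on matching hardware, mirroring the pci_device_id pattern the header comment refers to.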
-------------------------------------------------------------------------------- /arch/x86/include/asm/cpumask.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CPUMASK_H 2 | #define _ASM_X86_CPUMASK_H 3 | #ifndef __ASSEMBLY__ 4 | #include 5 | 6 | extern cpumask_var_t cpu_callin_mask; 7 | extern cpumask_var_t cpu_callout_mask; 8 | extern cpumask_var_t cpu_initialized_mask; 9 | extern cpumask_var_t cpu_sibling_setup_mask; 10 | 11 | extern void setup_cpu_local_masks(void); 12 | 13 | #endif /* __ASSEMBLY__ */ 14 | #endif /* _ASM_X86_CPUMASK_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/crash.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CRASH_H 2 | #define _ASM_X86_CRASH_H 3 | 4 | int crash_load_segments(struct kimage *image); 5 | int crash_copy_backup_region(struct kimage *image); 6 | int crash_setup_memmap_entries(struct kimage *image, 7 | struct boot_params *params); 8 | 9 | #endif /* _ASM_X86_CRASH_H */ 10 | -------------------------------------------------------------------------------- /arch/x86/include/asm/crypto/aes.h: -------------------------------------------------------------------------------- 1 | #ifndef ASM_X86_AES_H 2 | #define ASM_X86_AES_H 3 | 4 | #include 5 | #include 6 | 7 | void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, 8 | const u8 *src); 9 | void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, 10 | const u8 *src); 11 | #endif 12 | -------------------------------------------------------------------------------- /arch/x86/include/asm/crypto/serpent-avx.h: -------------------------------------------------------------------------------- 1 | #ifndef ASM_X86_SERPENT_AVX_H 2 | #define ASM_X86_SERPENT_AVX_H 3 | 4 | #include 5 | #include 6 | 7 | #define SERPENT_PARALLEL_BLOCKS 8 8 | 9 | struct serpent_lrw_ctx { 10 | struct lrw_table_ctx lrw_table; 11 | struct serpent_ctx serpent_ctx; 12 | }; 13 | 14 | struct serpent_xts_ctx { 15 | struct serpent_ctx tweak_ctx; 16 | struct serpent_ctx crypt_ctx; 17 | }; 18 | 19 | asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 20 | const u8 *src); 21 | asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 22 | const u8 *src); 23 | 24 | asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 25 | const u8 *src); 26 | asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, 27 | const u8 *src, le128 *iv); 28 | 29 | asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 30 | const u8 *src, le128 *iv); 31 | asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 32 | const u8 *src, le128 *iv); 33 | 34 | extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, 35 | le128 *iv); 36 | 37 | extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); 38 | extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); 39 | 40 | extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 41 | unsigned int keylen); 42 | 43 | extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm); 44 | 45 | extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 46 | unsigned int keylen); 47 | 48 | #endif 49 | -------------------------------------------------------------------------------- /arch/x86/include/asm/crypto/serpent-sse2.h: 
-------------------------------------------------------------------------------- 1 | #ifndef ASM_X86_SERPENT_SSE2_H 2 | #define ASM_X86_SERPENT_SSE2_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef CONFIG_X86_32 8 | 9 | #define SERPENT_PARALLEL_BLOCKS 4 10 | 11 | asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, 12 | const u8 *src, bool xor); 13 | asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, 14 | const u8 *src); 15 | 16 | static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, 17 | const u8 *src) 18 | { 19 | __serpent_enc_blk_4way(ctx, dst, src, false); 20 | } 21 | 22 | static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, 23 | const u8 *src) 24 | { 25 | __serpent_enc_blk_4way(ctx, dst, src, true); 26 | } 27 | 28 | static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, 29 | const u8 *src) 30 | { 31 | serpent_dec_blk_4way(ctx, dst, src); 32 | } 33 | 34 | #else 35 | 36 | #define SERPENT_PARALLEL_BLOCKS 8 37 | 38 | asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, 39 | const u8 *src, bool xor); 40 | asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, 41 | const u8 *src); 42 | 43 | static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, 44 | const u8 *src) 45 | { 46 | __serpent_enc_blk_8way(ctx, dst, src, false); 47 | } 48 | 49 | static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, 50 | const u8 *src) 51 | { 52 | __serpent_enc_blk_8way(ctx, dst, src, true); 53 | } 54 | 55 | static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, 56 | const u8 *src) 57 | { 58 | serpent_dec_blk_8way(ctx, dst, src); 59 | } 60 | 61 | #endif 62 | 63 | #endif 64 | -------------------------------------------------------------------------------- /arch/x86/include/asm/crypto/twofish.h: -------------------------------------------------------------------------------- 1 | #ifndef ASM_X86_TWOFISH_H 2 | #define ASM_X86_TWOFISH_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | struct twofish_lrw_ctx { 10 | struct lrw_table_ctx lrw_table; 11 | struct twofish_ctx twofish_ctx; 12 | }; 13 | 14 | struct twofish_xts_ctx { 15 | struct twofish_ctx tweak_ctx; 16 | struct twofish_ctx crypt_ctx; 17 | }; 18 | 19 | /* regular block cipher functions from twofish_x86_64 module */ 20 | asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, 21 | const u8 *src); 22 | asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, 23 | const u8 *src); 24 | 25 | /* 3-way parallel cipher functions */ 26 | asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 27 | const u8 *src, bool xor); 28 | asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, 29 | const u8 *src); 30 | 31 | /* helpers from twofish_x86_64-3way module */ 32 | extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); 33 | extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, 34 | le128 *iv); 35 | extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, 36 | le128 *iv); 37 | 38 | extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, 39 | unsigned int keylen); 40 | 41 | extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm); 42 | 43 | extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, 44 | unsigned int keylen); 45 | 46 | #endif /* ASM_X86_TWOFISH_H */ 47 | 
-------------------------------------------------------------------------------- /arch/x86/include/asm/current.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_CURRENT_H 2 | #define _ASM_X86_CURRENT_H 3 | 4 | #include 5 | #include 6 | 7 | #ifndef __ASSEMBLY__ 8 | struct task_struct; 9 | 10 | DECLARE_PER_CPU(struct task_struct *, current_task); 11 | 12 | static __always_inline struct task_struct *get_current(void) 13 | { 14 | return this_cpu_read_stable(current_task); 15 | } 16 | 17 | #define current get_current() 18 | 19 | #endif /* __ASSEMBLY__ */ 20 | 21 | #endif /* _ASM_X86_CURRENT_H */ 22 | -------------------------------------------------------------------------------- /arch/x86/include/asm/delay.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_DELAY_H 2 | #define _ASM_X86_DELAY_H 3 | 4 | #include 5 | 6 | void use_tsc_delay(void); 7 | void use_mwaitx_delay(void); 8 | 9 | #endif /* _ASM_X86_DELAY_H */ 10 | -------------------------------------------------------------------------------- /arch/x86/include/asm/device.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_DEVICE_H 2 | #define _ASM_X86_DEVICE_H 3 | 4 | struct dev_archdata { 5 | #ifdef CONFIG_X86_DEV_DMA_OPS 6 | struct dma_map_ops *dma_ops; 7 | #endif 8 | #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) 9 | void *iommu; /* hook for IOMMU specific extension */ 10 | #endif 11 | }; 12 | 13 | #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) 14 | struct dma_domain { 15 | struct list_head node; 16 | struct dma_map_ops *dma_ops; 17 | int domain_nr; 18 | }; 19 | void add_dma_domain(struct dma_domain *domain); 20 | void del_dma_domain(struct dma_domain *domain); 21 | #endif 22 | 23 | struct pdev_archdata { 24 | }; 25 | 26 | #endif /* _ASM_X86_DEVICE_H */ 27 | -------------------------------------------------------------------------------- /arch/x86/include/asm/disabled-features.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_DISABLED_FEATURES_H 2 | #define _ASM_X86_DISABLED_FEATURES_H 3 | 4 | /* These features, although they might be available in a CPU 5 | * will not be used because the compile options to support 6 | * them are not present. 7 | * 8 | * This code allows them to be checked and disabled at 9 | * compile time without an explicit #ifdef. Use 10 | * cpu_feature_enabled(). 
11 | */ 12 | 13 | #ifdef CONFIG_X86_INTEL_MPX 14 | # define DISABLE_MPX 0 15 | #else 16 | # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 17 | #endif 18 | 19 | #ifdef CONFIG_X86_64 20 | # define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) 21 | # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) 22 | # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) 23 | # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) 24 | #else 25 | # define DISABLE_VME 0 26 | # define DISABLE_K6_MTRR 0 27 | # define DISABLE_CYRIX_ARR 0 28 | # define DISABLE_CENTAUR_MCR 0 29 | #endif /* CONFIG_X86_64 */ 30 | 31 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 32 | # define DISABLE_PKU 0 33 | # define DISABLE_OSPKE 0 34 | #else 35 | # define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31)) 36 | # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) 37 | #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ 38 | 39 | /* 40 | * Make sure to add features to the correct mask 41 | */ 42 | #define DISABLED_MASK0 (DISABLE_VME) 43 | #define DISABLED_MASK1 0 44 | #define DISABLED_MASK2 0 45 | #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) 46 | #define DISABLED_MASK4 0 47 | #define DISABLED_MASK5 0 48 | #define DISABLED_MASK6 0 49 | #define DISABLED_MASK7 0 50 | #define DISABLED_MASK8 0 51 | #define DISABLED_MASK9 (DISABLE_MPX) 52 | #define DISABLED_MASK10 0 53 | #define DISABLED_MASK11 0 54 | #define DISABLED_MASK12 0 55 | #define DISABLED_MASK13 0 56 | #define DISABLED_MASK14 0 57 | #define DISABLED_MASK15 0 58 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) 59 | #define DISABLED_MASK17 0 60 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 61 | 62 | #endif /* _ASM_X86_DISABLED_FEATURES_H */ 63 | -------------------------------------------------------------------------------- /arch/x86/include/asm/div64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_DIV64_H 2 | #define _ASM_X86_DIV64_H 3 | 4 | #ifdef CONFIG_X86_32 5 | 6 | #include 7 | #include 8 | 9 | /* 10 | * do_div() is NOT a C function. It wants to return 11 | * two values (the quotient and the remainder), but 12 | * since that doesn't work very well in C, what it 13 | * does is: 14 | * 15 | * - modifies the 64-bit dividend _in_place_ 16 | * - returns the 32-bit remainder 17 | * 18 | * This ends up being the most efficient "calling 19 | * convention" on x86. 
20 | */ 21 | #define do_div(n, base) \ 22 | ({ \ 23 | unsigned long __upper, __low, __high, __mod, __base; \ 24 | __base = (base); \ 25 | if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \ 26 | __mod = n & (__base - 1); \ 27 | n >>= ilog2(__base); \ 28 | } else { \ 29 | asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ 30 | __upper = __high; \ 31 | if (__high) { \ 32 | __upper = __high % (__base); \ 33 | __high = __high / (__base); \ 34 | } \ 35 | asm("divl %2" : "=a" (__low), "=d" (__mod) \ 36 | : "rm" (__base), "0" (__low), "1" (__upper)); \ 37 | asm("" : "=A" (n) : "a" (__low), "d" (__high)); \ 38 | } \ 39 | __mod; \ 40 | }) 41 | 42 | static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 43 | { 44 | union { 45 | u64 v64; 46 | u32 v32[2]; 47 | } d = { dividend }; 48 | u32 upper; 49 | 50 | upper = d.v32[1]; 51 | d.v32[1] = 0; 52 | if (upper >= divisor) { 53 | d.v32[1] = upper / divisor; 54 | upper %= divisor; 55 | } 56 | asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) : 57 | "rm" (divisor), "0" (d.v32[0]), "1" (upper)); 58 | return d.v64; 59 | } 60 | #define div_u64_rem div_u64_rem 61 | 62 | #else 63 | # include 64 | #endif /* CONFIG_X86_32 */ 65 | 66 | #endif /* _ASM_X86_DIV64_H */ 67 | -------------------------------------------------------------------------------- /arch/x86/include/asm/dmi.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_DMI_H 2 | #define _ASM_X86_DMI_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | static __always_inline __init void *dmi_alloc(unsigned len) 11 | { 12 | return extend_brk(len, sizeof(int)); 13 | } 14 | 15 | /* Use early IO mappings for DMI because it's initialized early */ 16 | #define dmi_early_remap early_ioremap 17 | #define dmi_early_unmap early_iounmap 18 | #define dmi_remap ioremap_cache 19 | #define dmi_unmap iounmap 20 | 21 | #endif /* _ASM_X86_DMI_H */ 22 | -------------------------------------------------------------------------------- /arch/x86/include/asm/edac.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_EDAC_H 2 | #define _ASM_X86_EDAC_H 3 | 4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ 5 | 6 | static inline void edac_atomic_scrub(void *va, u32 size) 7 | { 8 | u32 i, *virt_addr = va; 9 | 10 | /* 11 | * Very carefully read and write to memory atomically so we 12 | * are interrupt, DMA and SMP safe. 13 | */ 14 | for (i = 0; i < size / 4; i++, virt_addr++) 15 | asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); 16 | } 17 | 18 | #endif /* _ASM_X86_EDAC_H */ 19 | -------------------------------------------------------------------------------- /arch/x86/include/asm/emergency-restart.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_EMERGENCY_RESTART_H 2 | #define _ASM_X86_EMERGENCY_RESTART_H 3 | 4 | extern void machine_emergency_restart(void); 5 | 6 | #endif /* _ASM_X86_EMERGENCY_RESTART_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/entry_arch.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is designed to contain the BUILD_INTERRUPT specifications for 3 | * all of the extra named interrupt vectors used by the architecture. 
4 | * Usually this is the Inter Process Interrupts (IPIs) 5 | */ 6 | 7 | /* 8 | * The following vectors are part of the Linux architecture, there 9 | * is no hardware IRQ pin equivalent for them, they are triggered 10 | * through the ICC by us (IPIs) 11 | */ 12 | #ifdef CONFIG_SMP 13 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 14 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 15 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) 16 | BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR, 17 | smp_irq_move_cleanup_interrupt) 18 | BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt) 19 | #endif 20 | 21 | BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR) 22 | 23 | #ifdef CONFIG_HAVE_KVM 24 | BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR, 25 | smp_kvm_posted_intr_ipi) 26 | BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR, 27 | smp_kvm_posted_intr_wakeup_ipi) 28 | #endif 29 | 30 | /* 31 | * every pentium local APIC has two 'local interrupts', with a 32 | * soft-definable vector attached to both interrupts, one of 33 | * which is a timer interrupt, the other one is error counter 34 | * overflow. Linux uses the local APIC timer interrupt to get 35 | * a much simpler SMP time architecture: 36 | */ 37 | #ifdef CONFIG_X86_LOCAL_APIC 38 | 39 | BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) 40 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 41 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 42 | 43 | #ifdef CONFIG_IRQ_WORK 44 | BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR) 45 | #endif 46 | 47 | #ifdef CONFIG_X86_THERMAL_VECTOR 48 | BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) 49 | #endif 50 | 51 | #ifdef CONFIG_X86_MCE_THRESHOLD 52 | BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) 53 | #endif 54 | 55 | #ifdef CONFIG_X86_MCE_AMD 56 | BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR) 57 | #endif 58 | #endif 59 | -------------------------------------------------------------------------------- /arch/x86/include/asm/espfix.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_ESPFIX_H 2 | #define _ASM_X86_ESPFIX_H 3 | 4 | #ifdef CONFIG_X86_64 5 | 6 | #include 7 | 8 | DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); 9 | DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); 10 | 11 | extern void init_espfix_bsp(void); 12 | extern void init_espfix_ap(int cpu); 13 | 14 | #endif /* CONFIG_X86_64 */ 15 | 16 | #endif /* _ASM_X86_ESPFIX_H */ 17 | -------------------------------------------------------------------------------- /arch/x86/include/asm/exec.h: -------------------------------------------------------------------------------- 1 | /* define arch_align_stack() here */ 2 | -------------------------------------------------------------------------------- /arch/x86/include/asm/fb.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_FB_H 2 | #define _ASM_X86_FB_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, 9 | unsigned long off) 10 | { 11 | unsigned long prot; 12 | 13 | prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; 14 | if (boot_cpu_data.x86 > 3) 15 | pgprot_val(vma->vm_page_prot) = 16 | prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); 17 | } 18 | 19 | extern int fb_is_primary_device(struct fb_info 
*info); 20 | 21 | #endif /* _ASM_X86_FB_H */ 22 | -------------------------------------------------------------------------------- /arch/x86/include/asm/fpu/api.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 1994 Linus Torvalds 3 | * 4 | * Pentium III FXSR, SSE support 5 | * General FPU state handling cleanups 6 | * Gareth Hughes , May 2000 7 | * x86-64 work by Andi Kleen 2002 8 | */ 9 | 10 | #ifndef _ASM_X86_FPU_API_H 11 | #define _ASM_X86_FPU_API_H 12 | 13 | /* 14 | * Careful: __kernel_fpu_begin/end() must be called with preempt disabled 15 | * and they don't touch the preempt state on their own. 16 | * If you enable preemption after __kernel_fpu_begin(), preempt notifier 17 | * should call the __kernel_fpu_end() to prevent the kernel/user FPU 18 | * state from getting corrupted. KVM for example uses this model. 19 | * 20 | * All other cases use kernel_fpu_begin/end() which disable preemption 21 | * during kernel FPU usage. 22 | */ 23 | extern void __kernel_fpu_begin(void); 24 | extern void __kernel_fpu_end(void); 25 | extern void kernel_fpu_begin(void); 26 | extern void kernel_fpu_end(void); 27 | extern bool irq_fpu_usable(void); 28 | 29 | /* 30 | * Some instructions like VIA's padlock instructions generate a spurious 31 | * DNA fault but don't modify SSE registers. And these instructions 32 | * get used from interrupt context as well. To prevent these kernel instructions 33 | * in interrupt context interacting wrongly with other user/kernel fpu usage, we 34 | * should use them only in the context of irq_ts_save/restore() 35 | */ 36 | extern int irq_ts_save(void); 37 | extern void irq_ts_restore(int TS_state); 38 | 39 | /* 40 | * Query the presence of one or more xfeatures. Works on any legacy CPU as well. 41 | * 42 | * If 'feature_name' is set then put a human-readable description of 43 | * the feature there as well - this can be used to print error (or success) 44 | * messages. 45 | */ 46 | extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name); 47 | 48 | #endif /* _ASM_X86_FPU_API_H */ 49 | -------------------------------------------------------------------------------- /arch/x86/include/asm/fpu/regset.h: -------------------------------------------------------------------------------- 1 | /* 2 | * FPU regset handling methods: 3 | */ 4 | #ifndef _ASM_X86_FPU_REGSET_H 5 | #define _ASM_X86_FPU_REGSET_H 6 | 7 | #include 8 | 9 | extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active; 10 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, 11 | xstateregs_get; 12 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set, 13 | xstateregs_set; 14 | 15 | /* 16 | * xstateregs_active == regset_fpregs_active. Please refer to the comment 17 | * at the definition of regset_fpregs_active. 
18 | */ 19 | #define xstateregs_active regset_fpregs_active 20 | 21 | #endif /* _ASM_X86_FPU_REGSET_H */ 22 | -------------------------------------------------------------------------------- /arch/x86/include/asm/fpu/signal.h: -------------------------------------------------------------------------------- 1 | /* 2 | * x86 FPU signal frame handling methods: 3 | */ 4 | #ifndef _ASM_X86_FPU_SIGNAL_H 5 | #define _ASM_X86_FPU_SIGNAL_H 6 | 7 | #ifdef CONFIG_X86_64 8 | # include 9 | # include 10 | struct ksignal; 11 | int ia32_setup_rt_frame(int sig, struct ksignal *ksig, 12 | compat_sigset_t *set, struct pt_regs *regs); 13 | int ia32_setup_frame(int sig, struct ksignal *ksig, 14 | compat_sigset_t *set, struct pt_regs *regs); 15 | #else 16 | # define user_i387_ia32_struct user_i387_struct 17 | # define user32_fxsr_struct user_fxsr_struct 18 | # define ia32_setup_frame __setup_frame 19 | # define ia32_setup_rt_frame __setup_rt_frame 20 | #endif 21 | 22 | extern void convert_from_fxsr(struct user_i387_ia32_struct *env, 23 | struct task_struct *tsk); 24 | extern void convert_to_fxsr(struct task_struct *tsk, 25 | const struct user_i387_ia32_struct *env); 26 | 27 | unsigned long 28 | fpu__alloc_mathframe(unsigned long sp, int ia32_frame, 29 | unsigned long *buf_fx, unsigned long *size); 30 | 31 | extern void fpu__init_prepare_fx_sw_frame(void); 32 | 33 | #endif /* _ASM_X86_FPU_SIGNAL_H */ 34 | -------------------------------------------------------------------------------- /arch/x86/include/asm/fpu/xstate.h: -------------------------------------------------------------------------------- 1 | #ifndef __ASM_X86_XSAVE_H 2 | #define __ASM_X86_XSAVE_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | /* Bit 63 of XCR0 is reserved for future expansion */ 9 | #define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63))) 10 | 11 | #define XSTATE_CPUID 0x0000000d 12 | 13 | #define FXSAVE_SIZE 512 14 | 15 | #define XSAVE_HDR_SIZE 64 16 | #define XSAVE_HDR_OFFSET FXSAVE_SIZE 17 | 18 | #define XSAVE_YMM_SIZE 256 19 | #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) 20 | 21 | /* Supervisor features */ 22 | #define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT) 23 | 24 | /* Supported features which support lazy state saving */ 25 | #define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \ 26 | XFEATURE_MASK_SSE | \ 27 | XFEATURE_MASK_YMM | \ 28 | XFEATURE_MASK_OPMASK | \ 29 | XFEATURE_MASK_ZMM_Hi256 | \ 30 | XFEATURE_MASK_Hi16_ZMM) 31 | 32 | /* Supported features which require eager state saving */ 33 | #define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \ 34 | XFEATURE_MASK_BNDCSR | \ 35 | XFEATURE_MASK_PKRU) 36 | 37 | /* All currently supported features */ 38 | #define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER) 39 | 40 | #ifdef CONFIG_X86_64 41 | #define REX_PREFIX "0x48, " 42 | #else 43 | #define REX_PREFIX 44 | #endif 45 | 46 | extern u64 xfeatures_mask; 47 | extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; 48 | 49 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); 50 | 51 | void fpu__xstate_clear_all_cpu_caps(void); 52 | void *get_xsave_addr(struct xregs_state *xsave, int xstate); 53 | const void *get_xsave_field_ptr(int xstate_field); 54 | int using_compacted_format(void); 55 | int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, 56 | void __user *ubuf, struct xregs_state *xsave); 57 | int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, 58 | struct xregs_state *xsave); 59 | #endif 60 | 
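
(editorial usage note — illustrative, not part of the tree dumped above)

The kernel-internal FPU API declared in asm/fpu/api.h and the xfeature masks from asm/fpu/xstate.h are normally used together: probe for the vector state you need with cpu_has_xfeatures(), check irq_fpu_usable() if the code may run in interrupt context, and bracket the actual SIMD work with kernel_fpu_begin()/kernel_fpu_end(), as the comment in asm/fpu/api.h describes. The sketch below only illustrates that calling pattern under those assumptions; example_use_simd() is a hypothetical function, not code from this tree.

/* Illustrative sketch, not part of the dumped sources. */
#include <linux/kernel.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>

static void example_use_simd(void)
{
	const char *feature_name;

	/* Make sure the CPU can save/restore YMM (AVX) state at all. */
	if (!cpu_has_xfeatures(XFEATURE_MASK_YMM, &feature_name)) {
		pr_info("example: %s not supported, using scalar path\n",
			feature_name);
		return;
	}

	/* In (soft)irq context the FPU may already be in use. */
	if (!irq_fpu_usable())
		return;

	kernel_fpu_begin();	/* disables preemption, saves user FPU state */
	/* ... SSE/AVX instructions may be issued here ... */
	kernel_fpu_end();	/* restores state, re-enables preemption */
}
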
-------------------------------------------------------------------------------- /arch/x86/include/asm/frame.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_FRAME_H 2 | #define _ASM_X86_FRAME_H 3 | 4 | #include 5 | 6 | /* 7 | * These are stack frame creation macros. They should be used by every 8 | * callable non-leaf asm function to make kernel stack traces more reliable. 9 | */ 10 | 11 | #ifdef CONFIG_FRAME_POINTER 12 | 13 | #ifdef __ASSEMBLY__ 14 | 15 | .macro FRAME_BEGIN 16 | push %_ASM_BP 17 | _ASM_MOV %_ASM_SP, %_ASM_BP 18 | .endm 19 | 20 | .macro FRAME_END 21 | pop %_ASM_BP 22 | .endm 23 | 24 | #else /* !__ASSEMBLY__ */ 25 | 26 | #define FRAME_BEGIN \ 27 | "push %" _ASM_BP "\n" \ 28 | _ASM_MOV "%" _ASM_SP ", %" _ASM_BP "\n" 29 | 30 | #define FRAME_END "pop %" _ASM_BP "\n" 31 | 32 | #endif /* __ASSEMBLY__ */ 33 | 34 | #define FRAME_OFFSET __ASM_SEL(4, 8) 35 | 36 | #else /* !CONFIG_FRAME_POINTER */ 37 | 38 | #define FRAME_BEGIN 39 | #define FRAME_END 40 | #define FRAME_OFFSET 0 41 | 42 | #endif /* CONFIG_FRAME_POINTER */ 43 | 44 | #endif /* _ASM_X86_FRAME_H */ 45 | -------------------------------------------------------------------------------- /arch/x86/include/asm/ftrace.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_FTRACE_H 2 | #define _ASM_X86_FTRACE_H 3 | 4 | #ifdef CONFIG_FUNCTION_TRACER 5 | #ifdef CC_USING_FENTRY 6 | # define MCOUNT_ADDR ((unsigned long)(__fentry__)) 7 | #else 8 | # define MCOUNT_ADDR ((unsigned long)(mcount)) 9 | #endif 10 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 11 | 12 | #ifdef CONFIG_DYNAMIC_FTRACE 13 | #define ARCH_SUPPORTS_FTRACE_OPS 1 14 | #endif 15 | 16 | #ifndef __ASSEMBLY__ 17 | extern void mcount(void); 18 | extern atomic_t modifying_ftrace_code; 19 | extern void __fentry__(void); 20 | 21 | static inline unsigned long ftrace_call_adjust(unsigned long addr) 22 | { 23 | /* 24 | * addr is the address of the mcount call instruction. 25 | * recordmcount does the necessary offset calculation. 26 | */ 27 | return addr; 28 | } 29 | 30 | #ifdef CONFIG_DYNAMIC_FTRACE 31 | 32 | struct dyn_arch_ftrace { 33 | /* No extra data needed for x86 */ 34 | }; 35 | 36 | int ftrace_int3_handler(struct pt_regs *regs); 37 | 38 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR 39 | 40 | #endif /* CONFIG_DYNAMIC_FTRACE */ 41 | #endif /* __ASSEMBLY__ */ 42 | #endif /* CONFIG_FUNCTION_TRACER */ 43 | 44 | 45 | #if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) 46 | 47 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) 48 | #include 49 | 50 | /* 51 | * Because ia32 syscalls do not map to x86_64 syscall numbers 52 | * this screws up the trace output when tracing a ia32 task. 53 | * Instead of reporting bogus syscalls, just do not trace them. 54 | * 55 | * If the user really wants these, then they should use the 56 | * raw syscall tracepoints with filtering. 
57 | */ 58 | #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 59 | static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) 60 | { 61 | if (in_compat_syscall()) 62 | return true; 63 | return false; 64 | } 65 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ 66 | #endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ 67 | 68 | #endif /* _ASM_X86_FTRACE_H */ 69 | -------------------------------------------------------------------------------- /arch/x86/include/asm/genapic.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/asm/geode.h: -------------------------------------------------------------------------------- 1 | /* 2 | * AMD Geode definitions 3 | * Copyright (C) 2006, Advanced Micro Devices, Inc. 4 | * 5 | * This program is free software; you can redistribute it and/or 6 | * modify it under the terms of version 2 of the GNU General Public License 7 | * as published by the Free Software Foundation. 8 | */ 9 | 10 | #ifndef _ASM_X86_GEODE_H 11 | #define _ASM_X86_GEODE_H 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | static inline int is_geode_gx(void) 18 | { 19 | return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) && 20 | (boot_cpu_data.x86 == 5) && 21 | (boot_cpu_data.x86_model == 5)); 22 | } 23 | 24 | static inline int is_geode_lx(void) 25 | { 26 | return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && 27 | (boot_cpu_data.x86 == 5) && 28 | (boot_cpu_data.x86_model == 10)); 29 | } 30 | 31 | static inline int is_geode(void) 32 | { 33 | return (is_geode_gx() || is_geode_lx()); 34 | } 35 | 36 | #endif /* _ASM_X86_GEODE_H */ 37 | -------------------------------------------------------------------------------- /arch/x86/include/asm/hardirq.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_HARDIRQ_H 2 | #define _ASM_X86_HARDIRQ_H 3 | 4 | #include 5 | #include 6 | 7 | typedef struct { 8 | unsigned int __softirq_pending; 9 | unsigned int __nmi_count; /* arch dependent */ 10 | #ifdef CONFIG_X86_LOCAL_APIC 11 | unsigned int apic_timer_irqs; /* arch dependent */ 12 | unsigned int irq_spurious_count; 13 | unsigned int icr_read_retry_count; 14 | #endif 15 | #ifdef CONFIG_HAVE_KVM 16 | unsigned int kvm_posted_intr_ipis; 17 | unsigned int kvm_posted_intr_wakeup_ipis; 18 | #endif 19 | unsigned int x86_platform_ipis; /* arch dependent */ 20 | unsigned int apic_perf_irqs; 21 | unsigned int apic_irq_work_irqs; 22 | #ifdef CONFIG_SMP 23 | unsigned int irq_resched_count; 24 | unsigned int irq_call_count; 25 | unsigned int irq_tlb_count; 26 | #endif 27 | #ifdef CONFIG_X86_THERMAL_VECTOR 28 | unsigned int irq_thermal_count; 29 | #endif 30 | #ifdef CONFIG_X86_MCE_THRESHOLD 31 | unsigned int irq_threshold_count; 32 | #endif 33 | #ifdef CONFIG_X86_MCE_AMD 34 | unsigned int irq_deferred_error_count; 35 | #endif 36 | #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) 37 | unsigned int irq_hv_callback_count; 38 | #endif 39 | } ____cacheline_aligned irq_cpustat_t; 40 | 41 | DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 42 | 43 | #define __ARCH_IRQ_STAT 44 | 45 | #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) 46 | 47 | #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) 48 | 49 | #define __ARCH_SET_SOFTIRQ_PENDING 50 | 51 | #define set_softirq_pending(x) \ 52 | this_cpu_write(irq_stat.__softirq_pending, (x)) 53 | #define or_softirq_pending(x) 
this_cpu_or(irq_stat.__softirq_pending, (x)) 54 | 55 | extern void ack_bad_irq(unsigned int irq); 56 | 57 | extern u64 arch_irq_stat_cpu(unsigned int cpu); 58 | #define arch_irq_stat_cpu arch_irq_stat_cpu 59 | 60 | extern u64 arch_irq_stat(void); 61 | #define arch_irq_stat arch_irq_stat 62 | 63 | #endif /* _ASM_X86_HARDIRQ_H */ 64 | -------------------------------------------------------------------------------- /arch/x86/include/asm/hypertransport.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_HYPERTRANSPORT_H 2 | #define _ASM_X86_HYPERTRANSPORT_H 3 | 4 | /* 5 | * Constants for x86 Hypertransport Interrupts. 6 | */ 7 | 8 | #define HT_IRQ_LOW_BASE 0xf8000000 9 | 10 | #define HT_IRQ_LOW_VECTOR_SHIFT 16 11 | #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 12 | #define HT_IRQ_LOW_VECTOR(v) \ 13 | (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) 14 | 15 | #define HT_IRQ_LOW_DEST_ID_SHIFT 8 16 | #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 17 | #define HT_IRQ_LOW_DEST_ID(v) \ 18 | (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) 19 | 20 | #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 21 | #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 22 | 23 | #define HT_IRQ_LOW_RQEOI_EDGE 0x0000000 24 | #define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020 25 | 26 | 27 | #define HT_IRQ_LOW_MT_FIXED 0x0000000 28 | #define HT_IRQ_LOW_MT_ARBITRATED 0x0000004 29 | #define HT_IRQ_LOW_MT_SMI 0x0000008 30 | #define HT_IRQ_LOW_MT_NMI 0x000000c 31 | #define HT_IRQ_LOW_MT_INIT 0x0000010 32 | #define HT_IRQ_LOW_MT_STARTUP 0x0000014 33 | #define HT_IRQ_LOW_MT_EXTINT 0x0000018 34 | #define HT_IRQ_LOW_MT_LINT1 0x000008c 35 | #define HT_IRQ_LOW_MT_LINT0 0x0000098 36 | 37 | #define HT_IRQ_LOW_IRQ_MASKED 0x0000001 38 | 39 | 40 | #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 41 | #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff 42 | #define HT_IRQ_HIGH_DEST_ID(v) \ 43 | ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) 44 | 45 | #endif /* _ASM_X86_HYPERTRANSPORT_H */ 46 | -------------------------------------------------------------------------------- /arch/x86/include/asm/i8259.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_I8259_H 2 | #define _ASM_X86_I8259_H 3 | 4 | #include 5 | 6 | extern unsigned int cached_irq_mask; 7 | 8 | #define __byte(x, y) (((unsigned char *)&(y))[x]) 9 | #define cached_master_mask (__byte(0, cached_irq_mask)) 10 | #define cached_slave_mask (__byte(1, cached_irq_mask)) 11 | 12 | /* i8259A PIC registers */ 13 | #define PIC_MASTER_CMD 0x20 14 | #define PIC_MASTER_IMR 0x21 15 | #define PIC_MASTER_ISR PIC_MASTER_CMD 16 | #define PIC_MASTER_POLL PIC_MASTER_ISR 17 | #define PIC_MASTER_OCW3 PIC_MASTER_ISR 18 | #define PIC_SLAVE_CMD 0xa0 19 | #define PIC_SLAVE_IMR 0xa1 20 | 21 | /* i8259A PIC related value */ 22 | #define PIC_CASCADE_IR 2 23 | #define MASTER_ICW4_DEFAULT 0x01 24 | #define SLAVE_ICW4_DEFAULT 0x01 25 | #define PIC_ICW4_AEOI 2 26 | 27 | extern raw_spinlock_t i8259A_lock; 28 | 29 | /* the PIC may need a careful delay on some platforms, hence specific calls */ 30 | static inline unsigned char inb_pic(unsigned int port) 31 | { 32 | unsigned char value = inb(port); 33 | 34 | /* 35 | * delay for some accesses to PIC on motherboard or in chipset 36 | * must be at least one microsecond, so be safe here: 37 | */ 38 | udelay(2); 39 | 40 | return value; 41 | } 42 | 43 | static inline void outb_pic(unsigned char value, unsigned int port) 44 | { 45 | outb(value, port); 46 | /* 47 | * delay for 
some accesses to PIC on motherboard or in chipset 48 | * must be at least one microsecond, so be safe here: 49 | */ 50 | udelay(2); 51 | } 52 | 53 | extern struct irq_chip i8259A_chip; 54 | 55 | struct legacy_pic { 56 | int nr_legacy_irqs; 57 | struct irq_chip *chip; 58 | void (*mask)(unsigned int irq); 59 | void (*unmask)(unsigned int irq); 60 | void (*mask_all)(void); 61 | void (*restore_mask)(void); 62 | void (*init)(int auto_eoi); 63 | int (*probe)(void); 64 | int (*irq_pending)(unsigned int irq); 65 | void (*make_irq)(unsigned int irq); 66 | }; 67 | 68 | extern struct legacy_pic *legacy_pic; 69 | extern struct legacy_pic null_legacy_pic; 70 | 71 | static inline int nr_legacy_irqs(void) 72 | { 73 | return legacy_pic->nr_legacy_irqs; 74 | } 75 | 76 | #endif /* _ASM_X86_I8259_H */ 77 | -------------------------------------------------------------------------------- /arch/x86/include/asm/ia32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IA32_H 2 | #define _ASM_X86_IA32_H 3 | 4 | 5 | #ifdef CONFIG_IA32_EMULATION 6 | 7 | #include 8 | 9 | /* 10 | * 32 bit structures for IA32 support. 11 | */ 12 | 13 | #include 14 | 15 | /* signal.h */ 16 | 17 | struct ucontext_ia32 { 18 | unsigned int uc_flags; 19 | unsigned int uc_link; 20 | compat_stack_t uc_stack; 21 | struct sigcontext_32 uc_mcontext; 22 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ 23 | }; 24 | 25 | /* This matches struct stat64 in glibc2.2, hence the absolutely 26 | * insane amounts of padding around dev_t's. 27 | */ 28 | struct stat64 { 29 | unsigned long long st_dev; 30 | unsigned char __pad0[4]; 31 | 32 | #define STAT64_HAS_BROKEN_ST_INO 1 33 | unsigned int __st_ino; 34 | 35 | unsigned int st_mode; 36 | unsigned int st_nlink; 37 | 38 | unsigned int st_uid; 39 | unsigned int st_gid; 40 | 41 | unsigned long long st_rdev; 42 | unsigned char __pad3[4]; 43 | 44 | long long st_size; 45 | unsigned int st_blksize; 46 | 47 | long long st_blocks;/* Number 512-byte blocks allocated */ 48 | 49 | unsigned st_atime; 50 | unsigned st_atime_nsec; 51 | unsigned st_mtime; 52 | unsigned st_mtime_nsec; 53 | unsigned st_ctime; 54 | unsigned st_ctime_nsec; 55 | 56 | unsigned long long st_ino; 57 | } __attribute__((packed)); 58 | 59 | #define IA32_STACK_TOP IA32_PAGE_OFFSET 60 | 61 | #ifdef __KERNEL__ 62 | struct linux_binprm; 63 | extern int ia32_setup_arg_pages(struct linux_binprm *bprm, 64 | unsigned long stack_top, int exec_stack); 65 | struct mm_struct; 66 | extern void ia32_pick_mmap_layout(struct mm_struct *mm); 67 | 68 | #endif 69 | 70 | #endif /* !CONFIG_IA32_SUPPORT */ 71 | 72 | #endif /* _ASM_X86_IA32_H */ 73 | -------------------------------------------------------------------------------- /arch/x86/include/asm/ia32_unistd.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IA32_UNISTD_H 2 | #define _ASM_X86_IA32_UNISTD_H 3 | 4 | /* 5 | * This file contains the system call numbers of the ia32 compat ABI, 6 | * this is for the kernel only. 
7 | */ 8 | #define __SYSCALL_ia32_NR(x) (x) 9 | #include 10 | 11 | #endif /* _ASM_X86_IA32_UNISTD_H */ 12 | -------------------------------------------------------------------------------- /arch/x86/include/asm/idle.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IDLE_H 2 | #define _ASM_X86_IDLE_H 3 | 4 | #define IDLE_START 1 5 | #define IDLE_END 2 6 | 7 | struct notifier_block; 8 | void idle_notifier_register(struct notifier_block *n); 9 | void idle_notifier_unregister(struct notifier_block *n); 10 | 11 | #ifdef CONFIG_X86_64 12 | void enter_idle(void); 13 | void exit_idle(void); 14 | #else /* !CONFIG_X86_64 */ 15 | static inline void enter_idle(void) { } 16 | static inline void exit_idle(void) { } 17 | static inline void __exit_idle(void) { } 18 | #endif /* CONFIG_X86_64 */ 19 | 20 | void amd_e400_remove_cpu(int cpu); 21 | 22 | #endif /* _ASM_X86_IDLE_H */ 23 | -------------------------------------------------------------------------------- /arch/x86/include/asm/imr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * imr.h: Isolated Memory Region API 3 | * 4 | * Copyright(c) 2013 Intel Corporation. 5 | * Copyright(c) 2015 Bryan O'Donoghue 6 | * 7 | * This program is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU General Public License 9 | * as published by the Free Software Foundation; version 2 10 | * of the License. 11 | */ 12 | #ifndef _IMR_H 13 | #define _IMR_H 14 | 15 | #include 16 | 17 | /* 18 | * IMR agent access mask bits 19 | * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register 20 | * definitions. 21 | */ 22 | #define IMR_ESRAM_FLUSH BIT(31) 23 | #define IMR_CPU_SNOOP BIT(30) /* Applicable only to write */ 24 | #define IMR_RMU BIT(29) 25 | #define IMR_VC1_SAI_ID3 BIT(15) 26 | #define IMR_VC1_SAI_ID2 BIT(14) 27 | #define IMR_VC1_SAI_ID1 BIT(13) 28 | #define IMR_VC1_SAI_ID0 BIT(12) 29 | #define IMR_VC0_SAI_ID3 BIT(11) 30 | #define IMR_VC0_SAI_ID2 BIT(10) 31 | #define IMR_VC0_SAI_ID1 BIT(9) 32 | #define IMR_VC0_SAI_ID0 BIT(8) 33 | #define IMR_CPU_0 BIT(1) /* SMM mode */ 34 | #define IMR_CPU BIT(0) /* Non SMM mode */ 35 | #define IMR_ACCESS_NONE 0 36 | 37 | /* 38 | * Read/Write access-all bits here include some reserved bits 39 | * These are the values firmware uses and are accepted by hardware. 40 | * The kernel defines read/write access-all in the same way as firmware 41 | * in order to have a consistent and crisp definition across firmware, 42 | * bootloader and kernel. 
43 | */ 44 | #define IMR_READ_ACCESS_ALL 0xBFFFFFFF 45 | #define IMR_WRITE_ACCESS_ALL 0xFFFFFFFF 46 | 47 | /* Number of IMRs provided by Quark X1000 SoC */ 48 | #define QUARK_X1000_IMR_MAX 0x08 49 | #define QUARK_X1000_IMR_REGBASE 0x40 50 | 51 | /* IMR alignment bits - only bits 31:10 are checked for IMR validity */ 52 | #define IMR_ALIGN 0x400 53 | #define IMR_MASK (IMR_ALIGN - 1) 54 | 55 | int imr_add_range(phys_addr_t base, size_t size, 56 | unsigned int rmask, unsigned int wmask); 57 | 58 | int imr_remove_range(phys_addr_t base, size_t size); 59 | 60 | #endif /* _IMR_H */ 61 | -------------------------------------------------------------------------------- /arch/x86/include/asm/inat_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_INAT_TYPES_H 2 | #define _ASM_X86_INAT_TYPES_H 3 | /* 4 | * x86 instruction attributes 5 | * 6 | * Written by Masami Hiramatsu 7 | * 8 | * This program is free software; you can redistribute it and/or modify 9 | * it under the terms of the GNU General Public License as published by 10 | * the Free Software Foundation; either version 2 of the License, or 11 | * (at your option) any later version. 12 | * 13 | * This program is distributed in the hope that it will be useful, 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | * GNU General Public License for more details. 17 | * 18 | * You should have received a copy of the GNU General Public License 19 | * along with this program; if not, write to the Free Software 20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 | * 22 | */ 23 | 24 | /* Instruction attributes */ 25 | typedef unsigned int insn_attr_t; 26 | typedef unsigned char insn_byte_t; 27 | typedef signed int insn_value_t; 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /arch/x86/include/asm/init.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_INIT_H 2 | #define _ASM_X86_INIT_H 3 | 4 | struct x86_mapping_info { 5 | void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ 6 | void *context; /* context for alloc_pgt_page */ 7 | unsigned long pmd_flag; /* page flag for PMD entry */ 8 | unsigned long offset; /* ident mapping offset */ 9 | }; 10 | 11 | int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 12 | unsigned long pstart, unsigned long pend); 13 | 14 | #endif /* _ASM_X86_INIT_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/intel_mid_vrtc.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTEL_MID_VRTC_H 2 | #define _INTEL_MID_VRTC_H 3 | 4 | extern unsigned char vrtc_cmos_read(unsigned char reg); 5 | extern void vrtc_cmos_write(unsigned char val, unsigned char reg); 6 | extern void vrtc_get_time(struct timespec *now); 7 | extern int vrtc_set_mmss(const struct timespec *now); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /arch/x86/include/asm/intel_pmc_ipc.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_INTEL_PMC_IPC_H_ 2 | #define _ASM_X86_INTEL_PMC_IPC_H_ 3 | 4 | /* Commands */ 5 | #define PMC_IPC_PMIC_ACCESS 0xFF 6 | #define PMC_IPC_PMIC_ACCESS_READ 0x0 7 | #define PMC_IPC_PMIC_ACCESS_WRITE 0x1 8 | 
#define PMC_IPC_USB_PWR_CTRL 0xF0 9 | #define PMC_IPC_PMIC_BLACKLIST_SEL 0xEF 10 | #define PMC_IPC_PHY_CONFIG 0xEE 11 | #define PMC_IPC_NORTHPEAK_CTRL 0xED 12 | #define PMC_IPC_PM_DEBUG 0xEC 13 | #define PMC_IPC_PMC_TELEMTRY 0xEB 14 | #define PMC_IPC_PMC_FW_MSG_CTRL 0xEA 15 | 16 | /* IPC return code */ 17 | #define IPC_ERR_NONE 0 18 | #define IPC_ERR_CMD_NOT_SUPPORTED 1 19 | #define IPC_ERR_CMD_NOT_SERVICED 2 20 | #define IPC_ERR_UNABLE_TO_SERVICE 3 21 | #define IPC_ERR_CMD_INVALID 4 22 | #define IPC_ERR_CMD_FAILED 5 23 | #define IPC_ERR_EMSECURITY 6 24 | #define IPC_ERR_UNSIGNEDKERNEL 7 25 | 26 | #if IS_ENABLED(CONFIG_INTEL_PMC_IPC) 27 | 28 | int intel_pmc_ipc_simple_command(int cmd, int sub); 29 | int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, 30 | u32 *out, u32 outlen, u32 dptr, u32 sptr); 31 | int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 32 | u32 *out, u32 outlen); 33 | 34 | #else 35 | 36 | static inline int intel_pmc_ipc_simple_command(int cmd, int sub) 37 | { 38 | return -EINVAL; 39 | } 40 | 41 | static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, 42 | u32 *out, u32 outlen, u32 dptr, u32 sptr) 43 | { 44 | return -EINVAL; 45 | } 46 | 47 | static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 48 | u32 *out, u32 outlen) 49 | { 50 | return -EINVAL; 51 | } 52 | 53 | #endif /*CONFIG_INTEL_PMC_IPC*/ 54 | 55 | #endif 56 | -------------------------------------------------------------------------------- /arch/x86/include/asm/intel_pt.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_INTEL_PT_H 2 | #define _ASM_X86_INTEL_PT_H 3 | 4 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) 5 | void cpu_emergency_stop_pt(void); 6 | #else 7 | static inline void cpu_emergency_stop_pt(void) {} 8 | #endif 9 | 10 | #endif /* _ASM_X86_INTEL_PT_H */ 11 | -------------------------------------------------------------------------------- /arch/x86/include/asm/iomap.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IOMAP_H 2 | #define _ASM_X86_IOMAP_H 3 | 4 | /* 5 | * Copyright © 2008 Ingo Molnar 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation; either version 2 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful, but 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 | * General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License along 18 | * with this program; if not, write to the Free Software Foundation, Inc., 19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
20 | */ 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | void __iomem * 30 | iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); 31 | 32 | void 33 | iounmap_atomic(void __iomem *kvaddr); 34 | 35 | int 36 | iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); 37 | 38 | void 39 | iomap_free(resource_size_t base, unsigned long size); 40 | 41 | #endif /* _ASM_X86_IOMAP_H */ 42 | -------------------------------------------------------------------------------- /arch/x86/include/asm/iommu.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IOMMU_H 2 | #define _ASM_X86_IOMMU_H 3 | 4 | extern struct dma_map_ops nommu_dma_ops; 5 | extern int force_iommu, no_iommu; 6 | extern int iommu_detected; 7 | extern int iommu_pass_through; 8 | 9 | /* 10 seconds */ 10 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) 11 | 12 | #endif /* _ASM_X86_IOMMU_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/irq.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_IRQ_H 2 | #define _ASM_X86_IRQ_H 3 | /* 4 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar 5 | * 6 | * IRQ/IPI changes taken from work by Thomas Radke 7 | * 8 | */ 9 | 10 | #include 11 | #include 12 | 13 | static inline int irq_canonicalize(int irq) 14 | { 15 | return ((irq == 2) ? 9 : irq); 16 | } 17 | 18 | #ifdef CONFIG_X86_32 19 | extern void irq_ctx_init(int cpu); 20 | #else 21 | # define irq_ctx_init(cpu) do { } while (0) 22 | #endif 23 | 24 | #define __ARCH_HAS_DO_SOFTIRQ 25 | 26 | struct irq_desc; 27 | 28 | #ifdef CONFIG_HOTPLUG_CPU 29 | #include 30 | extern int check_irq_vectors_for_cpu_disable(void); 31 | extern void fixup_irqs(void); 32 | extern void irq_force_complete_move(struct irq_desc *desc); 33 | #endif 34 | 35 | #ifdef CONFIG_HAVE_KVM 36 | extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); 37 | #endif 38 | 39 | extern void (*x86_platform_ipi_callback)(void); 40 | extern void native_init_IRQ(void); 41 | 42 | extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs); 43 | 44 | extern __visible unsigned int do_IRQ(struct pt_regs *regs); 45 | 46 | /* Interrupt vector management */ 47 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 48 | extern int vector_used_by_percpu_irq(unsigned int vector); 49 | 50 | extern void init_ISA_irqs(void); 51 | 52 | #ifdef CONFIG_X86_LOCAL_APIC 53 | void arch_trigger_all_cpu_backtrace(bool); 54 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 55 | #endif 56 | 57 | #endif /* _ASM_X86_IRQ_H */ 58 | -------------------------------------------------------------------------------- /arch/x86/include/asm/irq_regs.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Per-cpu current frame pointer - the location of the last exception frame on 3 | * the stack, stored in the per-cpu area. 
4 | * 5 | * Jeremy Fitzhardinge 6 | */ 7 | #ifndef _ASM_X86_IRQ_REGS_H 8 | #define _ASM_X86_IRQ_REGS_H 9 | 10 | #include 11 | 12 | #define ARCH_HAS_OWN_IRQ_REGS 13 | 14 | DECLARE_PER_CPU(struct pt_regs *, irq_regs); 15 | 16 | static inline struct pt_regs *get_irq_regs(void) 17 | { 18 | return this_cpu_read(irq_regs); 19 | } 20 | 21 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) 22 | { 23 | struct pt_regs *old_regs; 24 | 25 | old_regs = get_irq_regs(); 26 | this_cpu_write(irq_regs, new_regs); 27 | 28 | return old_regs; 29 | } 30 | 31 | #endif /* _ASM_X86_IRQ_REGS_32_H */ 32 | -------------------------------------------------------------------------------- /arch/x86/include/asm/irq_work.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_IRQ_WORK_H 2 | #define _ASM_IRQ_WORK_H 3 | 4 | #include 5 | 6 | static inline bool arch_irq_work_has_interrupt(void) 7 | { 8 | return boot_cpu_has(X86_FEATURE_APIC); 9 | } 10 | 11 | #endif /* _ASM_IRQ_WORK_H */ 12 | -------------------------------------------------------------------------------- /arch/x86/include/asm/irqdomain.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_IRQDOMAIN_H 2 | #define _ASM_IRQDOMAIN_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef CONFIG_X86_LOCAL_APIC 8 | enum { 9 | /* Allocate contiguous CPU vectors */ 10 | X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1, 11 | }; 12 | 13 | extern struct irq_domain *x86_vector_domain; 14 | 15 | extern void init_irq_alloc_info(struct irq_alloc_info *info, 16 | const struct cpumask *mask); 17 | extern void copy_irq_alloc_info(struct irq_alloc_info *dst, 18 | struct irq_alloc_info *src); 19 | #endif /* CONFIG_X86_LOCAL_APIC */ 20 | 21 | #ifdef CONFIG_X86_IO_APIC 22 | struct device_node; 23 | struct irq_data; 24 | 25 | enum ioapic_domain_type { 26 | IOAPIC_DOMAIN_INVALID, 27 | IOAPIC_DOMAIN_LEGACY, 28 | IOAPIC_DOMAIN_STRICT, 29 | IOAPIC_DOMAIN_DYNAMIC, 30 | }; 31 | 32 | struct ioapic_domain_cfg { 33 | enum ioapic_domain_type type; 34 | const struct irq_domain_ops *ops; 35 | struct device_node *dev; 36 | }; 37 | 38 | extern const struct irq_domain_ops mp_ioapic_irqdomain_ops; 39 | 40 | extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 41 | unsigned int nr_irqs, void *arg); 42 | extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, 43 | unsigned int nr_irqs); 44 | extern void mp_irqdomain_activate(struct irq_domain *domain, 45 | struct irq_data *irq_data); 46 | extern void mp_irqdomain_deactivate(struct irq_domain *domain, 47 | struct irq_data *irq_data); 48 | extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); 49 | #endif /* CONFIG_X86_IO_APIC */ 50 | 51 | #ifdef CONFIG_PCI_MSI 52 | extern void arch_init_msi_domain(struct irq_domain *domain); 53 | #else 54 | static inline void arch_init_msi_domain(struct irq_domain *domain) { } 55 | #endif 56 | 57 | #ifdef CONFIG_HT_IRQ 58 | extern void arch_init_htirq_domain(struct irq_domain *domain); 59 | #else 60 | static inline void arch_init_htirq_domain(struct irq_domain *domain) { } 61 | #endif 62 | 63 | #endif 64 | -------------------------------------------------------------------------------- /arch/x86/include/asm/ist.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Include file for the interface to IST BIOS 3 | * Copyright 2002 Andy Grover 4 | * 5 | * This program is free software; you can redistribute it and/or modify it 6 | * under 
the terms of the GNU General Public License as published by the 7 | * Free Software Foundation; either version 2, or (at your option) any 8 | * later version. 9 | * 10 | * This program is distributed in the hope that it will be useful, but 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 | * General Public License for more details. 14 | */ 15 | #ifndef _ASM_X86_IST_H 16 | #define _ASM_X86_IST_H 17 | 18 | #include 19 | 20 | 21 | extern struct ist_info ist_info; 22 | 23 | #endif /* _ASM_X86_IST_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kasan.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KASAN_H 2 | #define _ASM_X86_KASAN_H 3 | 4 | #include 5 | #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) 6 | 7 | /* 8 | * Compiler uses shadow offset assuming that addresses start 9 | * from 0. Kernel addresses don't start from 0, so shadow 10 | * for kernel really starts from compiler's shadow offset + 11 | * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT 12 | */ 13 | #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \ 14 | (0xffff800000000000ULL >> 3)) 15 | /* 47 bits for kernel address -> (47 - 3) bits for shadow */ 16 | #define KASAN_SHADOW_END (KASAN_SHADOW_START + (1ULL << (47 - 3))) 17 | 18 | #ifndef __ASSEMBLY__ 19 | 20 | #ifdef CONFIG_KASAN 21 | void __init kasan_early_init(void); 22 | void __init kasan_init(void); 23 | #else 24 | static inline void kasan_early_init(void) { } 25 | static inline void kasan_init(void) { } 26 | #endif 27 | 28 | #endif 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kaslr.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_KASLR_H_ 2 | #define _ASM_KASLR_H_ 3 | 4 | unsigned long kaslr_get_random_long(const char *purpose); 5 | 6 | #ifdef CONFIG_RANDOMIZE_MEMORY 7 | extern unsigned long page_offset_base; 8 | extern unsigned long vmalloc_base; 9 | 10 | void kernel_randomize_memory(void); 11 | #else 12 | static inline void kernel_randomize_memory(void) { } 13 | #endif /* CONFIG_RANDOMIZE_MEMORY */ 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kbdleds.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KBDLEDS_H 2 | #define _ASM_X86_KBDLEDS_H 3 | 4 | /* 5 | * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on. 6 | * This seems a good reason to start with NumLock off. That's why on X86 we 7 | * ask the bios for the correct state. 8 | */ 9 | 10 | #include 11 | 12 | static inline int kbd_defleds(void) 13 | { 14 | return boot_params.kbd_status & 0x20 ? (1 << VC_NUMLOCK) : 0; 15 | } 16 | 17 | #endif /* _ASM_X86_KBDLEDS_H */ 18 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kdebug.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KDEBUG_H 2 | #define _ASM_X86_KDEBUG_H 3 | 4 | #include 5 | 6 | struct pt_regs; 7 | 8 | /* Grossly misnamed. 
*/ 9 | enum die_val { 10 | DIE_OOPS = 1, 11 | DIE_INT3, 12 | DIE_DEBUG, 13 | DIE_PANIC, 14 | DIE_NMI, 15 | DIE_DIE, 16 | DIE_KERNELDEBUG, 17 | DIE_TRAP, 18 | DIE_GPF, 19 | DIE_CALL, 20 | DIE_PAGE_FAULT, 21 | DIE_NMIUNKNOWN, 22 | }; 23 | 24 | extern void printk_address(unsigned long address); 25 | extern void die(const char *, struct pt_regs *,long); 26 | extern int __must_check __die(const char *, struct pt_regs *, long); 27 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, 28 | unsigned long *sp, unsigned long bp); 29 | extern void show_stack_regs(struct pt_regs *regs); 30 | extern void __show_regs(struct pt_regs *regs, int all); 31 | extern unsigned long oops_begin(void); 32 | extern void oops_end(unsigned long, struct pt_regs *, int signr); 33 | 34 | #endif /* _ASM_X86_KDEBUG_H */ 35 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kexec-bzimage64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_KEXEC_BZIMAGE64_H 2 | #define _ASM_KEXEC_BZIMAGE64_H 3 | 4 | extern struct kexec_file_ops kexec_bzImage64_ops; 5 | 6 | #endif /* _ASM_KEXE_BZIMAGE64_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kmap_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KMAP_TYPES_H 2 | #define _ASM_X86_KMAP_TYPES_H 3 | 4 | #if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) 5 | #define __WITH_KM_FENCE 6 | #endif 7 | 8 | #include 9 | 10 | #undef __WITH_KM_FENCE 11 | 12 | #endif /* _ASM_X86_KMAP_TYPES_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kmemcheck.h: -------------------------------------------------------------------------------- 1 | #ifndef ASM_X86_KMEMCHECK_H 2 | #define ASM_X86_KMEMCHECK_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef CONFIG_KMEMCHECK 8 | bool kmemcheck_active(struct pt_regs *regs); 9 | 10 | void kmemcheck_show(struct pt_regs *regs); 11 | void kmemcheck_hide(struct pt_regs *regs); 12 | 13 | bool kmemcheck_fault(struct pt_regs *regs, 14 | unsigned long address, unsigned long error_code); 15 | bool kmemcheck_trap(struct pt_regs *regs); 16 | #else 17 | static inline bool kmemcheck_active(struct pt_regs *regs) 18 | { 19 | return false; 20 | } 21 | 22 | static inline void kmemcheck_show(struct pt_regs *regs) 23 | { 24 | } 25 | 26 | static inline void kmemcheck_hide(struct pt_regs *regs) 27 | { 28 | } 29 | 30 | static inline bool kmemcheck_fault(struct pt_regs *regs, 31 | unsigned long address, unsigned long error_code) 32 | { 33 | return false; 34 | } 35 | 36 | static inline bool kmemcheck_trap(struct pt_regs *regs) 37 | { 38 | return false; 39 | } 40 | #endif /* CONFIG_KMEMCHECK */ 41 | 42 | #endif 43 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kvm_guest.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KVM_GUEST_H 2 | #define _ASM_X86_KVM_GUEST_H 3 | 4 | int kvm_setup_vsyscall_timeinfo(void); 5 | 6 | #endif /* _ASM_X86_KVM_GUEST_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/kvm_page_track.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KVM_PAGE_TRACK_H 2 | #define _ASM_X86_KVM_PAGE_TRACK_H 3 | 4 | enum kvm_page_track_mode { 5 | 
KVM_PAGE_TRACK_WRITE, 6 | KVM_PAGE_TRACK_MAX, 7 | }; 8 | 9 | /* 10 | * The notifier represented by @kvm_page_track_notifier_node is linked into 11 | * the head which will be notified when guest is triggering the track event. 12 | * 13 | * Write access on the head is protected by kvm->mmu_lock, read access 14 | * is protected by track_srcu. 15 | */ 16 | struct kvm_page_track_notifier_head { 17 | struct srcu_struct track_srcu; 18 | struct hlist_head track_notifier_list; 19 | }; 20 | 21 | struct kvm_page_track_notifier_node { 22 | struct hlist_node node; 23 | 24 | /* 25 | * It is called when guest is writing the write-tracked page 26 | * and write emulation is finished at that time. 27 | * 28 | * @vcpu: the vcpu where the write access happened. 29 | * @gpa: the physical address written by guest. 30 | * @new: the data was written to the address. 31 | * @bytes: the written length. 32 | */ 33 | void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, 34 | int bytes); 35 | }; 36 | 37 | void kvm_page_track_init(struct kvm *kvm); 38 | 39 | void kvm_page_track_free_memslot(struct kvm_memory_slot *free, 40 | struct kvm_memory_slot *dont); 41 | int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, 42 | unsigned long npages); 43 | 44 | void kvm_slot_page_track_add_page(struct kvm *kvm, 45 | struct kvm_memory_slot *slot, gfn_t gfn, 46 | enum kvm_page_track_mode mode); 47 | void kvm_slot_page_track_remove_page(struct kvm *kvm, 48 | struct kvm_memory_slot *slot, gfn_t gfn, 49 | enum kvm_page_track_mode mode); 50 | bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, 51 | enum kvm_page_track_mode mode); 52 | 53 | void 54 | kvm_page_track_register_notifier(struct kvm *kvm, 55 | struct kvm_page_track_notifier_node *n); 56 | void 57 | kvm_page_track_unregister_notifier(struct kvm *kvm, 58 | struct kvm_page_track_notifier_node *n); 59 | void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, 60 | int bytes); 61 | #endif 62 | -------------------------------------------------------------------------------- /arch/x86/include/asm/linkage.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_LINKAGE_H 2 | #define _ASM_X86_LINKAGE_H 3 | 4 | #include 5 | 6 | #undef notrace 7 | #define notrace __attribute__((no_instrument_function)) 8 | 9 | #ifdef CONFIG_X86_32 10 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) 11 | #endif /* CONFIG_X86_32 */ 12 | 13 | #ifdef __ASSEMBLY__ 14 | 15 | #define GLOBAL(name) \ 16 | .globl name; \ 17 | name: 18 | 19 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) 20 | #define __ALIGN .p2align 4, 0x90 21 | #define __ALIGN_STR __stringify(__ALIGN) 22 | #endif 23 | 24 | #endif /* __ASSEMBLY__ */ 25 | 26 | #endif /* _ASM_X86_LINKAGE_H */ 27 | 28 | -------------------------------------------------------------------------------- /arch/x86/include/asm/livepatch.h: -------------------------------------------------------------------------------- 1 | /* 2 | * livepatch.h - x86-specific Kernel Live Patching Core 3 | * 4 | * Copyright (C) 2014 Seth Jennings 5 | * Copyright (C) 2014 SUSE 6 | * 7 | * This program is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU General Public License 9 | * as published by the Free Software Foundation; either version 2 10 | * of the License, or (at your option) any later version. 
11 | * 12 | * This program is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with this program; if not, see . 19 | */ 20 | 21 | #ifndef _ASM_X86_LIVEPATCH_H 22 | #define _ASM_X86_LIVEPATCH_H 23 | 24 | #include 25 | #include 26 | 27 | static inline int klp_check_compiler_support(void) 28 | { 29 | #ifndef CC_USING_FENTRY 30 | return 1; 31 | #endif 32 | return 0; 33 | } 34 | 35 | static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 36 | { 37 | regs->ip = ip; 38 | } 39 | 40 | #endif /* _ASM_X86_LIVEPATCH_H */ 41 | -------------------------------------------------------------------------------- /arch/x86/include/asm/local64.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mach_timer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Machine specific calibrate_tsc() for generic. 3 | * Split out from timer_tsc.c by Osamu Tomita 4 | */ 5 | /* ------ Calibrate the TSC ------- 6 | * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). 7 | * Too much 64-bit arithmetic here to do this cleanly in C, and for 8 | * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) 9 | * output busy loop as low as possible. We avoid reading the CTC registers 10 | * directly because of the awkward 8-bit access mechanism of the 82C54 11 | * device. 12 | */ 13 | #ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H 14 | #define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H 15 | 16 | #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ 17 | #define CALIBRATE_LATCH \ 18 | ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) 19 | 20 | static inline void mach_prepare_counter(void) 21 | { 22 | /* Set the Gate high, disable speaker */ 23 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); 24 | 25 | /* 26 | * Now let's take care of CTC channel 2 27 | * 28 | * Set the Gate high, program CTC channel 2 for mode 0, 29 | * (interrupt on terminal count mode), binary count, 30 | * load 5 * LATCH count, (LSB and MSB) to begin countdown. 31 | * 32 | * Some devices need a delay here. 33 | */ 34 | outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ 35 | outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ 36 | outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ 37 | } 38 | 39 | static inline void mach_countup(unsigned long *count_p) 40 | { 41 | unsigned long count = 0; 42 | do { 43 | count++; 44 | } while ((inb_p(0x61) & 0x20) == 0); 45 | *count_p = count; 46 | } 47 | 48 | #endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */ 49 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mach_traps.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Machine specific NMI handling for generic. 
3 | * Split out from traps.c by Osamu Tomita 4 | */ 5 | #ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H 6 | #define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H 7 | 8 | #include 9 | 10 | #define NMI_REASON_PORT 0x61 11 | 12 | #define NMI_REASON_SERR 0x80 13 | #define NMI_REASON_IOCHK 0x40 14 | #define NMI_REASON_MASK (NMI_REASON_SERR | NMI_REASON_IOCHK) 15 | 16 | #define NMI_REASON_CLEAR_SERR 0x04 17 | #define NMI_REASON_CLEAR_IOCHK 0x08 18 | #define NMI_REASON_CLEAR_MASK 0x0f 19 | 20 | static inline unsigned char default_get_nmi_reason(void) 21 | { 22 | return inb(NMI_REASON_PORT); 23 | } 24 | 25 | static inline void reassert_nmi(void) 26 | { 27 | int old_reg = -1; 28 | 29 | if (do_i_have_lock_cmos()) 30 | old_reg = current_lock_cmos_reg(); 31 | else 32 | lock_cmos(0); /* register doesn't matter here */ 33 | outb(0x8f, 0x70); 34 | inb(0x71); /* dummy */ 35 | outb(0x0f, 0x70); 36 | inb(0x71); /* dummy */ 37 | if (old_reg >= 0) 38 | outb(old_reg, 0x70); 39 | else 40 | unlock_cmos(); 41 | } 42 | 43 | #endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */ 44 | -------------------------------------------------------------------------------- /arch/x86/include/asm/math_emu.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MATH_EMU_H 2 | #define _ASM_X86_MATH_EMU_H 3 | 4 | #include 5 | 6 | /* This structure matches the layout of the data saved to the stack 7 | following a device-not-present interrupt, part of it saved 8 | automatically by the 80386/80486. 9 | */ 10 | struct math_emu_info { 11 | long ___orig_eip; 12 | struct pt_regs *regs; 13 | }; 14 | #endif /* _ASM_X86_MATH_EMU_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/misc.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MISC_H 2 | #define _ASM_X86_MISC_H 3 | 4 | int num_digits(int val); 5 | 6 | #endif /* _ASM_X86_MISC_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmconfig.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MMCONFIG_H 2 | #define _ASM_X86_MMCONFIG_H 3 | 4 | #ifdef CONFIG_PCI_MMCONFIG 5 | extern void fam10h_check_enable_mmcfg(void); 6 | extern void check_enable_amd_mmconf_dmi(void); 7 | #else 8 | static inline void fam10h_check_enable_mmcfg(void) { } 9 | static inline void check_enable_amd_mmconf_dmi(void) { } 10 | #endif 11 | 12 | #endif /* _ASM_X86_MMCONFIG_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmu.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MMU_H 2 | #define _ASM_X86_MMU_H 3 | 4 | #include 5 | #include 6 | 7 | /* 8 | * The x86 doesn't have a mmu context, but 9 | * we put the segment information here. 10 | */ 11 | typedef struct { 12 | #ifdef CONFIG_MODIFY_LDT_SYSCALL 13 | struct ldt_struct *ldt; 14 | #endif 15 | 16 | #ifdef CONFIG_X86_64 17 | /* True if mm supports a task running in 32 bit compatibility mode. 
*/ 18 | unsigned short ia32_compat; 19 | #endif 20 | 21 | struct mutex lock; 22 | void __user *vdso; /* vdso base address */ 23 | const struct vdso_image *vdso_image; /* vdso image in use */ 24 | 25 | atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ 26 | } mm_context_t; 27 | 28 | #ifdef CONFIG_SMP 29 | void leave_mm(int cpu); 30 | #else 31 | static inline void leave_mm(int cpu) 32 | { 33 | } 34 | #endif 35 | 36 | #endif /* _ASM_X86_MMU_H */ 37 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmx.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MMX_H 2 | #define _ASM_X86_MMX_H 3 | 4 | /* 5 | * MMX 3Dnow! helper operations 6 | */ 7 | 8 | #include 9 | 10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); 11 | extern void mmx_clear_page(void *page); 12 | extern void mmx_copy_page(void *to, void *from); 13 | 14 | #endif /* _ASM_X86_MMX_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmzone.h: -------------------------------------------------------------------------------- 1 | #ifdef CONFIG_X86_32 2 | # include 3 | #else 4 | # include 5 | #endif 6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmzone_32.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 3 | * 4 | */ 5 | 6 | #ifndef _ASM_X86_MMZONE_32_H 7 | #define _ASM_X86_MMZONE_32_H 8 | 9 | #include 10 | 11 | #ifdef CONFIG_NUMA 12 | extern struct pglist_data *node_data[]; 13 | #define NODE_DATA(nid) (node_data[nid]) 14 | #endif /* CONFIG_NUMA */ 15 | 16 | #ifdef CONFIG_DISCONTIGMEM 17 | 18 | /* 19 | * generic node memory support, the following assumptions apply: 20 | * 21 | * 1) memory comes in 64Mb contiguous chunks which are either present or not 22 | * 2) we will not have more than 64Gb in total 23 | * 24 | * for now assume that 64Gb is max amount of RAM for whole system 25 | * 64Gb / 4096bytes/page = 16777216 pages 26 | */ 27 | #define MAX_NR_PAGES 16777216 28 | #define MAX_SECTIONS 1024 29 | #define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS) 30 | 31 | extern s8 physnode_map[]; 32 | 33 | static inline int pfn_to_nid(unsigned long pfn) 34 | { 35 | #ifdef CONFIG_NUMA 36 | return((int) physnode_map[(pfn) / PAGES_PER_SECTION]); 37 | #else 38 | return 0; 39 | #endif 40 | } 41 | 42 | static inline int pfn_valid(int pfn) 43 | { 44 | int nid = pfn_to_nid(pfn); 45 | 46 | if (nid >= 0) 47 | return (pfn < node_end_pfn(nid)); 48 | return 0; 49 | } 50 | 51 | #define early_pfn_valid(pfn) pfn_valid((pfn)) 52 | 53 | #endif /* CONFIG_DISCONTIGMEM */ 54 | 55 | #endif /* _ASM_X86_MMZONE_32_H */ 56 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mmzone_64.h: -------------------------------------------------------------------------------- 1 | /* K8 NUMA support */ 2 | /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ 3 | /* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. 
*/ 4 | #ifndef _ASM_X86_MMZONE_64_H 5 | #define _ASM_X86_MMZONE_64_H 6 | 7 | #ifdef CONFIG_NUMA 8 | 9 | #include 10 | #include 11 | 12 | extern struct pglist_data *node_data[]; 13 | 14 | #define NODE_DATA(nid) (node_data[nid]) 15 | 16 | #endif 17 | #endif /* _ASM_X86_MMZONE_64_H */ 18 | -------------------------------------------------------------------------------- /arch/x86/include/asm/module.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MODULE_H 2 | #define _ASM_X86_MODULE_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_X86_64 7 | /* X86_64 does not define MODULE_PROC_FAMILY */ 8 | #elif defined CONFIG_M486 9 | #define MODULE_PROC_FAMILY "486 " 10 | #elif defined CONFIG_M586 11 | #define MODULE_PROC_FAMILY "586 " 12 | #elif defined CONFIG_M586TSC 13 | #define MODULE_PROC_FAMILY "586TSC " 14 | #elif defined CONFIG_M586MMX 15 | #define MODULE_PROC_FAMILY "586MMX " 16 | #elif defined CONFIG_MCORE2 17 | #define MODULE_PROC_FAMILY "CORE2 " 18 | #elif defined CONFIG_MATOM 19 | #define MODULE_PROC_FAMILY "ATOM " 20 | #elif defined CONFIG_M686 21 | #define MODULE_PROC_FAMILY "686 " 22 | #elif defined CONFIG_MPENTIUMII 23 | #define MODULE_PROC_FAMILY "PENTIUMII " 24 | #elif defined CONFIG_MPENTIUMIII 25 | #define MODULE_PROC_FAMILY "PENTIUMIII " 26 | #elif defined CONFIG_MPENTIUMM 27 | #define MODULE_PROC_FAMILY "PENTIUMM " 28 | #elif defined CONFIG_MPENTIUM4 29 | #define MODULE_PROC_FAMILY "PENTIUM4 " 30 | #elif defined CONFIG_MK6 31 | #define MODULE_PROC_FAMILY "K6 " 32 | #elif defined CONFIG_MK7 33 | #define MODULE_PROC_FAMILY "K7 " 34 | #elif defined CONFIG_MK8 35 | #define MODULE_PROC_FAMILY "K8 " 36 | #elif defined CONFIG_MELAN 37 | #define MODULE_PROC_FAMILY "ELAN " 38 | #elif defined CONFIG_MCRUSOE 39 | #define MODULE_PROC_FAMILY "CRUSOE " 40 | #elif defined CONFIG_MEFFICEON 41 | #define MODULE_PROC_FAMILY "EFFICEON " 42 | #elif defined CONFIG_MWINCHIPC6 43 | #define MODULE_PROC_FAMILY "WINCHIPC6 " 44 | #elif defined CONFIG_MWINCHIP3D 45 | #define MODULE_PROC_FAMILY "WINCHIP3D " 46 | #elif defined CONFIG_MCYRIXIII 47 | #define MODULE_PROC_FAMILY "CYRIXIII " 48 | #elif defined CONFIG_MVIAC3_2 49 | #define MODULE_PROC_FAMILY "VIAC3-2 " 50 | #elif defined CONFIG_MVIAC7 51 | #define MODULE_PROC_FAMILY "VIAC7 " 52 | #elif defined CONFIG_MGEODEGX1 53 | #define MODULE_PROC_FAMILY "GEODEGX1 " 54 | #elif defined CONFIG_MGEODE_LX 55 | #define MODULE_PROC_FAMILY "GEODE " 56 | #else 57 | #error unknown processor family 58 | #endif 59 | 60 | #ifdef CONFIG_X86_32 61 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY 62 | #endif 63 | 64 | #endif /* _ASM_X86_MODULE_H */ 65 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mshyperv.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MSHYPER_H 2 | #define _ASM_X86_MSHYPER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | struct ms_hyperv_info { 9 | u32 features; 10 | u32 misc_features; 11 | u32 hints; 12 | }; 13 | 14 | extern struct ms_hyperv_info ms_hyperv; 15 | 16 | void hyperv_callback_vector(void); 17 | #ifdef CONFIG_TRACING 18 | #define trace_hyperv_callback_vector hyperv_callback_vector 19 | #endif 20 | void hyperv_vector_handler(struct pt_regs *regs); 21 | void hv_setup_vmbus_irq(void (*handler)(void)); 22 | void hv_remove_vmbus_irq(void); 23 | 24 | void hv_setup_kexec_handler(void (*handler)(void)); 25 | void hv_remove_kexec_handler(void); 26 | void hv_setup_crash_handler(void 
(*handler)(struct pt_regs *regs)); 27 | void hv_remove_crash_handler(void); 28 | #endif 29 | -------------------------------------------------------------------------------- /arch/x86/include/asm/msi.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MSI_H 2 | #define _ASM_X86_MSI_H 3 | #include 4 | #include 5 | 6 | typedef struct irq_alloc_info msi_alloc_info_t; 7 | 8 | int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, 9 | msi_alloc_info_t *arg); 10 | 11 | void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc); 12 | 13 | #endif /* _ASM_X86_MSI_H */ 14 | -------------------------------------------------------------------------------- /arch/x86/include/asm/msidef.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MSIDEF_H 2 | #define _ASM_X86_MSIDEF_H 3 | 4 | /* 5 | * Constants for Intel APIC based MSI messages. 6 | */ 7 | 8 | /* 9 | * Shifts for MSI data 10 | */ 11 | 12 | #define MSI_DATA_VECTOR_SHIFT 0 13 | #define MSI_DATA_VECTOR_MASK 0x000000ff 14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ 15 | MSI_DATA_VECTOR_MASK) 16 | 17 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 18 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) 19 | #define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) 20 | 21 | #define MSI_DATA_LEVEL_SHIFT 14 22 | #define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) 23 | #define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) 24 | 25 | #define MSI_DATA_TRIGGER_SHIFT 15 26 | #define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) 27 | #define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) 28 | 29 | /* 30 | * Shift/mask fields for msi address 31 | */ 32 | 33 | #define MSI_ADDR_BASE_HI 0 34 | #define MSI_ADDR_BASE_LO 0xfee00000 35 | 36 | #define MSI_ADDR_DEST_MODE_SHIFT 2 37 | #define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) 38 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) 39 | 40 | #define MSI_ADDR_REDIRECTION_SHIFT 3 41 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) 42 | /* dedicated cpu */ 43 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) 44 | /* lowest priority */ 45 | 46 | #define MSI_ADDR_DEST_ID_SHIFT 12 47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ 49 | MSI_ADDR_DEST_ID_MASK) 50 | #define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00) 51 | 52 | #define MSI_ADDR_IR_EXT_INT (1 << 4) 53 | #define MSI_ADDR_IR_SHV (1 << 3) 54 | #define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) 55 | #define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5) 56 | #endif /* _ASM_X86_MSIDEF_H */ 57 | -------------------------------------------------------------------------------- /arch/x86/include/asm/msr-trace.h: -------------------------------------------------------------------------------- 1 | #undef TRACE_SYSTEM 2 | #define TRACE_SYSTEM msr 3 | 4 | #undef TRACE_INCLUDE_FILE 5 | #define TRACE_INCLUDE_FILE msr-trace 6 | 7 | #undef TRACE_INCLUDE_PATH 8 | #define TRACE_INCLUDE_PATH asm/ 9 | 10 | #if !defined(_TRACE_MSR_H) || defined(TRACE_HEADER_MULTI_READ) 11 | #define _TRACE_MSR_H 12 | 13 | #include 14 | 15 | /* 16 | * Tracing for x86 model specific registers. Directly maps to the 17 | * RDMSR/WRMSR instructions. 
18 | */ 19 | 20 | DECLARE_EVENT_CLASS(msr_trace_class, 21 | TP_PROTO(unsigned msr, u64 val, int failed), 22 | TP_ARGS(msr, val, failed), 23 | TP_STRUCT__entry( 24 | __field( unsigned, msr ) 25 | __field( u64, val ) 26 | __field( int, failed ) 27 | ), 28 | TP_fast_assign( 29 | __entry->msr = msr; 30 | __entry->val = val; 31 | __entry->failed = failed; 32 | ), 33 | TP_printk("%x, value %llx%s", 34 | __entry->msr, 35 | __entry->val, 36 | __entry->failed ? " #GP" : "") 37 | ); 38 | 39 | DEFINE_EVENT(msr_trace_class, read_msr, 40 | TP_PROTO(unsigned msr, u64 val, int failed), 41 | TP_ARGS(msr, val, failed) 42 | ); 43 | 44 | DEFINE_EVENT(msr_trace_class, write_msr, 45 | TP_PROTO(unsigned msr, u64 val, int failed), 46 | TP_ARGS(msr, val, failed) 47 | ); 48 | 49 | DEFINE_EVENT(msr_trace_class, rdpmc, 50 | TP_PROTO(unsigned msr, u64 val, int failed), 51 | TP_ARGS(msr, val, failed) 52 | ); 53 | 54 | #endif /* _TRACE_MSR_H */ 55 | 56 | /* This part must be outside protection */ 57 | #include 58 | -------------------------------------------------------------------------------- /arch/x86/include/asm/mutex.h: -------------------------------------------------------------------------------- 1 | #ifdef CONFIG_X86_32 2 | # include 3 | #else 4 | # include 5 | #endif 6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/nmi.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_NMI_H 2 | #define _ASM_X86_NMI_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #ifdef CONFIG_X86_LOCAL_APIC 10 | 11 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); 12 | extern int reserve_perfctr_nmi(unsigned int); 13 | extern void release_perfctr_nmi(unsigned int); 14 | extern int reserve_evntsel_nmi(unsigned int); 15 | extern void release_evntsel_nmi(unsigned int); 16 | 17 | struct ctl_table; 18 | extern int proc_nmi_enabled(struct ctl_table *, int , 19 | void __user *, size_t *, loff_t *); 20 | extern int unknown_nmi_panic; 21 | 22 | #endif /* CONFIG_X86_LOCAL_APIC */ 23 | 24 | #define NMI_FLAG_FIRST 1 25 | 26 | enum { 27 | NMI_LOCAL=0, 28 | NMI_UNKNOWN, 29 | NMI_SERR, 30 | NMI_IO_CHECK, 31 | NMI_MAX 32 | }; 33 | 34 | #define NMI_DONE 0 35 | #define NMI_HANDLED 1 36 | 37 | typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); 38 | 39 | struct nmiaction { 40 | struct list_head list; 41 | nmi_handler_t handler; 42 | u64 max_duration; 43 | struct irq_work irq_work; 44 | unsigned long flags; 45 | const char *name; 46 | }; 47 | 48 | #define register_nmi_handler(t, fn, fg, n, init...) 
\ 49 | ({ \ 50 | static struct nmiaction init fn##_na = { \ 51 | .handler = (fn), \ 52 | .name = (n), \ 53 | .flags = (fg), \ 54 | }; \ 55 | __register_nmi_handler((t), &fn##_na); \ 56 | }) 57 | 58 | int __register_nmi_handler(unsigned int, struct nmiaction *); 59 | 60 | void unregister_nmi_handler(unsigned int, const char *); 61 | 62 | void stop_nmi(void); 63 | void restart_nmi(void); 64 | void local_touch_nmi(void); 65 | 66 | #endif /* _ASM_X86_NMI_H */ 67 | -------------------------------------------------------------------------------- /arch/x86/include/asm/numa_32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_NUMA_32_H 2 | #define _ASM_X86_NUMA_32_H 3 | 4 | #ifdef CONFIG_HIGHMEM 5 | extern void set_highmem_pages_init(void); 6 | #else 7 | static inline void set_highmem_pages_init(void) 8 | { 9 | } 10 | #endif 11 | 12 | #endif /* _ASM_X86_NUMA_32_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/numachip/numachip.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is subject to the terms and conditions of the GNU General Public 3 | * License. See the file "COPYING" in the main directory of this archive 4 | * for more details. 5 | * 6 | * Numascale NumaConnect-specific header file 7 | * 8 | * Copyright (C) 2012 Numascale AS. All rights reserved. 9 | * 10 | * Send feedback to 11 | * 12 | */ 13 | 14 | #ifndef _ASM_X86_NUMACHIP_NUMACHIP_H 15 | #define _ASM_X86_NUMACHIP_NUMACHIP_H 16 | 17 | extern u8 numachip_system; 18 | extern int __init pci_numachip_init(void); 19 | 20 | #endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/asm/olpc_ofw.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_OLPC_OFW_H 2 | #define _ASM_X86_OLPC_OFW_H 3 | 4 | /* index into the page table containing the entry OFW occupies */ 5 | #define OLPC_OFW_PDE_NR 1022 6 | 7 | #define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */ 8 | 9 | #ifdef CONFIG_OLPC 10 | 11 | extern bool olpc_ofw_is_installed(void); 12 | 13 | /* run an OFW command by calling into the firmware */ 14 | #define olpc_ofw(name, args, res) \ 15 | __olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res) 16 | 17 | extern int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res, 18 | void **res); 19 | 20 | /* determine whether OFW is available and lives in the proper memory */ 21 | extern void olpc_ofw_detect(void); 22 | 23 | /* install OFW's pde permanently into the kernel's pgtable */ 24 | extern void setup_olpc_ofw_pgd(void); 25 | 26 | /* check if OFW was detected during boot */ 27 | extern bool olpc_ofw_present(void); 28 | 29 | extern void olpc_dt_build_devicetree(void); 30 | 31 | #else /* !CONFIG_OLPC */ 32 | static inline void olpc_ofw_detect(void) { } 33 | static inline void setup_olpc_ofw_pgd(void) { } 34 | static inline void olpc_dt_build_devicetree(void) { } 35 | #endif /* !CONFIG_OLPC */ 36 | 37 | #endif /* _ASM_X86_OLPC_OFW_H */ 38 | -------------------------------------------------------------------------------- /arch/x86/include/asm/page_32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PAGE_32_H 2 | #define _ASM_X86_PAGE_32_H 3 | 4 | #include 5 | 6 | #ifndef __ASSEMBLY__ 7 | 8 | #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) 9 | #ifdef 
CONFIG_DEBUG_VIRTUAL 10 | extern unsigned long __phys_addr(unsigned long); 11 | #else 12 | #define __phys_addr(x) __phys_addr_nodebug(x) 13 | #endif 14 | #define __phys_addr_symbol(x) __phys_addr(x) 15 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) 16 | 17 | #ifdef CONFIG_FLATMEM 18 | #define pfn_valid(pfn) ((pfn) < max_mapnr) 19 | #endif /* CONFIG_FLATMEM */ 20 | 21 | #ifdef CONFIG_X86_USE_3DNOW 22 | #include 23 | 24 | static inline void clear_page(void *page) 25 | { 26 | mmx_clear_page(page); 27 | } 28 | 29 | static inline void copy_page(void *to, void *from) 30 | { 31 | mmx_copy_page(to, from); 32 | } 33 | #else /* !CONFIG_X86_USE_3DNOW */ 34 | #include 35 | 36 | static inline void clear_page(void *page) 37 | { 38 | memset(page, 0, PAGE_SIZE); 39 | } 40 | 41 | static inline void copy_page(void *to, void *from) 42 | { 43 | memcpy(to, from, PAGE_SIZE); 44 | } 45 | #endif /* CONFIG_X86_3DNOW */ 46 | #endif /* !__ASSEMBLY__ */ 47 | 48 | #endif /* _ASM_X86_PAGE_32_H */ 49 | -------------------------------------------------------------------------------- /arch/x86/include/asm/page_32_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PAGE_32_DEFS_H 2 | #define _ASM_X86_PAGE_32_DEFS_H 3 | 4 | #include 5 | 6 | /* 7 | * This handles the memory map. 8 | * 9 | * A __PAGE_OFFSET of 0xC0000000 means that the kernel has 10 | * a virtual address space of one gigabyte, which limits the 11 | * amount of physical memory you can use to about 950MB. 12 | * 13 | * If you want more physical memory than this then see the CONFIG_HIGHMEM4G 14 | * and CONFIG_HIGHMEM64G options in the kernel configuration. 15 | */ 16 | #define __PAGE_OFFSET_BASE _AC(CONFIG_PAGE_OFFSET, UL) 17 | #define __PAGE_OFFSET __PAGE_OFFSET_BASE 18 | 19 | #define __START_KERNEL_map __PAGE_OFFSET 20 | 21 | #define THREAD_SIZE_ORDER 1 22 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 23 | 24 | #define DOUBLEFAULT_STACK 1 25 | #define NMI_STACK 0 26 | #define DEBUG_STACK 0 27 | #define MCE_STACK 0 28 | #define N_EXCEPTION_STACKS 1 29 | 30 | #ifdef CONFIG_X86_PAE 31 | /* 44=32+12, the limit we can fit into an unsigned long pfn */ 32 | #define __PHYSICAL_MASK_SHIFT 44 33 | #define __VIRTUAL_MASK_SHIFT 32 34 | 35 | #else /* !CONFIG_X86_PAE */ 36 | #define __PHYSICAL_MASK_SHIFT 32 37 | #define __VIRTUAL_MASK_SHIFT 32 38 | #endif /* CONFIG_X86_PAE */ 39 | 40 | /* 41 | * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) 42 | */ 43 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 44 | 45 | #ifndef __ASSEMBLY__ 46 | 47 | /* 48 | * This much address space is reserved for vmalloc() and iomap() 49 | * as well as fixmap mappings. 
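 * (__VMALLOC_RESERVE, declared below, is normally 128 MB unless changed
 * with the vmalloc= boot parameter.)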
50 | */ 51 | extern unsigned int __VMALLOC_RESERVE; 52 | extern int sysctl_legacy_va_layout; 53 | 54 | extern void find_low_pfn_range(void); 55 | extern void setup_bootmem_allocator(void); 56 | 57 | #endif /* !__ASSEMBLY__ */ 58 | 59 | #endif /* _ASM_X86_PAGE_32_DEFS_H */ 60 | -------------------------------------------------------------------------------- /arch/x86/include/asm/page_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PAGE_64_H 2 | #define _ASM_X86_PAGE_64_H 3 | 4 | #include 5 | 6 | #ifndef __ASSEMBLY__ 7 | 8 | /* duplicated to the one in bootmem.h */ 9 | extern unsigned long max_pfn; 10 | extern unsigned long phys_base; 11 | 12 | static inline unsigned long __phys_addr_nodebug(unsigned long x) 13 | { 14 | unsigned long y = x - __START_KERNEL_map; 15 | 16 | /* use the carry flag to determine if x was < __START_KERNEL_map */ 17 | x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET)); 18 | 19 | return x; 20 | } 21 | 22 | #ifdef CONFIG_DEBUG_VIRTUAL 23 | extern unsigned long __phys_addr(unsigned long); 24 | extern unsigned long __phys_addr_symbol(unsigned long); 25 | #else 26 | #define __phys_addr(x) __phys_addr_nodebug(x) 27 | #define __phys_addr_symbol(x) \ 28 | ((unsigned long)(x) - __START_KERNEL_map + phys_base) 29 | #endif 30 | 31 | #define __phys_reloc_hide(x) (x) 32 | 33 | #ifdef CONFIG_FLATMEM 34 | #define pfn_valid(pfn) ((pfn) < max_pfn) 35 | #endif 36 | 37 | void clear_page(void *page); 38 | void copy_page(void *to, void *from); 39 | 40 | #endif /* !__ASSEMBLY__ */ 41 | 42 | #ifdef CONFIG_X86_VSYSCALL_EMULATION 43 | # define __HAVE_ARCH_GATE_AREA 1 44 | #endif 45 | 46 | #endif /* _ASM_X86_PAGE_64_H */ 47 | -------------------------------------------------------------------------------- /arch/x86/include/asm/parport.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PARPORT_H 2 | #define _ASM_X86_PARPORT_H 3 | 4 | static int parport_pc_find_isa_ports(int autoirq, int autodma); 5 | static int parport_pc_find_nonpci_ports(int autoirq, int autodma) 6 | { 7 | return parport_pc_find_isa_ports(autoirq, autodma); 8 | } 9 | 10 | #endif /* _ASM_X86_PARPORT_H */ 11 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pat.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PAT_H 2 | #define _ASM_X86_PAT_H 3 | 4 | #include 5 | #include 6 | 7 | bool pat_enabled(void); 8 | void pat_disable(const char *reason); 9 | extern void pat_init(void); 10 | 11 | extern int reserve_memtype(u64 start, u64 end, 12 | enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); 13 | extern int free_memtype(u64 start, u64 end); 14 | 15 | extern int kernel_map_sync_memtype(u64 base, unsigned long size, 16 | enum page_cache_mode pcm); 17 | 18 | int io_reserve_memtype(resource_size_t start, resource_size_t end, 19 | enum page_cache_mode *pcm); 20 | 21 | void io_free_memtype(resource_size_t start, resource_size_t end); 22 | 23 | #endif /* _ASM_X86_PAT_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pci-direct.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PCI_DIRECT_H 2 | #define _ASM_X86_PCI_DIRECT_H 3 | 4 | #include 5 | 6 | /* Direct PCI access. This is used for PCI accesses in early boot before 7 | the PCI subsystem works. 
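   They go through PCI configuration mechanism #1, i.e. an address write to
   I/O port 0xCF8 followed by a data access at port 0xCFC.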
*/ 8 | 9 | extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); 10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); 11 | extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); 12 | extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); 13 | extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); 14 | extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val); 15 | 16 | extern int early_pci_allowed(void); 17 | 18 | extern unsigned int pci_early_dump_regs; 19 | extern void early_dump_pci_device(u8 bus, u8 slot, u8 func); 20 | extern void early_dump_pci_devices(void); 21 | #endif /* _ASM_X86_PCI_DIRECT_H */ 22 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pci-functions.h: -------------------------------------------------------------------------------- 1 | /* 2 | * PCI BIOS function numbering for conventional PCI BIOS 3 | * systems 4 | */ 5 | 6 | #define PCIBIOS_PCI_FUNCTION_ID 0xb1XX 7 | #define PCIBIOS_PCI_BIOS_PRESENT 0xb101 8 | #define PCIBIOS_FIND_PCI_DEVICE 0xb102 9 | #define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 10 | #define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 11 | #define PCIBIOS_READ_CONFIG_BYTE 0xb108 12 | #define PCIBIOS_READ_CONFIG_WORD 0xb109 13 | #define PCIBIOS_READ_CONFIG_DWORD 0xb10a 14 | #define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b 15 | #define PCIBIOS_WRITE_CONFIG_WORD 0xb10c 16 | #define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d 17 | #define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e 18 | #define PCIBIOS_SET_PCI_HW_INT 0xb10f 19 | 20 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pci_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PCI_64_H 2 | #define _ASM_X86_PCI_64_H 3 | 4 | #ifdef __KERNEL__ 5 | 6 | #ifdef CONFIG_CALGARY_IOMMU 7 | static inline void *pci_iommu(struct pci_bus *bus) 8 | { 9 | struct pci_sysdata *sd = bus->sysdata; 10 | return sd->iommu; 11 | } 12 | 13 | static inline void set_pci_iommu(struct pci_bus *bus, void *val) 14 | { 15 | struct pci_sysdata *sd = bus->sysdata; 16 | sd->iommu = val; 17 | } 18 | #endif /* CONFIG_CALGARY_IOMMU */ 19 | 20 | extern int (*pci_config_read)(int seg, int bus, int dev, int fn, 21 | int reg, int len, u32 *value); 22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, 23 | int reg, int len, u32 value); 24 | 25 | #endif /* __KERNEL__ */ 26 | 27 | #endif /* _ASM_X86_PCI_64_H */ 28 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pgtable-2level_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H 2 | #define _ASM_X86_PGTABLE_2LEVEL_DEFS_H 3 | 4 | #ifndef __ASSEMBLY__ 5 | #include 6 | 7 | typedef unsigned long pteval_t; 8 | typedef unsigned long pmdval_t; 9 | typedef unsigned long pudval_t; 10 | typedef unsigned long pgdval_t; 11 | typedef unsigned long pgprotval_t; 12 | 13 | typedef union { 14 | pteval_t pte; 15 | pteval_t pte_low; 16 | } pte_t; 17 | #endif /* !__ASSEMBLY__ */ 18 | 19 | #define SHARED_KERNEL_PMD 0 20 | 21 | /* 22 | * traditional i386 two-level paging structure: 23 | */ 24 | 25 | #define PGDIR_SHIFT 22 26 | #define PTRS_PER_PGD 1024 27 | 28 | 29 | /* 30 | * the i386 is two-level, so we don't really have any 31 | * PMD directory physically. 
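 * Each of the 1024 PGD entries maps 4 MB (2^PGDIR_SHIFT = 2^22 bytes)
 * through a single page table of 1024 PTEs, so 1024 * 4 MB = 4 GB in total.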
32 | */ 33 | 34 | #define PTRS_PER_PTE 1024 35 | 36 | #endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */ 37 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pgtable-3level_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H 2 | #define _ASM_X86_PGTABLE_3LEVEL_DEFS_H 3 | 4 | #ifndef __ASSEMBLY__ 5 | #include 6 | 7 | typedef u64 pteval_t; 8 | typedef u64 pmdval_t; 9 | typedef u64 pudval_t; 10 | typedef u64 pgdval_t; 11 | typedef u64 pgprotval_t; 12 | 13 | typedef union { 14 | struct { 15 | unsigned long pte_low, pte_high; 16 | }; 17 | pteval_t pte; 18 | } pte_t; 19 | #endif /* !__ASSEMBLY__ */ 20 | 21 | #ifdef CONFIG_PARAVIRT 22 | #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) 23 | #else 24 | #define SHARED_KERNEL_PMD 1 25 | #endif 26 | 27 | /* 28 | * PGDIR_SHIFT determines what a top-level page table entry can map 29 | */ 30 | #define PGDIR_SHIFT 30 31 | #define PTRS_PER_PGD 4 32 | 33 | /* 34 | * PMD_SHIFT determines the size of the area a middle-level 35 | * page table can map 36 | */ 37 | #define PMD_SHIFT 21 38 | #define PTRS_PER_PMD 512 39 | 40 | /* 41 | * entries per page directory level 42 | */ 43 | #define PTRS_PER_PTE 512 44 | 45 | 46 | #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ 47 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pgtable_32_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PGTABLE_32_DEFS_H 2 | #define _ASM_X86_PGTABLE_32_DEFS_H 3 | 4 | /* 5 | * The Linux x86 paging architecture is 'compile-time dual-mode', it 6 | * implements both the traditional 2-level x86 page tables and the 7 | * newer 3-level PAE-mode page tables. 8 | */ 9 | #ifdef CONFIG_X86_PAE 10 | # include 11 | # define PMD_SIZE (1UL << PMD_SHIFT) 12 | # define PMD_MASK (~(PMD_SIZE - 1)) 13 | #else 14 | # include 15 | #endif 16 | 17 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 18 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 19 | 20 | /* Just any arbitrary offset to the start of the vmalloc VM area: the 21 | * current 8MB value just means that there will be a 8MB "hole" after the 22 | * physical memory until the kernel virtual memory starts. That means that 23 | * any out-of-bounds memory accesses will hopefully be caught. 24 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced 25 | * area for the same reason. 
;) 26 | */ 27 | #define VMALLOC_OFFSET (8 * 1024 * 1024) 28 | 29 | #ifndef __ASSEMBLY__ 30 | extern bool __vmalloc_start_set; /* set once high_memory is set */ 31 | #endif 32 | 33 | #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) 34 | #ifdef CONFIG_X86_PAE 35 | #define LAST_PKMAP 512 36 | #else 37 | #define LAST_PKMAP 1024 38 | #endif 39 | 40 | #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ 41 | & PMD_MASK) 42 | 43 | #ifdef CONFIG_HIGHMEM 44 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 45 | #else 46 | # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 47 | #endif 48 | 49 | #define MODULES_VADDR VMALLOC_START 50 | #define MODULES_END VMALLOC_END 51 | #define MODULES_LEN (MODULES_VADDR - MODULES_END) 52 | 53 | #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) 54 | 55 | #endif /* _ASM_X86_PGTABLE_32_DEFS_H */ 56 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pkeys.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PKEYS_H 2 | #define _ASM_X86_PKEYS_H 3 | 4 | #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) 5 | 6 | extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, 7 | unsigned long init_val); 8 | 9 | /* 10 | * Try to dedicate one of the protection keys to be used as an 11 | * execute-only protection key. 12 | */ 13 | #define PKEY_DEDICATED_EXECUTE_ONLY 15 14 | extern int __execute_only_pkey(struct mm_struct *mm); 15 | static inline int execute_only_pkey(struct mm_struct *mm) 16 | { 17 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) 18 | return 0; 19 | 20 | return __execute_only_pkey(mm); 21 | } 22 | 23 | extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma, 24 | int prot, int pkey); 25 | static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, 26 | int prot, int pkey) 27 | { 28 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) 29 | return 0; 30 | 31 | return __arch_override_mprotect_pkey(vma, prot, pkey); 32 | } 33 | 34 | #endif /*_ASM_X86_PKEYS_H */ 35 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pm-trace.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PM_TRACE_H 2 | #define _ASM_X86_PM_TRACE_H 3 | 4 | #include 5 | 6 | #define TRACE_RESUME(user) \ 7 | do { \ 8 | if (pm_trace_enabled) { \ 9 | const void *tracedata; \ 10 | asm volatile(_ASM_MOV " $1f,%0\n" \ 11 | ".section .tracedata,\"a\"\n" \ 12 | "1:\t.word %c1\n\t" \ 13 | _ASM_PTR " %c2\n" \ 14 | ".previous" \ 15 | :"=r" (tracedata) \ 16 | : "i" (__LINE__), "i" (__FILE__)); \ 17 | generate_pm_trace(tracedata, user); \ 18 | } \ 19 | } while (0) 20 | 21 | #define TRACE_SUSPEND(user) TRACE_RESUME(user) 22 | 23 | #endif /* _ASM_X86_PM_TRACE_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pmc_core.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Intel Core SoC Power Management Controller Header File 3 | * 4 | * Copyright (c) 2016, Intel Corporation. 5 | * All Rights Reserved. 6 | * 7 | * Authors: Rajneesh Bhardwaj 8 | * Vishwanath Somayaji 9 | * 10 | * This program is free software; you can redistribute it and/or modify it 11 | * under the terms and conditions of the GNU General Public License, 12 | * version 2, as published by the Free Software Foundation. 
13 | * 14 | * This program is distributed in the hope it will be useful, but WITHOUT 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 | * more details. 18 | * 19 | */ 20 | 21 | #ifndef _ASM_PMC_CORE_H 22 | #define _ASM_PMC_CORE_H 23 | 24 | /* API to read SLP_S0_RESIDENCY counter */ 25 | int intel_pmc_slp_s0_counter_read(u32 *data); 26 | 27 | #endif /* _ASM_PMC_CORE_H */ 28 | -------------------------------------------------------------------------------- /arch/x86/include/asm/posix_types.h: -------------------------------------------------------------------------------- 1 | # ifdef CONFIG_X86_32 2 | # include 3 | # else 4 | # include 5 | # endif 6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/probe_roms.h: -------------------------------------------------------------------------------- 1 | #ifndef _PROBE_ROMS_H_ 2 | #define _PROBE_ROMS_H_ 3 | struct pci_dev; 4 | 5 | extern void __iomem *pci_map_biosrom(struct pci_dev *pdev); 6 | extern void pci_unmap_biosrom(void __iomem *rom); 7 | extern size_t pci_biosrom_size(struct pci_dev *pdev); 8 | #endif 9 | -------------------------------------------------------------------------------- /arch/x86/include/asm/processor-cyrix.h: -------------------------------------------------------------------------------- 1 | /* 2 | * NSC/Cyrix CPU indexed register access. Must be inlined instead of 3 | * macros to ensure correct access ordering 4 | * Access order is always 0x22 (=offset), 0x23 (=value) 5 | * 6 | * When using the old macros a line like 7 | * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); 8 | * gets expanded to: 9 | * do { 10 | * outb((CX86_CCR2), 0x22); 11 | * outb((({ 12 | * outb((CX86_CCR2), 0x22); 13 | * inb(0x23); 14 | * }) | 0x88), 0x23); 15 | * } while (0); 16 | * 17 | * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23). 18 | */ 19 | 20 | static inline u8 getCx86(u8 reg) 21 | { 22 | outb(reg, 0x22); 23 | return inb(0x23); 24 | } 25 | 26 | static inline void setCx86(u8 reg, u8 data) 27 | { 28 | outb(reg, 0x22); 29 | outb(data, 0x23); 30 | } 31 | 32 | #define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); }) 33 | 34 | #define setCx86_old(reg, data) do { \ 35 | outb((reg), 0x22); \ 36 | outb((data), 0x23); \ 37 | } while (0) 38 | 39 | -------------------------------------------------------------------------------- /arch/x86/include/asm/processor-flags.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PROCESSOR_FLAGS_H 2 | #define _ASM_X86_PROCESSOR_FLAGS_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_VM86 7 | #define X86_VM_MASK X86_EFLAGS_VM 8 | #else 9 | #define X86_VM_MASK 0 /* No VM86 support */ 10 | #endif 11 | #endif /* _ASM_X86_PROCESSOR_FLAGS_H */ 12 | -------------------------------------------------------------------------------- /arch/x86/include/asm/prom.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Definitions for Device tree / OpenFirmware handling on X86 3 | * 4 | * based on arch/powerpc/include/asm/prom.h which is 5 | * Copyright (C) 1996-2005 Paul Mackerras. 6 | * 7 | * This program is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU General Public License 9 | * as published by the Free Software Foundation; either version 10 | * 2 of the License, or (at your option) any later version. 
11 | */ 12 | 13 | #ifndef _ASM_X86_PROM_H 14 | #define _ASM_X86_PROM_H 15 | #ifndef __ASSEMBLY__ 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | #ifdef CONFIG_OF 26 | extern int of_ioapic; 27 | extern u64 initial_dtb; 28 | extern void add_dtb(u64 data); 29 | void x86_of_pci_init(void); 30 | void x86_dtb_init(void); 31 | #else 32 | static inline void add_dtb(u64 data) { } 33 | static inline void x86_of_pci_init(void) { } 34 | static inline void x86_dtb_init(void) { } 35 | #define of_ioapic 0 36 | #endif 37 | 38 | extern char cmd_line[COMMAND_LINE_SIZE]; 39 | 40 | #endif /* __ASSEMBLY__ */ 41 | #endif 42 | -------------------------------------------------------------------------------- /arch/x86/include/asm/proto.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PROTO_H 2 | #define _ASM_X86_PROTO_H 3 | 4 | #include 5 | 6 | /* misc architecture specific prototypes */ 7 | 8 | void syscall_init(void); 9 | 10 | #ifdef CONFIG_X86_64 11 | void entry_SYSCALL_64(void); 12 | #endif 13 | 14 | #ifdef CONFIG_X86_32 15 | void entry_INT80_32(void); 16 | void entry_SYSENTER_32(void); 17 | void __begin_SYSENTER_singlestep_region(void); 18 | void __end_SYSENTER_singlestep_region(void); 19 | #endif 20 | 21 | #ifdef CONFIG_IA32_EMULATION 22 | void entry_SYSENTER_compat(void); 23 | void __end_entry_SYSENTER_compat(void); 24 | void entry_SYSCALL_compat(void); 25 | void entry_INT80_compat(void); 26 | #endif 27 | 28 | void x86_configure_nx(void); 29 | void x86_report_nx(void); 30 | 31 | extern int reboot_force; 32 | 33 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 34 | 35 | #endif /* _ASM_X86_PROTO_H */ 36 | -------------------------------------------------------------------------------- /arch/x86/include/asm/pvclock-abi.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PVCLOCK_ABI_H 2 | #define _ASM_X86_PVCLOCK_ABI_H 3 | #ifndef __ASSEMBLY__ 4 | 5 | /* 6 | * These structs MUST NOT be changed. 7 | * They are the ABI between hypervisor and guest OS. 8 | * Both Xen and KVM are using this. 9 | * 10 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp 11 | * of the last update. So the guest can use the tsc delta to get a 12 | * more precise system time. There is one per virtual cpu. 13 | * 14 | * pvclock_wall_clock references the point in time when the system 15 | * time was zero (usually boot time), thus the guest calculates the 16 | * current wall clock by adding the system time. 17 | * 18 | * Protocol for the "version" fields is: hypervisor raises it (making 19 | * it uneven) before it starts updating the fields and raises it again 20 | * (making it even) when it is done. Thus the guest can make sure the 21 | * time values it got are consistent by checking the version before 22 | * and after reading them. 
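 *
 * A guest-side read therefore looks roughly like this (simplified sketch
 * of what the kernel's pvclock helpers do):
 *
 *	do {
 *		version = src->version;
 *		rmb();
 *		delta = rdtsc() - src->tsc_timestamp;
 *		ns = src->system_time +
 *		     pvclock_scale_delta(delta, src->tsc_to_system_mul,
 *					 src->tsc_shift);
 *		rmb();
 *	} while ((version & 1) || version != src->version);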
23 | */ 24 | 25 | struct pvclock_vcpu_time_info { 26 | // an odd value means the hypervisor is updating it, an even value means it is safe to read 27 | u32 version; 28 | u32 pad0; 29 | // the guest's TSC value at the host's last update, i.e. what rdtsc inside the guest should return, 30 | // equal to the host's TSC value + TSC offset (ignoring TSC scaling) 31 | u64 tsc_timestamp; 32 | // the host's time at its last update; KVM uses boot time here (monotonically increasing, in ns), 33 | // optionally plus a user-specified offset 34 | u64 system_time; 35 | // the next two fields are used to convert a TSC value to ns, via ns = ((tsc << tsc_shift) * tsc_to_system_mul) >> 32 36 | u32 tsc_to_system_mul; 37 | s8 tsc_shift; 38 | u8 flags; 39 | u8 pad[2]; 40 | } __attribute__((__packed__)); /* 32 bytes */ 41 | 42 | struct pvclock_wall_clock { 43 | // an odd value means the hypervisor is updating it, an even value means it is safe to read 44 | u32 version; 45 | u32 sec; 46 | u32 nsec; 47 | } __attribute__((__packed__)); 48 | 49 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) 50 | #define PVCLOCK_GUEST_STOPPED (1 << 1) 51 | /* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ 52 | #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) 53 | #endif /* __ASSEMBLY__ */ 54 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ 55 | -------------------------------------------------------------------------------- /arch/x86/include/asm/qrwlock.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_QRWLOCK_H 2 | #define _ASM_X86_QRWLOCK_H 3 | 4 | #include 5 | #include 6 | 7 | #endif /* _ASM_X86_QRWLOCK_H */ 8 | -------------------------------------------------------------------------------- /arch/x86/include/asm/qspinlock.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_QSPINLOCK_H 2 | #define _ASM_X86_QSPINLOCK_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #define queued_spin_unlock queued_spin_unlock 9 | /** 10 | * queued_spin_unlock - release a queued spinlock 11 | * @lock : Pointer to queued spinlock structure 12 | * 13 | * A smp_store_release() on the least-significant byte. 14 | */ 15 | static inline void native_queued_spin_unlock(struct qspinlock *lock) 16 | { 17 | smp_store_release((u8 *)lock, 0); 18 | } 19 | 20 | #ifdef CONFIG_PARAVIRT_SPINLOCKS 21 | extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); 22 | extern void __pv_init_lock_hash(void); 23 | extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); 24 | extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock); 25 | 26 | static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) 27 | { 28 | pv_queued_spin_lock_slowpath(lock, val); 29 | } 30 | 31 | static inline void queued_spin_unlock(struct qspinlock *lock) 32 | { 33 | pv_queued_spin_unlock(lock); 34 | } 35 | #else 36 | static inline void queued_spin_unlock(struct qspinlock *lock) 37 | { 38 | native_queued_spin_unlock(lock); 39 | } 40 | #endif 41 | 42 | #ifdef CONFIG_PARAVIRT 43 | #define virt_spin_lock virt_spin_lock 44 | static inline bool virt_spin_lock(struct qspinlock *lock) 45 | { 46 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) 47 | return false; 48 | 49 | /* 50 | * On hypervisors without PARAVIRT_SPINLOCKS support we fall 51 | * back to a Test-and-Set spinlock, because fair locks have 52 | * horrible lock 'holder' preemption issues. 
53 | */ 54 | 55 | do { 56 | while (atomic_read(&lock->val) != 0) 57 | cpu_relax(); 58 | } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); 59 | 60 | return true; 61 | } 62 | #endif /* CONFIG_PARAVIRT */ 63 | 64 | #include 65 | 66 | #endif /* _ASM_X86_QSPINLOCK_H */ 67 | -------------------------------------------------------------------------------- /arch/x86/include/asm/realmode.h: -------------------------------------------------------------------------------- 1 | #ifndef _ARCH_X86_REALMODE_H 2 | #define _ARCH_X86_REALMODE_H 3 | 4 | #include 5 | #include 6 | 7 | /* This must match data at realmode.S */ 8 | struct real_mode_header { 9 | u32 text_start; 10 | u32 ro_end; 11 | /* SMP trampoline */ 12 | u32 trampoline_start; 13 | u32 trampoline_status; 14 | u32 trampoline_header; 15 | #ifdef CONFIG_X86_64 16 | u32 trampoline_pgd; 17 | #endif 18 | /* ACPI S3 wakeup */ 19 | #ifdef CONFIG_ACPI_SLEEP 20 | u32 wakeup_start; 21 | u32 wakeup_header; 22 | #endif 23 | /* APM/BIOS reboot */ 24 | u32 machine_real_restart_asm; 25 | #ifdef CONFIG_X86_64 26 | u32 machine_real_restart_seg; 27 | #endif 28 | }; 29 | 30 | /* This must match data at trampoline_32/64.S */ 31 | struct trampoline_header { 32 | #ifdef CONFIG_X86_32 33 | u32 start; 34 | u16 gdt_pad; 35 | u16 gdt_limit; 36 | u32 gdt_base; 37 | #else 38 | u64 start; 39 | u64 efer; 40 | u32 cr4; 41 | #endif 42 | }; 43 | 44 | extern struct real_mode_header *real_mode_header; 45 | extern unsigned char real_mode_blob_end[]; 46 | 47 | extern unsigned long init_rsp; 48 | extern unsigned long initial_code; 49 | extern unsigned long initial_gs; 50 | 51 | extern unsigned char real_mode_blob[]; 52 | extern unsigned char real_mode_relocs[]; 53 | 54 | #ifdef CONFIG_X86_32 55 | extern unsigned char startup_32_smp[]; 56 | extern unsigned char boot_gdt[]; 57 | #else 58 | extern unsigned char secondary_startup_64[]; 59 | #endif 60 | 61 | static inline size_t real_mode_size_needed(void) 62 | { 63 | if (real_mode_header) 64 | return 0; /* already allocated. 
*/ 65 | 66 | return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); 67 | } 68 | 69 | void set_real_mode_mem(phys_addr_t mem, size_t size); 70 | void reserve_real_mode(void); 71 | 72 | #endif /* _ARCH_X86_REALMODE_H */ 73 | -------------------------------------------------------------------------------- /arch/x86/include/asm/reboot.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_REBOOT_H 2 | #define _ASM_X86_REBOOT_H 3 | 4 | #include 5 | 6 | struct pt_regs; 7 | 8 | struct machine_ops { 9 | void (*restart)(char *cmd); 10 | void (*halt)(void); 11 | void (*power_off)(void); 12 | void (*shutdown)(void); 13 | void (*crash_shutdown)(struct pt_regs *); 14 | void (*emergency_restart)(void); 15 | }; 16 | 17 | extern struct machine_ops machine_ops; 18 | 19 | void native_machine_crash_shutdown(struct pt_regs *regs); 20 | void native_machine_shutdown(void); 21 | void __noreturn machine_real_restart(unsigned int type); 22 | /* These must match dispatch in arch/x86/realmore/rm/reboot.S */ 23 | #define MRR_BIOS 0 24 | #define MRR_APM 1 25 | 26 | typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); 27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); 28 | void run_crash_ipi_callback(struct pt_regs *regs); 29 | 30 | #endif /* _ASM_X86_REBOOT_H */ 31 | -------------------------------------------------------------------------------- /arch/x86/include/asm/reboot_fixups.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_REBOOT_FIXUPS_H 2 | #define _ASM_X86_REBOOT_FIXUPS_H 3 | 4 | extern void mach_reboot_fixups(void); 5 | 6 | #endif /* _ASM_X86_REBOOT_FIXUPS_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/rmwcc.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_RMWcc 2 | #define _ASM_X86_RMWcc 3 | 4 | #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) 5 | 6 | /* Use asm goto */ 7 | 8 | #define __GEN_RMWcc(fullop, var, cc, ...) \ 9 | do { \ 10 | asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ 11 | : : "m" (var), ## __VA_ARGS__ \ 12 | : "memory" : cc_label); \ 13 | return 0; \ 14 | cc_label: \ 15 | return 1; \ 16 | } while (0) 17 | 18 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 19 | __GEN_RMWcc(op " " arg0, var, cc) 20 | 21 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ 22 | __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) 23 | 24 | #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ 25 | 26 | /* Use flags output or a set instruction */ 27 | 28 | #define __GEN_RMWcc(fullop, var, cc, ...) 
\ 29 | do { \ 30 | bool c; \ 31 | asm volatile (fullop ";" CC_SET(cc) \ 32 | : "+m" (var), CC_OUT(cc) (c) \ 33 | : __VA_ARGS__ : "memory"); \ 34 | return c; \ 35 | } while (0) 36 | 37 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 38 | __GEN_RMWcc(op " " arg0, var, cc) 39 | 40 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ 41 | __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) 42 | 43 | #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ 44 | 45 | #endif /* _ASM_X86_RMWcc */ 46 | -------------------------------------------------------------------------------- /arch/x86/include/asm/seccomp.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SECCOMP_H 2 | #define _ASM_X86_SECCOMP_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_X86_32 7 | #define __NR_seccomp_sigreturn __NR_sigreturn 8 | #endif 9 | 10 | #ifdef CONFIG_COMPAT 11 | #include 12 | #define __NR_seccomp_read_32 __NR_ia32_read 13 | #define __NR_seccomp_write_32 __NR_ia32_write 14 | #define __NR_seccomp_exit_32 __NR_ia32_exit 15 | #define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn 16 | #endif 17 | 18 | #include 19 | 20 | #endif /* _ASM_X86_SECCOMP_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sections.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SECTIONS_H 2 | #define _ASM_X86_SECTIONS_H 3 | 4 | #include 5 | #include 6 | 7 | extern char __brk_base[], __brk_limit[]; 8 | extern struct exception_table_entry __stop___ex_table[]; 9 | 10 | #if defined(CONFIG_X86_64) 11 | extern char __end_rodata_hpage_align[]; 12 | #endif 13 | 14 | #endif /* _ASM_X86_SECTIONS_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/serial.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SERIAL_H 2 | #define _ASM_X86_SERIAL_H 3 | 4 | /* 5 | * This assumes you have a 1.8432 MHz clock for your UART. 6 | * 7 | * It'd be nice if someone built a serial card with a 24.576 MHz 8 | * clock, since the 16550A is capable of handling a top speed of 1.5 9 | * megabits/second; but this requires a faster clock. 
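 *
 * With the standard 1.8432 MHz clock, BASE_BAUD below works out to
 * 1843200 / 16 = 115200.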
10 | */ 11 | #define BASE_BAUD (1843200/16) 12 | 13 | /* Standard COM flags (except for COM4, because of the 8514 problem) */ 14 | #ifdef CONFIG_SERIAL_8250_DETECT_IRQ 15 | # define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ) 16 | # define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | UPF_AUTO_IRQ) 17 | #else 18 | # define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | 0 ) 19 | # define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | 0 ) 20 | #endif 21 | 22 | #define SERIAL_PORT_DFNS \ 23 | /* UART CLK PORT IRQ FLAGS */ \ 24 | { .uart = 0, BASE_BAUD, 0x3F8, 4, STD_COMX_FLAGS }, /* ttyS0 */ \ 25 | { .uart = 0, BASE_BAUD, 0x2F8, 3, STD_COMX_FLAGS }, /* ttyS1 */ \ 26 | { .uart = 0, BASE_BAUD, 0x3E8, 4, STD_COMX_FLAGS }, /* ttyS2 */ \ 27 | { .uart = 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ 28 | 29 | #endif /* _ASM_X86_SERIAL_H */ 30 | -------------------------------------------------------------------------------- /arch/x86/include/asm/setup_arch.h: -------------------------------------------------------------------------------- 1 | /* Hook to call BIOS initialisation function */ 2 | 3 | /* no action for generic */ 4 | -------------------------------------------------------------------------------- /arch/x86/include/asm/shmparam.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SHMPARAM_H 2 | #define _ASM_X86_SHMPARAM_H 3 | 4 | #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ 5 | 6 | #endif /* _ASM_X86_SHMPARAM_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sigcontext.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SIGCONTEXT_H 2 | #define _ASM_X86_SIGCONTEXT_H 3 | 4 | /* This is a legacy header - all kernel code includes directly. */ 5 | 6 | #include 7 | 8 | #endif /* _ASM_X86_SIGCONTEXT_H */ 9 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sighandling.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SIGHANDLING_H 2 | #define _ASM_X86_SIGHANDLING_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ 11 | X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ 12 | X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ 13 | X86_EFLAGS_CF | X86_EFLAGS_RF) 14 | 15 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); 16 | int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, 17 | struct pt_regs *regs, unsigned long mask); 18 | 19 | #endif /* _ASM_X86_SIGHANDLING_H */ 20 | -------------------------------------------------------------------------------- /arch/x86/include/asm/simd.h: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | 4 | /* 5 | * may_use_simd - whether it is allowable at this time to issue SIMD 6 | * instructions or access the SIMD register file 7 | */ 8 | static __must_check inline bool may_use_simd(void) 9 | { 10 | return irq_fpu_usable(); 11 | } 12 | -------------------------------------------------------------------------------- /arch/x86/include/asm/smap.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Supervisor Mode Access Prevention support 3 | * 4 | * Copyright (C) 2012 Intel Corporation 5 | * Author: H. 
Peter Anvin 6 | * 7 | * This program is free software; you can redistribute it and/or 8 | * modify it under the terms of the GNU General Public License 9 | * as published by the Free Software Foundation; version 2 10 | * of the License. 11 | */ 12 | 13 | #ifndef _ASM_X86_SMAP_H 14 | #define _ASM_X86_SMAP_H 15 | 16 | #include 17 | #include 18 | #include 19 | 20 | /* "Raw" instruction opcodes */ 21 | #define __ASM_CLAC .byte 0x0f,0x01,0xca 22 | #define __ASM_STAC .byte 0x0f,0x01,0xcb 23 | 24 | #ifdef __ASSEMBLY__ 25 | 26 | #include 27 | 28 | #ifdef CONFIG_X86_SMAP 29 | 30 | #define ASM_CLAC \ 31 | ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP 32 | 33 | #define ASM_STAC \ 34 | ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP 35 | 36 | #else /* CONFIG_X86_SMAP */ 37 | 38 | #define ASM_CLAC 39 | #define ASM_STAC 40 | 41 | #endif /* CONFIG_X86_SMAP */ 42 | 43 | #else /* __ASSEMBLY__ */ 44 | 45 | #include 46 | 47 | #ifdef CONFIG_X86_SMAP 48 | 49 | static __always_inline void clac(void) 50 | { 51 | /* Note: a barrier is implicit in alternative() */ 52 | alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 53 | } 54 | 55 | static __always_inline void stac(void) 56 | { 57 | /* Note: a barrier is implicit in alternative() */ 58 | alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 59 | } 60 | 61 | /* These macros can be used in asm() statements */ 62 | #define ASM_CLAC \ 63 | ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP) 64 | #define ASM_STAC \ 65 | ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP) 66 | 67 | #else /* CONFIG_X86_SMAP */ 68 | 69 | static inline void clac(void) { } 70 | static inline void stac(void) { } 71 | 72 | #define ASM_CLAC 73 | #define ASM_STAC 74 | 75 | #endif /* CONFIG_X86_SMAP */ 76 | 77 | #endif /* __ASSEMBLY__ */ 78 | 79 | #endif /* _ASM_X86_SMAP_H */ 80 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sparsemem.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SPARSEMEM_H 2 | #define _ASM_X86_SPARSEMEM_H 3 | 4 | #ifdef CONFIG_SPARSEMEM 5 | /* 6 | * generic non-linear memory support: 7 | * 8 | * 1) we will not split memory into more chunks than will fit into the flags 9 | * field of the struct page 10 | * 11 | * SECTION_SIZE_BITS 2^n: size of each section 12 | * MAX_PHYSADDR_BITS 2^n: max size of physical address space 13 | * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space 14 | * 15 | */ 16 | 17 | #ifdef CONFIG_X86_32 18 | # ifdef CONFIG_X86_PAE 19 | # define SECTION_SIZE_BITS 29 20 | # define MAX_PHYSADDR_BITS 36 21 | # define MAX_PHYSMEM_BITS 36 22 | # else 23 | # define SECTION_SIZE_BITS 26 24 | # define MAX_PHYSADDR_BITS 32 25 | # define MAX_PHYSMEM_BITS 32 26 | # endif 27 | #else /* CONFIG_X86_32 */ 28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ 29 | # define MAX_PHYSADDR_BITS 44 30 | # define MAX_PHYSMEM_BITS 46 31 | #endif 32 | 33 | #endif /* CONFIG_SPARSEMEM */ 34 | #endif /* _ASM_X86_SPARSEMEM_H */ 35 | -------------------------------------------------------------------------------- /arch/x86/include/asm/spinlock_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SPINLOCK_TYPES_H 2 | #define _ASM_X86_SPINLOCK_TYPES_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_PARAVIRT_SPINLOCKS 7 | #define __TICKET_LOCK_INC 2 8 | #define TICKET_SLOWPATH_FLAG ((__ticket_t)1) 9 | #else 10 | #define __TICKET_LOCK_INC 1 
11 | #define TICKET_SLOWPATH_FLAG ((__ticket_t)0) 12 | #endif 13 | 14 | #if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC)) 15 | typedef u8 __ticket_t; 16 | typedef u16 __ticketpair_t; 17 | #else 18 | typedef u16 __ticket_t; 19 | typedef u32 __ticketpair_t; 20 | #endif 21 | 22 | #define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC) 23 | 24 | #define TICKET_SHIFT (sizeof(__ticket_t) * 8) 25 | 26 | #ifdef CONFIG_QUEUED_SPINLOCKS 27 | #include 28 | #else 29 | typedef struct arch_spinlock { 30 | union { 31 | __ticketpair_t head_tail; 32 | struct __raw_tickets { 33 | __ticket_t head, tail; 34 | } tickets; 35 | }; 36 | } arch_spinlock_t; 37 | 38 | #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 39 | #endif /* CONFIG_QUEUED_SPINLOCKS */ 40 | 41 | #include 42 | 43 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ 44 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sta2x11.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Header file for STMicroelectronics ConneXt (STA2X11) IOHub 3 | */ 4 | #ifndef __ASM_STA2X11_H 5 | #define __ASM_STA2X11_H 6 | 7 | #include 8 | 9 | /* This needs to be called from the MFD to configure its sub-devices */ 10 | struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev); 11 | 12 | #endif /* __ASM_STA2X11_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/string.h: -------------------------------------------------------------------------------- 1 | #ifdef CONFIG_X86_32 2 | # include 3 | #else 4 | # include 5 | #endif 6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/suspend.h: -------------------------------------------------------------------------------- 1 | #ifdef CONFIG_X86_32 2 | # include 3 | #else 4 | # include 5 | #endif 6 | -------------------------------------------------------------------------------- /arch/x86/include/asm/suspend_32.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2001-2002 Pavel Machek 3 | * Based on code 4 | * Copyright 2001 Patrick Mochel 5 | */ 6 | #ifndef _ASM_X86_SUSPEND_32_H 7 | #define _ASM_X86_SUSPEND_32_H 8 | 9 | #include 10 | #include 11 | 12 | /* image of the saved processor state */ 13 | struct saved_context { 14 | u16 es, fs, gs, ss; 15 | unsigned long cr0, cr2, cr3, cr4; 16 | u64 misc_enable; 17 | bool misc_enable_saved; 18 | struct saved_msrs saved_msrs; 19 | struct desc_ptr gdt_desc; 20 | struct desc_ptr idt; 21 | u16 ldt; 22 | u16 tss; 23 | unsigned long tr; 24 | unsigned long safety; 25 | unsigned long return_address; 26 | } __attribute__((packed)); 27 | 28 | #endif /* _ASM_X86_SUSPEND_32_H */ 29 | -------------------------------------------------------------------------------- /arch/x86/include/asm/suspend_64.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2001-2003 Pavel Machek 3 | * Based on code 4 | * Copyright 2001 Patrick Mochel 5 | */ 6 | #ifndef _ASM_X86_SUSPEND_64_H 7 | #define _ASM_X86_SUSPEND_64_H 8 | 9 | #include 10 | #include 11 | 12 | /* 13 | * Image of the saved processor state, used by the low level ACPI suspend to 14 | * RAM code and by the low level hibernation code. 15 | * 16 | * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that 17 | * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, 18 | * still work as required. 
19 | */ 20 | struct saved_context { 21 | struct pt_regs regs; 22 | u16 ds, es, fs, gs, ss; 23 | unsigned long gs_base, gs_kernel_base, fs_base; 24 | unsigned long cr0, cr2, cr3, cr4, cr8; 25 | u64 misc_enable; 26 | bool misc_enable_saved; 27 | struct saved_msrs saved_msrs; 28 | unsigned long efer; 29 | u16 gdt_pad; /* Unused */ 30 | struct desc_ptr gdt_desc; 31 | u16 idt_pad; 32 | u16 idt_limit; 33 | unsigned long idt_base; 34 | u16 ldt; 35 | u16 tss; 36 | unsigned long tr; 37 | unsigned long safety; 38 | unsigned long return_address; 39 | } __attribute__((packed)); 40 | 41 | #define loaddebug(thread,register) \ 42 | set_debugreg((thread)->debugreg##register, register) 43 | 44 | /* routines for saving/restoring kernel state */ 45 | extern int acpi_save_state_mem(void); 46 | extern char core_restore_code; 47 | extern char restore_registers; 48 | 49 | #endif /* _ASM_X86_SUSPEND_64_H */ 50 | -------------------------------------------------------------------------------- /arch/x86/include/asm/swiotlb.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SWIOTLB_H 2 | #define _ASM_X86_SWIOTLB_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_SWIOTLB 7 | extern int swiotlb; 8 | extern int __init pci_swiotlb_detect_override(void); 9 | extern int __init pci_swiotlb_detect_4gb(void); 10 | extern void __init pci_swiotlb_init(void); 11 | extern void __init pci_swiotlb_late_init(void); 12 | #else 13 | #define swiotlb 0 14 | static inline int pci_swiotlb_detect_override(void) 15 | { 16 | return 0; 17 | } 18 | static inline int pci_swiotlb_detect_4gb(void) 19 | { 20 | return 0; 21 | } 22 | static inline void pci_swiotlb_init(void) 23 | { 24 | } 25 | static inline void pci_swiotlb_late_init(void) 26 | { 27 | } 28 | #endif 29 | 30 | static inline void dma_mark_clean(void *addr, size_t size) {} 31 | 32 | extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 33 | dma_addr_t *dma_handle, gfp_t flags, 34 | unsigned long attrs); 35 | extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, 36 | void *vaddr, dma_addr_t dma_addr, 37 | unsigned long attrs); 38 | 39 | #endif /* _ASM_X86_SWIOTLB_H */ 40 | -------------------------------------------------------------------------------- /arch/x86/include/asm/sys_ia32.h: -------------------------------------------------------------------------------- 1 | /* 2 | * sys_ia32.h - Linux ia32 syscall interfaces 3 | * 4 | * Copyright (c) 2008 Jaswinder Singh Rajput 5 | * 6 | * This file is released under the GPLv2. 7 | * See the file COPYING for more details. 
8 | */ 9 | 10 | #ifndef _ASM_X86_SYS_IA32_H 11 | #define _ASM_X86_SYS_IA32_H 12 | 13 | #ifdef CONFIG_COMPAT 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | /* ia32/sys_ia32.c */ 23 | asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); 24 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); 25 | 26 | asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); 27 | asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); 28 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); 29 | asmlinkage long sys32_fstatat(unsigned int, const char __user *, 30 | struct stat64 __user *, int); 31 | struct mmap_arg_struct32; 32 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); 33 | 34 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); 35 | 36 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); 37 | asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); 38 | 39 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); 40 | long sys32_vm86_warning(void); 41 | 42 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); 43 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, 44 | unsigned, unsigned, int); 45 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); 46 | asmlinkage long sys32_fallocate(int, int, unsigned, 47 | unsigned, unsigned, unsigned); 48 | 49 | /* ia32/ia32_signal.c */ 50 | asmlinkage long sys32_sigreturn(void); 51 | asmlinkage long sys32_rt_sigreturn(void); 52 | 53 | #endif /* CONFIG_COMPAT */ 54 | 55 | #endif /* _ASM_X86_SYS_IA32_H */ 56 | -------------------------------------------------------------------------------- /arch/x86/include/asm/syscalls.h: -------------------------------------------------------------------------------- 1 | /* 2 | * syscalls.h - Linux syscall interfaces (arch-specific) 3 | * 4 | * Copyright (c) 2008 Jaswinder Singh Rajput 5 | * 6 | * This file is released under the GPLv2. 7 | * See the file COPYING for more details. 
8 | */ 9 | 10 | #ifndef _ASM_X86_SYSCALLS_H 11 | #define _ASM_X86_SYSCALLS_H 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | /* Common in X86_32 and X86_64 */ 19 | /* kernel/ioport.c */ 20 | asmlinkage long sys_ioperm(unsigned long, unsigned long, int); 21 | asmlinkage long sys_iopl(unsigned int); 22 | 23 | /* kernel/ldt.c */ 24 | asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); 25 | 26 | /* kernel/signal.c */ 27 | asmlinkage long sys_rt_sigreturn(void); 28 | 29 | /* kernel/tls.c */ 30 | asmlinkage long sys_set_thread_area(struct user_desc __user *); 31 | asmlinkage long sys_get_thread_area(struct user_desc __user *); 32 | 33 | /* X86_32 only */ 34 | #ifdef CONFIG_X86_32 35 | 36 | /* kernel/signal.c */ 37 | asmlinkage unsigned long sys_sigreturn(void); 38 | 39 | /* kernel/vm86_32.c */ 40 | struct vm86_struct; 41 | asmlinkage long sys_vm86old(struct vm86_struct __user *); 42 | asmlinkage long sys_vm86(unsigned long, unsigned long); 43 | 44 | #else /* CONFIG_X86_32 */ 45 | 46 | /* X86_64 only */ 47 | /* kernel/process_64.c */ 48 | asmlinkage long sys_arch_prctl(int, unsigned long); 49 | 50 | /* kernel/sys_x86_64.c */ 51 | asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, 52 | unsigned long, unsigned long, unsigned long); 53 | 54 | #endif /* CONFIG_X86_32 */ 55 | #endif /* _ASM_X86_SYSCALLS_H */ 56 | -------------------------------------------------------------------------------- /arch/x86/include/asm/tce.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is derived from asm-powerpc/tce.h. 3 | * 4 | * Copyright (C) IBM Corporation, 2006 5 | * 6 | * Author: Muli Ben-Yehuda 7 | * Author: Jon Mason 8 | * 9 | * This program is free software; you can redistribute it and/or modify 10 | * it under the terms of the GNU General Public License as published by 11 | * the Free Software Foundation; either version 2 of the License, or 12 | * (at your option) any later version. 13 | * 14 | * This program is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | * GNU General Public License for more details. 
18 | * 19 | * You should have received a copy of the GNU General Public License 20 | * along with this program; if not, write to the Free Software 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 | */ 23 | 24 | #ifndef _ASM_X86_TCE_H 25 | #define _ASM_X86_TCE_H 26 | 27 | extern unsigned int specified_table_size; 28 | struct iommu_table; 29 | 30 | #define TCE_ENTRY_SIZE 8 /* in bytes */ 31 | 32 | #define TCE_READ_SHIFT 0 33 | #define TCE_WRITE_SHIFT 1 34 | #define TCE_HUBID_SHIFT 2 /* unused */ 35 | #define TCE_RSVD_SHIFT 8 /* unused */ 36 | #define TCE_RPN_SHIFT 12 37 | #define TCE_UNUSED_SHIFT 48 /* unused */ 38 | 39 | #define TCE_RPN_MASK 0x0000fffffffff000ULL 40 | 41 | extern void tce_build(struct iommu_table *tbl, unsigned long index, 42 | unsigned int npages, unsigned long uaddr, int direction); 43 | extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); 44 | extern void * __init alloc_tce_table(void); 45 | extern void __init free_tce_table(void *tbl); 46 | extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar); 47 | 48 | #endif /* _ASM_X86_TCE_H */ 49 | -------------------------------------------------------------------------------- /arch/x86/include/asm/text-patching.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TEXT_PATCHING_H 2 | #define _ASM_X86_TEXT_PATCHING_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | struct paravirt_patch_site; 9 | #ifdef CONFIG_PARAVIRT 10 | void apply_paravirt(struct paravirt_patch_site *start, 11 | struct paravirt_patch_site *end); 12 | #else 13 | static inline void apply_paravirt(struct paravirt_patch_site *start, 14 | struct paravirt_patch_site *end) 15 | {} 16 | #define __parainstructions NULL 17 | #define __parainstructions_end NULL 18 | #endif 19 | 20 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); 21 | 22 | /* 23 | * Clear and restore the kernel write-protection flag on the local CPU. 24 | * Allows the kernel to edit read-only pages. 25 | * Side-effect: any interrupt handler running between save and restore will have 26 | * the ability to write to read-only pages. 27 | * 28 | * Warning: 29 | * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and 30 | * no thread can be preempted in the instructions being modified (no iret to an 31 | * invalid instruction possible) or if the instructions are changed from a 32 | * consistent state to another consistent state atomically. 33 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an 34 | * inconsistent instruction while you patch. 
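 * text_poke_bp() below handles the SMP case by first planting an int3
 * breakpoint on the first byte, syncing all CPUs, writing the tail of the
 * new instruction, and only then restoring the first byte; CPUs that hit
 * the temporary breakpoint are handled by poke_int3_handler().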
35 | */ 36 | extern void *text_poke(void *addr, const void *opcode, size_t len); 37 | extern int poke_int3_handler(struct pt_regs *regs); 38 | extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); 39 | 40 | #endif /* _ASM_X86_TEXT_PATCHING_H */ 41 | -------------------------------------------------------------------------------- /arch/x86/include/asm/time.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TIME_H 2 | #define _ASM_X86_TIME_H 3 | 4 | #include 5 | #include 6 | 7 | extern void hpet_time_init(void); 8 | extern void time_init(void); 9 | 10 | extern struct clock_event_device *global_clock_event; 11 | 12 | #endif /* _ASM_X86_TIME_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/timer.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TIMER_H 2 | #define _ASM_X86_TIMER_H 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define TICK_SIZE (tick_nsec / 1000) 9 | 10 | unsigned long long native_sched_clock(void); 11 | extern int recalibrate_cpu_khz(void); 12 | 13 | extern int no_timer_check; 14 | 15 | /* 16 | * We use the full linear equation: f(x) = a + b*x, in order to allow 17 | * a continuous function in the face of dynamic freq changes. 18 | * 19 | * Continuity means that when our frequency changes our slope (b); we want to 20 | * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t. 21 | * 22 | * Without an offset (a) the above would not be possible. 23 | * 24 | * See the comment near cycles_2_ns() for details on how we compute (b). 25 | */ 26 | struct cyc2ns_data { 27 | u32 cyc2ns_mul; 28 | u32 cyc2ns_shift; 29 | u64 cyc2ns_offset; 30 | u32 __count; 31 | /* u32 hole */ 32 | }; /* 24 bytes -- do not grow */ 33 | 34 | extern struct cyc2ns_data *cyc2ns_read_begin(void); 35 | extern void cyc2ns_read_end(struct cyc2ns_data *); 36 | 37 | #endif /* _ASM_X86_TIMER_H */ 38 | -------------------------------------------------------------------------------- /arch/x86/include/asm/timex.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TIMEX_H 2 | #define _ASM_X86_TIMEX_H 3 | 4 | #include 5 | #include 6 | 7 | /* Assume we use the PIT time source for the clock tick */ 8 | #define CLOCK_TICK_RATE PIT_TICK_RATE 9 | 10 | #define ARCH_HAS_READ_CURRENT_TIMER 11 | 12 | #endif /* _ASM_X86_TIMEX_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/asm/tlb.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TLB_H 2 | #define _ASM_X86_TLB_H 3 | 4 | #define tlb_start_vma(tlb, vma) do { } while (0) 5 | #define tlb_end_vma(tlb, vma) do { } while (0) 6 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 7 | 8 | #define tlb_flush(tlb) \ 9 | { \ 10 | if (!tlb->fullmm && !tlb->need_flush_all) \ 11 | flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ 12 | else \ 13 | flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ 14 | } 15 | 16 | #include 17 | 18 | #endif /* _ASM_X86_TLB_H */ 19 | -------------------------------------------------------------------------------- /arch/x86/include/asm/trace/exceptions.h: -------------------------------------------------------------------------------- 1 | #undef TRACE_SYSTEM 2 | #define TRACE_SYSTEM exceptions 3 | 4 | #if !defined(_TRACE_PAGE_FAULT_H) || 
defined(TRACE_HEADER_MULTI_READ) 5 | #define _TRACE_PAGE_FAULT_H 6 | 7 | #include 8 | 9 | extern void trace_irq_vector_regfunc(void); 10 | extern void trace_irq_vector_unregfunc(void); 11 | 12 | DECLARE_EVENT_CLASS(x86_exceptions, 13 | 14 | TP_PROTO(unsigned long address, struct pt_regs *regs, 15 | unsigned long error_code), 16 | 17 | TP_ARGS(address, regs, error_code), 18 | 19 | TP_STRUCT__entry( 20 | __field( unsigned long, address ) 21 | __field( unsigned long, ip ) 22 | __field( unsigned long, error_code ) 23 | ), 24 | 25 | TP_fast_assign( 26 | __entry->address = address; 27 | __entry->ip = regs->ip; 28 | __entry->error_code = error_code; 29 | ), 30 | 31 | TP_printk("address=%pf ip=%pf error_code=0x%lx", 32 | (void *)__entry->address, (void *)__entry->ip, 33 | __entry->error_code) ); 34 | 35 | #define DEFINE_PAGE_FAULT_EVENT(name) \ 36 | DEFINE_EVENT_FN(x86_exceptions, name, \ 37 | TP_PROTO(unsigned long address, struct pt_regs *regs, \ 38 | unsigned long error_code), \ 39 | TP_ARGS(address, regs, error_code), \ 40 | trace_irq_vector_regfunc, \ 41 | trace_irq_vector_unregfunc); 42 | 43 | DEFINE_PAGE_FAULT_EVENT(page_fault_user); 44 | DEFINE_PAGE_FAULT_EVENT(page_fault_kernel); 45 | 46 | #undef TRACE_INCLUDE_PATH 47 | #define TRACE_INCLUDE_PATH . 48 | #define TRACE_INCLUDE_FILE exceptions 49 | #endif /* _TRACE_PAGE_FAULT_H */ 50 | 51 | /* This part must be outside protection */ 52 | #include 53 | -------------------------------------------------------------------------------- /arch/x86/include/asm/trace_clock.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TRACE_CLOCK_H 2 | #define _ASM_X86_TRACE_CLOCK_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef CONFIG_X86_TSC 8 | 9 | extern u64 notrace trace_clock_x86_tsc(void); 10 | 11 | # define ARCH_TRACE_CLOCKS \ 12 | { trace_clock_x86_tsc, "x86-tsc", .in_ns = 0 }, 13 | 14 | #else /* !CONFIG_X86_TSC */ 15 | 16 | #define ARCH_TRACE_CLOCKS 17 | 18 | #endif 19 | 20 | #endif /* _ASM_X86_TRACE_CLOCK_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/asm/tsc.h: -------------------------------------------------------------------------------- 1 | /* 2 | * x86 TSC related functions 3 | */ 4 | #ifndef _ASM_X86_TSC_H 5 | #define _ASM_X86_TSC_H 6 | 7 | #include 8 | 9 | #define NS_SCALE 10 /* 2^10, carefully chosen */ 10 | #define US_SCALE 32 /* 2^32, arbitralrily chosen */ 11 | 12 | /* 13 | * Standard way to access the cycle counter. 
14 | */ 15 | typedef unsigned long long cycles_t; 16 | 17 | extern unsigned int cpu_khz; 18 | extern unsigned int tsc_khz; 19 | 20 | extern void disable_TSC(void); 21 | 22 | static inline cycles_t get_cycles(void) 23 | { 24 | #ifndef CONFIG_X86_TSC 25 | if (!boot_cpu_has(X86_FEATURE_TSC)) 26 | return 0; 27 | #endif 28 | 29 | return rdtsc(); 30 | } 31 | 32 | extern struct system_counterval_t convert_art_to_tsc(cycle_t art); 33 | 34 | extern void tsc_init(void); 35 | extern void mark_tsc_unstable(char *reason); 36 | extern int unsynchronized_tsc(void); 37 | extern int check_tsc_unstable(void); 38 | extern unsigned long native_calibrate_cpu(void); 39 | extern unsigned long native_calibrate_tsc(void); 40 | extern unsigned long long native_sched_clock_from_tsc(u64 tsc); 41 | 42 | extern int tsc_clocksource_reliable; 43 | 44 | /* 45 | * Boot-time check whether the TSCs are synchronized across 46 | * all CPUs/cores: 47 | */ 48 | extern void check_tsc_sync_source(int cpu); 49 | extern void check_tsc_sync_target(void); 50 | 51 | extern int notsc_setup(char *); 52 | extern void tsc_save_sched_clock_state(void); 53 | extern void tsc_restore_sched_clock_state(void); 54 | 55 | unsigned long cpu_khz_from_msr(void); 56 | 57 | #endif /* _ASM_X86_TSC_H */ 58 | -------------------------------------------------------------------------------- /arch/x86/include/asm/unaligned.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_UNALIGNED_H 2 | #define _ASM_X86_UNALIGNED_H 3 | 4 | /* 5 | * The x86 can do unaligned accesses itself. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #define get_unaligned __get_unaligned_le 12 | #define put_unaligned __put_unaligned_le 13 | 14 | #endif /* _ASM_X86_UNALIGNED_H */ 15 | -------------------------------------------------------------------------------- /arch/x86/include/asm/unistd.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_UNISTD_H 2 | #define _ASM_X86_UNISTD_H 1 3 | 4 | #include 5 | 6 | 7 | # ifdef CONFIG_X86_X32_ABI 8 | # define __SYSCALL_MASK (~(__X32_SYSCALL_BIT)) 9 | # else 10 | # define __SYSCALL_MASK (~0) 11 | # endif 12 | 13 | # ifdef CONFIG_X86_32 14 | 15 | # include 16 | # define __ARCH_WANT_STAT64 17 | # define __ARCH_WANT_SYS_IPC 18 | # define __ARCH_WANT_SYS_OLD_MMAP 19 | # define __ARCH_WANT_SYS_OLD_SELECT 20 | 21 | # else 22 | 23 | # include 24 | # include 25 | # define __ARCH_WANT_COMPAT_SYS_TIME 26 | # define __ARCH_WANT_COMPAT_SYS_GETDENTS64 27 | # define __ARCH_WANT_COMPAT_SYS_PREADV64 28 | # define __ARCH_WANT_COMPAT_SYS_PWRITEV64 29 | # define __ARCH_WANT_COMPAT_SYS_PREADV64V2 30 | # define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 31 | 32 | # endif 33 | 34 | # define __ARCH_WANT_OLD_READDIR 35 | # define __ARCH_WANT_OLD_STAT 36 | # define __ARCH_WANT_SYS_ALARM 37 | # define __ARCH_WANT_SYS_FADVISE64 38 | # define __ARCH_WANT_SYS_GETHOSTNAME 39 | # define __ARCH_WANT_SYS_GETPGRP 40 | # define __ARCH_WANT_SYS_LLSEEK 41 | # define __ARCH_WANT_SYS_NICE 42 | # define __ARCH_WANT_SYS_OLDUMOUNT 43 | # define __ARCH_WANT_SYS_OLD_GETRLIMIT 44 | # define __ARCH_WANT_SYS_OLD_UNAME 45 | # define __ARCH_WANT_SYS_PAUSE 46 | # define __ARCH_WANT_SYS_SIGNAL 47 | # define __ARCH_WANT_SYS_SIGPENDING 48 | # define __ARCH_WANT_SYS_SIGPROCMASK 49 | # define __ARCH_WANT_SYS_SOCKETCALL 50 | # define __ARCH_WANT_SYS_TIME 51 | # define __ARCH_WANT_SYS_UTIME 52 | # define __ARCH_WANT_SYS_WAITPID 53 | # define __ARCH_WANT_SYS_FORK 54 | # define __ARCH_WANT_SYS_VFORK 55 | # 
define __ARCH_WANT_SYS_CLONE 56 | 57 | #endif /* _ASM_X86_UNISTD_H */ 58 | -------------------------------------------------------------------------------- /arch/x86/include/asm/uprobes.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_UPROBES_H 2 | #define _ASM_UPROBES_H 3 | /* 4 | * User-space Probes (UProbes) for x86 5 | * 6 | * This program is free software; you can redistribute it and/or modify 7 | * it under the terms of the GNU General Public License as published by 8 | * the Free Software Foundation; either version 2 of the License, or 9 | * (at your option) any later version. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | * GNU General Public License for more details. 15 | * 16 | * You should have received a copy of the GNU General Public License 17 | * along with this program; if not, write to the Free Software 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 | * 20 | * Copyright (C) IBM Corporation, 2008-2011 21 | * Authors: 22 | * Srikar Dronamraju 23 | * Jim Keniston 24 | */ 25 | 26 | #include 27 | 28 | typedef u8 uprobe_opcode_t; 29 | 30 | #define MAX_UINSN_BYTES 16 31 | #define UPROBE_XOL_SLOT_BYTES 128 /* to keep it cache aligned */ 32 | 33 | #define UPROBE_SWBP_INSN 0xcc 34 | #define UPROBE_SWBP_INSN_SIZE 1 35 | 36 | struct uprobe_xol_ops; 37 | 38 | struct arch_uprobe { 39 | union { 40 | u8 insn[MAX_UINSN_BYTES]; 41 | u8 ixol[MAX_UINSN_BYTES]; 42 | }; 43 | 44 | const struct uprobe_xol_ops *ops; 45 | 46 | union { 47 | struct { 48 | s32 offs; 49 | u8 ilen; 50 | u8 opc1; 51 | } branch; 52 | struct { 53 | u8 fixups; 54 | u8 ilen; 55 | } defparam; 56 | }; 57 | }; 58 | 59 | struct arch_uprobe_task { 60 | #ifdef CONFIG_X86_64 61 | unsigned long saved_scratch_register; 62 | #endif 63 | unsigned int saved_trap_nr; 64 | unsigned int saved_tf; 65 | }; 66 | 67 | #endif /* _ASM_UPROBES_H */ 68 | -------------------------------------------------------------------------------- /arch/x86/include/asm/uv/uv.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_UV_UV_H 2 | #define _ASM_X86_UV_UV_H 3 | 4 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; 5 | 6 | struct cpumask; 7 | struct mm_struct; 8 | 9 | #ifdef CONFIG_X86_UV 10 | 11 | extern enum uv_system_type get_uv_system_type(void); 12 | extern int is_uv_system(void); 13 | extern void uv_cpu_init(void); 14 | extern void uv_nmi_init(void); 15 | extern void uv_system_init(void); 16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 17 | struct mm_struct *mm, 18 | unsigned long start, 19 | unsigned long end, 20 | unsigned int cpu); 21 | 22 | #else /* X86_UV */ 23 | 24 | static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } 25 | static inline int is_uv_system(void) { return 0; } 26 | static inline void uv_cpu_init(void) { } 27 | static inline void uv_system_init(void) { } 28 | static inline const struct cpumask * 29 | uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, 30 | unsigned long start, unsigned long end, unsigned int cpu) 31 | { return cpumask; } 32 | 33 | #endif /* X86_UV */ 34 | 35 | #endif /* _ASM_X86_UV_UV_H */ 36 | -------------------------------------------------------------------------------- 
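A brief aside on the __SYSCALL_MASK definition in asm/unistd.h above: with CONFIG_X86_X32_ABI, x32 programs issue system calls with __X32_SYSCALL_BIT ORed into the syscall number (the bit value, 0x40000000, appears in uapi/asm/unistd.h further below), and the 64-bit entry path masks that flag off before indexing its syscall table. The standalone sketch below only demonstrates the masking arithmetic; it is not kernel code, and the sample number is just the x32-style encoding of syscall 1.

/* Standalone illustration of the __SYSCALL_MASK idea; not kernel code. */
#include <stdio.h>

#define __X32_SYSCALL_BIT 0x40000000
#define __SYSCALL_MASK    (~(__X32_SYSCALL_BIT))

int main(void)
{
	unsigned long nr = __X32_SYSCALL_BIT | 1;   /* an x32-style syscall number */

	/* 0x40000001 & ~0x40000000 == 1: it lands on the same 64-bit table slot */
	printf("raw=%#lx masked=%#lx\n", nr, nr & __SYSCALL_MASK);
	return 0;
}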
/arch/x86/include/asm/uv/uv_irq.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is subject to the terms and conditions of the GNU General Public 3 | * License. See the file "COPYING" in the main directory of this archive 4 | * for more details. 5 | * 6 | * SGI UV IRQ definitions 7 | * 8 | * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. 9 | */ 10 | 11 | #ifndef _ASM_X86_UV_UV_IRQ_H 12 | #define _ASM_X86_UV_UV_IRQ_H 13 | 14 | /* If a generic version of this structure gets defined, eliminate this one. */ 15 | struct uv_IO_APIC_route_entry { 16 | __u64 vector : 8, 17 | delivery_mode : 3, 18 | dest_mode : 1, 19 | delivery_status : 1, 20 | polarity : 1, 21 | __reserved_1 : 1, 22 | trigger : 1, 23 | mask : 1, 24 | __reserved_2 : 15, 25 | dest : 32; 26 | }; 27 | 28 | enum { 29 | UV_AFFINITY_ALL, 30 | UV_AFFINITY_NODE, 31 | UV_AFFINITY_CPU 32 | }; 33 | 34 | extern int uv_irq_2_mmr_info(int, unsigned long *, int *); 35 | extern int uv_setup_irq(char *, int, int, unsigned long, int); 36 | extern void uv_teardown_irq(unsigned int); 37 | 38 | #endif /* _ASM_X86_UV_UV_IRQ_H */ 39 | -------------------------------------------------------------------------------- /arch/x86/include/asm/vdso.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_VDSO_H 2 | #define _ASM_X86_VDSO_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #ifndef __ASSEMBLER__ 9 | 10 | #include 11 | 12 | struct vdso_image { 13 | void *data; 14 | unsigned long size; /* Always a multiple of PAGE_SIZE */ 15 | 16 | unsigned long alt, alt_len; 17 | 18 | long sym_vvar_start; /* Negative offset to the vvar area */ 19 | 20 | long sym_vvar_page; 21 | long sym_hpet_page; 22 | long sym_pvclock_page; 23 | long sym_VDSO32_NOTE_MASK; 24 | long sym___kernel_sigreturn; 25 | long sym___kernel_rt_sigreturn; 26 | long sym___kernel_vsyscall; 27 | long sym_int80_landing_pad; 28 | }; 29 | 30 | #ifdef CONFIG_X86_64 31 | extern const struct vdso_image vdso_image_64; 32 | #endif 33 | 34 | #ifdef CONFIG_X86_X32 35 | extern const struct vdso_image vdso_image_x32; 36 | #endif 37 | 38 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT 39 | extern const struct vdso_image vdso_image_32; 40 | #endif 41 | 42 | extern void __init init_vdso_image(const struct vdso_image *image); 43 | 44 | #endif /* __ASSEMBLER__ */ 45 | 46 | #endif /* _ASM_X86_VDSO_H */ 47 | -------------------------------------------------------------------------------- /arch/x86/include/asm/vga.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Access to VGA videoram 3 | * 4 | * (c) 1998 Martin Mares 5 | */ 6 | 7 | #ifndef _ASM_X86_VGA_H 8 | #define _ASM_X86_VGA_H 9 | 10 | /* 11 | * On the PC, we can just recalculate addresses and then 12 | * access the videoram directly without any black magic. 13 | */ 14 | 15 | #define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) 16 | 17 | #define vga_readb(x) (*(x)) 18 | #define vga_writeb(x, y) (*(y) = (x)) 19 | 20 | #endif /* _ASM_X86_VGA_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/asm/vsyscall.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_VSYSCALL_H 2 | #define _ASM_X86_VSYSCALL_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef CONFIG_X86_VSYSCALL_EMULATION 8 | extern void map_vsyscall(void); 9 | 10 | /* 11 | * Called on instruction fetch fault in vsyscall page. 
12 | * Returns true if handled. 13 | */ 14 | extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); 15 | #else 16 | static inline void map_vsyscall(void) {} 17 | static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) 18 | { 19 | return false; 20 | } 21 | #endif 22 | 23 | #endif /* _ASM_X86_VSYSCALL_H */ 24 | -------------------------------------------------------------------------------- /arch/x86/include/asm/vvar.h: -------------------------------------------------------------------------------- 1 | /* 2 | * vvar.h: Shared vDSO/kernel variable declarations 3 | * Copyright (c) 2011 Andy Lutomirski 4 | * Subject to the GNU General Public License, version 2 5 | * 6 | * A handful of variables are accessible (read-only) from userspace 7 | * code in the vsyscall page and the vdso. They are declared here. 8 | * Some other file must define them with DEFINE_VVAR. 9 | * 10 | * In normal kernel code, they are used like any other variable. 11 | * In user code, they are accessed through the VVAR macro. 12 | * 13 | * These variables live in a page of kernel data that has an extra RO 14 | * mapping for userspace. Each variable needs a unique offset within 15 | * that page; specify that offset with the DECLARE_VVAR macro. (If 16 | * you mess up, the linker will catch it.) 17 | */ 18 | 19 | #ifndef _ASM_X86_VVAR_H 20 | #define _ASM_X86_VVAR_H 21 | 22 | #if defined(__VVAR_KERNEL_LDS) 23 | 24 | /* The kernel linker script defines its own magic to put vvars in the 25 | * right place. 26 | */ 27 | #define DECLARE_VVAR(offset, type, name) \ 28 | EMIT_VVAR(name, offset) 29 | 30 | #else 31 | 32 | extern char __vvar_page; 33 | 34 | #define DECLARE_VVAR(offset, type, name) \ 35 | extern type vvar_ ## name __attribute__((visibility("hidden"))); 36 | 37 | #define VVAR(name) (vvar_ ## name) 38 | 39 | #define DEFINE_VVAR(type, name) \ 40 | type name \ 41 | __attribute__((section(".vvar_" #name), aligned(16))) __visible 42 | 43 | #endif 44 | 45 | /* DECLARE_VVAR(offset, type, name) */ 46 | 47 | DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) 48 | 49 | #undef DECLARE_VVAR 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /arch/x86/include/asm/x2apic.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Common bits for X2APIC cluster/physical modes. 
3 | */ 4 | 5 | #ifndef _ASM_X86_X2APIC_H 6 | #define _ASM_X86_X2APIC_H 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | static int x2apic_apic_id_valid(int apicid) 13 | { 14 | return 1; 15 | } 16 | 17 | static int x2apic_apic_id_registered(void) 18 | { 19 | return 1; 20 | } 21 | 22 | static void 23 | __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) 24 | { 25 | unsigned long cfg = __prepare_ICR(0, vector, dest); 26 | native_x2apic_icr_write(cfg, apicid); 27 | } 28 | 29 | static unsigned int x2apic_get_apic_id(unsigned long id) 30 | { 31 | return id; 32 | } 33 | 34 | static unsigned long x2apic_set_apic_id(unsigned int id) 35 | { 36 | return id; 37 | } 38 | 39 | static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) 40 | { 41 | return initial_apicid >> index_msb; 42 | } 43 | 44 | static void x2apic_send_IPI_self(int vector) 45 | { 46 | apic_write(APIC_SELF_IPI, vector); 47 | } 48 | 49 | #endif /* _ASM_X86_X2APIC_H */ 50 | -------------------------------------------------------------------------------- /arch/x86/include/asm/xen/events.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_XEN_EVENTS_H 2 | #define _ASM_X86_XEN_EVENTS_H 3 | 4 | enum ipi_vector { 5 | XEN_RESCHEDULE_VECTOR, 6 | XEN_CALL_FUNCTION_VECTOR, 7 | XEN_CALL_FUNCTION_SINGLE_VECTOR, 8 | XEN_SPIN_UNLOCK_VECTOR, 9 | XEN_IRQ_WORK_VECTOR, 10 | XEN_NMI_VECTOR, 11 | 12 | XEN_NR_IPIS, 13 | }; 14 | 15 | static inline int xen_irqs_disabled(struct pt_regs *regs) 16 | { 17 | return raw_irqs_disabled_flags(regs->flags); 18 | } 19 | 20 | /* No need for a barrier -- XCHG is a barrier on x86. */ 21 | #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) 22 | 23 | extern int xen_have_vector_callback; 24 | 25 | /* 26 | * Events delivered via platform PCI interrupts are always 27 | * routed to vcpu 0 and hence cannot be rebound. 
28 | */ 29 | static inline bool xen_support_evtchn_rebind(void) 30 | { 31 | return (!xen_hvm_domain() || xen_have_vector_callback); 32 | } 33 | 34 | #endif /* _ASM_X86_XEN_EVENTS_H */ 35 | -------------------------------------------------------------------------------- /arch/x86/include/asm/xen/page-coherent.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_XEN_PAGE_COHERENT_H 2 | #define _ASM_X86_XEN_PAGE_COHERENT_H 3 | 4 | #include 5 | #include 6 | 7 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 8 | dma_addr_t *dma_handle, gfp_t flags, 9 | unsigned long attrs) 10 | { 11 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); 12 | *dma_handle = virt_to_phys(vstart); 13 | return vstart; 14 | } 15 | 16 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 17 | void *cpu_addr, dma_addr_t dma_handle, 18 | unsigned long attrs) 19 | { 20 | free_pages((unsigned long) cpu_addr, get_order(size)); 21 | } 22 | 23 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 24 | dma_addr_t dev_addr, unsigned long offset, size_t size, 25 | enum dma_data_direction dir, unsigned long attrs) { } 26 | 27 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 28 | size_t size, enum dma_data_direction dir, 29 | unsigned long attrs) { } 30 | 31 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 32 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } 33 | 34 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, 35 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } 36 | 37 | #endif /* _ASM_X86_XEN_PAGE_COHERENT_H */ 38 | -------------------------------------------------------------------------------- /arch/x86/include/asm/xen/swiotlb-xen.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SWIOTLB_XEN_H 2 | #define _ASM_X86_SWIOTLB_XEN_H 3 | 4 | #ifdef CONFIG_SWIOTLB_XEN 5 | extern int xen_swiotlb; 6 | extern int __init pci_xen_swiotlb_detect(void); 7 | extern void __init pci_xen_swiotlb_init(void); 8 | extern int pci_xen_swiotlb_init_late(void); 9 | #else 10 | #define xen_swiotlb (0) 11 | static inline int __init pci_xen_swiotlb_detect(void) { return 0; } 12 | static inline void __init pci_xen_swiotlb_init(void) { } 13 | static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } 14 | #endif 15 | 16 | #endif /* _ASM_X86_SWIOTLB_XEN_H */ 17 | -------------------------------------------------------------------------------- /arch/x86/include/asm/xen/trace_types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_XEN_TRACE_TYPES_H 2 | #define _ASM_XEN_TRACE_TYPES_H 3 | 4 | enum xen_mc_flush_reason { 5 | XEN_MC_FL_NONE, /* explicit flush */ 6 | XEN_MC_FL_BATCH, /* out of hypercall space */ 7 | XEN_MC_FL_ARGS, /* out of argument space */ 8 | XEN_MC_FL_CALLBACK, /* out of callback space */ 9 | }; 10 | 11 | enum xen_mc_extend_args { 12 | XEN_MC_XE_OK, 13 | XEN_MC_XE_BAD_OP, 14 | XEN_MC_XE_NO_SPACE 15 | }; 16 | typedef void (*xen_mc_callback_fn_t)(void *); 17 | 18 | #endif /* _ASM_XEN_TRACE_TYPES_H */ 19 | -------------------------------------------------------------------------------- /arch/x86/include/asm/xor_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_XOR_64_H 2 | #define _ASM_X86_XOR_64_H 3 | 4 | static 
struct xor_block_template xor_block_sse = { 5 | .name = "generic_sse", 6 | .do_2 = xor_sse_2, 7 | .do_3 = xor_sse_3, 8 | .do_4 = xor_sse_4, 9 | .do_5 = xor_sse_5, 10 | }; 11 | 12 | 13 | /* Also try the AVX routines */ 14 | #include 15 | 16 | /* We force the use of the SSE xor block because it can write around L2. 17 | We may also be able to load into the L1 only depending on how the cpu 18 | deals with a load to a line that is being prefetched. */ 19 | #undef XOR_TRY_TEMPLATES 20 | #define XOR_TRY_TEMPLATES \ 21 | do { \ 22 | AVX_XOR_SPEED; \ 23 | xor_speed(&xor_block_sse_pf64); \ 24 | xor_speed(&xor_block_sse); \ 25 | } while (0) 26 | 27 | #endif /* _ASM_X86_XOR_64_H */ 28 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/Kbuild: -------------------------------------------------------------------------------- 1 | # UAPI Header export list 2 | include include/uapi/asm-generic/Kbuild.asm 3 | 4 | genhdr-y += unistd_32.h 5 | genhdr-y += unistd_64.h 6 | genhdr-y += unistd_x32.h 7 | header-y += a.out.h 8 | header-y += auxvec.h 9 | header-y += bitsperlong.h 10 | header-y += boot.h 11 | header-y += bootparam.h 12 | header-y += byteorder.h 13 | header-y += debugreg.h 14 | header-y += e820.h 15 | header-y += errno.h 16 | header-y += fcntl.h 17 | header-y += hw_breakpoint.h 18 | header-y += hyperv.h 19 | header-y += ioctl.h 20 | header-y += ioctls.h 21 | header-y += ipcbuf.h 22 | header-y += ist.h 23 | header-y += kvm.h 24 | header-y += kvm_para.h 25 | header-y += kvm_perf.h 26 | header-y += ldt.h 27 | header-y += mce.h 28 | header-y += mman.h 29 | header-y += msgbuf.h 30 | header-y += msr-index.h 31 | header-y += msr.h 32 | header-y += mtrr.h 33 | header-y += param.h 34 | header-y += perf_regs.h 35 | header-y += poll.h 36 | header-y += posix_types.h 37 | header-y += posix_types_32.h 38 | header-y += posix_types_64.h 39 | header-y += posix_types_x32.h 40 | header-y += prctl.h 41 | header-y += processor-flags.h 42 | header-y += ptrace-abi.h 43 | header-y += ptrace.h 44 | header-y += resource.h 45 | header-y += sembuf.h 46 | header-y += setup.h 47 | header-y += shmbuf.h 48 | header-y += sigcontext.h 49 | header-y += sigcontext32.h 50 | header-y += siginfo.h 51 | header-y += signal.h 52 | header-y += socket.h 53 | header-y += sockios.h 54 | header-y += stat.h 55 | header-y += statfs.h 56 | header-y += svm.h 57 | header-y += swab.h 58 | header-y += termbits.h 59 | header-y += termios.h 60 | header-y += types.h 61 | header-y += ucontext.h 62 | header-y += unistd.h 63 | header-y += vm86.h 64 | header-y += vmx.h 65 | header-y += vsyscall.h 66 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/a.out.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_A_OUT_H 2 | #define _ASM_X86_A_OUT_H 3 | 4 | struct exec 5 | { 6 | unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 7 | unsigned a_text; /* length of text, in bytes */ 8 | unsigned a_data; /* length of data, in bytes */ 9 | unsigned a_bss; /* length of uninitialized data area for file, in bytes */ 10 | unsigned a_syms; /* length of symbol table data in file, in bytes */ 11 | unsigned a_entry; /* start address */ 12 | unsigned a_trsize; /* length of relocation info for text, in bytes */ 13 | unsigned a_drsize; /* length of relocation info for data, in bytes */ 14 | }; 15 | 16 | #define N_TRSIZE(a) ((a).a_trsize) 17 | #define N_DRSIZE(a) ((a).a_drsize) 18 | #define N_SYMSIZE(a) 
((a).a_syms) 19 | 20 | #endif /* _ASM_X86_A_OUT_H */ 21 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/auxvec.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_AUXVEC_H 2 | #define _ASM_X86_AUXVEC_H 3 | /* 4 | * Architecture-neutral AT_ values in 0-17, leave some room 5 | * for more of them, start the x86-specific ones at 32. 6 | */ 7 | #ifdef __i386__ 8 | #define AT_SYSINFO 32 9 | #endif 10 | #define AT_SYSINFO_EHDR 33 11 | 12 | /* entries in ARCH_DLINFO: */ 13 | #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) 14 | # define AT_VECTOR_SIZE_ARCH 2 15 | #else /* else it's non-compat x86-64 */ 16 | # define AT_VECTOR_SIZE_ARCH 1 17 | #endif 18 | 19 | #endif /* _ASM_X86_AUXVEC_H */ 20 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/bitsperlong.h: -------------------------------------------------------------------------------- 1 | #ifndef __ASM_X86_BITSPERLONG_H 2 | #define __ASM_X86_BITSPERLONG_H 3 | 4 | #if defined(__x86_64__) && !defined(__ILP32__) 5 | # define __BITS_PER_LONG 64 6 | #else 7 | # define __BITS_PER_LONG 32 8 | #endif 9 | 10 | #include 11 | 12 | #endif /* __ASM_X86_BITSPERLONG_H */ 13 | 14 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/boot.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_BOOT_H 2 | #define _UAPI_ASM_X86_BOOT_H 3 | 4 | /* Internal svga startup constants */ 5 | #define NORMAL_VGA 0xffff /* 80x25 mode */ 6 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ 7 | #define ASK_VGA 0xfffd /* ask for it at bootup */ 8 | 9 | 10 | #endif /* _UAPI_ASM_X86_BOOT_H */ 11 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/byteorder.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_BYTEORDER_H 2 | #define _ASM_X86_BYTEORDER_H 3 | 4 | #include 5 | 6 | #endif /* _ASM_X86_BYTEORDER_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/errno.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/fcntl.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/hw_breakpoint.h: -------------------------------------------------------------------------------- 1 | /* */ 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/ioctl.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/ioctls.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/ipcbuf.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- 
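A short aside on uapi/asm/bitsperlong.h above: the defined(__x86_64__) && !defined(__ILP32__) test is what separates the three userspace ABIs, since both i386 and x32 use 32-bit longs while only native x86-64 gets 64. The sketch below merely mirrors that test in a hosted program (built with gcc -m64, -mx32 or -m32); MY_BITS_PER_LONG is a placeholder name, not the kernel macro.

/* Standalone sketch mirroring the bitsperlong.h ABI test; not kernel code. */
#include <stdio.h>

#if defined(__x86_64__) && !defined(__ILP32__)
# define MY_BITS_PER_LONG 64   /* native x86-64 */
#else
# define MY_BITS_PER_LONG 32   /* i386, or x32 (64-bit ISA, 32-bit longs) */
#endif

int main(void)
{
	printf("__BITS_PER_LONG would be %d; sizeof(long) is %zu bits here\n",
	       MY_BITS_PER_LONG, 8 * sizeof(long));
	return 0;
}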
/arch/x86/include/uapi/asm/ist.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Include file for the interface to IST BIOS 3 | * Copyright 2002 Andy Grover 4 | * 5 | * This program is free software; you can redistribute it and/or modify it 6 | * under the terms of the GNU General Public License as published by the 7 | * Free Software Foundation; either version 2, or (at your option) any 8 | * later version. 9 | * 10 | * This program is distributed in the hope that it will be useful, but 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 | * General Public License for more details. 14 | */ 15 | #ifndef _UAPI_ASM_X86_IST_H 16 | #define _UAPI_ASM_X86_IST_H 17 | 18 | 19 | 20 | #include 21 | 22 | struct ist_info { 23 | __u32 signature; 24 | __u32 command; 25 | __u32 event; 26 | __u32 perf_level; 27 | }; 28 | 29 | #endif /* _UAPI_ASM_X86_IST_H */ 30 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/kvm_perf.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_KVM_PERF_H 2 | #define _ASM_X86_KVM_PERF_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #define DECODE_STR_LEN 20 9 | 10 | #define VCPU_ID "vcpu_id" 11 | 12 | #define KVM_ENTRY_TRACE "kvm:kvm_entry" 13 | #define KVM_EXIT_TRACE "kvm:kvm_exit" 14 | #define KVM_EXIT_REASON "exit_reason" 15 | 16 | #endif /* _ASM_X86_KVM_PERF_H */ 17 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/ldt.h: -------------------------------------------------------------------------------- 1 | /* 2 | * ldt.h 3 | * 4 | * Definitions of structures used with the modify_ldt system call. 5 | */ 6 | #ifndef _ASM_X86_LDT_H 7 | #define _ASM_X86_LDT_H 8 | 9 | /* Maximum number of LDT entries supported. */ 10 | #define LDT_ENTRIES 8192 11 | /* The size of each LDT entry. */ 12 | #define LDT_ENTRY_SIZE 8 13 | 14 | #ifndef __ASSEMBLY__ 15 | /* 16 | * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS 17 | * not to the default values if you still want to do syscalls. This 18 | * call is more for 32bit mode therefore. 19 | */ 20 | struct user_desc { 21 | unsigned int entry_number; 22 | unsigned int base_addr; 23 | unsigned int limit; 24 | unsigned int seg_32bit:1; 25 | unsigned int contents:2; 26 | unsigned int read_exec_only:1; 27 | unsigned int limit_in_pages:1; 28 | unsigned int seg_not_present:1; 29 | unsigned int useable:1; 30 | #ifdef __x86_64__ 31 | /* 32 | * Because this bit is not present in 32-bit user code, user 33 | * programs can pass uninitialized values here. Therefore, in 34 | * any context in which a user_desc comes from a 32-bit program, 35 | * the kernel must act as though lm == 0, regardless of the 36 | * actual value. 
37 | */ 38 | unsigned int lm:1; 39 | #endif 40 | }; 41 | 42 | #define MODIFY_LDT_CONTENTS_DATA 0 43 | #define MODIFY_LDT_CONTENTS_STACK 1 44 | #define MODIFY_LDT_CONTENTS_CODE 2 45 | 46 | #endif /* !__ASSEMBLY__ */ 47 | #endif /* _ASM_X86_LDT_H */ 48 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/mce.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_MCE_H 2 | #define _UAPI_ASM_X86_MCE_H 3 | 4 | #include 5 | #include 6 | 7 | /* Fields are zero when not available */ 8 | struct mce { 9 | __u64 status; 10 | __u64 misc; 11 | __u64 addr; 12 | __u64 mcgstatus; 13 | __u64 ip; 14 | __u64 tsc; /* cpu time stamp counter */ 15 | __u64 time; /* wall time_t when error was detected */ 16 | __u8 cpuvendor; /* cpu vendor as encoded in system.h */ 17 | __u8 inject_flags; /* software inject flags */ 18 | __u8 severity; 19 | __u8 pad; 20 | __u32 cpuid; /* CPUID 1 EAX */ 21 | __u8 cs; /* code segment */ 22 | __u8 bank; /* machine check bank */ 23 | __u8 cpu; /* cpu number; obsolete; use extcpu now */ 24 | __u8 finished; /* entry is valid */ 25 | __u32 extcpu; /* linux cpu number that detected the error */ 26 | __u32 socketid; /* CPU socket ID */ 27 | __u32 apicid; /* CPU initial apic ID */ 28 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ 29 | }; 30 | 31 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) 32 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) 33 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) 34 | 35 | #endif /* _UAPI_ASM_X86_MCE_H */ 36 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/mman.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MMAN_H 2 | #define _ASM_X86_MMAN_H 3 | 4 | #define MAP_32BIT 0x40 /* only give out 32bit addresses */ 5 | 6 | #define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) 7 | #define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) 8 | 9 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 10 | /* 11 | * Take the 4 protection key bits out of the vma->vm_flags 12 | * value and turn them in to the bits that we can put in 13 | * to a pte. 14 | * 15 | * Only override these if Protection Keys are available 16 | * (which is only on 64-bit). 17 | */ 18 | #define arch_vm_get_page_prot(vm_flags) __pgprot( \ 19 | ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \ 20 | ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \ 21 | ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \ 22 | ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0)) 23 | 24 | #define arch_calc_vm_prot_bits(prot, key) ( \ 25 | ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \ 26 | ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \ 27 | ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \ 28 | ((key) & 0x8 ? 
VM_PKEY_BIT3 : 0)) 29 | #endif 30 | 31 | #include 32 | 33 | #endif /* _ASM_X86_MMAN_H */ 34 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/msgbuf.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/msr.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_MSR_H 2 | #define _UAPI_ASM_X86_MSR_H 3 | 4 | #ifndef __ASSEMBLY__ 5 | 6 | #include 7 | #include 8 | 9 | #define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8]) 10 | #define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8]) 11 | 12 | #endif /* __ASSEMBLY__ */ 13 | #endif /* _UAPI_ASM_X86_MSR_H */ 14 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/param.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/perf_regs.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PERF_REGS_H 2 | #define _ASM_X86_PERF_REGS_H 3 | 4 | enum perf_event_x86_regs { 5 | PERF_REG_X86_AX, 6 | PERF_REG_X86_BX, 7 | PERF_REG_X86_CX, 8 | PERF_REG_X86_DX, 9 | PERF_REG_X86_SI, 10 | PERF_REG_X86_DI, 11 | PERF_REG_X86_BP, 12 | PERF_REG_X86_SP, 13 | PERF_REG_X86_IP, 14 | PERF_REG_X86_FLAGS, 15 | PERF_REG_X86_CS, 16 | PERF_REG_X86_SS, 17 | PERF_REG_X86_DS, 18 | PERF_REG_X86_ES, 19 | PERF_REG_X86_FS, 20 | PERF_REG_X86_GS, 21 | PERF_REG_X86_R8, 22 | PERF_REG_X86_R9, 23 | PERF_REG_X86_R10, 24 | PERF_REG_X86_R11, 25 | PERF_REG_X86_R12, 26 | PERF_REG_X86_R13, 27 | PERF_REG_X86_R14, 28 | PERF_REG_X86_R15, 29 | 30 | PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, 31 | PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, 32 | }; 33 | #endif /* _ASM_X86_PERF_REGS_H */ 34 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/poll.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/posix_types.h: -------------------------------------------------------------------------------- 1 | #ifndef __KERNEL__ 2 | # ifdef __i386__ 3 | # include 4 | # elif defined(__ILP32__) 5 | # include 6 | # else 7 | # include 8 | # endif 9 | #endif 10 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/posix_types_32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_POSIX_TYPES_32_H 2 | #define _ASM_X86_POSIX_TYPES_32_H 3 | 4 | /* 5 | * This file is generally used by user-level software, so you need to 6 | * be a little careful about namespace pollution etc. Also, we cannot 7 | * assume GCC is being used. 
8 | */ 9 | 10 | typedef unsigned short __kernel_mode_t; 11 | #define __kernel_mode_t __kernel_mode_t 12 | 13 | typedef unsigned short __kernel_ipc_pid_t; 14 | #define __kernel_ipc_pid_t __kernel_ipc_pid_t 15 | 16 | typedef unsigned short __kernel_uid_t; 17 | typedef unsigned short __kernel_gid_t; 18 | #define __kernel_uid_t __kernel_uid_t 19 | 20 | typedef unsigned short __kernel_old_dev_t; 21 | #define __kernel_old_dev_t __kernel_old_dev_t 22 | 23 | #include 24 | 25 | #endif /* _ASM_X86_POSIX_TYPES_32_H */ 26 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/posix_types_64.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_POSIX_TYPES_64_H 2 | #define _ASM_X86_POSIX_TYPES_64_H 3 | 4 | /* 5 | * This file is generally used by user-level software, so you need to 6 | * be a little careful about namespace pollution etc. Also, we cannot 7 | * assume GCC is being used. 8 | */ 9 | 10 | typedef unsigned short __kernel_old_uid_t; 11 | typedef unsigned short __kernel_old_gid_t; 12 | #define __kernel_old_uid_t __kernel_old_uid_t 13 | 14 | typedef unsigned long __kernel_old_dev_t; 15 | #define __kernel_old_dev_t __kernel_old_dev_t 16 | 17 | #include 18 | 19 | #endif /* _ASM_X86_POSIX_TYPES_64_H */ 20 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/posix_types_x32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_POSIX_TYPES_X32_H 2 | #define _ASM_X86_POSIX_TYPES_X32_H 3 | 4 | /* 5 | * This file is only used by user-level software, so you need to 6 | * be a little careful about namespace pollution etc. Also, we cannot 7 | * assume GCC is being used. 8 | * 9 | * These types should generally match the ones used by the 64-bit kernel, 10 | * 11 | */ 12 | 13 | typedef long long __kernel_long_t; 14 | typedef unsigned long long __kernel_ulong_t; 15 | #define __kernel_long_t __kernel_long_t 16 | 17 | #include 18 | 19 | #endif /* _ASM_X86_POSIX_TYPES_X32_H */ 20 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/prctl.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PRCTL_H 2 | #define _ASM_X86_PRCTL_H 3 | 4 | #define ARCH_SET_GS 0x1001 5 | #define ARCH_SET_FS 0x1002 6 | #define ARCH_GET_FS 0x1003 7 | #define ARCH_GET_GS 0x1004 8 | 9 | #endif /* _ASM_X86_PRCTL_H */ 10 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/ptrace.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_PTRACE_H 2 | #define _UAPI_ASM_X86_PTRACE_H 3 | 4 | #include /* For __user */ 5 | #include 6 | #include 7 | 8 | 9 | #ifndef __ASSEMBLY__ 10 | 11 | #ifdef __i386__ 12 | /* this struct defines the way the registers are stored on the 13 | stack during a system call. */ 14 | 15 | #ifndef __KERNEL__ 16 | 17 | struct pt_regs { 18 | long ebx; 19 | long ecx; 20 | long edx; 21 | long esi; 22 | long edi; 23 | long ebp; 24 | long eax; 25 | int xds; 26 | int xes; 27 | int xfs; 28 | int xgs; 29 | long orig_eax; 30 | long eip; 31 | int xcs; 32 | long eflags; 33 | long esp; 34 | int xss; 35 | }; 36 | 37 | #endif /* __KERNEL__ */ 38 | 39 | #else /* __i386__ */ 40 | 41 | #ifndef __KERNEL__ 42 | 43 | struct pt_regs { 44 | /* 45 | * C ABI says these regs are callee-preserved. 
They aren't saved on kernel entry 46 | * unless syscall needs a complete, fully filled "struct pt_regs". 47 | */ 48 | unsigned long r15; 49 | unsigned long r14; 50 | unsigned long r13; 51 | unsigned long r12; 52 | unsigned long rbp; 53 | unsigned long rbx; 54 | /* These regs are callee-clobbered. Always saved on kernel entry. */ 55 | unsigned long r11; 56 | unsigned long r10; 57 | unsigned long r9; 58 | unsigned long r8; 59 | unsigned long rax; 60 | unsigned long rcx; 61 | unsigned long rdx; 62 | unsigned long rsi; 63 | unsigned long rdi; 64 | /* 65 | * On syscall entry, this is syscall#. On CPU exception, this is error code. 66 | * On hw interrupt, it's IRQ number: 67 | */ 68 | unsigned long orig_rax; 69 | /* Return frame for iretq */ 70 | unsigned long rip; 71 | unsigned long cs; 72 | unsigned long eflags; 73 | unsigned long rsp; 74 | unsigned long ss; 75 | /* top of stack page */ 76 | }; 77 | 78 | #endif /* __KERNEL__ */ 79 | #endif /* !__i386__ */ 80 | 81 | 82 | 83 | #endif /* !__ASSEMBLY__ */ 84 | 85 | #endif /* _UAPI_ASM_X86_PTRACE_H */ 86 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/resource.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/sembuf.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SEMBUF_H 2 | #define _ASM_X86_SEMBUF_H 3 | 4 | /* 5 | * The semid64_ds structure for x86 architecture. 6 | * Note extra padding because this structure is passed back and forth 7 | * between kernel and user space. 8 | * 9 | * Pad space is left for: 10 | * - 64-bit time_t to solve y2038 problem 11 | * - 2 miscellaneous 32-bit values 12 | */ 13 | struct semid64_ds { 14 | struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ 15 | __kernel_time_t sem_otime; /* last semop time */ 16 | __kernel_ulong_t __unused1; 17 | __kernel_time_t sem_ctime; /* last change time */ 18 | __kernel_ulong_t __unused2; 19 | __kernel_ulong_t sem_nsems; /* no. 
of semaphores in array */ 20 | __kernel_ulong_t __unused3; 21 | __kernel_ulong_t __unused4; 22 | }; 23 | 24 | #endif /* _ASM_X86_SEMBUF_H */ 25 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/setup.h: -------------------------------------------------------------------------------- 1 | /* */ 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/shmbuf.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/sigcontext32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SIGCONTEXT32_H 2 | #define _ASM_X86_SIGCONTEXT32_H 3 | 4 | /* This is a legacy file - all the type definitions are in sigcontext.h: */ 5 | 6 | #include 7 | 8 | #endif /* _ASM_X86_SIGCONTEXT32_H */ 9 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/siginfo.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SIGINFO_H 2 | #define _ASM_X86_SIGINFO_H 3 | 4 | #ifdef __x86_64__ 5 | # ifdef __ILP32__ /* x32 */ 6 | typedef long long __kernel_si_clock_t __attribute__((aligned(4))); 7 | # define __ARCH_SI_CLOCK_T __kernel_si_clock_t 8 | # define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8))) 9 | # else /* x86-64 */ 10 | # define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 11 | # endif 12 | #endif 13 | 14 | #include 15 | 16 | #endif /* _ASM_X86_SIGINFO_H */ 17 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/socket.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/sockios.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/statfs.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_STATFS_H 2 | #define _ASM_X86_STATFS_H 3 | 4 | /* 5 | * We need compat_statfs64 to be packed, because the i386 ABI won't 6 | * add padding at the end to bring it to a multiple of 8 bytes, but 7 | * the x86_64 ABI will. 
8 | */ 9 | #define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4))) 10 | 11 | #include 12 | #endif /* _ASM_X86_STATFS_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/swab.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_SWAB_H 2 | #define _ASM_X86_SWAB_H 3 | 4 | #include 5 | #include 6 | 7 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) 8 | { 9 | asm("bswapl %0" : "=r" (val) : "0" (val)); 10 | return val; 11 | } 12 | #define __arch_swab32 __arch_swab32 13 | 14 | static inline __attribute_const__ __u64 __arch_swab64(__u64 val) 15 | { 16 | #ifdef __i386__ 17 | union { 18 | struct { 19 | __u32 a; 20 | __u32 b; 21 | } s; 22 | __u64 u; 23 | } v; 24 | v.u = val; 25 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 26 | : "=r" (v.s.a), "=r" (v.s.b) 27 | : "0" (v.s.a), "1" (v.s.b)); 28 | return v.u; 29 | #else /* __i386__ */ 30 | asm("bswapq %0" : "=r" (val) : "0" (val)); 31 | return val; 32 | #endif 33 | } 34 | #define __arch_swab64 __arch_swab64 35 | 36 | #endif /* _ASM_X86_SWAB_H */ 37 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/termbits.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/termios.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/types.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_TYPES_H 2 | #define _ASM_X86_TYPES_H 3 | 4 | #include 5 | 6 | #endif /* _ASM_X86_TYPES_H */ 7 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/unistd.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_UNISTD_H 2 | #define _UAPI_ASM_X86_UNISTD_H 3 | 4 | /* x32 syscall flag bit */ 5 | #define __X32_SYSCALL_BIT 0x40000000 6 | 7 | #ifndef __KERNEL__ 8 | # ifdef __i386__ 9 | # include 10 | # elif defined(__ILP32__) 11 | # include 12 | # else 13 | # include 14 | # endif 15 | #endif 16 | 17 | #endif /* _UAPI_ASM_X86_UNISTD_H */ 18 | -------------------------------------------------------------------------------- /arch/x86/include/uapi/asm/vsyscall.h: -------------------------------------------------------------------------------- 1 | #ifndef _UAPI_ASM_X86_VSYSCALL_H 2 | #define _UAPI_ASM_X86_VSYSCALL_H 3 | 4 | enum vsyscall_num { 5 | __NR_vgettimeofday, 6 | __NR_vtime, 7 | __NR_vgetcpu, 8 | }; 9 | 10 | #define VSYSCALL_ADDR (-10UL << 20) 11 | 12 | #endif /* _UAPI_ASM_X86_VSYSCALL_H */ 13 | -------------------------------------------------------------------------------- /arch/x86/kvm/Makefile: -------------------------------------------------------------------------------- 1 | 2 | ccflags-y += -Iarch/x86/kvm 3 | 4 | CFLAGS_x86.o := -I. 5 | CFLAGS_svm.o := -I. 6 | CFLAGS_vmx.o := -I. 
7 | 8 | KVM := ../../../virt/kvm 9 | 10 | kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ 11 | $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o 12 | kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o 13 | 14 | kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ 15 | i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ 16 | hyperv.o page_track.o 17 | 18 | kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o 19 | 20 | kvm-intel-y += vmx.o pmu_intel.o 21 | kvm-amd-y += svm.o pmu_amd.o 22 | 23 | obj-$(CONFIG_KVM) += kvm.o 24 | obj-$(CONFIG_KVM_INTEL) += kvm-intel.o 25 | obj-$(CONFIG_KVM_AMD) += kvm-amd.o 26 | -------------------------------------------------------------------------------- /arch/x86/kvm/assigned-dev.h: -------------------------------------------------------------------------------- 1 | #ifndef ARCH_X86_KVM_ASSIGNED_DEV_H 2 | #define ARCH_X86_KVM_ASSIGNED_DEV_H 3 | 4 | #include 5 | 6 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 7 | int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev); 8 | int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev); 9 | 10 | int kvm_iommu_map_guest(struct kvm *kvm); 11 | int kvm_iommu_unmap_guest(struct kvm *kvm); 12 | 13 | long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, 14 | unsigned long arg); 15 | 16 | void kvm_free_all_assigned_devices(struct kvm *kvm); 17 | #else 18 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) 19 | { 20 | return 0; 21 | } 22 | 23 | static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, 24 | unsigned long arg) 25 | { 26 | return -ENOTTY; 27 | } 28 | 29 | static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {} 30 | #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */ 31 | 32 | #endif /* ARCH_X86_KVM_ASSIGNED_DEV_H */ 33 | -------------------------------------------------------------------------------- /arch/x86/kvm/i8254.h: -------------------------------------------------------------------------------- 1 | #ifndef __I8254_H 2 | #define __I8254_H 3 | 4 | #include 5 | 6 | #include 7 | 8 | struct kvm_kpit_channel_state { 9 | u32 count; /* can be 65536 */ 10 | u16 latched_count; 11 | u8 count_latched; 12 | u8 status_latched; 13 | u8 status; 14 | u8 read_state; 15 | u8 write_state; 16 | u8 write_latch; 17 | u8 rw_mode; 18 | u8 mode; 19 | u8 bcd; /* not supported */ 20 | u8 gate; /* timer start */ 21 | ktime_t count_load_time; 22 | }; 23 | 24 | struct kvm_kpit_state { 25 | /* All members before "struct mutex lock" are protected by the lock. 
*/ 26 | struct kvm_kpit_channel_state channels[3]; 27 | u32 flags; 28 | bool is_periodic; 29 | s64 period; /* unit: ns */ 30 | struct hrtimer timer; 31 | u32 speaker_data_on; 32 | 33 | struct mutex lock; 34 | atomic_t reinject; 35 | atomic_t pending; /* accumulated triggered timers */ 36 | atomic_t irq_ack; 37 | struct kvm_irq_ack_notifier irq_ack_notifier; 38 | }; 39 | 40 | struct kvm_pit { 41 | struct kvm_io_device dev; 42 | struct kvm_io_device speaker_dev; 43 | struct kvm *kvm; 44 | struct kvm_kpit_state pit_state; 45 | int irq_source_id; 46 | struct kvm_irq_mask_notifier mask_notifier; 47 | struct kthread_worker worker; 48 | struct task_struct *worker_task; 49 | struct kthread_work expired; 50 | }; 51 | 52 | #define KVM_PIT_BASE_ADDRESS 0x40 53 | #define KVM_SPEAKER_BASE_ADDRESS 0x61 54 | #define KVM_PIT_MEM_LENGTH 4 55 | #define KVM_PIT_FREQ 1193181 56 | #define KVM_MAX_PIT_INTR_INTERVAL HZ / 100 57 | #define KVM_PIT_CHANNEL_MASK 0x3 58 | 59 | struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags); 60 | void kvm_free_pit(struct kvm *kvm); 61 | 62 | void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, 63 | int hpet_legacy_start); 64 | void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject); 65 | 66 | #endif 67 | -------------------------------------------------------------------------------- /arch/x86/kvm/tss.h: -------------------------------------------------------------------------------- 1 | #ifndef __TSS_SEGMENT_H 2 | #define __TSS_SEGMENT_H 3 | 4 | struct tss_segment_32 { 5 | u32 prev_task_link; 6 | u32 esp0; 7 | u32 ss0; 8 | u32 esp1; 9 | u32 ss1; 10 | u32 esp2; 11 | u32 ss2; 12 | u32 cr3; 13 | u32 eip; 14 | u32 eflags; 15 | u32 eax; 16 | u32 ecx; 17 | u32 edx; 18 | u32 ebx; 19 | u32 esp; 20 | u32 ebp; 21 | u32 esi; 22 | u32 edi; 23 | u32 es; 24 | u32 cs; 25 | u32 ss; 26 | u32 ds; 27 | u32 fs; 28 | u32 gs; 29 | u32 ldt_selector; 30 | u16 t; 31 | u16 io_map; 32 | }; 33 | 34 | struct tss_segment_16 { 35 | u16 prev_task_link; 36 | u16 sp0; 37 | u16 ss0; 38 | u16 sp1; 39 | u16 ss1; 40 | u16 sp2; 41 | u16 ss2; 42 | u16 ip; 43 | u16 flag; 44 | u16 ax; 45 | u16 cx; 46 | u16 dx; 47 | u16 bx; 48 | u16 sp; 49 | u16 bp; 50 | u16 si; 51 | u16 di; 52 | u16 es; 53 | u16 cs; 54 | u16 ss; 55 | u16 ds; 56 | u16 ldt; 57 | }; 58 | 59 | #endif 60 | -------------------------------------------------------------------------------- /include/linux/kvm_para.h: -------------------------------------------------------------------------------- 1 | #ifndef __LINUX_KVM_PARA_H 2 | #define __LINUX_KVM_PARA_H 3 | 4 | #include 5 | 6 | 7 | static inline bool kvm_para_has_feature(unsigned int feature) 8 | { 9 | return !!(kvm_arch_para_features() & (1UL << feature)); 10 | } 11 | #endif /* __LINUX_KVM_PARA_H */ 12 | -------------------------------------------------------------------------------- /include/linux/kvm_types.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This program is free software; you can redistribute it and/or modify 3 | * it under the terms of the GNU General Public License as published by 4 | * the Free Software Foundation; either version 2 of the License. 5 | * 6 | * This program is distributed in the hope that it will be useful, 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 | * GNU General Public License for more details. 

--------------------------------------------------------------------------------
/include/linux/kvm_types.h:
--------------------------------------------------------------------------------

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

#include <asm/types.h>

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

typedef hfn_t kvm_pfn_t;

struct gfn_to_hva_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};

#endif /* __KVM_TYPES_H__ */
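
Note: a gpa and a gfn in the comment above differ only by the in-page offset; include/linux/kvm_host.h provides gpa_to_gfn()/gfn_to_gpa() helpers for exactly this. A minimal sketch of the relationship (illustrative only; the example_* names are not from the tree):

#include <linux/kvm_types.h>
#include <asm/page.h>		/* PAGE_SHIFT */

/* Guest frame number = guest physical address with the page offset dropped. */
static gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

/* And back: a gfn addresses the first byte of its page. */
static gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}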

--------------------------------------------------------------------------------
/include/uapi/linux/kvm_para.h:
--------------------------------------------------------------------------------

#ifndef _UAPI__LINUX_KVM_PARA_H
#define _UAPI__LINUX_KVM_PARA_H

/*
 * This header file provides a method for making a hypercall to the host
 * Architectures should define:
 * - kvm_hypercall0, kvm_hypercall1...
 * - kvm_arch_para_features
 * - kvm_para_available
 */

/* Return values for hypercalls */
#define KVM_ENOSYS		1000
#define KVM_EFAULT		EFAULT
#define KVM_E2BIG		E2BIG
#define KVM_EPERM		EPERM

#define KVM_HC_VAPIC_POLL_IRQ		1
#define KVM_HC_MMU_OP			2
#define KVM_HC_FEATURES			3
#define KVM_HC_PPC_MAP_MAGIC_PAGE	4
#define KVM_HC_KICK_CPU			5
#define KVM_HC_MIPS_GET_CLOCK_FREQ	6
#define KVM_HC_MIPS_EXIT_VM		7
#define KVM_HC_MIPS_CONSOLE_OUTPUT	8

/*
 * hypercalls use architecture specific
 */
#include <asm/kvm_para.h>

#endif /* _UAPI__LINUX_KVM_PARA_H */

--------------------------------------------------------------------------------
/virt/Makefile:
--------------------------------------------------------------------------------

obj-y += lib/

--------------------------------------------------------------------------------
/virt/kvm/Kconfig:
--------------------------------------------------------------------------------

# KVM common configuration items and defaults

config HAVE_KVM
       bool

config HAVE_KVM_IRQCHIP
       bool

config HAVE_KVM_IRQFD
       bool

config HAVE_KVM_IRQ_ROUTING
       bool

config HAVE_KVM_EVENTFD
       bool
       select EVENTFD

config KVM_MMIO
       bool

config KVM_ASYNC_PF
       bool

# Toggle to switch between direct notification and batch job
config KVM_ASYNC_PF_SYNC
       bool

config HAVE_KVM_MSI
       bool

config HAVE_KVM_CPU_RELAX_INTERCEPT
       bool

config KVM_VFIO
       bool

config HAVE_KVM_ARCH_TLB_FLUSH_ALL
       bool

config HAVE_KVM_INVALID_WAKEUPS
       bool

config KVM_GENERIC_DIRTYLOG_READ_PROTECT
       bool

config KVM_COMPAT
       def_bool y
       depends on KVM && COMPAT && !S390

config HAVE_KVM_IRQ_BYPASS
       bool

--------------------------------------------------------------------------------
/virt/kvm/arm/trace.h:
--------------------------------------------------------------------------------

#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoints for vgic
 */
TRACE_EVENT(vgic_update_irq_pending,
	TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
	TP_ARGS(vcpu_id, irq, level),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_id )
		__field( __u32, irq )
		__field( bool, level )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->irq = irq;
		__entry->level = level;
	),

	TP_printk("VCPU: %ld, IRQ %d, level: %d",
		  __entry->vcpu_id, __entry->irq, __entry->level)
);

/*
 * Tracepoints for arch_timer
 */
TRACE_EVENT(kvm_timer_update_irq,
	TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
	TP_ARGS(vcpu_id, irq, level),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_id )
		__field( __u32, irq )
		__field( int, level )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->irq = irq;
		__entry->level = level;
	),

	TP_printk("VCPU: %ld, IRQ %d, level %d",
		  __entry->vcpu_id, __entry->irq, __entry->level)
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>
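
Note: trace.h above is a standard kernel tracepoint header. Each TRACE_EVENT(name, ...) declares a trace_name() call site, and exactly one compilation unit must define CREATE_TRACE_POINTS before including the header so that <trace/define_trace.h> emits the tracepoint definitions. A hedged sketch of how a caller would use it (the example_* wrapper is illustrative, not from the tree):

/* In exactly one .c file of the vgic/arch-timer code: instantiate the events. */
#define CREATE_TRACE_POINTS
#include "trace.h"

/* Any other file just includes "trace.h" and fires the event; the argument
 * list matches the TP_PROTO() declaration above. */
static void example_note_irq_update(unsigned long vcpu_id, __u32 irq, bool level)
{
	trace_vgic_update_irq_pending(vcpu_id, irq, level);
}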

--------------------------------------------------------------------------------
/virt/kvm/async_pf.h:
--------------------------------------------------------------------------------

/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __KVM_ASYNC_PF_H__
#define __KVM_ASYNC_PF_H__

#ifdef CONFIG_KVM_ASYNC_PF
int kvm_async_pf_init(void);
void kvm_async_pf_deinit(void);
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
#else
#define kvm_async_pf_init() (0)
#define kvm_async_pf_deinit() do {} while (0)
#define kvm_async_pf_vcpu_init(C) do {} while (0)
#endif

#endif

--------------------------------------------------------------------------------
/virt/kvm/coalesced_mmio.h:
--------------------------------------------------------------------------------

#ifndef __KVM_COALESCED_MMIO_H__
#define __KVM_COALESCED_MMIO_H__

/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier
 *
 */

#ifdef CONFIG_KVM_MMIO

#include <linux/list.h>

struct kvm_coalesced_mmio_dev {
	struct list_head list;
	struct kvm_io_device dev;
	struct kvm *kvm;
	struct kvm_coalesced_mmio_zone zone;
};

int kvm_coalesced_mmio_init(struct kvm *kvm);
void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone);

#else

static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }

#endif

#endif
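
Note: kvm_vm_ioctl_register_coalesced_mmio() above is the kernel-side handler behind the KVM_REGISTER_COALESCED_MMIO VM ioctl. A hedged userspace sketch of registering a zone follows; vm_fd and the guest-physical range are assumptions, and real code should first probe KVM_CHECK_EXTENSION with KVM_CAP_COALESCED_MMIO.

#include <linux/kvm.h>		/* KVM_REGISTER_COALESCED_MMIO, struct kvm_coalesced_mmio_zone */
#include <sys/ioctl.h>
#include <stdio.h>

/* vm_fd is a VM file descriptor obtained earlier via KVM_CREATE_VM. */
static int register_coalesced_zone(int vm_fd, __u64 gpa, __u32 len)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = gpa,	/* guest-physical base of the MMIO range */
		.size = len,	/* length of the range in bytes */
	};

	/* Guest writes to this range are queued in the coalesced ring and
	 * drained later, instead of forcing an exit per write. */
	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0) {
		perror("KVM_REGISTER_COALESCED_MMIO");
		return -1;
	}
	return 0;
}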

--------------------------------------------------------------------------------
/virt/kvm/vfio.h:
--------------------------------------------------------------------------------

#ifndef __KVM_VFIO_H
#define __KVM_VFIO_H

#ifdef CONFIG_KVM_VFIO
int kvm_vfio_ops_init(void);
void kvm_vfio_ops_exit(void);
#else
static inline int kvm_vfio_ops_init(void)
{
	return 0;
}
static inline void kvm_vfio_ops_exit(void)
{
}
#endif

#endif

--------------------------------------------------------------------------------
/virt/lib/Kconfig:
--------------------------------------------------------------------------------

config IRQ_BYPASS_MANAGER
       tristate

--------------------------------------------------------------------------------
/virt/lib/Makefile:
--------------------------------------------------------------------------------

obj-$(CONFIG_IRQ_BYPASS_MANAGER) += irqbypass.o

--------------------------------------------------------------------------------