Diffstat (limited to 'include')
-rw-r--r--  include/block/aio.h | 38
-rw-r--r--  include/block/block.h | 2
-rw-r--r--  include/block/block_int.h | 3
-rw-r--r--  include/exec/exec-all.h | 14
-rw-r--r--  include/hw/arm/virt.h | 5
-rw-r--r--  include/hw/compat.h | 10
-rw-r--r--  include/hw/intc/arm_gic_common.h | 2
-rw-r--r--  include/hw/intc/arm_gicv3_common.h | 21
-rw-r--r--  include/hw/loader.h | 7
-rw-r--r--  include/hw/m68k/mcf.h | 4
-rw-r--r--  include/hw/m68k/mcf_fec.h | 13
-rw-r--r--  include/hw/nvram/fw_cfg.h | 3
-rw-r--r--  include/hw/nvram/fw_cfg_keys.h | 3
-rw-r--r--  include/hw/sparc/sparc64.h | 5
-rw-r--r--  include/hw/ssi/aspeed_smc.h | 4
-rw-r--r--  include/hw/timer/sun4v-rtc.h | 1
-rw-r--r--  include/hw/virtio/vhost-backend.h | 13
-rw-r--r--  include/hw/virtio/vhost.h | 4
-rw-r--r--  include/hw/virtio/virtio.h | 1
-rw-r--r--  include/qapi/error.h | 3
-rw-r--r--  include/qemu/futex.h | 36
-rw-r--r--  include/qemu/thread.h | 112
-rw-r--r--  include/standard-headers/linux/pci_regs.h | 1
-rw-r--r--  include/standard-headers/linux/virtio_crypto.h | 481
-rw-r--r--  include/standard-headers/linux/virtio_mmio.h | 141
25 files changed, 651 insertions, 276 deletions
diff --git a/include/block/aio.h b/include/block/aio.h index 4dca54d9c7..7df271d2b9 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -53,18 +53,12 @@ struct LinuxAioState; struct AioContext { GSource source; - /* Protects all fields from multi-threaded access */ + /* Used by AioContext users to protect from multi-threaded access. */ QemuRecMutex lock; - /* The list of registered AIO handlers */ + /* The list of registered AIO handlers. Protected by ctx->list_lock. */ QLIST_HEAD(, AioHandler) aio_handlers; - /* This is a simple lock used to protect the aio_handlers list. - * Specifically, it's used to ensure that no callbacks are removed while - * we're walking and dispatching callbacks. - */ - int walking_handlers; - /* Used to avoid unnecessary event_notifier_set calls in aio_notify; * accessed with atomic primitives. If this field is 0, everything * (file descriptors, bottom halves, timers) will be re-evaluated @@ -90,17 +84,15 @@ struct AioContext { */ uint32_t notify_me; - /* lock to protect between bh's adders and deleter */ - QemuMutex bh_lock; + /* A lock to protect between QEMUBH and AioHandler adders and deleter, + * and to ensure that no callbacks are removed while we're walking and + * dispatching them. + */ + QemuLockCnt list_lock; /* Anchor of the list of Bottom Halves belonging to the context */ struct QEMUBH *first_bh; - /* A simple lock used to protect the first_bh list, and ensure that - * no callbacks are removed while we're walking and dispatching callbacks. - */ - int walking_bh; - /* Used by aio_notify. * * "notified" is used to avoid expensive event_notifier_test_and_clear @@ -116,7 +108,9 @@ struct AioContext { bool notified; EventNotifier notifier; - /* Thread pool for performing work and receiving completion callbacks */ + /* Thread pool for performing work and receiving completion callbacks. + * Has its own locking. + */ struct ThreadPool *thread_pool; #ifdef CONFIG_LINUX_AIO @@ -126,7 +120,9 @@ struct AioContext { struct LinuxAioState *linux_aio; #endif - /* TimerLists for calling timers - one per clock type */ + /* TimerLists for calling timers - one per clock type. Has its own + * locking. + */ QEMUTimerListGroup tlg; int external_disable_cnt; @@ -180,9 +176,11 @@ void aio_context_unref(AioContext *ctx); * automatically takes care of calling aio_context_acquire and * aio_context_release. * - * Access to timers and BHs from a thread that has not acquired AioContext - * is possible. Access to callbacks for now must be done while the AioContext - * is owned by the thread (FIXME). + * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A + * thread still has to call those to avoid being interrupted by the guest. + * + * Bottom halves, timers and callbacks can be created or removed without + * acquiring the AioContext. 
*/ void aio_context_acquire(AioContext *ctx); diff --git a/include/block/block.h b/include/block/block.h index 49bb0b239a..8b0dcdaa70 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -526,8 +526,6 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); void bdrv_io_plug(BlockDriverState *bs); void bdrv_io_unplug(BlockDriverState *bs); -void bdrv_io_unplugged_begin(BlockDriverState *bs); -void bdrv_io_unplugged_end(BlockDriverState *bs); /** * bdrv_drained_begin: diff --git a/include/block/block_int.h b/include/block/block_int.h index 4e4562d444..2d92d7edfe 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -526,9 +526,8 @@ struct BlockDriverState { uint64_t write_threshold_offset; NotifierWithReturn write_threshold_notifier; - /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */ + /* counter for nested bdrv_io_plug */ unsigned io_plugged; - unsigned io_plug_disabled; int quiesce_counter; }; diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index a8c13cee66..bbc9478a50 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -95,15 +95,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr); /** * tlb_flush: * @cpu: CPU whose TLB should be flushed - * @flush_global: ignored * - * Flush the entire TLB for the specified CPU. - * The flush_global flag is in theory an indicator of whether the whole - * TLB should be flushed, or only those entries not marked global. - * In practice QEMU does not implement any global/not global flag for - * TLB entries, and the argument is ignored. + * Flush the entire TLB for the specified CPU. Most CPU architectures + * allow the implementation to drop entries from the TLB at any time + * so this is generally safe. If more selective flushing is required + * use one of the other functions for efficiency. 
*/ -void tlb_flush(CPUState *cpu, int flush_global); +void tlb_flush(CPUState *cpu); /** * tlb_flush_page_by_mmuidx: * @cpu: CPU whose TLB should be flushed @@ -165,7 +163,7 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) { } -static inline void tlb_flush(CPUState *cpu, int flush_global) +static inline void tlb_flush(CPUState *cpu) { } diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index eb1c63d688..58ce74e0e5 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -39,6 +39,8 @@ #define NUM_GICV2M_SPIS 64 #define NUM_VIRTIO_TRANSPORTS 32 +#define ARCH_GICV3_MAINT_IRQ 9 + #define ARCH_TIMER_VIRT_IRQ 11 #define ARCH_TIMER_S_EL1_IRQ 13 #define ARCH_TIMER_NS_EL1_IRQ 14 @@ -91,6 +93,7 @@ typedef struct { FWCfgState *fw_cfg; bool secure; bool highmem; + bool virt; int32_t gic_version; struct arm_boot_info bootinfo; const MemMapEntry *memmap; @@ -101,7 +104,7 @@ typedef struct { uint32_t clock_phandle; uint32_t gic_phandle; uint32_t msi_phandle; - bool using_psci; + int psci_conduit; } VirtMachineState; #define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt") diff --git a/include/hw/compat.h b/include/hw/compat.h index 4fe44d1c7a..34e9b4a660 100644 --- a/include/hw/compat.h +++ b/include/hw/compat.h @@ -2,7 +2,15 @@ #define HW_COMPAT_H #define HW_COMPAT_2_8 \ - /* empty */ + {\ + .driver = "fw_cfg_mem",\ + .property = "x-file-slots",\ + .value = stringify(0x10),\ + },{\ + .driver = "fw_cfg_io",\ + .property = "x-file-slots",\ + .value = stringify(0x10),\ + }, #define HW_COMPAT_2_7 \ {\ diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h index f4c349a2ef..af3ca18e2f 100644 --- a/include/hw/intc/arm_gic_common.h +++ b/include/hw/intc/arm_gic_common.h @@ -55,6 +55,8 @@ typedef struct GICState { qemu_irq parent_irq[GIC_NCPU]; qemu_irq parent_fiq[GIC_NCPU]; + qemu_irq parent_virq[GIC_NCPU]; + qemu_irq parent_vfiq[GIC_NCPU]; /* GICD_CTLR; for a GIC with the security extensions the NS banked version * of this register is just an alias of bit 1 of the S banked version. */ diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index 341a3118f0..4156051d98 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -38,6 +38,9 @@ /* Number of SGI target-list bits */ #define GICV3_TARGETLIST_BITS 16 +/* Maximum number of list registers (architectural limit) */ +#define GICV3_LR_MAX 16 + /* Minimum BPR for Secure, or when security not enabled */ #define GIC_MIN_BPR 0 /* Minimum BPR for Nonsecure when security is enabled */ @@ -145,6 +148,9 @@ struct GICv3CPUState { CPUState *cpu; qemu_irq parent_irq; qemu_irq parent_fiq; + qemu_irq parent_virq; + qemu_irq parent_vfiq; + qemu_irq maintenance_irq; /* Redistributor */ uint32_t level; /* Current IRQ level */ @@ -173,6 +179,21 @@ struct GICv3CPUState { uint64_t icc_igrpen[3]; uint64_t icc_ctlr_el3; + /* Virtualization control interface */ + uint64_t ich_apr[3][4]; /* ich_apr[GICV3_G1][x] never used */ + uint64_t ich_hcr_el2; + uint64_t ich_lr_el2[GICV3_LR_MAX]; + uint64_t ich_vmcr_el2; + + /* Properties of the CPU interface. These are initialized from + * the settings in the CPU proper. + * If the number of implemented list registers is 0 then the + * virtualization support is not implemented. + */ + int num_list_regs; + int vpribits; /* number of virtual priority bits */ + int vprebits; /* number of virtual preemption bits */ + /* Current highest priority pending interrupt for this CPU. 
* This is cached information that can be recalculated from the * real state above; it doesn't need to be migrated. diff --git a/include/hw/loader.h b/include/hw/loader.h index 0c864cfd60..0dbd8d6bf3 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -180,7 +180,8 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr addr, const char *fw_file_name, FWCfgReadCallback fw_callback, - void *callback_opaque, AddressSpace *as); + void *callback_opaque, AddressSpace *as, + bool read_only); int rom_add_elf_program(const char *name, void *data, size_t datasize, size_t romsize, hwaddr addr, AddressSpace *as); int rom_check_and_register_reset(void); @@ -194,7 +195,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict); #define rom_add_file_fixed(_f, _a, _i) \ rom_add_file(_f, NULL, _a, _i, false, NULL, NULL) #define rom_add_blob_fixed(_f, _b, _l, _a) \ - rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL) + rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL, true) #define rom_add_file_mr(_f, _mr, _i) \ rom_add_file(_f, NULL, 0, _i, false, _mr, NULL) #define rom_add_file_as(_f, _as, _i) \ @@ -202,7 +203,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict); #define rom_add_file_fixed_as(_f, _a, _i, _as) \ rom_add_file(_f, NULL, _a, _i, false, NULL, _as) #define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \ - rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as) + rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true) #define PC_ROM_MIN_VGA 0xc0000 #define PC_ROM_MIN_OPTION 0xc8000 diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h index fdae229502..bf43998d9b 100644 --- a/include/hw/m68k/mcf.h +++ b/include/hw/m68k/mcf.h @@ -21,10 +21,6 @@ qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem, hwaddr base, M68kCPU *cpu); -/* mcf_fec.c */ -void mcf_fec_init(struct MemoryRegion *sysmem, NICInfo *nd, - hwaddr base, qemu_irq *irq); - /* mcf5206.c */ qemu_irq *mcf5206_init(struct MemoryRegion *sysmem, uint32_t base, M68kCPU *cpu); diff --git a/include/hw/m68k/mcf_fec.h b/include/hw/m68k/mcf_fec.h new file mode 100644 index 0000000000..7f029f7b59 --- /dev/null +++ b/include/hw/m68k/mcf_fec.h @@ -0,0 +1,13 @@ +/* + * Definitions for the ColdFire Fast Ethernet Controller emulation. + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define TYPE_MCF_FEC_NET "mcf-fec" +#define MCF_FEC_NET(obj) OBJECT_CHECK(mcf_fec_state, (obj), TYPE_MCF_FEC_NET) + +#define FEC_NUM_IRQ 13 diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 5c27a1f0d5..b980cbaebf 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -136,6 +136,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, * @callback_opaque: argument to be passed into callback function * @data: pointer to start of item data * @len: size of item data + * @read_only: is file read only * * Add a new NAMED fw_cfg item as a raw "blob" of the given size. 
The data * referenced by the starting pointer is only linked, NOT copied, into the @@ -151,7 +152,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, */ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, FWCfgReadCallback callback, void *callback_opaque, - void *data, size_t len); + void *data, size_t len, bool read_only); /** * fw_cfg_modify_file: diff --git a/include/hw/nvram/fw_cfg_keys.h b/include/hw/nvram/fw_cfg_keys.h index 0f3e871884..b6919451f5 100644 --- a/include/hw/nvram/fw_cfg_keys.h +++ b/include/hw/nvram/fw_cfg_keys.h @@ -29,8 +29,7 @@ #define FW_CFG_FILE_DIR 0x19 #define FW_CFG_FILE_FIRST 0x20 -#define FW_CFG_FILE_SLOTS 0x10 -#define FW_CFG_MAX_ENTRY (FW_CFG_FILE_FIRST + FW_CFG_FILE_SLOTS) +#define FW_CFG_FILE_SLOTS_MIN 0x10 #define FW_CFG_WRITE_CHANNEL 0x4000 #define FW_CFG_ARCH_LOCAL 0x8000 diff --git a/include/hw/sparc/sparc64.h b/include/hw/sparc/sparc64.h new file mode 100644 index 0000000000..7748939a97 --- /dev/null +++ b/include/hw/sparc/sparc64.h @@ -0,0 +1,5 @@ + +SPARCCPU *sparc64_cpu_devinit(const char *cpu_model, + const char *dflt_cpu_model, uint64_t prom_addr); + +void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level); diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index bdfbcc0ffa..1f557313fa 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -44,10 +44,12 @@ typedef struct AspeedSMCController { const AspeedSegments *segments; hwaddr flash_window_base; uint32_t flash_window_size; + bool has_dma; + uint32_t nregs; } AspeedSMCController; typedef struct AspeedSMCFlash { - const struct AspeedSMCState *controller; + struct AspeedSMCState *controller; uint8_t id; uint32_t size; diff --git a/include/hw/timer/sun4v-rtc.h b/include/hw/timer/sun4v-rtc.h new file mode 100644 index 0000000000..407278f918 --- /dev/null +++ b/include/hw/timer/sun4v-rtc.h @@ -0,0 +1 @@ +void sun4v_rtc_init(hwaddr addr); diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 30abc11cf1..c3cf4a72bc 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -11,6 +11,8 @@ #ifndef VHOST_BACKEND_H #define VHOST_BACKEND_H +#include "exec/memory.h" + typedef enum VhostBackendType { VHOST_BACKEND_TYPE_NONE = 0, VHOST_BACKEND_TYPE_KERNEL = 1, @@ -77,6 +79,14 @@ typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev, typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev, uint64_t guest_cid); typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start); +typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev, + int enabled); +typedef int (*vhost_update_device_iotlb_op)(struct vhost_dev *dev, + uint64_t iova, uint64_t uaddr, + uint64_t len, + IOMMUAccessFlags perm); +typedef int (*vhost_invalidate_device_iotlb_op)(struct vhost_dev *dev, + uint64_t iova, uint64_t len); typedef struct VhostOps { VhostBackendType backend_type; @@ -109,6 +119,9 @@ typedef struct VhostOps { vhost_backend_can_merge_op vhost_backend_can_merge; vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid; vhost_vsock_set_running_op vhost_vsock_set_running; + vhost_set_iotlb_callback_op vhost_set_iotlb_callback; + vhost_update_device_iotlb_op vhost_update_device_iotlb; + vhost_invalidate_device_iotlb_op vhost_invalidate_device_iotlb; } VhostOps; extern const VhostOps user_ops; diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 1fe5aadef5..52f633ec89 100644 --- a/include/hw/virtio/vhost.h +++ 
b/include/hw/virtio/vhost.h @@ -21,6 +21,7 @@ struct vhost_virtqueue { unsigned long long used_phys; unsigned used_size; EventNotifier masked_notifier; + struct vhost_dev *dev; }; typedef unsigned long vhost_log_chunk_t; @@ -38,6 +39,7 @@ struct vhost_log { struct vhost_memory; struct vhost_dev { + VirtIODevice *vdev; MemoryListener memory_listener; struct vhost_memory *mem; int n_mem_sections; @@ -62,6 +64,7 @@ struct vhost_dev { void *opaque; struct vhost_log *log; QLIST_ENTRY(vhost_dev) entry; + IOMMUNotifier n; }; int vhost_dev_init(struct vhost_dev *hdev, void *opaque, @@ -91,4 +94,5 @@ bool vhost_has_free_slot(void); int vhost_net_set_backend(struct vhost_dev *hdev, struct vhost_vring_file *file); +void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write); #endif diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index e5541c61f7..6523bacd2f 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -228,6 +228,7 @@ void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr); hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n); void virtio_queue_set_num(VirtIODevice *vdev, int n, int num); int virtio_queue_get_num(VirtIODevice *vdev, int n); +int virtio_queue_get_max_num(VirtIODevice *vdev, int n); int virtio_get_num_queues(VirtIODevice *vdev); void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, hwaddr avail, hwaddr used); diff --git a/include/qapi/error.h b/include/qapi/error.h index 0576659603..7e532d00e9 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -170,6 +170,9 @@ void error_setg_internal(Error **errp, * Just like error_setg(), with @os_error info added to the message. * If @os_error is non-zero, ": " + strerror(os_error) is appended to * the human-readable error message. + * + * The value of errno (which usually can get clobbered by almost any + * function call) will be preserved. */ #define error_setg_errno(errp, os_error, fmt, ...) \ error_setg_errno_internal((errp), __FILE__, __LINE__, __func__, \ diff --git a/include/qemu/futex.h b/include/qemu/futex.h new file mode 100644 index 0000000000..bb7dc9e296 --- /dev/null +++ b/include/qemu/futex.h @@ -0,0 +1,36 @@ +/* + * Wrappers around Linux futex syscall + * + * Copyright Red Hat, Inc. 2017 + * + * Author: + * Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include <sys/syscall.h> +#include <linux/futex.h> + +#define qemu_futex(...) 
syscall(__NR_futex, __VA_ARGS__) + +static inline void qemu_futex_wake(void *f, int n) +{ + qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0); +} + +static inline void qemu_futex_wait(void *f, unsigned val) +{ + while (qemu_futex(f, FUTEX_WAIT, (int) val, NULL, NULL, 0)) { + switch (errno) { + case EWOULDBLOCK: + return; + case EINTR: + break; /* get out of switch and retry */ + default: + abort(); + } + } +} diff --git a/include/qemu/thread.h b/include/qemu/thread.h index e8e665f020..9910f49b3a 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -8,6 +8,7 @@ typedef struct QemuMutex QemuMutex; typedef struct QemuCond QemuCond; typedef struct QemuSemaphore QemuSemaphore; typedef struct QemuEvent QemuEvent; +typedef struct QemuLockCnt QemuLockCnt; typedef struct QemuThread QemuThread; #ifdef _WIN32 @@ -98,4 +99,115 @@ static inline void qemu_spin_unlock(QemuSpin *spin) __sync_lock_release(&spin->value); } +struct QemuLockCnt { +#ifndef CONFIG_LINUX + QemuMutex mutex; +#endif + unsigned count; +}; + +/** + * qemu_lockcnt_init: initialize a QemuLockcnt + * @lockcnt: the lockcnt to initialize + * + * Initialize lockcnt's counter to zero and prepare its mutex + * for usage. + */ +void qemu_lockcnt_init(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_destroy: destroy a QemuLockcnt + * @lockcnt: the lockcnt to destruct + * + * Destroy lockcnt's mutex. + */ +void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc: increment a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + * + * If the lockcnt's count is zero, wait for critical sections + * to finish and increment lockcnt's count to 1. If the count + * is not zero, just increment it. + * + * Because this function can wait on the mutex, it must not be + * called while the lockcnt's mutex is held by the current thread. + * For the same reason, qemu_lockcnt_inc can also contribute to + * AB-BA deadlocks. This is a sample deadlock scenario: + * + * thread 1 thread 2 + * ------------------------------------------------------- + * qemu_lockcnt_lock(&lc1); + * qemu_lockcnt_lock(&lc2); + * qemu_lockcnt_inc(&lc2); + * qemu_lockcnt_inc(&lc1); + */ +void qemu_lockcnt_inc(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec: decrement a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + */ +void qemu_lockcnt_dec(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and + * possibly lock it. + * @lockcnt: the lockcnt to operate on + * + * Decrement lockcnt's count. If the new count is zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and + * lock it. + * @lockcnt: the lockcnt to operate on + * + * If the count is 1, decrement the count to zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. + * @lockcnt: the lockcnt to operate on + * + * Remember that concurrent visits are not blocked unless the count is + * also zero. You can use qemu_lockcnt_count to check for this inside a + * critical section. + */ +void qemu_lockcnt_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. + * @lockcnt: the lockcnt to operate on. 
+ */ +void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. + * @lockcnt: the lockcnt to operate on. + * + * This is the same as + * + * qemu_lockcnt_unlock(lockcnt); + * qemu_lockcnt_inc(lockcnt); + * + * but more efficient. + */ +void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_count: query a LockCnt's count. + * @lockcnt: the lockcnt to query. + * + * Note that the count can change at any time. Still, while the + * lockcnt is locked, one can usefully check whether the count + * is non-zero. + */ +unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); + #endif diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index be5b066aa4..e5a2e68b22 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -678,7 +678,6 @@ #define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM #define PCI_EXT_CAP_DSN_SIZEOF 12 -#define PCI_EXT_CAP_ATS_SIZEOF 8 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 /* Advanced Error Reporting */ diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h index 82275a84d8..5ff0b4ee59 100644 --- a/include/standard-headers/linux/virtio_crypto.h +++ b/include/standard-headers/linux/virtio_crypto.h @@ -1,5 +1,5 @@ -#ifndef _LINUX_VIRTIO_CRYPTO_H -#define _LINUX_VIRTIO_CRYPTO_H +#ifndef _VIRTIO_CRYPTO_H +#define _VIRTIO_CRYPTO_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * @@ -14,52 +14,54 @@ * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ - + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ #include "standard-headers/linux/types.h" -#include "standard-headers/linux/virtio_config.h" #include "standard-headers/linux/virtio_types.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_config.h" #define VIRTIO_CRYPTO_SERVICE_CIPHER 0 -#define VIRTIO_CRYPTO_SERVICE_HASH 1 -#define VIRTIO_CRYPTO_SERVICE_MAC 2 -#define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_HASH 1 +#define VIRTIO_CRYPTO_SERVICE_MAC 2 +#define VIRTIO_CRYPTO_SERVICE_AEAD 3 #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) struct virtio_crypto_ctrl_header { #define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02) #define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03) #define VIRTIO_CRYPTO_HASH_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02) #define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03) #define VIRTIO_CRYPTO_MAC_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02) #define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03) #define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) - __virtio32 opcode; - __virtio32 algo; - __virtio32 flag; - /* data virtqueue id */ - __virtio32 queue_id; + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) + uint32_t opcode; + uint32_t algo; + uint32_t flag; + /* data virtqueue id */ + uint32_t queue_id; }; struct virtio_crypto_cipher_session_para { @@ -78,26 +80,27 @@ struct virtio_crypto_cipher_session_para { #define VIRTIO_CRYPTO_CIPHER_AES_F8 12 #define VIRTIO_CRYPTO_CIPHER_AES_XTS 13 #define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14 - __virtio32 algo; - /* length of key */ - __virtio32 keylen; + uint32_t algo; + /* length of key */ + uint32_t keylen; #define VIRTIO_CRYPTO_OP_ENCRYPT 1 #define VIRTIO_CRYPTO_OP_DECRYPT 2 - /* encrypt or decrypt */ - __virtio32 op; - __virtio32 padding; + /* encrypt or decrypt */ + uint32_t op; + uint32_t padding; }; struct virtio_crypto_session_input { - /* Device-writable part */ - __virtio64 session_id; - __virtio32 status; - __virtio32 padding; + /* Device-writable part */ + uint64_t session_id; + uint32_t status; + uint32_t padding; }; struct virtio_crypto_cipher_session_req { - struct virtio_crypto_cipher_session_para para; + struct virtio_crypto_cipher_session_para para; + uint8_t padding[32]; }; struct virtio_crypto_hash_session_para { @@ -114,13 +117,15 @@ struct virtio_crypto_hash_session_para { #define VIRTIO_CRYPTO_HASH_SHA3_512 10 #define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11 #define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12 - __virtio32 algo; - /* hash result length */ - __virtio32 hash_result_len; + uint32_t algo; + /* hash result length */ + uint32_t hash_result_len; + uint8_t padding[8]; }; struct virtio_crypto_hash_create_session_req { - struct virtio_crypto_hash_session_para para; + struct 
virtio_crypto_hash_session_para para; + uint8_t padding[40]; }; struct virtio_crypto_mac_session_para { @@ -140,16 +145,17 @@ struct virtio_crypto_mac_session_para { #define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49 #define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50 #define VIRTIO_CRYPTO_MAC_XCBC_AES 53 - __virtio32 algo; - /* hash result length */ - __virtio32 hash_result_len; - /* length of authenticated key */ - __virtio32 auth_key_len; - __virtio32 padding; + uint32_t algo; + /* hash result length */ + uint32_t hash_result_len; + /* length of authenticated key */ + uint32_t auth_key_len; + uint32_t padding; }; struct virtio_crypto_mac_create_session_req { - struct virtio_crypto_mac_session_para para; + struct virtio_crypto_mac_session_para para; + uint8_t padding[40]; }; struct virtio_crypto_aead_session_para { @@ -157,273 +163,288 @@ struct virtio_crypto_aead_session_para { #define VIRTIO_CRYPTO_AEAD_GCM 1 #define VIRTIO_CRYPTO_AEAD_CCM 2 #define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3 - __virtio32 algo; - /* length of key */ - __virtio32 key_len; - /* digest result length */ - __virtio32 digest_result_len; - /* length of the additional authenticated data (AAD) in bytes */ - __virtio32 aad_len; - /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */ - __virtio32 op; - __virtio32 padding; + uint32_t algo; + /* length of key */ + uint32_t key_len; + /* hash result length */ + uint32_t hash_result_len; + /* length of the additional authenticated data (AAD) in bytes */ + uint32_t aad_len; + /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */ + uint32_t op; + uint32_t padding; }; struct virtio_crypto_aead_create_session_req { - struct virtio_crypto_aead_session_para para; + struct virtio_crypto_aead_session_para para; + uint8_t padding[32]; }; struct virtio_crypto_alg_chain_session_para { #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 - __virtio32 alg_chain_order; + uint32_t alg_chain_order; /* Plain hash */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1 /* Authenticated hash (mac) */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2 /* Nested hash */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3 - __virtio32 hash_mode; - struct virtio_crypto_cipher_session_para cipher_param; - union { - struct virtio_crypto_hash_session_para hash_param; - struct virtio_crypto_mac_session_para mac_param; - } u; - /* length of the additional authenticated data (AAD) in bytes */ - __virtio32 aad_len; - __virtio32 padding; + uint32_t hash_mode; + struct virtio_crypto_cipher_session_para cipher_param; + union { + struct virtio_crypto_hash_session_para hash_param; + struct virtio_crypto_mac_session_para mac_param; + uint8_t padding[16]; + } u; + /* length of the additional authenticated data (AAD) in bytes */ + uint32_t aad_len; + uint32_t padding; }; struct virtio_crypto_alg_chain_session_req { - struct virtio_crypto_alg_chain_session_para para; + struct virtio_crypto_alg_chain_session_para para; }; struct virtio_crypto_sym_create_session_req { - union { - struct virtio_crypto_cipher_session_req cipher; - struct virtio_crypto_alg_chain_session_req chain; - } u; + union { + struct virtio_crypto_cipher_session_req cipher; + struct virtio_crypto_alg_chain_session_req chain; + uint8_t padding[48]; + } u; - /* Device-readable part */ + /* Device-readable part */ /* No operation */ #define VIRTIO_CRYPTO_SYM_OP_NONE 0 /* Cipher only operation on the data */ #define VIRTIO_CRYPTO_SYM_OP_CIPHER 1 -/* Chain any cipher with any hash or mac operation. 
The order - depends on the value of alg_chain_order param */ +/* + * Chain any cipher with any hash or mac operation. The order + * depends on the value of alg_chain_order param + */ #define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2 - __virtio32 op_type; - __virtio32 padding; + uint32_t op_type; + uint32_t padding; }; struct virtio_crypto_destroy_session_req { - /* Device-readable part */ - __virtio64 session_id; + /* Device-readable part */ + uint64_t session_id; + uint8_t padding[48]; }; -/* The request of the control viritqueue's packet */ +/* The request of the control virtqueue's packet */ struct virtio_crypto_op_ctrl_req { - struct virtio_crypto_ctrl_header header; - - union { - struct virtio_crypto_sym_create_session_req sym_create_session; - struct virtio_crypto_hash_create_session_req hash_create_session; - struct virtio_crypto_mac_create_session_req mac_create_session; - struct virtio_crypto_aead_create_session_req aead_create_session; - struct virtio_crypto_destroy_session_req destroy_session; - } u; + struct virtio_crypto_ctrl_header header; + + union { + struct virtio_crypto_sym_create_session_req + sym_create_session; + struct virtio_crypto_hash_create_session_req + hash_create_session; + struct virtio_crypto_mac_create_session_req + mac_create_session; + struct virtio_crypto_aead_create_session_req + aead_create_session; + struct virtio_crypto_destroy_session_req + destroy_session; + uint8_t padding[56]; + } u; }; struct virtio_crypto_op_header { #define VIRTIO_CRYPTO_CIPHER_ENCRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00) #define VIRTIO_CRYPTO_CIPHER_DECRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01) #define VIRTIO_CRYPTO_HASH \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00) #define VIRTIO_CRYPTO_MAC \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00) #define VIRTIO_CRYPTO_AEAD_ENCRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) #define VIRTIO_CRYPTO_AEAD_DECRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) - __virtio32 opcode; - /* algo should be service-specific algorithms */ - __virtio32 algo; - /* session_id should be service-specific algorithms */ - __virtio64 session_id; - /* control flag to control the request */ - __virtio32 flag; - __virtio32 padding; + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) + uint32_t opcode; + /* algo should be service-specific algorithms */ + uint32_t algo; + /* session_id should be service-specific algorithms */ + uint64_t session_id; + /* control flag to control the request */ + uint32_t flag; + uint32_t padding; }; struct virtio_crypto_cipher_para { - /* - * Byte Length of valid IV/Counter - * - * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for - * SNOW3G in UEA2 mode, this is the length of the IV (which - * must be the same as the block length of the cipher). - * - For block ciphers in CTR mode, this is the length of the counter - * (which must be the same as the block length of the cipher). - * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007. - * - * The IV/Counter will be updated after every partial cryptographic - * operation. 
- */ - __virtio32 iv_len; - /* length of source data */ - __virtio32 src_data_len; - /* length of dst data */ - __virtio32 dst_data_len; - __virtio32 padding; + /* + * Byte Length of valid IV/Counter + * + * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for + * SNOW3G in UEA2 mode, this is the length of the IV (which + * must be the same as the block length of the cipher). + * For block ciphers in CTR mode, this is the length of the counter + * (which must be the same as the block length of the cipher). + * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007. + * + * The IV/Counter will be updated after every partial cryptographic + * operation. + */ + uint32_t iv_len; + /* length of source data */ + uint32_t src_data_len; + /* length of dst data */ + uint32_t dst_data_len; + uint32_t padding; }; struct virtio_crypto_hash_para { - /* length of source data */ - __virtio32 src_data_len; - /* hash result length */ - __virtio32 hash_result_len; + /* length of source data */ + uint32_t src_data_len; + /* hash result length */ + uint32_t hash_result_len; }; struct virtio_crypto_mac_para { - struct virtio_crypto_hash_para hash; + struct virtio_crypto_hash_para hash; }; struct virtio_crypto_aead_para { - /* - * Byte Length of valid IV data pointed to by the below iv_addr - * parameter. - * - * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which - * case iv_addr points to J0. - * - For CCM mode, this is the length of the nonce, which can be in the - * range 7 to 13 inclusive. - */ - __virtio32 iv_len; - /* length of additional auth data */ - __virtio32 aad_len; - /* length of source data */ - __virtio32 src_data_len; - /* length of dst data */ - __virtio32 dst_data_len; + /* + * Byte Length of valid IV data pointed to by the below iv_addr + * parameter. + * + * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which + * case iv_addr points to J0. + * For CCM mode, this is the length of the nonce, which can be in the + * range 7 to 13 inclusive. 
+ */ + uint32_t iv_len; + /* length of additional auth data */ + uint32_t aad_len; + /* length of source data */ + uint32_t src_data_len; + /* length of dst data */ + uint32_t dst_data_len; }; struct virtio_crypto_cipher_data_req { - /* Device-readable part */ - struct virtio_crypto_cipher_para para; + /* Device-readable part */ + struct virtio_crypto_cipher_para para; + uint8_t padding[24]; }; struct virtio_crypto_hash_data_req { - /* Device-readable part */ - struct virtio_crypto_hash_para para; + /* Device-readable part */ + struct virtio_crypto_hash_para para; + uint8_t padding[40]; }; struct virtio_crypto_mac_data_req { - /* Device-readable part */ - struct virtio_crypto_mac_para para; + /* Device-readable part */ + struct virtio_crypto_mac_para para; + uint8_t padding[40]; }; struct virtio_crypto_alg_chain_data_para { - __virtio32 iv_len; - /* Length of source data */ - __virtio32 src_data_len; - /* Length of destination data */ - __virtio32 dst_data_len; - /* Starting point for cipher processing in source data */ - __virtio32 cipher_start_src_offset; - /* Length of the source data that the cipher will be computed on */ - __virtio32 len_to_cipher; - /* Starting point for hash processing in source data */ - __virtio32 hash_start_src_offset; - /* Length of the source data that the hash will be computed on */ - __virtio32 len_to_hash; - /* Length of the additional auth data */ - __virtio32 aad_len; - /* Length of the hash result */ - __virtio32 hash_result_len; - __virtio32 reserved; + uint32_t iv_len; + /* Length of source data */ + uint32_t src_data_len; + /* Length of destination data */ + uint32_t dst_data_len; + /* Starting point for cipher processing in source data */ + uint32_t cipher_start_src_offset; + /* Length of the source data that the cipher will be computed on */ + uint32_t len_to_cipher; + /* Starting point for hash processing in source data */ + uint32_t hash_start_src_offset; + /* Length of the source data that the hash will be computed on */ + uint32_t len_to_hash; + /* Length of the additional auth data */ + uint32_t aad_len; + /* Length of the hash result */ + uint32_t hash_result_len; + uint32_t reserved; }; struct virtio_crypto_alg_chain_data_req { - /* Device-readable part */ - struct virtio_crypto_alg_chain_data_para para; + /* Device-readable part */ + struct virtio_crypto_alg_chain_data_para para; }; struct virtio_crypto_sym_data_req { - union { - struct virtio_crypto_cipher_data_req cipher; - struct virtio_crypto_alg_chain_data_req chain; - } u; - - /* See above VIRTIO_CRYPTO_SYM_OP_* */ - __virtio32 op_type; - __virtio32 padding; + union { + struct virtio_crypto_cipher_data_req cipher; + struct virtio_crypto_alg_chain_data_req chain; + uint8_t padding[40]; + } u; + + /* See above VIRTIO_CRYPTO_SYM_OP_* */ + uint32_t op_type; + uint32_t padding; }; struct virtio_crypto_aead_data_req { - /* Device-readable part */ - struct virtio_crypto_aead_para para; + /* Device-readable part */ + struct virtio_crypto_aead_para para; + uint8_t padding[32]; }; -/* The request of the data viritqueue's packet */ +/* The request of the data virtqueue's packet */ struct virtio_crypto_op_data_req { - struct virtio_crypto_op_header header; - - union { - struct virtio_crypto_sym_data_req sym_req; - struct virtio_crypto_hash_data_req hash_req; - struct virtio_crypto_mac_data_req mac_req; - struct virtio_crypto_aead_data_req aead_req; - } u; + struct virtio_crypto_op_header header; + + union { + struct virtio_crypto_sym_data_req sym_req; + struct virtio_crypto_hash_data_req hash_req; 
+ struct virtio_crypto_mac_data_req mac_req; + struct virtio_crypto_aead_data_req aead_req; + uint8_t padding[48]; + } u; }; #define VIRTIO_CRYPTO_OK 0 #define VIRTIO_CRYPTO_ERR 1 #define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_NOTSUPP 3 -#define VIRTIO_CRYPTO_INVSESS 4 /* Invaild session id */ +#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) -#define VIRTIO_CRYPTO_S_STARTED (1 << 1) struct virtio_crypto_config { - /* See VIRTIO_CRYPTO_* above */ - __virtio32 status; - - /* - * Maximum number of data queue legal values are between 1 and 0x8000 - */ - __virtio32 max_dataqueues; - - /* Specifies the services mask which the devcie support, - see VIRTIO_CRYPTO_SERVICE_* above */ - __virtio32 crypto_services; - - /* Detailed algorithms mask */ - __virtio32 cipher_algo_l; - __virtio32 cipher_algo_h; - __virtio32 hash_algo; - __virtio32 mac_algo_l; - __virtio32 mac_algo_h; - __virtio32 aead_algo; - - /* Maximum length of cipher key */ - uint32_t max_cipher_key_len; - /* Maximum length of authenticated key */ - uint32_t max_auth_key_len; - - __virtio32 reserve; - - /* The maximum size of per request's content */ - __virtio64 max_size; + /* See VIRTIO_CRYPTO_OP_* above */ + uint32_t status; + + /* + * Maximum number of data queue + */ + uint32_t max_dataqueues; + + /* + * Specifies the services mask which the device support, + * see VIRTIO_CRYPTO_SERVICE_* above + */ + uint32_t crypto_services; + + /* Detailed algorithms mask */ + uint32_t cipher_algo_l; + uint32_t cipher_algo_h; + uint32_t hash_algo; + uint32_t mac_algo_l; + uint32_t mac_algo_h; + uint32_t aead_algo; + /* Maximum length of cipher key */ + uint32_t max_cipher_key_len; + /* Maximum length of authenticated key */ + uint32_t max_auth_key_len; + uint32_t reserve; + /* Maximum size of each crypto request's content */ + uint64_t max_size; }; struct virtio_crypto_inhdr { - /* See VIRTIO_CRYPTO_* above */ - uint8_t status; + /* See VIRTIO_CRYPTO_* above */ + uint8_t status; }; - -#endif /* _LINUX_VIRTIO_CRYPTO_H */ +#endif diff --git a/include/standard-headers/linux/virtio_mmio.h b/include/standard-headers/linux/virtio_mmio.h new file mode 100644 index 0000000000..c4b09689ab --- /dev/null +++ b/include/standard-headers/linux/virtio_mmio.h @@ -0,0 +1,141 @@ +/* + * Virtio platform device driver + * + * Copyright 2011, ARM Ltd. + * + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MMIO_H
+#define _LINUX_VIRTIO_MMIO_H
+
+/*
+ * Control registers
+ */
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/* Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/* Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+#endif
+
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+#endif
+
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/* The config space is defined by each driver as
+ * the per-driver configuration space - Read Write */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+
+#define VIRTIO_MMIO_INT_VRING (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
+
+#endif
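Two of the interfaces introduced above lend themselves to short usage sketches.

The QemuLockCnt API added to include/qemu/thread.h (and used for AioContext's list_lock) supports a walk-and-reap pattern: readers bump the counter while traversing a shared list, writers only mark nodes as deleted while the counter is non-zero, and the last reader to leave takes the mutex and frees the marked nodes. The sketch below is illustrative only; the Item structure, the items list and walk_items() are invented for the example, and only the qemu_lockcnt_* calls come from the API documented in this series.

/* Minimal sketch of a QemuLockCnt-protected list walk.  items_lock is
 * assumed to have been set up once with qemu_lockcnt_init(). */
#include "qemu/osdep.h"
#include "qemu/queue.h"
#include "qemu/thread.h"

typedef struct Item {
    QLIST_ENTRY(Item) next;
    void (*cb)(void *opaque);
    void *opaque;
    bool deleted;               /* set by writers instead of freeing */
} Item;

static QLIST_HEAD(, Item) items = QLIST_HEAD_INITIALIZER(items);
static QemuLockCnt items_lock;

static void walk_items(void)
{
    Item *it, *tmp;

    /* Take a reference: while the count is non-zero, writers must not
     * free list nodes, they may only mark them as deleted. */
    qemu_lockcnt_inc(&items_lock);
    QLIST_FOREACH(it, &items, next) {
        if (!it->deleted) {
            it->cb(it->opaque);
        }
    }
    /* If we were the last reader, dec_and_lock returns true with the
     * mutex held and we can reap the nodes marked as deleted. */
    if (qemu_lockcnt_dec_and_lock(&items_lock)) {
        QLIST_FOREACH_SAFE(it, &items, next, tmp) {
            if (it->deleted) {
                QLIST_REMOVE(it, next);
                g_free(it);
            }
        }
        qemu_lockcnt_unlock(&items_lock);
    }
}

The new include/standard-headers/linux/virtio_mmio.h register layout can likewise be exercised with a trivial probe. read_mmio32() below is a placeholder for whatever 32-bit MMIO accessor the environment provides, not a QEMU or kernel function; the "virt" magic value and the rule that a device ID of 0 means no backing device follow the virtio specification.

/* Minimal probe sketch for a virtio-mmio transport, using the register
 * offsets from the header above.  read_mmio32() is hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include "standard-headers/linux/virtio_mmio.h"

#define VIRTIO_MMIO_MAGIC  ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)

extern uint32_t read_mmio32(uint64_t base, uint32_t offset);

static bool virtio_mmio_probe(uint64_t base)
{
    uint32_t version, device_id;

    if (read_mmio32(base, VIRTIO_MMIO_MAGIC_VALUE) != VIRTIO_MMIO_MAGIC) {
        return false;                       /* not a virtio-mmio region */
    }
    /* Version 1 is the legacy interface, version 2 is virtio 1.0. */
    version = read_mmio32(base, VIRTIO_MMIO_VERSION);
    /* A device ID of 0 marks a placeholder with no device behind it. */
    device_id = read_mmio32(base, VIRTIO_MMIO_DEVICE_ID);

    return (version == 1 || version == 2) && device_id != 0;
}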