mirror of https://github.com/xemu-project/xemu.git
trivial patches for 2023-09-21
Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging

trivial patches for 2023-09-21

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEe3O61ovnosKJMUsicBtPaxppPlkFAmUL/84PHG1qdEB0bHMu
# bXNrLnJ1AAoJEHAbT2saaT5Zlz4H/iI7Rhmsw6E46WhQPz1oly8p5I3m6Tcxs5B3
# nagfaJC0EYjKyMZC1bsATJwRj8robCb5SDhZeUfudt1ytZYFfH3ulvlUrGYrMQRW
# YEfBFIDLexqrLpsykc6ovl2NB5BXQsK3n6NNbnYE1OxQt8Cy4kNQi1bStrZ8JzDE
# lIxvWZdwoQJ2K0VRDGRLrL6XG80qeONSXEoppXxJlfhk1Ar3Ruhijn3REzfQybvV
# 1zIa1/h80fSLuwOGSPuOLqVCt6JzTuOOrfYc9F+sjcmIQWHLECy6CwTHEbb921Tw
# 9HD6ah4rvkxoN2NWSPo/kM6tNW/pyOiYwYldx5rfWcQ5mhScuO8=
# =u6P0
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 21 Sep 2023 04:33:18 EDT
# gpg: using RSA key 7B73BAD68BE7A2C289314B22701B4F6B1A693E59
# gpg: issuer "mjt@tls.msk.ru"
# gpg: Good signature from "Michael Tokarev <mjt@tls.msk.ru>" [full]
# gpg: aka "Michael Tokarev <mjt@corpit.ru>" [full]
# gpg: aka "Michael Tokarev <mjt@debian.org>" [full]
# Primary key fingerprint: 6EE1 95D1 886E 8FFB 810D 4324 457C E0A0 8044 65C5
# Subkey fingerprint: 7B73 BAD6 8BE7 A2C2 8931 4B22 701B 4F6B 1A69 3E59

* tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu:
  docs/devel/reset.rst: Correct function names
  docs/cxl: Cleanout some more aarch64 examples.
  hw/mem/cxl_type3: Add missing copyright and license notice
  hw/cxl: Fix out of bound array access
  docs/cxl: Change to lowercase as others
  hw/cxl/cxl_device: Replace magic number in CXLError definition
  hw/pci-bridge/cxl_upstream: Fix bandwidth entry base unit for SSLBIS
  hw/cxl: Fix CFMW config memory leak
  hw/i386/pc: fix code comment on cumulative flash size
  subprojects: Use the correct .git suffix in the repository URLs
  hw/other: spelling fixes
  hw/tpm: spelling fixes
  hw/pci: spelling fixes
  hw/net: spelling fixes
  i386: spelling fixes
  bsd-user: spelling fixes
  ppc: spelling fixes

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit b55e4b9c05
@@ -149,7 +149,7 @@
 #define TARGET_ELAST 90 /* Must be equal largest errno */

 /* Internal errors: */
-#define TARGET_EJUSTRETURN 254 /* Just return without modifing regs */
+#define TARGET_EJUSTRETURN 254 /* Just return without modifying regs */
 #define TARGET_ERESTART 255 /* Restart syscall */

 #include "special-errno.h"
@@ -72,7 +72,7 @@ typedef struct target_siginfo {
 int32_t _mqd;
 } _mesgp;

-/* SIGPOLL -- Not really genreated in FreeBSD ??? */
+/* SIGPOLL -- Not really generated in FreeBSD ??? */
 struct {
 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
 } _poll;
@@ -25,7 +25,7 @@
 #include "qemu/guest-random.h"

 /*
- * The inital FreeBSD stack is as follows:
+ * The initial FreeBSD stack is as follows:
  * (see kern/kern_exec.c exec_copyout_strings() )
  *
  * Hi Address -> char **ps_argvstr (struct ps_strings for ps, w, etc.)
@@ -59,7 +59,7 @@ static inline int setup_initial_stack(struct bsd_binprm *bprm,
 /* Save some space for ps_strings. */
 p -= sizeof(struct target_ps_strings);

-/* Add machine depedent sigcode. */
+/* Add machine dependent sigcode. */
 p -= TARGET_SZSIGCODE;
 if (setup_sigtramp(p, (unsigned)offsetof(struct target_sigframe, sf_uc),
 TARGET_FREEBSD_NR_sigreturn)) {
@@ -26,7 +26,7 @@
 struct target_priority {
 uint8_t pri_class; /* Scheduling class. */
 uint8_t pri_level; /* Normal priority level. */
-uint8_t pri_native; /* Priority before propogation. */
+uint8_t pri_native; /* Priority before propagation. */
 uint8_t pri_user; /* User priority based on p_cpu and p_nice. */
 };

@@ -116,7 +116,7 @@ extern const char *qemu_uname_release;
 /*
  * TARGET_ARG_MAX defines the number of bytes allocated for arguments
  * and envelope for the new program. 256k should suffice for a reasonable
- * maxiumum env+arg in 32-bit environments, bump it up to 512k for !ILP32
+ * maximum env+arg in 32-bit environments, bump it up to 512k for !ILP32
  * platforms.
  */
 #if TARGET_ABI_BITS > 32
@@ -49,11 +49,11 @@ void target_to_host_sigset(sigset_t *d, const target_sigset_t *s);
  * union in target_siginfo is valid. This only applies between
  * host_to_target_siginfo_noswap() and tswap_siginfo(); it does not appear
  * either within host siginfo_t or in target_siginfo structures which we get
- * from the guest userspace program. Linux kenrels use this internally, but BSD
+ * from the guest userspace program. Linux kernels use this internally, but BSD
  * kernels don't do this, but its a useful abstraction.
  *
  * The linux-user version of this uses the top 16 bits, but FreeBSD's SI_USER
- * and other signal indepenent SI_ codes have bit 16 set, so we only use the top
+ * and other signal independent SI_ codes have bit 16 set, so we only use the top
  * byte instead.
  *
  * For FreeBSD, we have si_pid, si_uid, si_status, and si_addr always. Linux and
@@ -44,7 +44,7 @@ static inline int sas_ss_flags(TaskState *ts, unsigned long sp)
 }

 /*
- * The BSD ABIs use the same singal numbers across all the CPU architectures, so
+ * The BSD ABIs use the same signal numbers across all the CPU architectures, so
  * (unlike Linux) these functions are just the identity mapping. This might not
  * be true for XyzBSD running on AbcBSD, which doesn't currently work.
  */
@@ -241,7 +241,7 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
 #endif
 /*
  * Unsure that this can actually be generated, and our support for
- * capsicum is somewhere between weak and non-existant, but if we get
+ * capsicum is somewhere between weak and non-existent, but if we get
  * one, then we know what to save.
  */
 #ifdef QEMU_SI_CAPSICUM
@@ -319,7 +319,7 @@ int block_signals(void)
 /*
  * It's OK to block everything including SIGSEGV, because we won't run any
  * further guest code before unblocking signals in
- * process_pending_signals(). We depend on the FreeBSD behaivor here where
+ * process_pending_signals(). We depend on the FreeBSD behavior here where
  * this will only affect this thread's signal mask. We don't use
  * pthread_sigmask which might seem more correct because that routine also
  * does odd things with SIGCANCEL to implement pthread_cancel().
@@ -184,21 +184,20 @@ in reset.
 {
 MyDevClass *myclass = MYDEV_CLASS(class);
 ResettableClass *rc = RESETTABLE_CLASS(class);
-resettable_class_set_parent_reset_phases(rc,
-mydev_reset_enter,
-mydev_reset_hold,
-mydev_reset_exit,
-&myclass->parent_phases);
+resettable_class_set_parent_phases(rc,
+mydev_reset_enter,
+mydev_reset_hold,
+mydev_reset_exit,
+&myclass->parent_phases);
 }

 In the above example, we override all three phases. It is possible to override
 only some of them by passing NULL instead of a function pointer to
-``resettable_class_set_parent_reset_phases()``. For example, the following will
+``resettable_class_set_parent_phases()``. For example, the following will
 only override the *enter* phase and leave *hold* and *exit* untouched::

-resettable_class_set_parent_reset_phases(rc, mydev_reset_enter,
-NULL, NULL,
-&myclass->parent_phases);
+resettable_class_set_parent_phases(rc, mydev_reset_enter, NULL, NULL,
+&myclass->parent_phases);

 This is equivalent to providing a trivial implementation of the hold and exit
 phases which does nothing but call the parent class's implementation of the
@@ -157,7 +157,7 @@ responsible for allocating appropriate ranges from within the CFMWs
 and exposing those via normal memory configurations as would be done
 for system RAM.

-Example system Topology. x marks the match in each decoder level::
+Example system topology. x marks the match in each decoder level::

 |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->|
 | __________ __________________________________ __________ |
@@ -187,8 +187,8 @@ Example system Topology. x marks the match in each decoder level::
 ___________|___ __________|__ __|_________ ___|_________
 (3)| Root Port 0 | | Root Port 1 | | Root Port 2| | Root Port 3 |
 | Appears in | | Appears in | | Appears in | | Appear in |
-| PCI topology | | PCI Topology| | PCI Topo | | PCI Topo |
-| As 0c:00.0 | | as 0c:01.0 | | as de:00.0 | | as de:01.0 |
+| PCI topology | | PCI topology| | PCI topo | | PCI topo |
+| as 0c:00.0 | | as 0c:01.0 | | as de:00.0 | | as de:01.0 |
 |_______________| |_____________| |____________| |_____________|
 | | | |
 | | | |
@@ -272,7 +272,7 @@ Example topology involving a switch::
 | Root Port 0 |
 | Appears in |
 | PCI topology |
-| As 0c:00.0 |
+| as 0c:00.0 |
 |___________x___|
 |
 |
@@ -313,7 +313,7 @@ A very simple setup with just one directly attached CXL Type 3 Persistent Memory

 A very simple setup with just one directly attached CXL Type 3 Volatile Memory device::

-qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \
 ...
 -object memory-backend-ram,id=vmem0,share=on,size=256M \
 -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
@@ -323,7 +323,7 @@ A very simple setup with just one directly attached CXL Type 3 Volatile Memory d

 The same volatile setup may optionally include an LSA region::

-qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \
 ...
 -object memory-backend-ram,id=vmem0,share=on,size=256M \
 -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa.raw,size=256M \
@@ -1,6 +1,6 @@
 /*
  * SPDX-License-Identifier: GPL-2.0-or-later
- * Host specific cpu indentification for x86.
+ * Host specific cpu identification for x86.
  */

 #ifndef HOST_CPUINFO_H
@@ -1,6 +1,6 @@
 /*
  * SPDX-License-Identifier: GPL-2.0-or-later
- * Host specific cpu indentification for ppc.
+ * Host specific cpu identification for ppc.
  */

 #ifndef HOST_CPUINFO_H
@@ -312,7 +312,7 @@ build_prepend_package_length(GArray *package, unsigned length, bool incl_self)
 /*
  * PkgLength is the length of the inclusive length of the data
  * and PkgLength's length itself when used for terms with
- * explitit length.
+ * explicit length.
  */
 length += length_bytes;
 }
@@ -680,7 +680,7 @@ Aml *aml_store(Aml *val, Aml *target)
  * "Op Operand Operand Target"
  * pattern.
  *
- * Returns: The newly allocated and composed according to patter Aml object.
+ * Returns: The newly allocated and composed according to pattern Aml object.
  */
 static Aml *
 build_opcode_2arg_dst(uint8_t op, Aml *arg1, Aml *arg2, Aml *dst)
@@ -2159,7 +2159,7 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
 /* FADT Minor Version */
 build_append_int_noprefix(tbl, f->minor_ver, 1);
 } else {
-build_append_int_noprefix(tbl, 0, 3); /* Reserved upto ACPI 5.0 */
+build_append_int_noprefix(tbl, 0, 3); /* Reserved up to ACPI 5.0 */
 }
 build_append_int_noprefix(tbl, 0, 8); /* X_FIRMWARE_CTRL */

@@ -82,7 +82,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb,
 uint32_t base;
 /* Length in bytes for entire structure */
 uint32_t lb_length
-= 32 /* Table length upto and including Entry Base Unit */
+= 32 /* Table length up to and including Entry Base Unit */
 + 4 * num_initiator /* Initiator Proximity Domain List */
 + 4 * num_target /* Target Proximity Domain List */
 + 2 * num_initiator * num_target; /* Latency or Bandwidth Entries */
@@ -1102,7 +1102,7 @@ static void nvdimm_build_common_dsm(Aml *dev,
  * be treated as an integer. Moreover, the integer size depends on
  * DSDT tables revision number. If revision number is < 2, integer
  * size is 32 bits, otherwise it is 64 bits.
- * Because of this CreateField() canot be used if RLEN < Integer Size.
+ * Because of this CreateField() cannot be used if RLEN < Integer Size.
  *
  * Also please note that APCI ASL operator SizeOf() doesn't support
  * Integer and there isn't any other way to figure out the Integer
@@ -50,7 +50,7 @@ struct partition {
 uint32_t nr_sects; /* nr of sectors in partition */
 } QEMU_PACKED;

-/* try to guess the disk logical geometry from the MSDOS partition table.
+/* try to guess the disk logical geometry from the MS-DOS partition table.
 Return 0 if OK, -1 if could not guess */
 static int guess_disk_lchs(BlockBackend *blk,
 int *pcylinders, int *pheads, int *psectors)
@@ -66,7 +66,7 @@ static int guess_disk_lchs(BlockBackend *blk,
 if (blk_pread(blk, 0, BDRV_SECTOR_SIZE, buf, 0) < 0) {
 return -1;
 }
-/* test msdos magic */
+/* test MS-DOS magic */
 if (buf[510] != 0x55 || buf[511] != 0xaa) {
 return -1;
 }
@@ -891,7 +891,7 @@ static Property pflash_cfi01_properties[] = {
 /* num-blocks is the number of blocks actually visible to the guest,
  * ie the total size of the device divided by the sector length.
  * If we're emulating flash devices wired in parallel the actual
- * number of blocks per indvidual device will differ.
+ * number of blocks per individual device will differ.
  */
 DEFINE_PROP_UINT32("num-blocks", PFlashCFI01, nb_blocs, 0),
 DEFINE_PROP_UINT64("sector-length", PFlashCFI01, sector_len, 0),
@@ -575,7 +575,7 @@ static int cadence_uart_pre_load(void *opaque)
 {
 CadenceUARTState *s = opaque;

-/* the frequency will be overriden if the refclk field is present */
+/* the frequency will be overridden if the refclk field is present */
 clock_set_hz(s->refclk, UART_DEFAULT_REF_CLK);
 return 0;
 }
@@ -112,7 +112,7 @@ static void imx_serial_reset_at_boot(DeviceState *dev)
 imx_serial_reset(s);

 /*
- * enable the uart on boot, so messages from the linux decompresser
+ * enable the uart on boot, so messages from the linux decompressor
  * are visible. On real hardware this is done by the boot rom
  * before anything else is loaded.
  */
@@ -54,7 +54,7 @@
 #define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
 #define UART_IIR_CTI 0x0C /* Character Timeout Indication */

-#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functionning */
+#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functioning */
 #define UART_IIR_FE 0xC0 /* Fifo enabled */

 /*
@@ -24,7 +24,7 @@
 * callback that does the memory operations.

 * This device allows the user to monkey patch memory. To be able to do
-* this it needs a backend to manage the datas, the same as other
+* this it needs a backend to manage the data, the same as other
 * memory-related devices. In this case as the backend is so trivial we
 * have merged it with the frontend instead of creating and maintaining a
 * separate backend.
@@ -166,7 +166,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp)
 }
 }

-/* Convert the data endiannes */
+/* Convert the data endianness */
 if (s->data_be) {
 s->data = cpu_to_be64(s->data);
 } else {
@@ -1426,7 +1426,7 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error *
 for (i = 0; machine_class->valid_cpu_types[i]; i++) {
 if (object_class_dynamic_cast(oc,
 machine_class->valid_cpu_types[i])) {
-/* The user specificed CPU is in the valid field, we are
+/* The user specified CPU is in the valid field, we are
  * good to go.
  */
 break;
@@ -107,7 +107,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
 }

 if (*ptr) {
-/* BlockBackend alread exists. So, we want to change attached node */
+/* BlockBackend already exists. So, we want to change attached node */
 blk = *ptr;
 ctx = blk_get_aio_context(blk);
 bs = bdrv_lookup_bs(NULL, str, errp);
@@ -161,7 +161,7 @@ static void a15mp_priv_class_init(ObjectClass *klass, void *data)

 dc->realize = a15mp_priv_realize;
 device_class_set_props(dc, a15mp_priv_properties);
-/* We currently have no savable state */
+/* We currently have no saveable state */
 }

 static const TypeInfo a15mp_priv_info = {
@@ -197,7 +197,7 @@ CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, CXLClearEventPayload *

 QEMU_LOCK_GUARD(&log->lock);
 /*
- * Must itterate the queue twice.
+ * Must iterate the queue twice.
  * "The device shall verify the event record handles specified in the input
  * payload are in temporal order. If the device detects an older event
  * record that will not be cleared when Clear Event Records is executed,
@@ -39,12 +39,6 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state,
 return;
 }

-fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets));
-for (i = 0, target = object->targets; target; i++, target = target->next) {
-/* This link cannot be resolved yet, so stash the name for now */
-fw->targets[i] = g_strdup(target->value);
-}
-
 if (object->size % (256 * MiB)) {
 error_setg(errp,
 "Size of a CXL fixed memory window must be a multiple of 256MiB");
@@ -64,6 +58,12 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state,
 fw->enc_int_gran = 0;
 }

+fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets));
+for (i = 0, target = object->targets; target; i++, target = target->next) {
+/* This link cannot be resolved yet, so stash the name for now */
+fw->targets[i] = g_strdup(target->value);
+}
+
 cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows,
 g_steal_pointer(&fw));
@@ -39,7 +39,7 @@
 * fill the output data into cmd->payload (overwriting what was there),
 * setting the length, and returning a valid return code.
 *
-* XXX: The handler need not worry about endianess. The payload is read out of
+* XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
@@ -501,7 +501,7 @@ static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd,
 uint16_t out_pl_len;

 query_start = ldq_le_p(&in->pa);
-/* 64 byte alignemnt required */
+/* 64 byte alignment required */
 if (query_start & 0x3f) {
 return CXL_MBOX_INVALID_INPUT;
 }
@@ -247,7 +247,7 @@ static void omap_dma_deactivate_channel(struct omap_dma_s *s,
 return;
 }

-/* Don't deactive the channel if it is synchronized and the DMA request is
+/* Don't deactivate the channel if it is synchronized and the DMA request is
 active */
 if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
 return;
@@ -422,7 +422,7 @@ static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)

 if (ch->fs && ch->bs) {
 a->pck_element ++;
-/* Check if a full packet has beed transferred. */
+/* Check if a full packet has been transferred. */
 if (a->pck_element == a->pck_elements) {
 a->pck_element = 0;
@@ -779,7 +779,7 @@ static Aml *initialize_route(Aml *route, const char *link_name,
 *
 * Returns an array of 128 routes, one for each device,
 * based on device location.
-* The main goal is to equaly distribute the interrupts
+* The main goal is to equally distribute the interrupts
 * over the 4 existing ACPI links (works only for i440fx).
 * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
 *
@@ -2079,7 +2079,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
 }

 /*
- * Insert DMAR scope for PCI bridges and endpoint devcie
+ * Insert DMAR scope for PCI bridges and endpoint devices
  */
 static void
 insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque)
@@ -259,7 +259,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
 pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
 PCI_STATUS_SIG_TARGET_ABORT);
 }
-/* log an illegal comand event
+/* log an illegal command event
  * @addr : address of illegal command
  */
 static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
@@ -767,7 +767,7 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
 break;
 case AMDVI_MMIO_COMMAND_BASE:
 amdvi_mmio_reg_write(s, size, val, addr);
-/* FIXME - make sure System Software has finished writing incase
+/* FIXME - make sure System Software has finished writing in case
  * it writes in chucks less than 8 bytes in a robust way.As for
  * now, this hacks works for the linux driver
  */
@@ -52,7 +52,7 @@

 /*
  * PCI bus number (or SID) is not reliable since the device is usaully
- * initalized before guest can configure the PCI bridge
+ * initialized before guest can configure the PCI bridge
  * (SECONDARY_BUS_NUMBER).
  */
 struct vtd_as_key {
@@ -1694,7 +1694,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
  * """
  *
  * We enable per as memory region (iommu_ir_fault) for catching
- * the tranlsation for interrupt range through PASID + PT.
+ * the translation for interrupt range through PASID + PT.
  */
 if (pt && as->pasid != PCI_NO_PASID) {
 memory_region_set_enabled(&as->iommu_ir_fault, true);
@@ -1156,7 +1156,7 @@ static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,

 /*
  * This matches the barrier in copy_to_ring() (or the guest's
- * equivalent) betweem writing the data to the ring and updating
+ * equivalent) between writing the data to the ring and updating
  * rsp_prod. It protects against the pathological case (which
  * again I think never happened except on Alpha) where our
  * subsequent writes to the ring could *cross* the read of
@@ -1436,7 +1436,7 @@ static void save_node(gpointer key, gpointer value, gpointer opaque)
 /*
  * If we already wrote this node, refer to the previous copy.
  * There's no rename/move in XenStore, so all we need to find
- * it is the tx_id of the transation in which it exists. Which
+ * it is the tx_id of the transaction in which it exists. Which
  * may be the root tx.
  */
 if (n->serialized_tx != XBT_NULL) {
hw/i386/pc.c
@@ -436,7 +436,7 @@ static uint64_t ioport80_read(void *opaque, hwaddr addr, unsigned size)
 return 0xffffffffffffffffULL;
 }

-/* MSDOS compatibility mode FPU exception support */
+/* MS-DOS compatibility mode FPU exception support */
 static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data,
 unsigned size)
 {
@@ -1746,16 +1746,16 @@ static void pc_machine_set_max_fw_size(Object *obj, Visitor *v,
 }

 /*
- * We don't have a theoretically justifiable exact lower bound on the base
- * address of any flash mapping. In practice, the IO-APIC MMIO range is
- * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free
- * only 18MB-4KB below 4G. For now, restrict the cumulative mapping to 8MB in
- * size.
- */
+ * We don't have a theoretically justifiable exact lower bound on the base
+ * address of any flash mapping. In practice, the IO-APIC MMIO range is
+ * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free
+ * only 18MiB-4KiB below 4GiB. For now, restrict the cumulative mapping to
+ * 16MiB in size.
+ */
 if (value > 16 * MiB) {
 error_setg(errp,
 "User specified max allowed firmware size %" PRIu64 " is "
-"greater than 16MiB. If combined firwmare size exceeds "
+"greater than 16MiB. If combined firmware size exceeds "
 "16MiB the system may not boot, or experience intermittent"
 "stability issues.",
 value);
@@ -209,7 +209,7 @@ static void hid_pointer_sync(DeviceState *dev)
 prev->dz += curr->dz;
 curr->dz = 0;
 } else {
-/* prepate next (clear rel, copy abs + btns) */
+/* prepare next (clear rel, copy abs + btns) */
 if (hs->kind == HID_MOUSE) {
 next->xdx = 0;
 next->ydy = 0;
@@ -157,14 +157,14 @@ static uint16_t tsc2005_read(TSC2005State *s, int reg)
 s->reset = true;
 return ret;

-case 0x8: /* AUX high treshold */
+case 0x8: /* AUX high threshold */
 return s->aux_thr[1];
-case 0x9: /* AUX low treshold */
+case 0x9: /* AUX low threshold */
 return s->aux_thr[0];

-case 0xa: /* TEMP high treshold */
+case 0xa: /* TEMP high threshold */
 return s->temp_thr[1];
-case 0xb: /* TEMP low treshold */
+case 0xb: /* TEMP low threshold */
 return s->temp_thr[0];

 case 0xc: /* CFR0 */
@@ -186,17 +186,17 @@ static uint16_t tsc2005_read(TSC2005State *s, int reg)
 static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
 {
 switch (reg) {
-case 0x8: /* AUX high treshold */
+case 0x8: /* AUX high threshold */
 s->aux_thr[1] = data;
 break;
-case 0x9: /* AUX low treshold */
+case 0x9: /* AUX low threshold */
 s->aux_thr[0] = data;
 break;

-case 0xa: /* TEMP high treshold */
+case 0xa: /* TEMP high threshold */
 s->temp_thr[1] = data;
 break;
-case 0xb: /* TEMP low treshold */
+case 0xb: /* TEMP low threshold */
 s->temp_thr[0] = data;
 break;

@@ -191,7 +191,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr,
 cpu = attrs.requester_id;
 old_data = s->coreisr[cpu][index];
 s->coreisr[cpu][index] = old_data & ~val;
-/* write 1 to clear interrrupt */
+/* write 1 to clear interrupt */
 old_data &= val;
 irq = ctz32(old_data);
 while (irq != 32) {
@@ -1,5 +1,5 @@
 /*
- * QEMU Loongson Local I/O interrupt controler.
+ * QEMU Loongson Local I/O interrupt controller.
  *
  * Copyright (c) 2020 Huacai Chen <chenhc@lemote.com>
  * Copyright (c) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
@@ -68,7 +68,7 @@ static void omap_inth_sir_update(OMAPIntcState *s, int is_fiq)
 p_intr = 255;

 /* Find the interrupt line with the highest dynamic priority.
- * Note: 0 denotes the hightest priority.
+ * Note: 0 denotes the highest priority.
  * If all interrupts have the same priority, the default order is IRQ_N,
  * IRQ_N-1,...,IRQ_0. */
 for (j = 0; j < s->nbanks; ++j) {
@@ -988,7 +988,7 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
 */
 case VC_SBC_CONFIG: /* Store EOI configuration */
 /*
- * Configure store EOI if required by firwmare (skiboot has removed
+ * Configure store EOI if required by firmware (skiboot has removed
  * support recently though)
  */
 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
@@ -27,7 +27,7 @@
 #include "trace.h"

 /*
- * XIVE Virtualization Controller BAR and Thread Managment BAR that we
+ * XIVE Virtualization Controller BAR and Thread Management BAR that we
  * use for the ESB pages and the TIMA pages
  */
 #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
@@ -485,7 +485,7 @@ static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
-* triggered interrupt occuring while the VM is stopped. The previous
+* triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
@@ -551,7 +551,7 @@ static void kvmppc_xive_change_state_handler(void *opaque, bool running,

 /*
  * PQ is set to PENDING to possibly catch a triggered
- * interrupt occuring while the VM is stopped (hotplug event
+ * interrupt occurring while the VM is stopped (hotplug event
  * for instance) .
  */
 if (pq != XIVE_ESB_OFF) {
@@ -633,7 +633,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
 /* The KVM XIVE device should be in use */
 assert(xive->fd != -1);

-/* Restore the ENDT first. The targetting depends on it. */
+/* Restore the ENDT first. The targeting depends on it. */
 for (i = 0; i < xive->nr_ends; i++) {
 if (!xive_end_is_valid(&xive->endt[i])) {
 continue;
@@ -1608,7 +1608,7 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
-* of a single NVT notification, the process is abreviated and the
+* of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
@@ -542,7 +542,7 @@ static void xive2_router_realize(DeviceState *dev, Error **errp)

 /*
  * Notification using the END ESe/ESn bit (Event State Buffer for
- * escalation and notification). Profide futher coalescing in the
+ * escalation and notification). Profide further coalescing in the
  * Router.
  */
 static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
@@ -621,7 +621,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,

 /*
  * Check the END ESn (Event State Buffer for notification) for
- * even futher coalescing in the Router
+ * even further coalescing in the Router
  */
 if (!xive2_end_is_notify(&end)) {
 /* ESn[Q]=1 : end of notification */
@@ -702,7 +702,7 @@ do_escalation:

 /*
  * Check the END ESe (Event State Buffer for escalation) for even
- * futher coalescing in the Router
+ * further coalescing in the Router
  */
 if (!xive2_end_is_uncond_escalation(&end)) {
 /* ESe[Q]=1 : end of escalation notification */
@@ -301,7 +301,7 @@ static void handle_msg(IPMIBmcExtern *ibe)
 ipmi_debug("msg checksum failure\n");
 return;
 } else {
-ibe->inpos--; /* Remove checkum */
+ibe->inpos--; /* Remove checksum */
 }

 timer_del(ibe->extern_timer);
@@ -1,3 +1,14 @@
+/*
+ * CXL Type 3 (memory expander) device
+ *
+ * Copyright(C) 2020 Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-v2-only
+ */
+
 #include "qemu/osdep.h"
 #include "qemu/units.h"
 #include "qemu/error-report.h"
@@ -538,7 +549,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
 FIRST_ERROR_POINTER, cxl_err->type);
 } else {
 /*
- * If no more errors, then follow recomendation of PCI spec
+ * If no more errors, then follow recommendation of PCI spec
  * r6.0 6.2.4.2 to set the first error pointer to a status
  * bit that will never be used.
  */
@@ -697,7 +708,7 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
 PCI_BASE_ADDRESS_MEM_TYPE_64,
 &ct3d->cxl_dstate.device_registers);

-/* MSI(-X) Initailization */
+/* MSI(-X) Initialization */
 rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
 if (rc) {
 goto err_address_space_free;
@@ -706,7 +717,7 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
 msix_vector_use(pci_dev, i);
 }

-/* DOE Initailization */
+/* DOE Initialization */
 pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

 cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
@@ -1,3 +1,13 @@
+/*
+ * CXL Type 3 (memory expander) device QMP stubs
+ *
+ * Copyright(C) 2020 Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-v2-only
+ */

 #include "qemu/osdep.h"
 #include "qapi/error.h"
@@ -227,7 +227,7 @@ static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
  * have fixed frequencies and we can provide requested frequency
  * easily. However for CCM provided clocks (like IPG) each GPT
  * timer can have its own clock root.
- * This means we need additionnal information when calling this
+ * This means we need additional information when calling this
  * function to know the requester's identity.
  */
 uint32_t freq = 0;
@@ -246,7 +246,7 @@
 #define vT2CL 0x1000 /* [VIA only] Timer two counter low. */
 #define vT2CH 0x1200 /* [VIA only] Timer two counter high. */
 #define vSR 0x1400 /* [VIA only] Shift register. */
-#define vACR 0x1600 /* [VIA only] Auxilary control register. */
+#define vACR 0x1600 /* [VIA only] Auxiliary control register. */
 #define vPCR 0x1800 /* [VIA only] Peripheral control register. */
 /*
  * CHRP sez never ever to *write* this.
@@ -94,12 +94,12 @@ static void stm32f2xx_syscfg_write(void *opaque, hwaddr addr,
 switch (addr) {
 case SYSCFG_MEMRMP:
 qemu_log_mask(LOG_UNIMP,
-"%s: Changeing the memory mapping isn't supported " \
+"%s: Changing the memory mapping isn't supported " \
 "in QEMU\n", __func__);
 return;
 case SYSCFG_PMC:
 qemu_log_mask(LOG_UNIMP,
-"%s: Changeing the memory mapping isn't supported " \
+"%s: Changing the memory mapping isn't supported " \
 "in QEMU\n", __func__);
 return;
 case SYSCFG_EXTICR1:
@@ -155,7 +155,7 @@ stm32f4xx_syscfg_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " "
 stm32f4xx_syscfg_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 ""

 # stm32f4xx_exti.c
-stm32f4xx_exti_set_irq(int irq, int leve) "Set EXTI: %d to %d"
+stm32f4xx_exti_set_irq(int irq, int level) "Set EXTI: %d to %d"
 stm32f4xx_exti_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " "
 stm32f4xx_exti_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 ""

@@ -285,7 +285,7 @@ static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk)
 }

 /**
- * Compute and set the ouputs clocks periods.
+ * Compute and set the outputs clocks periods.
  * But do not propagate them further. Connected clocks
  * will not receive any updates (See zynq_slcr_compute_clocks())
  */
@@ -81,8 +81,8 @@
 #define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */
 #define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */
 #define GEM_MODID (0x000000FC / 4) /* Module ID reg */
-#define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */
-#define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */
+#define GEM_OCTTXLO (0x00000100 / 4) /* Octets transmitted Low reg */
+#define GEM_OCTTXHI (0x00000104 / 4) /* Octets transmitted High reg */
 #define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */
 #define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */
 #define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */
@@ -101,8 +101,8 @@
 #define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */
 #define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */
 #define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */
-#define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */
-#define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */
+#define GEM_OCTRXLO (0x00000150 / 4) /* Octets Received register Low */
+#define GEM_OCTRXHI (0x00000154 / 4) /* Octets Received register High */
 #define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */
 #define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */
 #define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */
@@ -954,7 +954,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 /* Is this destination MAC address "for us" ? */
 maf = gem_mac_address_filter(s, buf);
 if (maf == GEM_RX_REJECT) {
-return size; /* no, drop siliently b/c it's not an error */
+return size; /* no, drop silently b/c it's not an error */
 }

 /* Discard packets with receive length error enabled ? */
@@ -551,7 +551,7 @@ static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size)
 val = s->cam[s->regs[SONIC_CEP] & 0xf][SONIC_CAP0 - reg];
 }
 break;
-/* All other registers have no special contraints */
+/* All other registers have no special constraints */
 default:
 val = s->regs[reg];
 }
@@ -130,7 +130,7 @@

 #define E1000_GCR2 0x05B64 /* 3GIO Control Register 2 */
 #define E1000_FFLT_DBG 0x05F04 /* Debug Register */
-#define E1000_HICR 0x08F00 /* Host Inteface Control */
+#define E1000_HICR 0x08F00 /* Host Interface Control */

 #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
 #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
@@ -839,7 +839,7 @@ union e1000_rx_desc_packet_split {
 #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
 #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
 #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
 #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
 #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
 #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
@@ -365,7 +365,7 @@ void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr)
 } while (TRUE);

 /* Save the Buffer Descriptor Pointers to last bd that was not
- * succesfully closed */
+ * successfully closed */
 etsec->regs[TBPTR0 + ring_nbr].value = bd_addr;

 /* Set transmit halt THLTx */
@@ -364,7 +364,7 @@ union e1000_adv_rx_desc {
 /* Indicates that VF is still clear to send requests */
 #define E1000_VT_MSGTYPE_CTS 0x20000000
 #define E1000_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
 #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)

 #define E1000_VF_RESET 0x01 /* VF requests reset */
@@ -491,7 +491,7 @@ union e1000_adv_rx_desc {
 #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */

 #define E1000_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
 #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)

 #define E1000_VF_RESET 0x01 /* VF requests reset */
@@ -571,7 +571,7 @@ static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t si
 size += 4;
 crc = cpu_to_be32(crc32(~0, buf, size));
 crc_ptr = (uint8_t *)&crc;
-/* Huge frames are truncted. */
+/* Huge frames are truncated. */
 if (size > FEC_MAX_FRAME_SIZE) {
 size = FEC_MAX_FRAME_SIZE;
 flags |= FEC_BD_TR | FEC_BD_LG;
@@ -134,7 +134,7 @@ static ssize_t fp_port_receive_iov(NetClientState *nc, const struct iovec *iov,
 FpPort *port = qemu_get_nic_opaque(nc);

 /* If the port is disabled, we want to drop this pkt
- * now rather than queing it for later. We don't want
+ * now rather than queueing it for later. We don't want
  * any stale pkts getting into the device when the port
  * transitions to enabled.
  */
@@ -100,7 +100,7 @@ enum RTL8139_registers {
 MAC0 = 0, /* Ethernet hardware address. */
 MAR0 = 8, /* Multicast filter. */
 TxStatus0 = 0x10,/* Transmit status (Four 32bit registers). C mode only */
-/* Dump Tally Conter control register(64bit). C+ mode only */
+/* Dump Tally Counter control register(64bit). C+ mode only */
 TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
 RxBuf = 0x30,
 ChipCmd = 0x37,
@@ -361,7 +361,7 @@ static void smc91c111_writeb(void *opaque, hwaddr offset,
 case 4: case 5: case 6: case 7: case 8: case 9: /* IA */
 /* Not implemented. */
 return;
-case 10: /* Genral Purpose */
+case 10: /* General Purpose */
 SET_LOW(gpr, value);
 return;
 case 11:
@@ -1228,7 +1228,7 @@ static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
 case MIF_SMACHINE:
 return; /* No actual write */
 case MIF_CFG:
-/* Maintain the RO MDI bits to advertize an MDIO PHY on MDI0 */
+/* Maintain the RO MDI bits to advertise an MDIO PHY on MDI0 */
 val &= ~MIF_CFG_MDI1;
 val |= MIF_CFG_MDI0;
 break;
@@ -901,7 +901,7 @@ static void sunhme_reset(DeviceState *ds)
 /* Configure internal transceiver */
 s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

-/* Advetise auto, 100Mbps FD */
+/* Advertise auto, 100Mbps FD */
 s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
 s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
 MII_BMSR_AN_COMP;
@@ -1330,7 +1330,7 @@ static void virtio_net_detach_epbf_rss(VirtIONet *n)
 static bool virtio_net_load_ebpf(VirtIONet *n)
 {
 if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
-/* backend does't support steering ebpf */
+/* backend doesn't support steering ebpf */
 return false;
 }

@@ -2069,7 +2069,7 @@ static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
 + sizeof(struct ip6_header));
 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

-/* There is a difference between payload lenght in ipv4 and v6,
+/* There is a difference between payload length in ipv4 and v6,
 ip header is excluded in ipv6 */
 unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
 }
@@ -3818,7 +3818,7 @@ static void virtio_net_instance_init(Object *obj)

 /*
  * The default config_size is sizeof(struct virtio_net_config).
- * Can be overriden with virtio_net_set_config_size.
+ * Can be overridden with virtio_net_set_config_size.
  */
 n->config_size = sizeof(struct virtio_net_config);
 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
@@ -1889,7 +1889,7 @@ vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
 break;

 default:
-VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes", addr, size);
+VMW_CBPRN("Unknown read BAR1[%" PRIx64 "], %d bytes", addr, size);
 break;
 }

@@ -733,7 +733,7 @@ struct Vmxnet3_TxQueueDesc {
 struct Vmxnet3_RxQueueDesc {
 struct Vmxnet3_RxQueueCtrl ctrl;
 struct Vmxnet3_RxQueueConf conf;
-/* Driver read after a GET commad */
+/* Driver read after a GET command */
 struct Vmxnet3_QueueStatus status;
 struct UPT1_RxStats stats;
 u8 __pad[88]; /* 128 aligned */
@@ -17,7 +17,7 @@
 * Notes on coding style
 * ---------------------
 * While QEMU coding style prefers lowercase hexadecimals in constants, the
-* NVMe subsystem use thes format from the NVMe specifications in the comments
+* NVMe subsystem use this format from the NVMe specifications in the comments
 * (i.e. 'h' suffix instead of '0x' prefix).
 *
 * Usage
@@ -730,7 +730,7 @@ static inline void nvme_sg_unmap(NvmeSg *sg)
 }

 /*
- * When metadata is transfered as extended LBAs, the DPTR mapped into `sg`
+ * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
  * holds both data and metadata. This function splits the data and metadata
  * into two separate QSG/IOVs.
  */
@@ -7587,7 +7587,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 /*
  * NVM Express v1.3d, Section 4.1 state: "If host software writes
  * an invalid value to the Submission Queue Tail Doorbell or
- * Completion Queue Head Doorbell regiter and an Asynchronous Event
+ * Completion Queue Head Doorbell register and an Asynchronous Event
  * Request command is outstanding, then an asynchronous event is
  * posted to the Admin Completion Queue with a status code of
  * Invalid Doorbell Write Value."
@@ -51,7 +51,7 @@ struct EEPROMState {
 bool writable;
 /* cells changed since last START? */
 bool changed;
-/* during WRITE, # of address bytes transfered */
+/* during WRITE, # of address bytes transferred */
 uint8_t haveaddr;

 uint8_t *mem;
@@ -877,7 +877,7 @@ static struct {
 /*
  * Any sub-page size update to these table MRs will be lost during migration,
  * as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path.
- * In order to avoid the inconsistency in sizes save them seperately and
+ * In order to avoid the inconsistency in sizes save them separately and
  * migrate over in vmstate post_load().
  */
 static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len)
@@ -42,7 +42,7 @@ static void latch_registers(CXLDownstreamPort *dsp)
 CXL2_DOWNSTREAM_PORT);
 }

-/* TODO: Look at sharing this code acorss all CXL port types */
+/* TODO: Look at sharing this code across all CXL port types */
 static void cxl_dsp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
 uint32_t val, int len)
 {
@@ -262,7 +262,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
 .length = sslbis_size,
 },
 .data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH,
-.entry_base_unit = 1000,
+.entry_base_unit = 1024,
 },
 };

@@ -263,7 +263,7 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin)

 /*
  * First carry out normal swizzle to handle
- * multple root ports on a pxb instance.
+ * multiple root ports on a pxb instance.
  */
 pin = pci_swizzle_map_irq_fn(pci_dev, pin);

@@ -62,7 +62,7 @@
 #define DPRINTF(fmt, ...)
 #endif

-/* from linux soure code. include/asm-mips/mips-boards/bonito64.h*/
+/* from linux source code. include/asm-mips/mips-boards/bonito64.h*/
 #define BONITO_BOOT_BASE 0x1fc00000
 #define BONITO_BOOT_SIZE 0x00100000
 #define BONITO_BOOT_TOP (BONITO_BOOT_BASE + BONITO_BOOT_SIZE - 1)
@@ -488,7 +488,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)

 /*
  * If no inbound iATU windows are configured, HW defaults to
- * letting inbound TLPs to pass in. We emulate that by exlicitly
+ * letting inbound TLPs to pass in. We emulate that by explicitly
  * configuring first inbound window to cover all of target's
  * address space.
  *
@@ -503,7 +503,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)
 &designware_pci_host_msi_ops,
 root, "pcie-msi", 0x4);
 /*
- * We initially place MSI interrupt I/O region a adress 0 and
+ * We initially place MSI interrupt I/O region at address 0 and
  * disable it. It'll be later moved to correct offset and enabled
  * in designware_pcie_root_update_msi_mapping() as a part of
  * initialization done by guest OS
@@ -1,5 +1,5 @@
 /*
- * HP-PARISC Dino PCI chipset emulation, as in B160L and similiar machines
+ * HP-PARISC Dino PCI chipset emulation, as in B160L and similar machines
  *
  * (C) 2017-2019 by Helge Deller <deller@gmx.de>
  *
@@ -177,7 +177,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
 acpi_dsdt_add_pci_route_table(dev, cfg->irq);

 /*
- * Resources defined for PXBs are composed by the folling parts:
+ * Resources defined for PXBs are composed of the following parts:
  * 1. The resources the pci-brige/pcie-root-port need.
  * 2. The resources the devices behind pxb need.
  */
@@ -331,9 +331,9 @@ static void gt64120_update_pci_cfgdata_mapping(GT64120State *s)
 /*
  * The setting of the MByteSwap bit and MWordSwap bit in the PCI Internal
  * Command Register determines how data transactions from the CPU to/from
- * PCI are handled along with the setting of the Endianess bit in the CPU
+ * PCI are handled along with the setting of the Endianness bit in the CPU
  * Configuration Register. See:
- * - Table 16: 32-bit PCI Transaction Endianess
+ * - Table 16: 32-bit PCI Transaction Endianness
  * - Table 158: PCI_0 Command, Offset: 0xc00
  */

@@ -25,7 +25,7 @@
 * state associated with the child has an id, use it as QOM id.
 * Otherwise use object_typename[index] as QOM id.
 *
-* This helper does both operations at the same time because seting
+* This helper does both operations at the same time because setting
 * a new QOM child will erase the bus parent of the device. This happens
 * because object_unparent() will call object_property_del_child(),
 * which in turn calls the property release callback prop->release if
@@ -757,7 +757,7 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
 * We only support non-translate in top window.
 *
 * TODO: Venice/Murano support it on bottom window above 4G and
-* Naples suports it on everything
+* Naples supports it on everything
 */
 if (!(tve & PPC_BIT(51))) {
 phb3_error(phb, "xlate for invalid non-translate TVE");
@@ -281,7 +281,7 @@ static void phb3_msi_instance_init(Object *obj)
 object_property_allow_set_link,
 OBJ_PROP_LINK_STRONG);

-/* Will be overriden later */
+/* Will be overridden later */
 ics->offset = 0;
 }

@@ -207,7 +207,7 @@ static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index)
 start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);
 }

-/* TODO: Figure out how to implemet/decode AOMASK */
+/* TODO: Figure out how to implement/decode AOMASK */

 /* Check if it matches an enabled MMIO region in the PEC stack */
 if (memory_region_is_mapped(&phb->mmbar0) &&
@@ -391,7 +391,7 @@ static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val)
 case IODA3_TBL_MBT:
 *tptr = val;

-/* Copy accross the valid bit to the other half */
+/* Copy across the valid bit to the other half */
 phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull;
 phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val;

@@ -1408,7 +1408,7 @@ static void pnv_phb4_msi_write(void *opaque, hwaddr addr,
 return;
 }

-/* TODO: check PE/MSI assignement */
+/* TODO: check PE/MSI assignment */

 qemu_irq_pulse(phb->qirqs[src]);
 }
@@ -324,7 +324,7 @@ static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
 * it isn't implemented in qemu right now.
 * So just discard the error for now.
 * OS which cares of aer would receive errors via
-* native aer mechanims, so this wouldn't matter.
+* native aer mechanisms, so this wouldn't matter.
 */
 }

@@ -615,7 +615,7 @@ int shpc_init(PCIDevice *d, PCIBus *sec_bus, MemoryRegion *bar,
 }
 if (nslots > SHPC_MAX_SLOTS ||
 SHPC_IDX_TO_PCI(nslots) > PCI_SLOT_MAX) {
-/* TODO: report an error mesage that makes sense. */
+/* TODO: report an error message that makes sense. */
 return -EINVAL;
 }
 shpc->nslots = nslots;
@@ -738,7 +738,7 @@ static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now)
 decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next);

 /*
- * If large decrementer is enabled then the decrementer is signed extened
+ * If large decrementer is enabled then the decrementer is signed extended
  * to 64 bits, otherwise it is a 32 bit value.
  */
 if (env->spr[SPR_LPCR] & LPCR_LD) {
@@ -39,7 +39,7 @@
 #define TYPE_PREP_SYSTEMIO "prep-systemio"
 OBJECT_DECLARE_SIMPLE_TYPE(PrepSystemIoState, PREP_SYSTEMIO)

-/* Bit as defined in PowerPC Reference Plaform v1.1, sect. 6.1.5, p. 132 */
+/* Bit as defined in PowerPC Reference Platform v1.1, sect. 6.1.5, p. 132 */
 #define PREP_BIT(n) (1 << (7 - (n)))

 struct PrepSystemIoState {
@@ -2573,7 +2573,7 @@ static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
 return;
 }

-/* Detemine the VSMT mode to use: */
+/* Determine the VSMT mode to use: */
 if (vsmt_user) {
 if (spapr->vsmt < smp_threads) {
 error_setg(errp, "Cannot support VSMT mode %d"
@@ -3107,7 +3107,7 @@ static int spapr_kvm_type(MachineState *machine, const char *vm_type)
 {
     /*
      * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
-     * accomodate the 'HV' and 'PV' formats that exists in the
+     * accommodate the 'HV' and 'PV' formats that exists in the
      * wild. The 'auto' mode is being introduced already as
      * lower-case, thus we don't need to bother checking for
      * "AUTO".
@@ -4340,7 +4340,7 @@ spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
     CPUArchId *core_slot;
     MachineClass *mc = MACHINE_GET_CLASS(machine);

-    /* make sure possible_cpu are intialized */
+    /* make sure possible_cpu are initialized */
     mc->possible_cpu_arch_ids(machine);
     /* get CPU core slot containing thread that matches cpu_index */
     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
@@ -5034,7 +5034,7 @@ static void spapr_machine_2_12_class_options(MachineClass *mc)

     /* We depend on kvm_enabled() to choose a default value for the
      * hpt-max-page-size capability. Of course we can't do it here
-     * because this is too early and the HW accelerator isn't initialzed
+     * because this is too early and the HW accelerator isn't initialized
      * yet. Postpone this to machine init (see default_caps_with_cpu()).
      */
     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
@@ -1615,7 +1615,7 @@ static void hypercall_register_types(void)
     spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
                              h_get_cpu_characteristics);

-    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
+    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
      * here between the "CI" and the "CACHE" variants, they will use whatever
      * mapping attributes qemu is using. When using KVM, the kernel will
      * enforce the attributes more strongly
@@ -378,7 +378,7 @@ static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,

     /*
      * Currently continue token should be zero qemu has already bound
-     * everything and this hcall doesnt return H_BUSY.
+     * everything and this hcall doesn't return H_BUSY.
      */
     if (continue_token > 0) {
         return H_P5;
@@ -589,7 +589,7 @@ void spapr_nvdimm_finish_flushes(void)
      * Called on reset path, the main loop thread which calls
      * the pending BHs has gotten out running in the reset path,
      * finally reaching here. Other code path being guest
-     * h_client_architecture_support, thats early boot up.
+     * h_client_architecture_support, that's early boot up.
      */
     nvdimms = nvdimm_get_device_list();
     for (list = nvdimms; list; list = list->next) {
@@ -78,7 +78,7 @@ int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb,
         * call. Now we just need to check the validity of the PCI
         * pass-through devices (vfio-pci) under this sphb bus.
         * We have already validated that all the devices under this sphb
-        * are from same iommu group (within same PE) before comming here.
+        * are from same iommu group (within same PE) before coming here.
         *
         * Prior to linux commit 98ba956f6a389 ("powerpc/pseries/eeh:
         * Rework device EEH PE determination") kernel would call
@@ -202,7 +202,7 @@ static void exynos4210_rtc_update_freq(Exynos4210RTCState *s,
     uint32_t freq;

     freq = s->freq;
-    /* set frequncy for time generator */
+    /* set frequency for time generator */
     s->freq = RTC_BASE_FREQ / (1 << TICCKSEL(reg_value));

     if (freq != s->freq) {
@@ -114,7 +114,7 @@ static const uint8_t ipr_table[NR_IRQS] = {
 };

 /*
- * Level triggerd IRQ list
+ * Level triggered IRQ list
  * Not listed IRQ is Edge trigger.
  * See "11.3.1 Interrupt Vector Table" in hardware manual.
  */
@@ -1321,7 +1321,7 @@ again:
         }
         trace_lsi_execute_script_io_selected(id,
                                  insn & (1 << 3) ? " ATN" : "");
-        /* ??? Linux drivers compain when this is set. Maybe
+        /* ??? Linux drivers complain when this is set. Maybe
            it only applies in low-level mode (unimplemented).
            lsi_script_scsi_interrupt(s, LSI_SIST0_CMP, 0); */
         s->select_tag = id << 8;
@@ -65,7 +65,7 @@
 #define MFI_IQPH 0xc4 /* Inbound queue port (high bytes) */
 #define MFI_DIAG 0xf8 /* Host diag */
 #define MFI_SEQ 0xfc /* Sequencer offset */
-#define MFI_1078_EIM 0x80000004 /* 1078 enable intrrupt mask */
+#define MFI_1078_EIM 0x80000004 /* 1078 enable interrupt mask */
 #define MFI_RMI 0x2 /* reply message interrupt */
 #define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */
 #define MFI_ODC 0x4 /* outbound doorbell change interrupt */
@@ -113,7 +113,7 @@
 #define SH7750_TTB SH7750_P4_REG32(SH7750_TTB_REGOFS)
 #define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS)

-/* TLB exeption address register - TEA */
+/* TLB exception address register - TEA */
 #define SH7750_TEA_REGOFS 0x00000c /* offset */
 #define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS)
 #define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS)
@@ -183,19 +183,19 @@
 #define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */
 #define SH7750_TRA_IMM_S 2

-/* Exeption event register - EXPEVT */
+/* Exception event register - EXPEVT */
 #define SH7750_EXPEVT_REGOFS 0x000024
 #define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS)
 #define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS)

-#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */
+#define SH7750_EXPEVT_EX 0x00000fff /* Exception code */
 #define SH7750_EXPEVT_EX_S 0

 /* Interrupt event register */
 #define SH7750_INTEVT_REGOFS 0x000028
 #define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS)
 #define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS)
-#define SH7750_INTEVT_EX 0x00000fff /* Exeption code */
+#define SH7750_INTEVT_EX 0x00000fff /* Exception code */
 #define SH7750_INTEVT_EX_S 0

 /*
@@ -1274,15 +1274,15 @@
 /*
  * User Break Controller registers
  */
-#define SH7750_BARA 0x200000 /* Break address regiser A */
-#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */
-#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */
-#define SH7750_BARB 0x20000c /* Break address regiser B */
-#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */
-#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */
-#define SH7750_BASRB 0x000018 /* Break ASID regiser B */
-#define SH7750_BDRB 0x200018 /* Break data regiser B */
-#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */
+#define SH7750_BARA 0x200000 /* Break address register A */
+#define SH7750_BAMRA 0x200004 /* Break address mask register A */
+#define SH7750_BBRA 0x200008 /* Break bus cycle register A */
+#define SH7750_BARB 0x20000c /* Break address register B */
+#define SH7750_BAMRB 0x200010 /* Break address mask register B */
+#define SH7750_BBRB 0x200014 /* Break bus cycle register B */
+#define SH7750_BASRB 0x000018 /* Break ASID register B */
+#define SH7750_BDRB 0x200018 /* Break data register B */
+#define SH7750_BDMRB 0x20001c /* Break data mask register B */
 #define SH7750_BRCR 0x200020 /* Break control register */

 #define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */
@@ -1110,7 +1110,7 @@ void smbios_get_tables(MachineState *ms,
         dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) / MAX_DIMM_SZ;

         /*
-         * The offset determines if we need to keep additional space betweeen
+         * The offset determines if we need to keep additional space between
          * table 17 and table 19 header handle numbers so that they do
          * not overlap. For example, for a VM with larger than 8 TB guest
          * memory and DIMM like chunks of 16 GiB, the default space between
@@ -163,7 +163,7 @@
     FIELD(GQSPI_CNFG, ENDIAN, 26, 1)
     /* Poll timeout not implemented */
     FIELD(GQSPI_CNFG, EN_POLL_TIMEOUT, 20, 1)
-    /* QEMU doesnt care about any of these last three */
+    /* QEMU doesn't care about any of these last three */
     FIELD(GQSPI_CNFG, BR, 3, 3)
     FIELD(GQSPI_CNFG, CPH, 2, 1)
     FIELD(GQSPI_CNFG, CPL, 1, 1)
@@ -469,7 +469,7 @@ static void xlnx_zynqmp_qspips_flush_fifo_g(XlnxZynqMPQSPIPS *s)

         imm = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA);
         if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) {
-            /* immedate transfer */
+            /* immediate transfer */
             if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) ||
                 ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) {
                 s->regs[R_GQSPI_DATA_STS] = 1;
@@ -768,7 +768,7 @@ static void xilinx_spips_check_zero_pump(XilinxSPIPS *s)
      */
     while (s->regs[R_TRANSFER_SIZE] &&
            s->rx_fifo.num + s->tx_fifo.num < RXFF_A_Q - 3) {
-        /* endianess just doesn't matter when zero pumping */
+        /* endianness just doesn't matter when zero pumping */
         tx_data_bytes(&s->tx_fifo, 0, 4, false);
         s->regs[R_TRANSFER_SIZE] &= ~0x03ull;
         s->regs[R_TRANSFER_SIZE] -= 4;
@@ -837,7 +837,7 @@ static void ospi_do_ind_read(XlnxVersalOspi *s)
     /* Continue to read flash until we run out of space in sram */
     while (!ospi_ind_op_completed(op) &&
            !fifo8_is_full(&s->rx_sram)) {
-        /* Read reqested number of bytes, max bytes limited to size of sram */
+        /* Read requested number of bytes, max bytes limited to size of sram */
         next_b = ind_op_next_byte(op);
         end_b = next_b + fifo8_num_free(&s->rx_sram);
         end_b = MIN(end_b, ind_op_end_byte(op));
@@ -236,7 +236,7 @@ static void watchdog_hit(void *opaque)
 {
     ETRAXTimerState *t = opaque;
     if (t->wd_hits == 0) {
-        /* real hw gives a single tick before reseting but we are
+        /* real hw gives a single tick before resetting but we are
            a bit friendlier to compensate for our slower execution. */
         ptimer_set_count(t->ptimer_wd, 10);
         ptimer_run(t->ptimer_wd, 1);
@@ -115,7 +115,7 @@ static int elapsed_time(RTMRState *tmr, int ch, int64_t delta)
         et = tmr->div_round[ch] / divrate;
         tmr->div_round[ch] %= divrate;
     } else {
-        /* disble clock. so no update */
+        /* disable clock. so no update */
         et = 0;
     }
     return et;
@@ -19,7 +19,7 @@
  * specification.
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
  */
 #ifndef TPM_TPM_TIS_H
 #define TPM_TPM_TIS_H
Some files were not shown because too many files have changed in this diff.