/qemu/hw/sensor/ |
max31785.c |
    175  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    181  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    187  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    193  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    263  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    269  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    275  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    281  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    287  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    293  if (pmdev->page <= MAX31785_MAX_FAN_PAGE) {  in max31785_read_byte()
    [all …]
|
max34451.c |
    211  if (pmdev->page < 16) {  in max34451_read_byte()
    217  if (pmdev->page < 16) {  in max34451_read_byte()
    223  if (pmdev->page < 12) {  in max34451_read_byte()
    229  if (pmdev->page < 12) {  in max34451_read_byte()
    235  if (pmdev->page < 12) {  in max34451_read_byte()
    241  if (pmdev->page < 16) {  in max34451_read_byte()
    271  if (pmdev->page < 12) {  in max34451_read_byte()
    289  if (15 < pmdev->page && pmdev->page < 21) {  in max34451_read_byte()
    367  if (15 < pmdev->page && pmdev->page < 21) {  in max34451_read_byte()
    486  if (15 < pmdev->page && pmdev->page < 21) {  in max34451_write_data()
    [all …]
|
/qemu/tests/tcg/multiarch/linux/ |
linux-madvise.c |
      9  char *page;  in test_anonymous() local
     13  assert(page != MAP_FAILED);  in test_anonymous()
     20  *page = 42;  in test_anonymous()
     21  ret = madvise(page, pagesize, MADV_DONTNEED);  in test_anonymous()
     23  assert(*page == 0);  in test_anonymous()
     25  ret = munmap(page, pagesize);  in test_anonymous()
     35  char *page;  in test_file() local
     48  assert(page != MAP_FAILED);  in test_file()
     55  *page = 0;  in test_file()
     58  assert(*page == c);  in test_file()
    [all …]
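
The hits above trace the anonymous-mapping half of the test. As a minimal, self-contained sketch of the semantics being asserted (assuming a Linux host: MADV_DONTNEED on a private anonymous mapping discards the contents, so the page reads back as zeroes):

        #include <assert.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
            size_t pagesize = getpagesize();
            char *page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            assert(page != MAP_FAILED);

            *page = 42;                                    /* dirty the page */
            assert(madvise(page, pagesize, MADV_DONTNEED) == 0);
            assert(*page == 0);                            /* contents discarded */

            return munmap(page, pagesize);
        }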
|
/qemu/hw/intc/ |
riscv_imsic.c |
     53  base = page * imsic->num_irqs;  in riscv_imsic_topei()
     84  if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {  in riscv_imsic_update()
    104  riscv_imsic_update(imsic, page);  in riscv_imsic_eidelivery_rmw()
    122  riscv_imsic_update(imsic, page);  in riscv_imsic_eithreshold_rmw()
    140  base = page * imsic->num_irqs;  in riscv_imsic_topei_rmw()
    146  riscv_imsic_update(imsic, page);  in riscv_imsic_topei_rmw()
    196  riscv_imsic_update(imsic, page);  in riscv_imsic_eix_rmw()
    214  page = 0;  in riscv_imsic_rmw()
    222  page = vgein;  in riscv_imsic_rmw()
    227  page = 0;  in riscv_imsic_rmw()
    [all …]
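
The `base = page * imsic->num_irqs` hits show how the IMSIC flattens its per-interrupt-file state: each file ("page") owns a num_irqs-sized slice of one array. A hedged sketch of the top-pending scan that riscv_imsic_topei() performs over such a slice; the names and the enabled-and-pending encoding are illustrative, and the real function packs more into its return value. In the IMSIC, a lower identity means a higher priority:

        #include <stdint.h>

        /* eistate[] flattens per-file state: file-local identity `id` of
         * interrupt file `page` lives at eistate[page * num_irqs + id].
         * A non-zero entry stands for "enabled and pending" here. */
        static uint32_t topei_sketch(const uint32_t *eistate, uint32_t num_irqs,
                                     uint32_t page, uint32_t threshold)
        {
            uint32_t base = page * num_irqs;
            uint32_t id;

            for (id = 1; id < num_irqs; id++) {     /* identity 0 is reserved */
                if (eistate[base + id] &&
                    (!threshold || id < threshold)) {
                    return id;                      /* lowest id wins */
                }
            }
            return 0;                               /* nothing to deliver */
        }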
|
/qemu/include/exec/ |
ram_addr.h |
    159  unsigned long end, page;  in cpu_physical_memory_get_dirty() local
    173  base = page - offset;  in cpu_physical_memory_get_dirty()
    174  while (page < end) {  in cpu_physical_memory_get_dirty()
    184  page = next;  in cpu_physical_memory_get_dirty()
    199  unsigned long end, page;  in cpu_physical_memory_all_dirty() local
    214  base = page - offset;  in cpu_physical_memory_all_dirty()
    215  while (page < end) {  in cpu_physical_memory_all_dirty()
    224  page = next;  in cpu_physical_memory_all_dirty()
    293  unsigned long end, page;  in cpu_physical_memory_set_dirty_range() local
    328  page = next;  in cpu_physical_memory_set_dirty_range()
    [all …]
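
These helpers all share one shape: convert an address range into a [page, end) interval of page indices, then walk a dirty bitmap across it. A simplified sketch of that walk, using a flat unsigned long bitmap rather than QEMU's chunked DirtyMemoryBlocks (names are illustrative):

        #include <limits.h>
        #include <stdbool.h>
        #include <stddef.h>

        #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

        /* true if any page index in [page, end) has its bit set */
        static bool range_dirty_sketch(const unsigned long *bitmap,
                                       size_t page, size_t end)
        {
            while (page < end) {
                size_t word = page / BITS_PER_LONG;
                size_t bit  = page % BITS_PER_LONG;

                if (bitmap[word] & (1UL << bit)) {
                    return true;
                }
                page++;
            }
            return false;
        }

The real helpers test whole words at a time and skip ahead with find-next-bit logic instead of probing bit by bit; the interval bookkeeping is the same.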
|
/qemu/tests/qtest/ |
aspeed_smc-test.c |
    206  uint32_t page[FLASH_PAGE_SIZE / 4];  in assert_page_mem() local
    207  read_page_mem(addr, page);  in assert_page_mem()
    216  uint32_t page[FLASH_PAGE_SIZE / 4];  in test_erase_sector() local
    243  read_page(some_page_addr, page);  in test_erase_sector()
    256  read_page(some_page_addr, page);  in test_erase_sector()
    294  read_page(some_page_addr, page);  in test_erase_all()
    305  read_page(some_page_addr, page);  in test_erase_all()
    335  read_page(my_page_addr, page);  in test_write_page()
    341  read_page(some_page_addr, page);  in test_write_page()
    378  read_page_mem(my_page_addr, page);  in test_read_page_mem()
    [all …]
|
/qemu/hw/display/ |
xenfb.c |
    119  if (c->page) {  in common_unbind()
    130  struct xenkbd_page *page = xenfb->c.page;  in xenfb_kbd_event() local
    135  if (!page)  in xenfb_kbd_event()
    416  struct xenkbd_page *page = xenfb->c.page;  in input_event() local
    419  if (page->out_prod == page->out_cons)  in input_event()
    421  page->out_cons = page->out_prod;  in input_event()
    439  struct xenfb_page *page = xenfb->c.page;  in xenfb_map_fb() local
    675  struct xenfb_page *page = xenfb->c.page;  in xenfb_queue_full() local
    678  if (!page)  in xenfb_queue_full()
    689  struct xenfb_page *page = xenfb->c.page;  in xenfb_send_event() local
    [all …]
|
/qemu/include/hw/xen/interface/io/ |
fbif.h |
    101  #define XENFB_IN_RING(page) \  argument
    102      ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
    103  #define XENFB_IN_RING_REF(page, idx) \  argument
    104      (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
    109  #define XENFB_OUT_RING(page) \  argument
    110      ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
    111  #define XENFB_OUT_RING_REF(page, idx) \  argument
    112      (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
|
kbdif.h |
    530  #define XENKBD_IN_RING(page) \  argument
    531      ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
    532  #define XENKBD_IN_RING_REF(page, idx) \  argument
    533      (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
    538  #define XENKBD_OUT_RING(page) \  argument
    539      ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
    540  #define XENKBD_OUT_RING_REF(page, idx) \  argument
    541      (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
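
fbif.h and kbdif.h share the same ring convention: prod/cons are free-running indices kept in the shared page, and the _RING_REF macros mask an index into the fixed-size event array embedded in that page. A hedged sketch of the consumer side for the keyboard in-ring; the in_cons/in_prod field names follow kbdif.h, handle_event() is a hypothetical callback, and a real consumer must place memory barriers around the index accesses:

        #include <stdint.h>
        /* assumes the struct and macro definitions from kbdif.h above */

        extern void handle_event(union xenkbd_in_event *ev);  /* hypothetical */

        static void drain_in_ring_sketch(struct xenkbd_page *page)
        {
            uint32_t prod = page->in_prod;   /* read once; rmb() belongs here */
            uint32_t cons = page->in_cons;

            while (cons != prod) {
                union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, cons);
                handle_event(ev);
                cons++;                      /* free-running; macro does the mask */
            }
            page->in_cons = cons;            /* publish; wmb() belongs before this */
        }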
|
/qemu/tests/tcg/s390x/ |
signals-s390x.c |
     61  void *page;  in handle_signal() local
     80  page = mmap(expected.addr, 4096, PROT_READ | PROT_WRITE,  in handle_signal()
     82  if (page != expected.addr) {  in handle_signal()
    114  unsigned long *page;  in check_sigsegv() local
    119  page = mmap(NULL, 4096, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  in check_sigsegv()
    120  assert(page != MAP_FAILED);  in check_sigsegv()
    123  err = munmap(page, 4096);  in check_sigsegv()
    126  addr = page + (val & 0x1ff);  in check_sigsegv()
    129  expected.addr = page;  in check_sigsegv()
    140  err = munmap(page, 4096);  in check_sigsegv()
|
/qemu/tests/tcg/aarch64/ |
test-826.c |
     25  void *page;  in main() local
     33  page = mmap(0, getpagesize(), PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);  in main()
     34  if (page == MAP_FAILED) {  in main()
     40  expected = page + ofs;  in main()
     47  : : "r"(page), "r"(ofs) : "v0", "v1");  in main()
|
/qemu/accel/tcg/ |
cputlb.c |
     449  page &= mask;  in tlb_hit_page_mask_anyprot()
     522  if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {  in tlb_flush_page_locked()
     711  vaddr page = addr + i;  in tlb_flush_range_locked() local
    1307  if (cmp == page) {  in victim_tlb_hit()
    1744  l->page[0].addr = addr;  in mmu_lookup()
    1746  l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;  in mmu_lookup()
    1747  l->page[1].size = 0;  in mmu_lookup()
    1763  l->page[1].size = l->page[0].size - size0;  in mmu_lookup()
    1776  flags = l->page[0].flags | l->page[1].flags;  in mmu_lookup()
    2422  first = l.page[0].size;  in do_ld16_mmu()
    [all …]
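
The mmu_lookup() hits show the page-crossing split: page[1].addr is the page-aligned start of the second page, and the access size is divided between the two entries. A standalone sketch of that arithmetic (the TARGET_PAGE_* values here are illustrative stand-ins for the target's real ones):

        #include <stddef.h>
        #include <stdint.h>

        #define TARGET_PAGE_SIZE 4096
        #define TARGET_PAGE_MASK (~(uintptr_t)(TARGET_PAGE_SIZE - 1))

        typedef struct { uintptr_t addr; size_t size; } PageRange;

        static void split_access_sketch(uintptr_t addr, size_t size, PageRange p[2])
        {
            uintptr_t last = addr + size - 1;

            p[0].addr = addr;
            p[0].size = size;
            p[1].addr = 0;
            p[1].size = 0;

            if ((addr & TARGET_PAGE_MASK) != (last & TARGET_PAGE_MASK)) {
                p[1].addr = last & TARGET_PAGE_MASK;   /* start of second page */
                p[0].size = p[1].addr - addr;          /* bytes on first page  */
                p[1].size = size - p[0].size;          /* remainder            */
            }
        }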
|
/qemu/contrib/plugins/ |
hotpages.c |
    113  uint64_t page;  in vcpu_haddr() local
    119  page = vaddr;  in vcpu_haddr()
    125  page = (uint64_t) qemu_plugin_hwaddr_phys_addr(hwaddr);  in vcpu_haddr()
    127  page = vaddr;  in vcpu_haddr()
    130  page &= ~page_mask;  in vcpu_haddr()
    133  count = (PageCounters *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page));  in vcpu_haddr()
    137  count->page_address = page;  in vcpu_haddr()
    138  g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) count);  in vcpu_haddr()
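
The plugin's counting pattern is fully visible in the hits: mask the address down to its page, then look up or create a per-page counter in a GHashTable. A condensed sketch of that pattern (struct and key choice mirror the plugin; the `accesses` field is illustrative, and locking is omitted):

        #include <glib.h>
        #include <stdint.h>

        typedef struct {
            uint64_t page_address;
            uint64_t accesses;
        } PageCounters;

        static void count_access_sketch(GHashTable *pages, uint64_t addr,
                                        uint64_t page_mask)
        {
            uint64_t page = addr & ~page_mask;   /* page-align the address */
            PageCounters *count =
                g_hash_table_lookup(pages, GUINT_TO_POINTER(page));

            if (!count) {
                count = g_new0(PageCounters, 1);
                count->page_address = page;
                g_hash_table_insert(pages, GUINT_TO_POINTER(page), count);
            }
            count->accesses++;
        }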
|
/qemu/docs/devel/migration/ |
dirty-limit.rst |
      4  The dirty limit, short for dirty page rate upper limit, is a new capability
     36  page rate value and the corresponding upper limit of the VM:
     37  The dirty page rate is calculated via the KVM dirty ring mechanism,
     39  last KVM_EXIT_DIRTY_RING_FULL exception; the dirty page rate upper
     46  computation must be done carefully in order to reduce the dirty page
     49  subtract sleep time based on the ratio of the current dirty page rate
     50  to the limit, which is used when the current dirty page rate is far
     52  the current dirty page rate is close to the limit.
     69  algorithm will restrict virtual CPUs as needed to keep their dirty page
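
The excerpt describes a proportional controller: measure the dirty page rate from the KVM dirty ring, then grow or shrink each vCPU's sleep time according to how far the measured rate is from the configured limit. A hedged sketch of one adjustment step; the names, the 10% dead band, and the +1 nudge are illustrative, not QEMU's actual constants (the limit is assumed non-zero):

        #include <stdint.h>

        static int64_t adjust_sleep_sketch(int64_t sleep_us,
                                           uint64_t rate, uint64_t limit)
        {
            if (rate > limit) {
                /* over the limit: lengthen the sleep proportionally */
                sleep_us += (int64_t)(sleep_us * (rate - limit) / limit) + 1;
            } else if (rate < limit * 9 / 10) {
                /* comfortably under the limit: shorten it proportionally */
                sleep_us -= (int64_t)(sleep_us * (limit - rate) / limit);
            }
            return sleep_us > 0 ? sleep_us : 0;
        }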
|
postcopy.rst |
     88  on processing the blob. With this thread able to process page
    123  When postcopy starts, the source sends the page discard data and then
    138  however when a page request is received from the destination, the dirty page
    158  | (page request)
    161  listen thread: --- page -- page -- page -- page -- page --
    186  page data until the end of migration.
    188  Source side page bitmap
    192  where each bit indicates that the page is 'dirty' - i.e. needs
    238  running, and it will not be impacted by any page access to pages that
    245  configurations of the guest. For example, with async page fault
    [all …]
|
/qemu/docs/specs/ |
acpi_nvdimm.rst |
     90  This page is RAM-based and is used to transfer data between _DSM
     95  ACPI writes _DSM Input Data (based on the offset in the page):
    119  QEMU writes Output Data (based on the offset in the page):
    128  ACPI writes the address of the memory page allocated by the BIOS to this
    134  4 bytes, the address of the memory page allocated by the BIOS.
    139  "MEMA" indicates the address of the memory page allocated by the BIOS.
    146  | to the page +----->| IO port 0x0a18 +------------+
    154  | to the page +<------+ _DSM +<-----+ the page indicated by the |
    165  | result from the page | | |
    181  in 1-page-sized increments, which are then concatenated and returned
    [all …]
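
The flow in the excerpt is: the BIOS allocates one RAM page, ACPI fills in the _DSM input at the start of that page, writes the page's address to IO port 0x0a18, and QEMU leaves the output in the same page. A hedged sketch of an input layout consistent with that description; the field names are illustrative, and the authoritative structures live in QEMU's hw/acpi/nvdimm.c:

        #include <stdint.h>

        /* one 4 KiB page shared between ACPI (_DSM) and QEMU */
        struct dsm_in_sketch {
            uint32_t handle;             /* which NVDIMM device is targeted */
            uint32_t revision;           /* _DSM revision id                */
            uint32_t function;           /* _DSM function number            */
            uint8_t  arg3[4096 - 12];    /* function-specific input payload */
        } __attribute__((packed));

        /* QEMU writes its output back into the same page, so one
         * page-sized buffer serves both directions of the call. */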
|
/qemu/docs/ |
xbzrle.txt |
     11  Instead of sending the changed guest memory page, this solution will send a
     17  The larger the cache size, the better the chances are that the page has already
     26  of the page, where zero represents an unchanged value.
     27  The page data delta is represented by zero and non-zero runs.
     35  page = zrun nzrun
     36  | zrun nzrun page
     45  retrieving the old page content from the cache (default size of 64MB). The
     46  receiving side uses the existing page's content and XBZRLE to decode the new
     47  page's content.
     77  misses. XBZRLE uses a counter as the age of each page. The counter will
     [all …]
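
The grammar at lines 35-36 says an encoded page is a sequence of alternating zero runs and non-zero runs over the XOR of old and new page contents. A hedged sketch of an encoder in that shape, using single-byte run lengths for brevity; real XBZRLE (migration/xbzrle.c) uses variable-length integer lengths and gives up if the encoding would not beat sending the raw page:

        #include <stdint.h>
        #include <string.h>

        /* out must be comfortably larger than len (each 255-byte run can
         * add two length bytes); returns the encoded size */
        static size_t xbzrle_encode_sketch(const uint8_t *oldp,
                                           const uint8_t *newp,
                                           size_t len, uint8_t *out)
        {
            size_t i = 0, o = 0;

            while (i < len) {
                size_t run;

                /* zero run: bytes where old XOR new == 0 (unchanged) */
                for (run = 0; i + run < len && run < 255 &&
                              oldp[i + run] == newp[i + run]; run++) {
                }
                out[o++] = (uint8_t)run;
                i += run;

                /* non-zero run: changed bytes, emitted verbatim */
                for (run = 0; i + run < len && run < 255 &&
                              oldp[i + run] != newp[i + run]; run++) {
                }
                out[o++] = (uint8_t)run;
                memcpy(out + o, newp + i, run);
                o += run;
                i += run;
            }
            return o;
        }

The decoder walks the same alternation: read a zero-run length and skip that many bytes of the cached old page, then read a non-zero-run length and copy that many literal bytes.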
|
/qemu/hw/block/ |
nand.c |
    122  # define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE))  argument
    672  uint64_t off, page, sector, soff;
    692  page = PAGE(s->addr);
    724  uint64_t i, page, addr;
    740  for (; i < page; i ++)
    747  page = addr >> 9;
    748  if (blk_pread(s->blk, page << BDRV_SECTOR_BITS,
    753  if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS,
    768  page = i >> 9;
    769  if (blk_pread(s->blk, page << BDRV_SECTOR_BITS,
    [all …]
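
PAGE_START() encodes the on-disk layout: each NAND page is stored together with its out-of-band (OOB) area, and byte offsets are converted to 512-byte block-layer sectors with >> 9 (BDRV_SECTOR_BITS is 9 in QEMU). A small sketch of that arithmetic; the geometry values are illustrative, not a specific chip's:

        #include <stdint.h>

        #define NAND_PAGE_SIZE_SK 2048   /* illustrative page size */
        #define OOB_SIZE_SK       64     /* illustrative OOB size  */
        #define SECTOR_BITS       9      /* 512-byte sectors       */

        /* byte offset of page N when pages and OOB interleave on disk */
        static uint64_t page_start_sketch(uint64_t page)
        {
            return page * (NAND_PAGE_SIZE_SK + OOB_SIZE_SK);
        }

        /* block-layer sector containing that byte offset */
        static uint64_t page_sector_sketch(uint64_t page)
        {
            return page_start_sketch(page) >> SECTOR_BITS;
        }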
|
/qemu/bsd-user/netbsd/ |
target_os_stack.h |
     41  if (bprm->page[i]) {  in setup_initial_stack()
     43  if (!memcpy_to_target(stack_base, bprm->page[i],  in setup_initial_stack()
     48  g_free(bprm->page[i]);  in setup_initial_stack()
|
/qemu/bsd-user/openbsd/ |
target_os_stack.h |
     41  if (bprm->page[i]) {  in setup_initial_stack()
     43  if (!memcpy_to_target(stack_base, bprm->page[i],  in setup_initial_stack()
     48  g_free(bprm->page[i]);  in setup_initial_stack()
|
/qemu/hw/i386/kvm/ |
xen_overlay.c |
     52  void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa)  in xen_overlay_do_map_page() argument
     61  if (memory_region_is_mapped(page)) {  in xen_overlay_do_map_page()
     63  memory_region_del_subregion(get_system_memory(), page);  in xen_overlay_do_map_page()
     66  memory_region_set_address(page, gpa);  in xen_overlay_do_map_page()
     69  memory_region_add_subregion_overlap(get_system_memory(), gpa, page, 0);  in xen_overlay_do_map_page()
|
/qemu/hw/scsi/ |
mptconfig.c |
    759  const MPTSASConfigPage *page;  in mptsas_find_config_page() local
    763  page = &mptsas_config_pages[i];  in mptsas_find_config_page()
    764  if (page->type == type && page->number == number) {  in mptsas_find_config_page()
    765  return page;  in mptsas_find_config_page()
    777  const MPTSASConfigPage *page;  in mptsas_process_config() local
    812  page = mptsas_find_config_page(type, req->PageNumber);  in mptsas_process_config()
    829  if (!page) {  in mptsas_process_config()
    830  page = mptsas_find_config_page(type, 1);  in mptsas_process_config()
    831  if (page) {  in mptsas_process_config()
    880  length = page->mpt_config_build(s, &data, req->PageAddress);  in mptsas_process_config()
    [all …]
|
/qemu/migration/ |
ram.c |
     124  unsigned long page;  member
     483  pss->page = page;  in pss_init()
     720  pss->page = size;  in pss_find_next_dirty()
     733  pss->page = find_next_bit(bitmap, size, pss->page);  in pss_find_next_dirty()
    1313  pss->page >= rs->last_page) {  in find_dirty_block()
    1323  pss->page = 0;  in find_dirty_block()
    1808  unsigned long page;  in get_queued_page() local
    2054  pss->host_page_start = pss->page;  in pss_host_page_prepare()
    2284  rs->last_page = pss->page;  in ram_find_and_save_block()
    2551  unsigned long page;  in postcopy_chunk_hostpages_pass() local
    [all …]
|
/qemu/hw/net/ |
xen_nic.c |
    133  void *page;  in net_tx_packets() local
    189  page = xen_device_map_grant_refs(&netdev->xendev, &txreq.gref, 1,  in net_tx_packets()
    191  if (page == NULL) {  in net_tx_packets()
    203  memcpy(tmpbuf, page + txreq.offset, txreq.size);  in net_tx_packets()
    209  page + txreq.offset, txreq.size);  in net_tx_packets()
    211  xen_device_unmap_grant_refs(&netdev->xendev, page, &txreq.gref, 1,  in net_tx_packets()
    261  void *page;  in net_rx_packet() local
    286  page = xen_device_map_grant_refs(&netdev->xendev, &rxreq.gref, 1,  in net_rx_packet()
    288  if (page == NULL) {  in net_rx_packet()
    295  memcpy(page + NET_IP_ALIGN, buf, size);  in net_rx_packet()
    [all …]
|
/qemu/hw/misc/ |
pc-testdev.c |
    127  hwaddr page = 4096;  in test_flush_page_write() local
    128  void *a = cpu_physical_memory_map(data & ~0xffful, &page, false);  in test_flush_page_write()
    133  mprotect(a, page, PROT_NONE);  in test_flush_page_write()
    134  mprotect(a, page, PROT_READ|PROT_WRITE);  in test_flush_page_write()
    136  cpu_physical_memory_unmap(a, page, 0, 0);  in test_flush_page_write()
|