25 #include <sys/types.h>
41 #include <xenaccess/xenaccess.h>
42 #include <xenaccess/xa_private.h>
/*
 * Fragment of xen_vm_mem_xenaccess_attach (this chunk has extraction
 * gaps; interior lines are missing).  Initializes a XenAccess instance
 * for the target domain, reporting failure via verror.
 *
 * NOTE(review): the instance passed is &mstate->xa_instance, but every
 * other reference in this chunk uses xstate->xa_instance, and mstate is
 * declared elsewhere as a struct malloc_state * -- verify this is not a
 * typo for xstate.
 */
65 if (xa_init_vm_id_strict_noos(xstate->
id,&mstate->
xa_instance) == XA_FAILURE) {
68 verror(
"failed to init xa instance for dom %d\n",xstate->
id);
/*
 * Fragment of xen_vm_mem_xenaccess_init (extraction gaps; interior
 * lines are missing).  Pulls kernel-layout values out of the target's
 * config hash table and pushes them into the XenAccess instance so it
 * can walk the guest's Linux task/mm structures.
 *
 * NOTE(review): each g_hash_table_lookup result is fed straight to
 * strtol with no visible NULL check; a missing config key would make
 * strtol dereference NULL (UB).  Presumably the keys are guaranteed by
 * the caller, or the elided lines check val -- confirm.
 */
81 ADDR init_task_addr,pgd_addr;
83 OFFSET tasks_offset,pid_offset,mm_offset,pgd_offset;
/* Address of the kernel's init_task (head of the task list). */
94 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_INIT_TASK_ADDR");
96 init_task_addr = (
ADDR)strtol(val,NULL,0);
/* Address of the kernel page global directory. */
97 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_PGD_ADDR");
99 pgd_addr = (
ADDR)strtol(val,NULL,0);
/* struct task_struct field offsets (tasks list, pid, mm). */
102 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_TASKS_OFFSET");
104 tasks_offset = (
ADDR)strtol(val,NULL,0);
105 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_PID_OFFSET");
107 pid_offset = (
ADDR)strtol(val,NULL,0);
108 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_MM_OFFSET");
110 mm_offset = (
ADDR)strtol(val,NULL,0);
/* Offset of the pgd pointer within struct mm_struct. */
111 val = (
char *)g_hash_table_lookup(target->
config,
"OS_KERNEL_MM_PGD_OFFSET");
113 pgd_offset = (
ADDR)strtol(val,NULL,0);
/* Hand the parsed layout to the XenAccess instance. */
115 xstate->xa_instance.init_task = init_task_addr;
/* NOTE(review): page_offset is forced to 0 here -- presumably
   XenAccess recomputes it internally; confirm, since a wrong value
   breaks kernel virtual-address translation. */
116 xstate->xa_instance.page_offset = 0;
117 xstate->xa_instance.os.linux_instance.tasks_offset = tasks_offset;
118 xstate->xa_instance.os.linux_instance.pid_offset = pid_offset;
119 xstate->xa_instance.os.linux_instance.mm_offset = mm_offset;
120 xstate->xa_instance.os.linux_instance.pgd_offset = pgd_offset;
121 xstate->xa_instance.kpgd = pgd_addr;
/*
 * Fragment of xen_vm_mem_xenaccess_addr_v2p (extraction gaps).  The
 * XenAccess path apparently refuses 64-bit hosts; otherwise it resolves
 * the virtual address through the supplied page directory.
 */
160 verror(
"no XenAccess support for 64-bit host!\n");
164 tpaddr = xa_pagetable_lookup(&xstate->xa_instance,pgd,tvaddr,0);
/*
 * Fragment of xen_vm_mem_xenaccess_read_phys (extraction gaps; interior
 * lines missing).  Reads @length bytes at physical address @paddr by
 * mapping each spanned page with xa_access_pa and copying out of the
 * mapping into a freshly malloc'd buffer.
 */
183 unsigned long length,
184 unsigned char *buf) {
187 unsigned char *retval = NULL;
188 unsigned long npages;
189 unsigned long page_offset;
/* +1 leaves room for a NUL terminator.
   NOTE(review): the malloc result is not checked in the visible lines
   -- confirm a check exists in the elided code. */
199 retval = (
unsigned char *)
malloc(length+1);
/* Walk the spanned pages, copying the in-page portion of each. */
211 for (i = 0; i < npages; ++
i) {
212 mmap = xa_access_pa(&xstate->xa_instance,cur,&offset,
PROT_READ);
214 verror(
"failed to mmap paddr 0x%lx (for write to"
216 cur,paddr,strerror(errno));
/* First page: copy from page_offset to the end of the page.
   NOTE(review): if length < __PAGE_SIZE - page_offset this would
   overrun retval -- presumably an elided single-page branch prevents
   reaching here in that case; confirm. */
220 memcpy(retval + rc,mmap + page_offset,
__PAGE_SIZE - page_offset);
/* Last page: copy only the remaining (length - rc) bytes. */
223 else if (i == (npages - 1)) {
224 memcpy(retval + rc,mmap,(length - rc));
/*
 * Fragment of xen_vm_mem_xenaccess_write_phys (extraction gaps).
 * Writes @length bytes from @buf to physical address @paddr, one
 * mapped page at a time.
 *
 * NOTE(review): the page is mapped with PROT_READ but is then written
 * via memcpy below -- this looks like it should request write access
 * (PROT_READ|PROT_WRITE); confirm against xa_access_pa semantics.
 * The truncated error string also says "for write to", matching the
 * write intent.
 */
246 unsigned long length,
247 unsigned char *buf) {
249 unsigned long npages;
250 unsigned long page_offset;
267 for (i = 0; i < npages; ++
i) {
268 mmap = xa_access_pa(&xstate->xa_instance,cur,&offset,
PROT_READ);
270 verror(
"failed to mmap paddr 0x%lx (for write to"
272 cur,paddr,strerror(errno));
/* First page: fill from page_offset to the end of the page. */
276 memcpy(mmap + page_offset,buf + rc,
__PAGE_SIZE - page_offset);
/* Last page: write only the remaining (length - rc) bytes. */
279 else if (i == (npages - 1)) {
280 memcpy(mmap,buf + rc,(length - rc));
/*
 * Fragment of __xen_vm_mem_xenaccess_mmap_pages (extraction gaps).
 * Maps @size bytes of guest virtual memory at @addr, first trying
 * @pid's address space and apparently falling back to pid 0 (kernel)
 * on failure.  On success the caller receives the mapping plus the
 * in-page *offset and the page count in *npages.
 */
299 static unsigned char *
300 __xen_vm_mem_xenaccess_mmap_pages(xa_instance_t *xa_instance,
ADDR addr,
301 unsigned long size,uint32_t *offset,
302 int *npages,
int prot,
int pid) {
303 unsigned char *pages;
304 unsigned long page_size, page_offset;
/* Debug label; presumably switched to "large"/range in elided code. */
305 char *dstr =
"small";
307 page_size = xa_instance->page_size;
/* Offset of addr within its page (page_size assumed a power of 2). */
308 page_offset = addr & (page_size - 1);
/* Request fits inside one page: single-page accessor. */
310 if (size > 0 && size <= (page_size - page_offset)) {
312 pages = xa_access_user_va(xa_instance,addr,offset,pid,prot);
/* Fallback: retry against the kernel (pid 0) address space. */
317 pages = xa_access_user_va(xa_instance,addr,offset,0,prot);
/* Multi-page request: range accessor, same pid-then-0 retry. */
326 pages = xa_access_user_va_range(xa_instance,addr,size,offset,pid,prot);
334 pages = xa_access_user_va_range(xa_instance,addr,size,offset,0,prot);
/* Pages spanned by [offset, offset+size); the elided line after the
   modulo test presumably rounds up -- confirm. */
345 *npages = (*offset + size) / page_size;
346 if ((*offset + size) % page_size)
/*
 * Fragment of xen_vm_mem_xenaccess_read_tid (extraction gaps).  Reads
 * @target_length bytes -- or, when @target_length == 0, a
 * NUL-terminated string -- at virtual @addr in @pid's address space,
 * via __xen_vm_mem_xenaccess_mmap_pages, into a malloc'd buffer.
 */
363 unsigned long target_length,
364 unsigned char *buf) {
365 unsigned char *pages;
366 unsigned int offset = 0;
367 unsigned long length = target_length, size = 0;
368 unsigned long page_size;
369 unsigned char *retval = NULL;
370 unsigned int page_offset;
388 page_size = xstate->xa_instance.page_size;
389 page_offset = addr & (page_size - 1);
/* NOTE(review): %d is used for unsigned long target_length/length --
   format mismatch on LP64 hosts; should be %lu.  Applies to the
   debug lines below as well (harmless only if vdebug is benign). */
392 "read dom %d: addr=0x%"PRIxADDR" offset=%d len=%d pid=%d\n",
393 xstate->
id,addr,page_offset,target_length,pid);
397 pages = (
unsigned char *) \
398 __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,
399 length,&offset,&no_pages,
/* The helper's reported offset must equal our in-page offset. */
404 assert(offset == page_offset);
406 "read dom %d: addr=0x%"PRIxADDR" offset=%d pid=%d len=%d mapped pages=%d\n",
407 xstate->
id,addr,page_offset,pid,length,no_pages);
/* String read (target_length == 0): start with the remainder of the
   first page and grow until a NUL is found. */
412 size = (page_size - page_offset);
/* NOTE(review): "1 ||" makes this condition always true -- looks like
   leftover debugging; confirm whether the guard was intentional. */
415 if (1 || size > page_size)
417 "increasing size to %d (dom=%d,addr=%"PRIxADDR",pid=%d)\n",
418 size,xstate->
id,addr,pid);
419 pages = (
unsigned char *) \
420 __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,size,
/* Bounded scan for the terminating NUL within the mapped span. */
426 length = strnlen((
const char *)(pages + offset), size);
432 if (munmap(pages,no_pages * page_size))
433 vwarn(
"munmap of %p failed\n",pages);
/* +1 for the NUL appended in the string case.
   NOTE(review): malloc result unchecked in the visible lines. */
439 retval = (
unsigned char *)
malloc(length+1);
443 memcpy(retval,pages + offset,length);
444 if (target_length == 0) {
445 retval[length] =
'\0';
/* Always release the mapping the helper created. */
449 if (munmap(pages,no_pages * page_size))
450 vwarn(
"munmap of %p failed\n",pages);
/*
 * Fragment of xen_vm_mem_xenaccess_write_tid (extraction gaps).
 * Writes @length bytes from @buf to virtual @addr in @pid's address
 * space: map the span via __xen_vm_mem_xenaccess_mmap_pages, memcpy
 * into the mapping, then unmap.
 */
457 unsigned long length,
458 unsigned char *buf) {
461 unsigned char *pages;
462 unsigned int offset = 0;
463 unsigned long page_size;
464 unsigned int page_offset;
477 page_size = xstate->xa_instance.page_size;
478 page_offset = addr & (page_size - 1);
/* NOTE(review): %d used for unsigned long length -- should be %lu
   on LP64 hosts; same in the debug line below. */
481 "write dom %d: addr=0x%"PRIxADDR" offset=%d len=%d pid=%d\n",
482 xstate->
id,addr,page_offset,length,pid);
496 pages = (
unsigned char *) \
497 __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,
498 length,&offset,&no_pages,
/* The helper's reported offset must match our in-page offset. */
505 assert(offset == page_offset);
507 "write dom %d: addr=0x%"PRIxADDR" offset=%d pid=%d len=%d mapped pages=%d\n",
508 xstate->
id,addr,page_offset,pid,length,no_pages);
/* Copy straight into the guest mapping; elided lines presumably
   requested write access -- confirm the prot argument. */
510 memcpy(pages + offset,buf,length);
512 if (munmap(pages,no_pages * page_size))
513 vwarn(
"munmap of %p failed\n",pages);
int xen_vm_mem_xenaccess_init(struct target *target)
xa_instance_t xa_instance
int xen_vm_mem_xenaccess_handle_exception_ours(struct target *target)
static uint64_t unsigned int i
int xen_vm_mem_xenaccess_addr_v2p(struct target *target, tid_t tid, ADDR pgd, ADDR vaddr, ADDR *paddr)
struct malloc_state * mstate
#define verror(format,...)
int target_find_memory_real(struct target *target, ADDR addr, struct addrspace **space_saveptr, struct memregion **region_saveptr, struct memrange **range_saveptr)
int xen_vm_mem_xenaccess_attach(struct target *target)
unsigned char * xen_vm_mem_xenaccess_read_tid(struct target *target, tid_t tid, ADDR addr, unsigned long target_length, unsigned char *buf)
#define vwarn(format,...)
int xen_vm_mem_xenaccess_handle_exception_any(struct target *target)
void * mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
#define vdebug(devel, areas, flags, format,...)
void * calloc(size_t nmemb, size_t size)
void * malloc(size_t size)
unsigned long xen_vm_mem_xenaccess_write_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)
unsigned long xen_vm_mem_xenaccess_write_tid(struct target *target, int pid, ADDR addr, unsigned long length, unsigned char *buf)
unsigned char * xen_vm_mem_xenaccess_read_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)