Stackdb
Stackdb is a stackable, multi-target and -level source debugger and memory forensics library.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
target_xen_vm_mem_xenaccess.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2012, 2013, 2014 The University of Utah
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of
7  * the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18 
19 #include "config.h"
20 
21 #include <errno.h>
22 #include <assert.h>
23 #include <ctype.h>
24 #include <unistd.h>
25 #include <sys/types.h>
26 #include <sys/stat.h>
27 #include <fcntl.h>
28 #include <sys/mman.h>
29 
30 #include "common.h"
31 #include "arch.h"
32 #include "arch_x86.h"
33 #include "arch_x86_64.h"
34 #include "target_api.h"
35 #include "target.h"
36 #include "target_arch_x86.h"
37 #include "target_os.h"
38 
39 #include <xenctrl.h>
40 #include <xs.h>
41 #include <xenaccess/xenaccess.h>
42 #include <xenaccess/xa_private.h>
43 
44 #include "target_xen_vm.h"
45 
46 
48  /* XenAccess instance used to read/write domain's memory */
49  xa_instance_t xa_instance;
50 };
51 
52 /*
53  * Prototypes.
54  */
55 
57  struct xen_vm_state *xstate;
59 
60  xstate = (struct xen_vm_state *)target->state;
61 
62  mstate = (struct xen_vm_mem_xenaccess_state *)calloc(1,sizeof(*mstate));
63 
64  mstate->xa_instance.os_type = XA_OS_LINUX;
65  if (xa_init_vm_id_strict_noos(xstate->id,&mstate->xa_instance) == XA_FAILURE) {
66  if (mstate->xa_instance.sysmap)
67  free(mstate->xa_instance.sysmap);
68  verror("failed to init xa instance for dom %d\n",xstate->id);
69  free(mstate);
70  return -1;
71  }
72 
73  xstate->memops_priv = mstate;
74 
75  return 0;
76 }
77 
79  struct xen_vm_state *xstate;
81  ADDR init_task_addr,pgd_addr;
82  char *val;
83  OFFSET tasks_offset,pid_offset,mm_offset,pgd_offset;
84 
85  xstate = (struct xen_vm_state *)target->state;
86  mstate = (struct xen_vm_mem_xenaccess_state *)xstate->memops_priv;
87 
88  /*
89  * Make sure xenaccess is setup to read from userspace memory.
90  *
91  * This is hacky, but we do it by reading properties that the
92  * personality has (hopefully) set.
93  */
94  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_INIT_TASK_ADDR");
95  if (val)
96  init_task_addr = (ADDR)strtol(val,NULL,0);
97  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_PGD_ADDR");
98  if (val)
99  pgd_addr = (ADDR)strtol(val,NULL,0);
100 
101 
102  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_TASKS_OFFSET");
103  if (val)
104  tasks_offset = (ADDR)strtol(val,NULL,0);
105  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_PID_OFFSET");
106  if (val)
107  pid_offset = (ADDR)strtol(val,NULL,0);
108  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_MM_OFFSET");
109  if (val)
110  mm_offset = (ADDR)strtol(val,NULL,0);
111  val = (char *)g_hash_table_lookup(target->config,"OS_KERNEL_MM_PGD_OFFSET");
112  if (val)
113  pgd_offset = (ADDR)strtol(val,NULL,0);
114 
115  xstate->xa_instance.init_task = init_task_addr;
116  xstate->xa_instance.page_offset = 0;
117  xstate->xa_instance.os.linux_instance.tasks_offset = tasks_offset;
118  xstate->xa_instance.os.linux_instance.pid_offset = pid_offset;
119  xstate->xa_instance.os.linux_instance.mm_offset = mm_offset;
120  xstate->xa_instance.os.linux_instance.pgd_offset = pgd_offset;
121  xstate->xa_instance.kpgd = pgd_addr;
122 
123  return 0;
124 }
125 
/*
 * Hook invoked when the target stops on an exception we generated;
 * nothing to refresh here.
 *
 * NOTE(review): the signature line was lost in the doc scrape.  The
 * name assignment between this stub and the cache-flushing handler
 * below was inferred from this file's symbol index — confirm against
 * the original source.
 */
int xen_vm_mem_xenaccess_handle_exception_ours(struct target *target) {
    return 0;
}
129 
131  struct xen_vm_state *xstate;
133 
134  xstate = (struct xen_vm_state *)target->state;
135  mstate = (struct xen_vm_mem_xenaccess_state *)xstate->memops_priv;
136 
137  /* From previous */
138  xa_destroy_cache(&mstate->xa_instance);
139  xa_destroy_pid_cache(&mstate->xa_instance);
140 
141  return 0;
142 }
143 
145  ADDR vaddr,ADDR *paddr) {
146  struct xen_vm_state *xstate;
148  uint64_t tvaddr = 0;
149  uint64_t tpaddr = 0;
150 
151  xstate = (struct xen_vm_state *)target->state;
152  mstate = (struct xen_vm_mem_xenaccess_state *)xstate->memops_priv;
153 
154  /*
155  * Strip the offset bits to improve xenaccess cache perf.
156  */
157  tvaddr = vaddr & ~(__PAGE_SIZE - 1);
158 
159 #if __WORDSIZE == 64
160  verror("no XenAccess support for 64-bit host!\n");
161  errno = ENOTSUP;
162  return -1;
163 #else
164  tpaddr = xa_pagetable_lookup(&xstate->xa_instance,pgd,tvaddr,0);
165  if (tpaddr == 0) {
166  verror("could not lookup vaddr 0x%"PRIxADDR" in tid %"PRIiTID
167  " pgd 0x%"PRIxADDR"!\n",
168  vaddr,tid,pgd);
169  return -1;
170  }
171 #endif
172 
173  *paddr = tpaddr | (vaddr & (__PAGE_SIZE - 1));
174 
176  "tid %"PRIiTID" vaddr 0x%"PRIxADDR" -> paddr 0x%"PRIxADDR"\n",
177  tid,vaddr,*paddr);
178 
179  return 0;
180 }
181 
182 unsigned char *xen_vm_mem_xenaccess_read_phys(struct target *target,ADDR paddr,
183  unsigned long length,
184  unsigned char *buf) {
185  struct xen_vm_state *xstate;
187  unsigned char *retval = NULL;
188  unsigned long npages;
189  unsigned long page_offset;
190  unsigned long i;
191  unsigned long cur;
192  unsigned char *mmap;
193  unsigned long rc;
194  uint32_t offset;
195 
196  xstate = (struct xen_vm_state *)target->state;
197 
198  if (!buf)
199  retval = (unsigned char *)malloc(length+1);
200  else
201  retval = buf;
202 
203  page_offset = paddr & (__PAGE_SIZE - 1);
204  npages = (page_offset + length) / __PAGE_SIZE;
205  if ((page_offset + length) % __PAGE_SIZE)
206  ++npages;
207 
208  /* Have to mmap them one by one. */
209  cur = paddr & ~(__PAGE_SIZE - 1);
210  rc = 0;
211  for (i = 0; i < npages; ++i) {
212  mmap = xa_access_pa(&xstate->xa_instance,cur,&offset,PROT_READ);
213  if (!mmap) {
214  verror("failed to mmap paddr 0x%lx (for write to"
215  " 0x%"PRIxADDR"): %s!\n",
216  cur,paddr,strerror(errno));
217  goto errout;
218  }
219  if (i == 0) {
220  memcpy(retval + rc,mmap + page_offset,__PAGE_SIZE - page_offset);
221  rc = __PAGE_SIZE - page_offset;
222  }
223  else if (i == (npages - 1)) {
224  memcpy(retval + rc,mmap,(length - rc));
225  rc += length - rc;
226  }
227  else {
228  memcpy(retval + rc,mmap,__PAGE_SIZE);
229  rc += __PAGE_SIZE;
230  }
231  munmap(mmap,__PAGE_SIZE);
232  cur += __PAGE_SIZE;
233  }
234 
235  return retval;
236 
237  errout:
238  if (!buf && retval)
239  free(retval);
240  if (!errno)
241  errno = EFAULT;
242  return NULL;
243 }
244 
245 unsigned long xen_vm_mem_xenaccess_write_phys(struct target *target,ADDR paddr,
246  unsigned long length,
247  unsigned char *buf) {
248  struct xen_vm_state *xstate;
249  unsigned long npages;
250  unsigned long page_offset;
251  unsigned long i;
252  unsigned long cur;
253  unsigned char *mmap;
254  unsigned long rc;
255  uint32_t offset;
256 
257  xstate = (struct xen_vm_state *)target->state;
258 
259  page_offset = paddr & (__PAGE_SIZE - 1);
260  npages = (page_offset + length) / __PAGE_SIZE;
261  if ((page_offset + length) % __PAGE_SIZE)
262  ++npages;
263 
264  /* Have to mmap them one by one. */
265  cur = paddr & ~(__PAGE_SIZE - 1);
266  rc = 0;
267  for (i = 0; i < npages; ++i) {
268  mmap = xa_access_pa(&xstate->xa_instance,cur,&offset,PROT_READ);
269  if (!mmap) {
270  verror("failed to mmap paddr 0x%lx (for write to"
271  " 0x%"PRIxADDR"): %s!\n",
272  cur,paddr,strerror(errno));
273  goto errout;
274  }
275  if (i == 0) {
276  memcpy(mmap + page_offset,buf + rc,__PAGE_SIZE - page_offset);
277  rc = __PAGE_SIZE - page_offset;
278  }
279  else if (i == (npages - 1)) {
280  memcpy(mmap,buf + rc,(length - rc));
281  rc += length - rc;
282  }
283  else {
284  memcpy(mmap,buf + rc,__PAGE_SIZE);
285  rc += __PAGE_SIZE;
286  }
287  munmap(mmap,__PAGE_SIZE);
288  cur += __PAGE_SIZE;
289  }
290 
291  return length;
292 
293  errout:
294  if (!errno)
295  errno = EFAULT;
296  return 0;
297 }
298 
299 static unsigned char *
300 __xen_vm_mem_xenaccess_mmap_pages(xa_instance_t *xa_instance,ADDR addr,
301  unsigned long size,uint32_t *offset,
302  int *npages,int prot,int pid) {
303  unsigned char *pages;
304  unsigned long page_size, page_offset;
305  char *dstr = "small";
306 
307  page_size = xa_instance->page_size;
308  page_offset = addr & (page_size - 1);
309 
310  if (size > 0 && size <= (page_size - page_offset)) {
311  /* let xenaccess use its memory cache for small size */
312  pages = xa_access_user_va(xa_instance,addr,offset,pid,prot);
313  if (!pages) {
314  if (!pid)
315  return NULL;
316 
317  pages = xa_access_user_va(xa_instance,addr,offset,0,prot);
318  if (!pages)
319  return NULL;
320  }
321  *npages = 1;
322  }
323  else {
324  dstr = "large";
325  /* xenaccess can't map multiple pages properly, use our own function */
326  pages = xa_access_user_va_range(xa_instance,addr,size,offset,pid,prot);
327 
328  if (!pages) { // && pid) {
329  //return NULL;
330  if (!pid)
331  return NULL;
332 
333  /* try kernel */
334  pages = xa_access_user_va_range(xa_instance,addr,size,offset,0,prot);
335  if (!pages)
336  return NULL;
337  }
338 
339  /*
340  * Compute how many pages were mapped.
341  * *offset is the offset within the initial page mapped.
342  * Number of pages is thus:
343  * round((*offset+size), page_size)
344  */
345  *npages = (*offset + size) / page_size;
346  if ((*offset + size) % page_size)
347  (*npages)++;
348  }
349 
350  vdebug(9,LA_TARGET,LF_XV,"%ld bytes at %lx mapped (%s)\n",size,addr,dstr);
351 
352  return pages; /* munmap it later */
353 }
354 
355 /*
356  * Our xen read and write functions are a little special. First,
357  * xenaccess has the ability to read/write using the current cr3
358  * contents as the pgdir location, or it can use a different pgdir
359  * (i.e., for a thread that is not running).
360  */
362  ADDR addr,
363  unsigned long target_length,
364  unsigned char *buf) {
365  unsigned char *pages;
366  unsigned int offset = 0;
367  unsigned long length = target_length, size = 0;
368  unsigned long page_size;
369  unsigned char *retval = NULL;
370  unsigned int page_offset;
371  int no_pages;
372  struct xen_vm_state *xstate;
373 
374  xstate = (struct xen_vm_state *)(target->state);
375 
376  /*
377  * Change the TID to 0 if TID was global. The Xen backend always
378  * defaults non-tid-specific reads/writes to the kernel, via
379  * TID_GLOBAL.
380  */
381  if (tid == TID_GLOBAL)
382  tid = 0;
383 
384  // XXX: need to check, if pid > 0, if we can actually read it --
385  // i.e., do we have the necessary task_struct offsets for xenaccess,
386  // and is it in mem...
387 
388  page_size = xstate->xa_instance.page_size;
389  page_offset = addr & (page_size - 1);
390 
392  "read dom %d: addr=0x%"PRIxADDR" offset=%d len=%d pid=%d\n",
393  xstate->id,addr,page_offset,target_length,pid);
394 
395  /* if we know what length we need, just grab it */
396  if (length > 0) {
397  pages = (unsigned char *) \
398  __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,
399  length,&offset,&no_pages,
400  PROT_READ,pid);
401  if (!pages)
402  return NULL;
403 
404  assert(offset == page_offset);
406  "read dom %d: addr=0x%"PRIxADDR" offset=%d pid=%d len=%d mapped pages=%d\n",
407  xstate->id,addr,page_offset,pid,length,no_pages);
408  }
409  else {
410  /* increase the mapping size by this much if the string is longer
411  than we expect at first attempt. */
412  size = (page_size - page_offset);
413 
414  while (1) {
415  if (1 || size > page_size)
417  "increasing size to %d (dom=%d,addr=%"PRIxADDR",pid=%d)\n",
418  size,xstate->id,addr,pid);
419  pages = (unsigned char *) \
420  __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,size,
421  &offset,&no_pages,
422  PROT_READ,pid);
423  if (!pages)
424  return NULL;
425 
426  length = strnlen((const char *)(pages + offset), size);
427  if (length < size) {
428  vdebug(9,LA_TARGET,LF_XV,"got string of length %d, mapped %d pages\n",
429  length,no_pages);
430  break;
431  }
432  if (munmap(pages,no_pages * page_size))
433  vwarn("munmap of %p failed\n",pages);
434  size += page_size;
435  }
436  }
437 
438  if (!buf)
439  retval = (unsigned char *)malloc(length+1);
440  else
441  retval = buf;
442  if (retval) {
443  memcpy(retval,pages + offset,length);
444  if (target_length == 0) {
445  retval[length] = '\0';
446  }
447  }
448 
449  if (munmap(pages,no_pages * page_size))
450  vwarn("munmap of %p failed\n",pages);
451 
452  return retval;
453 }
454 
456  int pid,ADDR addr,
457  unsigned long length,
458  unsigned char *buf) {
459  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
460  struct memrange *range = NULL;
461  unsigned char *pages;
462  unsigned int offset = 0;
463  unsigned long page_size;
464  unsigned int page_offset;
465  int no_pages;
466 
467  xstate = (struct xen_vm_state *)(target->state);
468 
469  /*
470  * Change the TID to 0 if TID was global. The Xen backend always
471  * defaults non-tid-specific reads/writes to the kernel, via
472  * TID_GLOBAL.
473  */
474  if (tid == TID_GLOBAL)
475  tid = 0;
476 
477  page_size = xstate->xa_instance.page_size;
478  page_offset = addr & (page_size - 1);
479 
481  "write dom %d: addr=0x%"PRIxADDR" offset=%d len=%d pid=%d\n",
482  xstate->id,addr,page_offset,length,pid);
483 
484  target_find_memory_real(target,addr,NULL,NULL,&range);
485 
486  /*
487  * This is mostly a stub for later, when we might actually check
488  * bounds of writes.
489  */
490  if (!range || !(range->prot_flags & PROT_WRITE)) {
491  errno = EFAULT;
492  return 0;
493  }
494 
495  /* Map the pages we have to write to. */
496  pages = (unsigned char *) \
497  __xen_vm_mem_xenaccess_mmap_pages(&xstate->xa_instance,addr,
498  length,&offset,&no_pages,
499  PROT_WRITE,pid);
500  if (!pages) {
501  errno = EFAULT;
502  return 0;
503  }
504 
505  assert(offset == page_offset);
507  "write dom %d: addr=0x%"PRIxADDR" offset=%d pid=%d len=%d mapped pages=%d\n",
508  xstate->id,addr,page_offset,pid,length,no_pages);
509 
510  memcpy(pages + offset,buf,length);
511 
512  if (munmap(pages,no_pages * page_size))
513  vwarn("munmap of %p failed\n",pages);
514 
515  return length;
516 }
int xen_vm_mem_xenaccess_init(struct target *target)
GHashTable * config
Definition: target_api.h:2622
void * state
Definition: target_api.h:2526
int32_t tid_t
Definition: common.h:36
int xen_vm_mem_xenaccess_handle_exception_ours(struct target *target)
static uint64_t unsigned int i
int xen_vm_mem_xenaccess_addr_v2p(struct target *target, tid_t tid, ADDR pgd, ADDR vaddr, ADDR *paddr)
struct malloc_state * mstate
Definition: dlmalloc.c:2609
#define assert(x)
Definition: dlmalloc.c:1456
int32_t OFFSET
Definition: common.h:65
#define verror(format,...)
Definition: log.h:30
int target_find_memory_real(struct target *target, ADDR addr, struct addrspace **space_saveptr, struct memregion **region_saveptr, struct memrange **range_saveptr)
Definition: target.c:3578
int xen_vm_mem_xenaccess_attach(struct target *target)
unsigned char * xen_vm_mem_xenaccess_read_tid(struct target *target, tid_t tid, ADDR addr, unsigned long target_length, unsigned char *buf)
#define vwarn(format,...)
Definition: log.h:33
void free(void *ptr)
Definition: debugserver.c:207
int xen_vm_mem_xenaccess_handle_exception_any(struct target *target)
void * memops_priv
#define __PAGE_SIZE
void * mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
Definition: qemuhacks.c:213
#define PROT_WRITE
Definition: common.h:107
#define vdebug(devel, areas, flags, format,...)
Definition: log.h:302
Definition: log.h:172
unsigned int prot_flags
Definition: target.h:997
void * calloc(size_t nmemb, size_t size)
Definition: debugserver.c:200
Definition: log.h:70
#define PRIiTID
Definition: common.h:37
uint32_t ADDR
Definition: common.h:64
#define PROT_READ
Definition: common.h:106
#define PRIxADDR
Definition: common.h:67
void * malloc(size_t size)
Definition: debugserver.c:214
unsigned long xen_vm_mem_xenaccess_write_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)
unsigned long xen_vm_mem_xenaccess_write_tid(struct target *target, int pid, ADDR addr, unsigned long length, unsigned char *buf)
unsigned char * xen_vm_mem_xenaccess_read_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)
#define TID_GLOBAL
Definition: target_api.h:145