Stackdb
Stackdb is a stackable, multi-target, multi-level source debugger and memory forensics library.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
target_xen_vm_mem_builtin.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014 The University of Utah
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of
7  * the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18 
19 #include "config.h"
20 
21 #include <errno.h>
22 #include <assert.h>
23 #include <ctype.h>
24 #include <unistd.h>
25 #include <sys/types.h>
26 #include <sys/stat.h>
27 #include <fcntl.h>
28 #include <sys/mman.h>
29 
30 #include "common.h"
31 #include "arch.h"
32 #include "arch_x86.h"
33 #include "arch_x86_64.h"
34 #include "target_api.h"
35 #include "target.h"
36 #include "target_arch_x86.h"
37 #include "target_os.h"
38 
39 #include <xenctrl.h>
40 #include <xs.h>
41 #include "target_xen_vm.h"
42 
43 #ifdef XENCTRL_HAS_XC_INTERFACE
44 extern xc_interface *xc_handle;
45 #define XC_IF_INVALID (NULL)
46 #else
47 extern int xc_handle;
48 #define XC_IF_INVALID (-1)
49 #endif
50 
/*
 * Private per-target state for the builtin memops backend.
 *
 * NOTE(review): the opening "struct xen_vm_mem_builtin_state {" line was
 * dropped by the doc extraction; the tag name is reconstructed from the
 * casts in the functions below.
 */
struct xen_vm_mem_builtin_state {
    /* Nothing yet! */
};
54 
56  struct xen_vm_state *xstate;
58  struct xen_vm_spec *xspec;
59 
60  xstate = (struct xen_vm_state *)target->state;
61  xspec = (struct xen_vm_spec *)target->spec->backend_spec;
62 
63  mstate = (struct xen_vm_mem_builtin_state *)NULL; //calloc(1,sizeof(*mstate));
64 
65  xstate->memops_priv = mstate;
66 
67  /* We use memcache -- create one. */
68  if (target->memcache) {
69  verror("memcache already in use!\n");
70  errno = EINVAL;
71  return -1;
72  }
73 
74  target->memcache = memcache_create(0,xspec->memcache_mmap_size,NULL);
75 
76  return 0;
77 }
78 
/*
 * Nothing to do at attach time; the memcache is created in init().
 *
 * NOTE(review): the signature line was dropped by the doc extraction; the
 * name is reconstructed from the ops table at the bottom of the file.
 */
int xen_vm_mem_builtin_attach(struct target *target) {
    return 0;
}
82 
/*
 * Nothing to do on target pause.
 *
 * NOTE(review): the signature line was dropped by the doc extraction; the
 * name is reconstructed from the ops table (the two non-trivial handlers
 * below are clearly the exception handlers, leaving pause as this trivial
 * one).
 */
int xen_vm_mem_builtin_handle_pause(struct target *target) {
    return 0;
}
86 
88  struct xen_vm_state *xstate;
90  struct xen_vm_spec *xspec;
91 
92  xstate = (struct xen_vm_state *)target->state;
93  xspec = (struct xen_vm_spec *)target->spec->backend_spec;
94  mstate = (struct xen_vm_mem_builtin_state *)xstate->memops_priv;
95 
96  /* XXX: invalidate caches? */
99  }
100  else
101  memcache_inc_ticks(target->memcache,1);
102 
103  return 0;
104 }
105 
107  struct xen_vm_state *xstate;
109  struct xen_vm_spec *xspec;
110 
111  xstate = (struct xen_vm_state *)target->state;
112  xspec = (struct xen_vm_spec *)target->spec->backend_spec;
113  mstate = (struct xen_vm_mem_builtin_state *)xstate->memops_priv;
114 
115  /* XXX: invalidate caches? */
116  if (xspec->clear_mem_caches_each_exception) {
118  }
119  else
120  memcache_inc_ticks(target->memcache,1);
121 
122  return 0;
123 }
124 
126  ADDR vaddr,ADDR *paddr) {
127  struct xen_vm_state *xstate;
129  ADDR tvaddr,tpaddr;
130  int rc;
131 
132  xstate = (struct xen_vm_state *)target->state;
133  mstate = (struct xen_vm_mem_builtin_state *)xstate->memops_priv;
134 
135  /*
136  * Strip the offset bits to improve builtin/xenaccess cache perf.
137  */
138  tvaddr = vaddr & ~(__PAGE_SIZE - 1);
139 
140  rc = memcache_get_v2p(target->memcache,pgd,tvaddr,paddr,NULL);
141  if (rc == 0) {
142  *paddr |= (vaddr & (__PAGE_SIZE - 1));
143  return 0;
144  }
145  else if (rc < 0) {
146  vwarn("error while looking up vaddr 0x%"PRIxADDR" (for vaddr"
147  " 0x%"PRIxADDR") in memcache: %s (%d); trying full lookup!\n",
148  tvaddr,vaddr,strerror(errno),rc);
149  }
150 
151  rc = target_arch_x86_v2p(target,pgd,vaddr,ARCH_X86_V2P_LMA,&tpaddr);
152  if (rc) {
153  verror("could not lookup vaddr 0x%"PRIxADDR" in tid %"PRIiTID
154  " pgd 0x%"PRIxADDR"!\n",
155  vaddr,tid,pgd);
156  return -1;
157  }
158 
159  *paddr = tpaddr | (vaddr & (__PAGE_SIZE - 1));
160 
162  "tid %"PRIiTID" vaddr 0x%"PRIxADDR" -> paddr 0x%"PRIxADDR"\n",
163  tid,vaddr,*paddr);
164 
165  memcache_set_v2p(target->memcache,pgd,vaddr,*paddr);
166 
167  return 0;
168 }
169 
170 static void *__xen_vm_mem_builtin_mmap_phys(struct target *target,ADDR paddr,
171  unsigned long length,int prot,
172  ADDR *pbase,OFFSET *poffset,
173  unsigned long *plength) {
174  struct xen_vm_state *xstate;
175  xen_pfn_t *pfn_arr;
176  int num,i,again;
177  OFFSET paddr_offset;
178  ADDR lpaddr,pfn;
179  void *mmap;
180  unsigned long int evicted;
181 
182  xstate = (struct xen_vm_state *)target->state;
183 
184  paddr_offset = paddr & (((ADDR)__PAGE_SIZE) - 1);
185  num = (paddr_offset + length) / ((ADDR)__PAGE_SIZE);
186  if ((paddr_offset + length) % ((ADDR)__PAGE_SIZE))
187  ++num;
188  //num = (length - paddr_offset) / __PAGE_SIZE;
189  //num += ((length - paddr_offset) & (__PAGE_SIZE - 1)) ? 1 : 0;
190 
191  pfn_arr = calloc(num,sizeof(*pfn_arr));
192  lpaddr = paddr & ~(((ADDR)__PAGE_SIZE) - 1);
193  pfn = lpaddr / ((ADDR)__PAGE_SIZE);
194  for (i = 0; i < num; ++i) {
195  pfn_arr[i] = pfn + i;
196  }
197 
198  again = 0;
199  again:
200  mmap = xc_map_foreign_pages(xc_handle,xstate->id,prot,pfn_arr,num);
201  //mmap = xc_map_foreign_range(xc_handle,xstate->id,prot,num * __PAGE_SIZE,
202  // pfn);
203  if (!mmap) {
204  if (again) {
205  verror("failed to mmap %d pages at paddr 0x%"PRIxADDR
206  " (page 0x%"PRIxADDR"); aborting!\n",
207  num,paddr,lpaddr);
208  }
209  else {
211  "failed to mmap %d pages at paddr 0x%"PRIxADDR
212  " (page 0x%"PRIxADDR"); evicting and trying again!\n",
213  num,paddr,lpaddr);
214  }
215 
216  if (!again) {
218  MEMCACHE_VIRT,num * __PAGE_SIZE);
219  if (evicted < (unsigned long int)(num * __PAGE_SIZE))
220  evicted += memcache_lru_evict_mmap(target->memcache,
223  num * __PAGE_SIZE - evicted);
224  errno = 0;
225  again = 1;
226  goto again;
227  }
228  if (!errno)
229  errno = EFAULT;
230  return NULL;
231  }
232  else {
233  if (pbase)
234  *pbase = lpaddr;
235  if (poffset)
236  *poffset = paddr_offset;
237  if (plength)
238  *plength = num * __PAGE_SIZE;
239 
241  "mmap'd %d pages at phys 0x%"PRIxADDR" at 0x%p\n",
242  num,lpaddr,mmap);
243 
244  return mmap;
245  }
246 }
247 
248 unsigned char *xen_vm_mem_builtin_read_phys_str(struct target *target,
249  ADDR addr) {
250  ADDR lvaddr;
251  OFFSET voffset = 0;
252  char *mmap = NULL;
253  unsigned long mlen;
254  int rc,i,j,pad;
255  ADDR paddr;
256  char *lbuf = NULL;
257  int lbuf_alen = 0,lbuf_len = 0;
258  OFFSET coffset;
259  short didmmap,savedmmap;
260  ADDR pbase;
261  unsigned long plength = 0;
262 
263  /*
264  * Read phys pages until we see a '\0'.
265  *
266  * NB: Cache physical pages as tid 0!
267  */
268 
269  lvaddr = addr & ~(__PAGE_SIZE - 1);
270  voffset = addr & (__PAGE_SIZE - 1);
271  for (i = 0; ; ++i) {
272  paddr = lvaddr + i * __PAGE_SIZE;
273 
274  mlen = __PAGE_SIZE;
275  coffset = 0;
276  if (i == 0) {
277  mlen -= voffset;
278  coffset = voffset;
279  }
280 
281  mmap = NULL;
282  pbase = 0;
283  plength = 0;
284 
285  rc = memcache_get_mmap(target->memcache,0,paddr,1,MEMCACHE_PHYS,
286  &pbase,NULL,(void **)&mmap,&plength,NULL);
287  if (rc < 0) {
288  vwarn("memcache_get_mmap error: v 0x%"PRIxADDR" len %lu: %s (%d); continuing\n",
289  addr,1ul,strerror(errno),rc);
290  }
291  if (mmap) {
292  didmmap = savedmmap = 0;
293  }
294  else {
295  mmap = __xen_vm_mem_builtin_mmap_phys(target,paddr,mlen,PROT_READ,
296  &pbase,NULL,&plength);
297  if (!mmap) {
298  verror("could not mmap p 0x%"PRIxADDR" (start p 0x%"PRIxADDR")!\n",
299  paddr,addr);
300  if (lbuf)
301  free(lbuf);
302  return NULL;
303  }
304 
305  didmmap = 1;
306  savedmmap = 0;
307 
308  /* Cache it. */
309  rc = memcache_set_mmap(target->memcache,0,pbase,MEMCACHE_PHYS,
310  mmap,plength);
311  if (rc == 1) {
313  "something already cached at p 0x%"PRIxADDR" len %lu"
314  " for read p 0x%"PRIxADDR"; skipping!\n",
315  pbase,plength,addr);
316  }
317  else if (rc < 0) {
318  vwarn("memcache_set_mmap error: p 0x%"PRIxADDR" len %lu (for read"
319  " p 0x%"PRIxADDR": %s (%d); continuing\n",
320  pbase,plength,addr,strerror(errno),rc);
321  }
322  else {
323  savedmmap = 1;
324  }
325  }
326 
327  /*
328  * Scan the mmap as necessary for '\0', malloc as necessary, and
329  * break or keep going.
330  */
331  for (j = coffset; j < __PAGE_SIZE; ++j) {
332  if (mmap[j] == '\0')
333  break;
334  }
335 
336  pad = (j < __PAGE_SIZE) ? 1 : 0;
337  if (!lbuf) {
338  lbuf_alen = j - coffset + pad;
339  lbuf = malloc(lbuf_alen);
340  }
341  else {
342  lbuf_alen += j - coffset + pad;
343  lbuf = realloc(lbuf,lbuf_alen);
344  }
345  memcpy(lbuf + lbuf_len,mmap + coffset,j - coffset);
346  lbuf_len += j - coffset;
347 
348  if (didmmap && !savedmmap)
349  munmap(mmap,plength);
350 
351  if (pad) {
352  lbuf[lbuf_len] = '\0';
353  break;
354  }
355  }
356 
357  return (unsigned char *)lbuf;
358 }
359 
360 unsigned char *xen_vm_mem_builtin_read_phys(struct target *target,ADDR paddr,
361  unsigned long length,
362  unsigned char *buf) {
363  unsigned long plength = 0;
364  OFFSET poffset = 0;
365  ADDR pbase = 0;
366  void *mmap = NULL;
367  int rc;
368 
369  if (length == 0)
370  return xen_vm_mem_builtin_read_phys_str(target,paddr);
371 
372  rc = memcache_get_mmap(target->memcache,0,paddr,length,0,
373  &pbase,&poffset,&mmap,&plength,NULL);
374  if (rc == 0)
375  goto out;
376  else if (rc < 0) {
377  vwarn("memcache_get_mmap error: p 0x%"PRIxADDR" len %lu: %s (%d); continuing\n",
378  paddr,length,strerror(errno),rc);
379  }
380 
381  mmap = __xen_vm_mem_builtin_mmap_phys(target,paddr,length,PROT_READ,
382  &pbase,&poffset,&plength);
383  if (!mmap)
384  return NULL;
385 
386  rc = memcache_set_mmap(target->memcache,0,pbase,0,mmap,plength);
387  if (rc == 1) {
389  "something already cached at p 0x%"PRIxADDR" len %lu"
390  " (for read p 0x%"PRIxADDR")!\n",
391  pbase,plength,paddr);
392  }
393  else if (rc < 0) {
394  vwarn("memcache_set_mmap error: p 0x%"PRIxADDR" len %lu (for read p"
395  " 0x%"PRIxADDR": %s (%d); continuing\n",
396  pbase,plength,paddr,strerror(errno),rc);
397  }
398 
399  out:
400  /* allocate buffer if necessary */
401  if (!buf) {
402  buf = malloc(length + 1);
403  buf[length] = '\0';
404  }
405  memcpy(buf,mmap + poffset,length);
406  /*
407  * Only unmap if the mmap wasn't cache, or if we couldn't cache the
408  * mmap we just made.
409  */
410  if (rc)
411  munmap(mmap,plength);
412 
413  return buf;
414 }
415 
416 unsigned long xen_vm_mem_builtin_write_phys(struct target *target,ADDR paddr,
417  unsigned long length,
418  unsigned char *buf) {
419  unsigned long plength = 0;
420  OFFSET poffset = 0;
421  ADDR pbase = 0;
422  void *mmap = NULL;
423  int rc;
424 
425  mmap = __xen_vm_mem_builtin_mmap_phys(target,paddr,length,PROT_WRITE,
426  &pbase,&poffset,&plength);
427  if (!mmap)
428  return 0;
429 
430  memcpy(mmap + poffset,buf,length);
431 
432  /*
433  * Always unmap writable maps.
434  */
435  munmap(mmap,plength);
436 
437  return length;
438 }
439 
440 static void *__xen_vm_mem_builtin_mmap_virt(struct target *target,
441  tid_t tid,ADDR pgd,ADDR vaddr,
442  unsigned long length,int prot,
443  ADDR *vbase,OFFSET *voffset,
444  unsigned long *vlength) {
445  struct xen_vm_state *xstate;
446  xen_pfn_t *pfn_arr;
447  int num,i,rc,again;
448  ADDR paddr;
449  OFFSET vaddr_offset;
450  ADDR pfn;
451  void *mmap;
452  ADDR lvaddr;
453  unsigned long int evicted;
454 
455  xstate = (struct xen_vm_state *)target->state;
456 
457  lvaddr = vaddr & ~(__PAGE_SIZE - 1);
458  vaddr_offset = vaddr & (__PAGE_SIZE - 1);
459  num = (vaddr_offset + length) / __PAGE_SIZE;
460  if ((vaddr_offset + length) % __PAGE_SIZE)
461  ++num;
462  //num = (length - vaddr_offset) / __PAGE_SIZE;
463  //num += ((length - vaddr_offset) & (__PAGE_SIZE - 1)) ? 1 : 0;
464 
465  pfn_arr = calloc(num,sizeof(*pfn_arr));
466  for (i = 0; i < num; ++i) {
467  rc = xen_vm_mem_builtin_addr_v2p(target,tid,pgd,lvaddr + i * __PAGE_SIZE,
468  &paddr);
469  if (rc) {
470  free(pfn_arr);
471  return NULL;
472  }
473  pfn = paddr / __PAGE_SIZE;
474  pfn_arr[i] = pfn;
475  }
476 
477  again = 0;
478  again:
479  mmap = xc_map_foreign_pages(xc_handle,xstate->id,prot,pfn_arr,num);
480  if (!mmap) {
481  if (again) {
482  verror("failed to mmap %d pages at vaddr 0x%"PRIxADDR
483  " (for 0x%"PRIxADDR") (first page paddr 0x%"PRIxADDR");"
484  " aborting!\n",
485  num,lvaddr,vaddr,pfn_arr[0] * __PAGE_SIZE);
486  }
487  else {
489  "failed to mmap %d pages at vaddr 0x%"PRIxADDR
490  " (for 0x%"PRIxADDR") (first page paddr 0x%"PRIxADDR");"
491  " evicting and trying again!\n",
492  num,lvaddr,vaddr,pfn_arr[0] * __PAGE_SIZE);
493  }
494 
495  if (!again) {
497  MEMCACHE_VIRT,num * __PAGE_SIZE);
498  if (evicted < (unsigned long int)(num * __PAGE_SIZE))
499  evicted += memcache_lru_evict_mmap(target->memcache,
502  num * __PAGE_SIZE - evicted);
503  errno = 0;
504  again = 1;
505  goto again;
506  }
507 
508  if (!errno)
509  errno = EFAULT;
510  return NULL;
511  }
512  else {
513  if (vbase)
514  *vbase = lvaddr;
515  if (voffset)
516  *voffset = vaddr_offset;
517  if (vlength)
518  *vlength = num * __PAGE_SIZE;
519 
521  "mmap'd %d pages at virt 0x%"PRIxADDR" (for 0x%"PRIxADDR" at 0x%p\n",
522  num,vbase,vaddr,mmap);
523 
524  return mmap;
525  }
526 }
527 
528 unsigned char *xen_vm_mem_builtin_read_v_str(struct target *target,
529  tid_t tid,ADDR pgd,ADDR addr) {
530  ADDR lvaddr;
531  OFFSET voffset = 0;
532  char *mmap = NULL;
533  unsigned long mlen;
534  int rc,i,j,pad;
535  ADDR paddr;
536  char *lbuf = NULL;
537  int lbuf_alen = 0,lbuf_len = 0;
538  OFFSET coffset;
539  short didmmap,savedmmap;
540  ADDR pbase;
541  unsigned long plength = 0;
542 
543  /*
544  * Ok, translate vaddrs to paddrs page by page until we see a '\0'.
545  *
546  * NB:Cache physical pages as tid 0!
547  */
548 
549  lvaddr = addr & ~(__PAGE_SIZE - 1);
550  voffset = addr & (__PAGE_SIZE - 1);
551  for (i = 0; ; ++i) {
552  rc = xen_vm_mem_builtin_addr_v2p(target,tid,pgd,lvaddr + i * __PAGE_SIZE,
553  &paddr);
554  if (rc) {
555  verror("could not translate v 0x%"PRIxADDR"; start v 0x%"PRIxADDR"!\n",
556  lvaddr,addr);
557  if (lbuf)
558  free(lbuf);
559  return NULL;
560  }
561  mlen = __PAGE_SIZE;
562  coffset = 0;
563  if (i == 0) {
564  mlen -= voffset;
565  coffset = voffset;
566  }
567 
568  mmap = NULL;
569  pbase = 0;
570  plength = 0;
571 
572  rc = memcache_get_mmap(target->memcache,0,paddr,1,MEMCACHE_PHYS,
573  &pbase,NULL,(void **)&mmap,&plength,NULL);
574  if (rc < 0) {
575  vwarn("memcache_get_mmap error: v 0x%"PRIxADDR" len %lu: %s (%d); continuing\n",
576  addr,1ul,strerror(errno),rc);
577  }
578  if (mmap) {
579  didmmap = savedmmap = 0;
580  }
581  else {
582  mmap = __xen_vm_mem_builtin_mmap_phys(target,paddr,mlen,PROT_READ,
583  &pbase,NULL,&plength);
584  if (!mmap) {
585  verror("could not mmap p 0x%"PRIxADDR" (after translating"
586  " v 0x%"PRIxADDR"; start v 0x%"PRIxADDR")!\n",
587  paddr,lvaddr,addr);
588  if (lbuf)
589  free(lbuf);
590  return NULL;
591  }
592 
593  didmmap = 1;
594  savedmmap = 0;
595 
596  /* Cache it. */
597  rc = memcache_set_mmap(target->memcache,0,pbase,MEMCACHE_PHYS,
598  mmap,plength);
599  if (rc == 1) {
601  "something already cached at p 0x%"PRIxADDR" len %lu"
602  " for read v 0x%"PRIxADDR"; skipping!\n",
603  pbase,plength,addr);
604  }
605  else if (rc < 0) {
606  vwarn("memcache_set_mmap error: p 0x%"PRIxADDR" len %lu (for read"
607  " v 0x%"PRIxADDR": %s (%d); continuing\n",
608  pbase,plength,addr,strerror(errno),rc);
609  }
610  else {
611  savedmmap = 1;
612  }
613  }
614 
615  /*
616  * Scan the mmap as necessary for '\0', malloc as necessary, and
617  * break or keep going.
618  */
619  for (j = coffset; j < __PAGE_SIZE; ++j) {
620  if (mmap[j] == '\0')
621  break;
622  }
623 
624  pad = (j < __PAGE_SIZE) ? 1 : 0;
625  if (!lbuf) {
626  lbuf_alen = j - coffset + pad;
627  lbuf = malloc(lbuf_alen);
628  }
629  else {
630  lbuf_alen += j - coffset + pad;
631  lbuf = realloc(lbuf,lbuf_alen);
632  }
633  memcpy(lbuf + lbuf_len,mmap + coffset,j - coffset);
634  lbuf_len += j - coffset;
635 
636  if (didmmap && !savedmmap)
637  munmap(mmap,plength);
638 
639  if (pad) {
640  lbuf[lbuf_len] = '\0';
641  break;
642  }
643  }
644 
645  return (unsigned char *)lbuf;
646 }
647 
648 /*
649  * Reads a block of memory from the target. If @buf is non-NULL, we
650  * assume it is at least @length bytes long; the result is placed into
651  * @buf and @buf is returned. If @buf is NULL, we allocate a buffer
652  * large enough to hold the result (@length if @length >0; if @length is
653  * 0 we attempt to read a string at that address; we stop when we hit a
654  * NULL byte).
655  *
656  * On error, returns NULL, and sets errno.
657  */
658 unsigned char *xen_vm_mem_builtin_read_tid(struct target *target,
659  tid_t tid,ADDR pgd,ADDR addr,
660  unsigned long length,
661  unsigned char *buf) {
662  ADDR vbase = 0;
663  unsigned long vlength = 0;
664  OFFSET voffset = 0;
665  void *mmap = NULL;
666  int rc;
667 
668  if (length == 0)
669  return xen_vm_mem_builtin_read_v_str(target,tid,pgd,addr);
670 
671  rc = memcache_get_mmap(target->memcache,pgd,addr,length,MEMCACHE_VIRT,
672  &vbase,&voffset,&mmap,&vlength,NULL);
673  if (rc == 0)
674  goto out;
675  else if (rc < 0) {
676  vwarn("memcache_get_mmap error: v 0x%"PRIxADDR" len %lu: %s (%d); continuing\n",
677  addr,length,strerror(errno),rc);
678  }
679 
680  mmap = __xen_vm_mem_builtin_mmap_virt(target,tid,pgd,addr,length,PROT_READ,
681  &vbase,&voffset,&vlength);
682  if (!mmap)
683  return NULL;
684 
685  rc = memcache_set_mmap(target->memcache,pgd,vbase,MEMCACHE_VIRT,mmap,vlength);
686  if (rc == 1) {
688  "something already cached at v 0x%"PRIxADDR" len %lu"
689  " (for read v 0x%"PRIxADDR")!\n",
690  vbase,vlength,addr);
691  }
692  else if (rc < 0) {
693  vwarn("memcache_set_mmap error: p 0x%"PRIxADDR" len %lu (for read"
694  " v 0x%"PRIxADDR": %s (%d); continuing\n",
695  vbase,vlength,addr,strerror(errno),rc);
696  }
697 
698  out:
699  /* allocate buffer if necessary */
700  if (!buf) {
701  buf = malloc(length + 1);
702  buf[length] = '\0';
703  }
704  memcpy(buf,mmap + voffset,length);
705  /*
706  * Only unmap if the mmap wasn't cache, or if we couldn't cache the
707  * mmap we just made.
708  */
709  if (rc)
710  munmap(mmap,vlength);
711 
712  return buf;
713 }
714 
715 /*
716  * Writes @length bytes from @buf to @addr. Returns the number of bytes
717  * written (and sets errno nonzero if there is an error). Successful if
718  * @return == @length.
719  */
720 unsigned long xen_vm_mem_builtin_write_tid(struct target *target,
721  tid_t tid,ADDR pgd,ADDR addr,
722  unsigned long length,
723  unsigned char *buf) {
724  ADDR vbase = 0;
725  unsigned long vlength = 0;
726  OFFSET voffset = 0;
727  void *mmap = NULL;
728  int rc;
729 
730  mmap = __xen_vm_mem_builtin_mmap_virt(target,tid,pgd,addr,length,PROT_WRITE,
731  &vbase,&voffset,&vlength);
732  if (!mmap)
733  return 0;
734 
735  memcpy(mmap + voffset,buf,length);
736 
737  /*
738  * Never cache writable-mapped pages.
739  */
740  munmap(mmap,vlength);
741 
742  return length;
743 }
744 
745 int xen_vm_mem_builtin_fini(struct target *target) {
746  if (target->memcache) {
747  memcache_destroy(target->memcache);
748  target->memcache = NULL;
749  }
750 
751  return 0;
752 }
753 
756  .attach = xen_vm_mem_builtin_attach,
757  .handle_exception_any = xen_vm_mem_builtin_handle_exception_any,
758  .handle_exception_ours = xen_vm_mem_builtin_handle_exception_ours,
759  .handle_pause = xen_vm_mem_builtin_handle_pause,
760  .addr_v2p = xen_vm_mem_builtin_addr_v2p,
761  .read_phys = xen_vm_mem_builtin_read_phys,
762  .write_phys = xen_vm_mem_builtin_write_phys,
763  .read_tid = xen_vm_mem_builtin_read_tid,
764  .write_tid = xen_vm_mem_builtin_write_tid,
765  .fini = xen_vm_mem_builtin_fini,
766 };
#define MEMCACHE_TAG_ANY
Definition: memcache.h:45
int memcache_get_mmap(struct memcache *memcache, ADDR tag, ADDR pa, unsigned long int pa_len, memcache_flags_t flags, ADDR *pa_start, OFFSET *pa_offset, void **mem, unsigned long int *mem_len, void **tag_priv)
Definition: memcache.c:265
int target_arch_x86_v2p(struct target *target, ADDR pgd, ADDR virt, arch_x86_v2p_flags_t flags, ADDR *phys)
#define vwarnopt(level, area, flags, format,...)
Definition: log.h:37
void * state
Definition: target_api.h:2526
unsigned char * xen_vm_mem_builtin_read_tid(struct target *target, tid_t tid, ADDR pgd, ADDR addr, unsigned long length, unsigned char *buf)
void * backend_spec
Definition: target_api.h:2290
int32_t tid_t
Definition: common.h:36
int memcache_set_mmap(struct memcache *memcache, ADDR tag, ADDR pa, memcache_flags_t flags, void *mem, unsigned long int mem_len)
Definition: memcache.c:402
static uint64_t unsigned int i
int xen_vm_mem_builtin_handle_pause(struct target *target)
int(* init)(struct target *target)
int xen_vm_mem_builtin_handle_exception_ours(struct target *target)
int xc_handle
struct malloc_state * mstate
Definition: dlmalloc.c:2609
int xen_vm_mem_builtin_addr_v2p(struct target *target, tid_t tid, ADDR pgd, ADDR vaddr, ADDR *paddr)
int32_t OFFSET
Definition: common.h:65
#define verror(format,...)
Definition: log.h:30
#define vwarn(format,...)
Definition: log.h:33
unsigned int clear_mem_caches_each_exception
Definition: target_xen_vm.h:77
void free(void *ptr)
Definition: debugserver.c:207
struct xen_vm_mem_ops xen_vm_mem_ops_builtin
int xen_vm_mem_builtin_fini(struct target *target)
unsigned long xen_vm_mem_builtin_write_tid(struct target *target, tid_t tid, ADDR pgd, ADDR addr, unsigned long length, unsigned char *buf)
int xen_vm_mem_builtin_attach(struct target *target)
int memcache_get_v2p(struct memcache *memcache, ADDR tag, ADDR va, ADDR *pa, void **tag_priv)
Definition: memcache.c:232
void * memops_priv
int memcache_set_v2p(struct memcache *memcache, ADDR tag, ADDR va, ADDR pa)
Definition: memcache.c:369
#define __PAGE_SIZE
void * mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
Definition: qemuhacks.c:213
struct memcache * memcache
Definition: target_api.h:2791
unsigned long int memcache_lru_evict_mmap(struct memcache *memcache, ADDR tag, memcache_flags_t flags, unsigned long int mem_len)
Definition: memcache.c:486
#define PROT_WRITE
Definition: common.h:107
void memcache_destroy(struct memcache *memcache)
Definition: memcache.c:49
unsigned char * xen_vm_mem_builtin_read_phys_str(struct target *target, ADDR addr)
#define vdebug(devel, areas, flags, format,...)
Definition: log.h:302
int memcache_invalidate_all(struct memcache *memcache)
Definition: memcache.c:138
void * realloc(void *ptr, size_t size)
Definition: debugserver.c:221
unsigned char * xen_vm_mem_builtin_read_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)
int xen_vm_mem_builtin_init(struct target *target)
Definition: log.h:172
void * calloc(size_t nmemb, size_t size)
Definition: debugserver.c:200
Definition: log.h:70
#define PRIiTID
Definition: common.h:37
unsigned long int memcache_mmap_size
Definition: target_xen_vm.h:75
uint32_t ADDR
Definition: common.h:64
struct memcache * memcache_create(unsigned long int max_v2p, unsigned long int max_mmap_size, memcache_tag_priv_dtor pdtor)
Definition: memcache.c:33
void memcache_inc_ticks(struct memcache *memcache, unsigned int new_ticks)
Definition: memcache.c:220
#define PROT_READ
Definition: common.h:106
#define PRIxADDR
Definition: common.h:67
struct target_spec * spec
Definition: target_api.h:2605
void * malloc(size_t size)
Definition: debugserver.c:214
unsigned char * xen_vm_mem_builtin_read_v_str(struct target *target, tid_t tid, ADDR pgd, ADDR addr)
int xen_vm_mem_builtin_handle_exception_any(struct target *target)
unsigned long xen_vm_mem_builtin_write_phys(struct target *target, ADDR paddr, unsigned long length, unsigned char *buf)