Stackdb
Stackdb is a stackable, multi-target and -level source debugger and memory forensics library.
target_xen_vm.c
1 /*
2  * Copyright (c) 2012, 2013, 2014, 2015 The University of Utah
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of
7  * the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18 
19 #include "config.h"
20 
21 #include <errno.h>
22 #include <assert.h>
23 #include <ctype.h>
24 #include <unistd.h>
25 #include <getopt.h>
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <sys/mman.h>
30 #include <sys/socket.h>
31 #include <sys/un.h>
32 #if !defined(UNIX_PATH_MAX)
33 #define UNIX_PATH_MAX (size_t)sizeof(((struct sockaddr_un *) 0)->sun_path)
34 #endif
35 #include <libgen.h>
36 #include <endian.h>
37 #include <gelf.h>
38 #include <elf.h>
39 #include <libelf.h>
40 #include <argp.h>
41 
42 #include "common.h"
43 #include "glib_wrapper.h"
44 #include "object.h"
45 #include "arch.h"
46 #include "arch_x86.h"
47 #include "arch_x86_64.h"
48 #include "evloop.h"
49 #include "binfile.h"
50 #include "dwdebug.h"
51 #include "dwdebug_priv.h"
52 #include "target_api.h"
53 #include "target.h"
54 #include "target_event.h"
55 #include "target_arch_x86.h"
56 #include "target_os.h"
57 #include "probe_api.h"
58 
59 #include <xenctrl.h>
60 #include <xs.h>
61 
62 #include "target_xen_vm.h"
63 #include "target_xen_vm_vmp.h"
64 
65 #ifdef ENABLE_XENACCESS
66 extern struct xen_vm_mem_ops xen_vm_mem_ops_xenaccess;
67 #endif
68 #ifdef ENABLE_LIBVMI
69 extern struct xen_vm_mem_ops xen_vm_mem_ops_libvmi;
70 #endif
71 
72 extern struct xen_vm_mem_ops xen_vm_mem_ops_builtin;
73 
74 /*
75  * Prototypes.
76  */
77 static struct target *xen_vm_instantiate(struct target_spec *spec,
78  struct evloop *evloop);
79 
80 static struct target *xen_vm_attach(struct target_spec *spec,
81  struct evloop *evloop);
82 
83 static int xen_vm_snprintf(struct target *target,char *buf,int bufsiz);
84 static int xen_vm_init(struct target *target);
85 static int xen_vm_attach_internal(struct target *target);
86 static int xen_vm_detach(struct target *target,int stay_paused);
87 static int xen_vm_fini(struct target *target);
88 static int xen_vm_kill(struct target *target,int sig);
89 static int xen_vm_loadspaces(struct target *target);
90 static int xen_vm_loadregions(struct target *target,struct addrspace *space);
91 static int xen_vm_loaddebugfiles(struct target *target,struct addrspace *space,
92  struct memregion *region);
93 static int xen_vm_postloadinit(struct target *target);
94 static int xen_vm_postopened(struct target *target);
95 static int xen_vm_set_active_probing(struct target *target,
96  active_probe_flags_t flags);
97 
98 static target_status_t xen_vm_handle_exception(struct target *target,
99  target_exception_flags_t flags,
100  int *again,void *priv);
101 
102 static struct target_spec *xen_vm_build_default_overlay_spec(struct target *target,
103  tid_t tid);
104 static struct target *
105 xen_vm_instantiate_overlay(struct target *target,
106  struct target_thread *tthread,
107  struct target_spec *spec,
108  struct target_thread **ntthread);
109 static struct target_thread *
110 xen_vm_lookup_overlay_thread_by_id(struct target *target,int id);
111 static struct target_thread *
112 xen_vm_lookup_overlay_thread_by_name(struct target *target,char *name);
113 int xen_vm_attach_overlay_thread(struct target *base,struct target *overlay,
114  tid_t newtid);
115 int xen_vm_detach_overlay_thread(struct target *base,struct target *overlay,
116  tid_t tid);
117 static target_status_t xen_vm_status(struct target *target);
118 static int xen_vm_pause(struct target *target,int nowait);
119 static int __xen_vm_resume(struct target *target,int detaching);
120 static int xen_vm_resume(struct target *target);
121 static target_status_t xen_vm_monitor(struct target *target);
122 static target_status_t xen_vm_poll(struct target *target,struct timeval *tv,
123  target_poll_outcome_t *outcome,int *pstatus);
124 int xen_vm_attach_evloop(struct target *target,struct evloop *evloop);
125 int xen_vm_detach_evloop(struct target *target);
126 static unsigned char *xen_vm_read(struct target *target,ADDR addr,
127  unsigned long length,unsigned char *buf);
128 static unsigned long xen_vm_write(struct target *target,ADDR addr,
129  unsigned long length,unsigned char *buf);
130 /*
131  * NB: initially, we will use VM phys addrs here. We could have also
132  * used Xen machine addrs; but for now, given the current
133  * libvmi code, using VM phys addrs is easiest. Later on,
134  * machine addrs will probably be *faster*. The risk with that approach
135  * is if the VM pfn/mfn mapping ever changes out from under us.
136  */
137 static int xen_vm_addr_v2p(struct target *target,tid_t tid,
138  ADDR vaddr,ADDR *paddr);
139 static unsigned char *xen_vm_read_phys(struct target *target,ADDR paddr,
140  unsigned long length,unsigned char *buf);
141 static unsigned long xen_vm_write_phys(struct target *target,ADDR paddr,
142  unsigned long length,unsigned char *buf);
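/*
 * Illustrative only (not part of the original file): a hypothetical helper
 * showing how the v2p translation above composes with the physical read.
 * Assumes the requested range does not cross a page boundary.
 */
static unsigned char *example_read_virt_via_phys(struct target *target,
                                                 tid_t tid,ADDR vaddr,
                                                 unsigned long length,
                                                 unsigned char *buf) {
    ADDR paddr;

    /* Translate @vaddr under @tid's page tables to a VM phys addr. */
    if (xen_vm_addr_v2p(target,tid,vaddr,&paddr))
        return NULL;

    /* Read from the translated VM-physical address. */
    return xen_vm_read_phys(target,paddr,length,buf);
}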
143 
144 static tid_t xen_vm_gettid(struct target *target);
145 static void xen_vm_free_thread_state(struct target *target,void *state);
146 static struct array_list *xen_vm_list_available_tids(struct target *target);
147 static struct target_thread *xen_vm_load_thread(struct target *target,tid_t tid,
148  int force);
149 static struct target_thread *xen_vm_load_current_thread(struct target *target,
150  int force);
151 static int xen_vm_load_all_threads(struct target *target,int force);
152 static int xen_vm_load_available_threads(struct target *target,int force);
153 static int xen_vm_pause_thread(struct target *target,tid_t tid,int nowait);
154 static int xen_vm_flush_thread(struct target *target,tid_t tid);
155 static int xen_vm_flush_current_thread(struct target *target);
156 static int xen_vm_flush_all_threads(struct target *target);
157 static int xen_vm_invalidate_thread(struct target *target,
158  struct target_thread *tthread);
159 static int xen_vm_thread_snprintf(struct target *target,
160  struct target_thread *tthread,
161  char *buf,int bufsiz,
162  int detail,char *sep,char *key_val_sep);
163 /*
164 static REGVAL xen_vm_read_reg(struct target *target,tid_t tid,REG reg);
165 static int xen_vm_write_reg(struct target *target,tid_t tid,REG reg,REGVAL value);
166 static GHashTable *xen_vm_copy_registers(struct target *target,tid_t tid);
167 REGVAL xen_vm_read_reg_tidctxt(struct target *target,
168  tid_t tid,thread_ctxt_t tidctxt,REG reg);
169 int xen_vm_write_reg_tidctxt(struct target *target,
170  tid_t tid,thread_ctxt_t tidctxt,
171  REG reg,REGVAL value);
172 */
173 static REG xen_vm_get_unused_debug_reg(struct target *target,tid_t tid);
174 static int xen_vm_set_hw_breakpoint(struct target *target,tid_t tid,REG num,ADDR addr);
175 static int xen_vm_set_hw_watchpoint(struct target *target,tid_t tid,REG num,ADDR addr,
176  probepoint_whence_t whence,
177  probepoint_watchsize_t watchsize);
178 static int xen_vm_unset_hw_breakpoint(struct target *target,tid_t tid,REG num);
179 static int xen_vm_unset_hw_watchpoint(struct target *target,tid_t tid,REG num);
180 int xen_vm_disable_hw_breakpoints(struct target *target,tid_t tid);
181 int xen_vm_enable_hw_breakpoints(struct target *target,tid_t tid);
182 int xen_vm_disable_hw_breakpoint(struct target *target,tid_t tid,REG dreg);
183 int xen_vm_enable_hw_breakpoint(struct target *target,tid_t tid,REG dreg);
184 int xen_vm_notify_sw_breakpoint(struct target *target,ADDR addr,
185  int notification);
186 int xen_vm_singlestep(struct target *target,tid_t tid,int isbp,
187  struct target *overlay);
188 int xen_vm_singlestep_end(struct target *target,tid_t tid,
189  struct target *overlay);
190 
191 uint64_t xen_vm_get_tsc(struct target *target);
192 uint64_t xen_vm_get_time(struct target *target);
193 uint64_t xen_vm_get_counter(struct target *target);
194 
195 int xen_vm_enable_feature(struct target *target,int feature,void *arg);
196 int xen_vm_disable_feature(struct target *target,int feature);
197 
198 int xen_vm_instr_can_switch_context(struct target *target,ADDR addr);
199 
200 /* Internal prototypes. */
201 static int __xen_vm_pgd(struct target *target,tid_t tid,uint64_t *pgd);
202 static int __xen_vm_vcpu_to_thread_regcache(struct target *target,
203  struct vcpu_guest_context *context,
204  struct target_thread *tthread,
205  thread_ctxt_t tctxt);
206 static int __xen_vm_thread_regcache_to_vcpu(struct target *target,
207  struct target_thread *tthread,
208  thread_ctxt_t tctxt,
209  struct vcpu_guest_context *context);
210 static result_t xen_vm_active_memory_handler(struct probe *probe,tid_t tid,
211  void *handler_data,
212  struct probe *trigger,
213  struct probe *base);
214 static result_t xen_vm_active_thread_entry_handler(struct probe *probe,tid_t tid,
215  void *handler_data,
216  struct probe *trigger,
217  struct probe *base);
218 static result_t xen_vm_active_thread_exit_handler(struct probe *probe,tid_t tid,
219  void *handler_data,
220  struct probe *trigger,
221  struct probe *base);
222 
223 /* Format chars to print context registers. */
224 #if __WORDSIZE == 64
225 #define RF "lx"
226 #define DRF "lx"
227 #else
228 #define RF "x"
229 #define DRF "lx"
230 #endif
231 
232 /*
233  * Globals.
234  *
235  * We support a couple different ways of listening for debug exceptions
236  * from the hypervisor. Exceptions come via the VIRQ_DEBUGGER virq; and
237  * only one consumer may bind to that irq. This is a problem if we want
238  * to have multiple VMI programs each debugging one or more
239  * domains... we have to demultiplex the IRQ signal to the right VMI
240  * program. Unfortunately, it's tricky to figure out which domain the
241  * IRQ was for, because of Xen bugs in the handling of x86 debug
242  * registers. So, the demultiplexer must pass the notification to *all*
243  * clients and let them decide if the signal was for them.
244  *
245  * So... we support a dedicated mode, where only one VMI program can run
246  * at a time; and a "shared" mode, where a demultiplexer process is
247  * spawned (if it doesn't already exist), and the VMI program(s) connect
248  * to it to receive VIRQ notifications.
249  *
250  * The xc_handle variable is always valid. For dedicated mode,
251  * xce_handle and dbg_port are valid; for shared mode,
252  * xen_vm_vmp_client_fd is valid instead.
253  */
254 static int xc_refcnt = 0;
255 
256 #ifdef XENCTRL_HAS_XC_INTERFACE
257 xc_interface *xc_handle = NULL;
258 static xc_interface *xce_handle = NULL;
259 #define XC_IF_INVALID (NULL)
260 #else
261 int xc_handle = -1;
262 static int xce_handle = -1;
263 #define XC_IF_INVALID (-1)
264 #endif
265 int xce_handle_fd = -1;
266 
267 #if !defined(XC_EVTCHN_PORT_T)
268 #error "XC_EVTCHN_PORT_T undefined!"
269 #endif
270 static XC_EVTCHN_PORT_T dbg_port = -1;
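/*
 * A minimal sketch of dedicated-mode VIRQ_DEBUGGER setup (hypothetical,
 * not part of the original file); the real setup happens in
 * xen_vm_attach_internal. Assumes the newer libxenctrl API where
 * XENCTRL_HAS_XC_INTERFACE is defined and xc_evtchn is available.
 */
static int example_bind_virq_debugger(void) {
    xc_evtchn *xce;
    int port;

    /* Open a separate event-channel interface handle. */
    xce = xc_evtchn_open(NULL,0);
    if (!xce)
        return -1;

    /* Only one consumer system-wide may bind VIRQ_DEBUGGER. */
    port = xc_evtchn_bind_virq(xce,VIRQ_DEBUGGER);
    if (port < 0) {
        xc_evtchn_close(xce);
        return -1;
    }

    /* Poll this fd (e.g., via select) to learn of debug exceptions. */
    return xc_evtchn_fd(xce);
}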
271 
272 /*
273  * Set up the target interface for this library.
274  */
275 struct target_ops xen_vm_ops = {
276  .snprintf = xen_vm_snprintf,
277 
278  .init = xen_vm_init,
279  .fini = xen_vm_fini,
280  .attach = xen_vm_attach_internal,
281  .detach = xen_vm_detach,
282  .kill = xen_vm_kill,
283  .loadspaces = xen_vm_loadspaces,
284  .loadregions = xen_vm_loadregions,
285  .loaddebugfiles = xen_vm_loaddebugfiles,
286  .postloadinit = xen_vm_postloadinit,
287  .postopened = xen_vm_postopened,
288  .set_active_probing = xen_vm_set_active_probing,
289 
290  .handle_exception = xen_vm_handle_exception,
291  .handle_break = probepoint_bp_handler,
292  .handle_step = probepoint_ss_handler,
293  .handle_interrupted_step = NULL,
294 
295  .build_default_overlay_spec = xen_vm_build_default_overlay_spec,
296  .instantiate_overlay = xen_vm_instantiate_overlay,
297  .lookup_overlay_thread_by_id = xen_vm_lookup_overlay_thread_by_id,
298  .lookup_overlay_thread_by_name = xen_vm_lookup_overlay_thread_by_name,
299  .attach_overlay_thread = xen_vm_attach_overlay_thread,
300  .detach_overlay_thread = xen_vm_detach_overlay_thread,
301 
302  .status = xen_vm_status,
303  .pause = xen_vm_pause,
304  .resume = xen_vm_resume,
305  .monitor = xen_vm_monitor,
306  .poll = xen_vm_poll,
307  .read = xen_vm_read,
308  .write = xen_vm_write,
309  .addr_v2p = xen_vm_addr_v2p,
310  .read_phys = xen_vm_read_phys,
311  .write_phys = xen_vm_write_phys,
312 
313  .gettid = xen_vm_gettid,
314  .free_thread_state = xen_vm_free_thread_state,
315  .list_available_tids = xen_vm_list_available_tids,
316  .load_available_threads = xen_vm_load_available_threads,
317  .load_thread = xen_vm_load_thread,
318  .load_current_thread = xen_vm_load_current_thread,
319  .load_all_threads = xen_vm_load_all_threads,
320  .pause_thread = xen_vm_pause_thread,
321  .flush_thread = xen_vm_flush_thread,
322  .flush_current_thread = xen_vm_flush_current_thread,
323  .flush_all_threads = xen_vm_flush_all_threads,
324  .invalidate_thread = xen_vm_invalidate_thread,
325  .thread_snprintf = xen_vm_thread_snprintf,
326 
327  .attach_evloop = xen_vm_attach_evloop,
328  .detach_evloop = xen_vm_detach_evloop,
329 
330  .readreg = target_regcache_readreg,
331  .writereg = target_regcache_writereg,
332  .copy_registers = target_regcache_copy_registers,
333  .readreg_tidctxt = target_regcache_readreg_tidctxt,
334  .writereg_tidctxt = target_regcache_writereg_tidctxt,
335 
336  .get_unused_debug_reg = xen_vm_get_unused_debug_reg,
337  .set_hw_breakpoint = xen_vm_set_hw_breakpoint,
338  .set_hw_watchpoint = xen_vm_set_hw_watchpoint,
339  .unset_hw_breakpoint = xen_vm_unset_hw_breakpoint,
340  .unset_hw_watchpoint = xen_vm_unset_hw_watchpoint,
341  .disable_hw_breakpoints = xen_vm_disable_hw_breakpoints,
342  .enable_hw_breakpoints = xen_vm_enable_hw_breakpoints,
343  .disable_hw_breakpoint = xen_vm_disable_hw_breakpoint,
344  .enable_hw_breakpoint = xen_vm_enable_hw_breakpoint,
345  .notify_sw_breakpoint = xen_vm_notify_sw_breakpoint,
346  .singlestep = xen_vm_singlestep,
347  .singlestep_end = xen_vm_singlestep_end,
348  .instr_can_switch_context = xen_vm_instr_can_switch_context,
349  .get_tsc = xen_vm_get_tsc,
350  .get_time = xen_vm_get_time,
351  .get_counter = xen_vm_get_counter,
352  .enable_feature = xen_vm_enable_feature,
353  .disable_feature = xen_vm_disable_feature,
354 };
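/*
 * Illustrative only: clients do not call these ops directly; the generic
 * target layer dispatches through them. A hypothetical hand-rolled
 * equivalent of pause-then-monitor (ignoring the bookkeeping the real
 * target API wrappers perform, and assuming the ops struct is reachable
 * via a t->ops pointer) would be:
 *
 *   if (t->ops->pause(t,0) == 0)
 *       t->ops->monitor(t);
 */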
355 
356 #define XV_ARGP_USE_XENACCESS 0x550001
357 #define XV_ARGP_USE_LIBVMI 0x550002
358 #define XV_ARGP_CLEAR_MEM_CACHES 0x550003
359 #define XV_ARGP_MEMCACHE_MMAP_SIZE 0x550004
360 #define XV_ARGP_HIUE 0x550005
361 #define XV_ARGP_REPLAYDIR 0x550006
362 
363 struct argp_option xen_vm_argp_opts[] = {
364  /* These options set a flag. */
365  { "domain",'m',"DOMAIN",0,"The Xen domain ID or name.",-4 },
366  { "kernel-filename",'K',"FILE",0,
367  "Override xenstore kernel filepath for guest.",-4 },
368  { "no-clear-hw-debug-regs",'H',NULL,0,
369  "Don't clear hardware debug registers at target attach.",-4 },
370  { "clear-mem-caches-each-exception",XV_ARGP_CLEAR_MEM_CACHES,NULL,0,
371  "Clear mem caches on each debug exception.",-4 },
372 #ifdef ENABLE_LIBVMI
373  { "use-libvmi",XV_ARGP_USE_LIBVMI,NULL,0,
374  "Clear mem caches on each debug exception.",-4 },
375 #endif
376 #ifdef ENABLE_XENACCESS
377  { "use-xenaccess",XV_ARGP_USE_XENACCESS,NULL,0,
378  "Clear mem caches on each debug exception.",-4 },
379 #endif
380  { "memcache-mmap-size",XV_ARGP_MEMCACHE_MMAP_SIZE,"BYTES",0,
381  "Max size (bytes) of the mmap cache (default 128MB).",-4 },
382  { "no-hvm-setcontext",'V',NULL,0,
383  "Don't use HVM-specific libxc get/set context functions to access"
384  "virtual CPU info.",-4 },
385  { "configfile",'c',"FILE",0,"The Xen config file.",-4 },
386  { "replaydir",XV_ARGP_REPLAYDIR,"DIR",0,"The XenTT replay directory.",-4 },
387  { "no-use-multiplexer",'M',NULL,0,"Do not spawn/attach to the Xen multiplexer server",-4 },
388  { "dominfo-timeout",'T',"MICROSECONDS",0,"If libxc gets a \"NULL\" dominfo status, the number of microseconds we should keep retrying",-4 },
389  { "hypervisor-ignores-userspace-exceptions",XV_ARGP_HIUE,NULL,0,"If your Xen hypervisor is not a Utah-patched version, make sure to supply this flag!",-4 },
390  { 0,0,0,0,0,0 }
391 };
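/*
 * Example (hypothetical tool name and values) of how these options look on
 * a command line, for a frontend that embeds this backend's argp parser:
 *
 *   sudo vmi-tool -m myguest -K /boot/vmlinuz-2.6.18-xenU \
 *       --memcache-mmap-size 67108864 -T 500000 --no-use-multiplexer
 */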
392 
393 int xen_vm_spec_to_argv(struct target_spec *spec,int *argc,char ***argv) {
394  struct xen_vm_spec *xspec =
395  (struct xen_vm_spec *)spec->backend_spec;
396  char **av = NULL;
397  int ac = 0;
398  int j;
399 
400  if (!xspec) {
401  if (argv)
402  *argv = NULL;
403  if (argc)
404  *argc = 0;
405  return 0;
406  }
407 
408  if (xspec->domain)
409  ac += 2;
410  if (xspec->kernel_filename)
411  ac += 2;
412  if (xspec->no_hw_debug_reg_clear)
413  ac += 1;
414  if (xspec->no_hvm_setcontext)
415  ac += 1;
416  if (xspec->clear_mem_caches_each_exception)
417  ac += 1;
418 #ifdef ENABLE_LIBVMI
419  if (xspec->use_libvmi)
420  ac += 1;
421 #endif
422 #ifdef ENABLE_XENACCESS
423  if (xspec->use_xenaccess)
424  ac += 1;
425 #endif
426  if (xspec->memcache_mmap_size)
427  ac += 2;
428  if (xspec->config_file)
429  ac += 2;
430  if (xspec->replay_dir)
431  ac += 2;
432  if (xspec->no_use_multiplexer)
433  ac += 1;
434  if (xspec->dominfo_timeout > 0)
435  ac += 2;
436  if (xspec->hypervisor_ignores_userspace_exceptions)
437  ac += 1;
438 
439  av = calloc(ac + 1,sizeof(char *));
440  j = 0;
441  if (xspec->domain) {
442  av[j++] = strdup("-m");
443  av[j++] = strdup(xspec->domain);
444  }
445  if (xspec->kernel_filename) {
446  av[j++] = strdup("-K");
447  av[j++] = strdup(xspec->kernel_filename);
448  }
449  if (xspec->no_hw_debug_reg_clear) {
450  av[j++] = strdup("--no-clear-hw-debug-regs");
451  }
452  if (xspec->no_hvm_setcontext) {
453  av[j++] = strdup("--no-hvm-setcontext");
454  }
455  if (xspec->clear_mem_caches_each_exception) {
456  av[j++] = strdup("--clear-mem-caches-each-exception");
457  }
458 #ifdef ENABLE_LIBVMI
459  if (xspec->use_libvmi)
460  av[j++] = strdup("--use-libvmi");
461 #endif
462 #ifdef ENABLE_XENACCESS
463  if (xspec->use_xenaccess)
464  av[j++] = strdup("--use-xenaccess");
465 #endif
466  if (xspec->memcache_mmap_size) {
467  av[j++] = strdup("--memcache-mmap-size");
468  av[j] = malloc(32);
469  snprintf(av[j],32,"%lu",xspec->memcache_mmap_size);
470  j++;
471  }
472  if (xspec->config_file) {
473  av[j++] = strdup("-c");
474  av[j++] = strdup(xspec->config_file);
475  }
476  if (xspec->replay_dir) {
477  av[j++] = strdup("--replaydir");
478  av[j++] = strdup(xspec->replay_dir);
479  }
480  if (xspec->no_use_multiplexer) {
481  av[j++] = strdup("--no-use-multiplexer");
482  }
483  if (xspec->dominfo_timeout > 0) {
484  av[j++] = strdup("-T");
485  av[j] = malloc(16);
486  snprintf(av[j],16,"%d",xspec->dominfo_timeout);
487  j++;
488  }
490  av[j++] = strdup("--hypervisor-ignores-userspace-exceptions");
491  av[j++] = NULL;
492 
493  if (argv)
494  *argv = av;
495  if (argc)
496  *argc = ac;
497 
498  return 0;
499 }
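/*
 * A minimal usage sketch for xen_vm_spec_to_argv() (hypothetical caller;
 * assumes @spec->backend_spec was built by xen_vm_build_spec() and that
 * stdio.h comes in via common.h). The callee allocates argv and every
 * element, so the caller owns and frees them.
 */
static void example_print_backend_argv(struct target_spec *spec) {
    char **argv = NULL;
    int argc = 0,i;

    if (xen_vm_spec_to_argv(spec,&argc,&argv) || !argv)
        return;

    for (i = 0; i < argc; ++i) {
        printf("argv[%d] = %s\n",i,argv[i]);
        free(argv[i]);
    }
    free(argv);
}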
500 
501 error_t xen_vm_argp_parse_opt(int key,char *arg,struct argp_state *state) {
502  struct target_argp_parser_state *tstate = \
503  (struct target_argp_parser_state *)state->input;
504  struct target_spec *spec;
505  struct xen_vm_spec *xspec;
506  struct argp_option *opti;
507  int ourkey;
508 
509  if (key == ARGP_KEY_INIT)
510  return 0;
511  else if (!state->input)
512  return ARGP_ERR_UNKNOWN;
513 
514  if (tstate)
515  spec = tstate->spec;
516 
517  /*
518  * Check to see if this is really one of our keys. If it is, we
519  * need to see if some other backend has already started parsing
520  * args; if it has, we throw an error. Otherwise, we assume we are
521  * using this backend, and process the arg.
522  */
523  ourkey = 0;
524  for (opti = &xen_vm_argp_opts[0]; opti->key != 0; ++opti) {
525  if (key == opti->key) {
526  ourkey = 1;
527  break;
528  }
529  }
530 
531  if (ourkey) {
532  if (spec->target_type == TARGET_TYPE_NONE) {
533  spec->target_type = TARGET_TYPE_XEN;
534  xspec = xen_vm_build_spec();
535  spec->backend_spec = xspec;
536  }
537  else if (spec->target_type != TARGET_TYPE_XEN) {
538  verror("cannot mix arguments for Xen target (%c) with non-Xen"
539  " target!\n",key);
540  return EINVAL;
541  }
542 
543  /* Only "claim" these args if this is our key. */
546  xspec = calloc(1,sizeof(*xspec));
547  spec->backend_spec = xspec;
548  }
549  else if (spec->target_type != TARGET_TYPE_XEN) {
550  verror("cannot mix arguments for Xen target with non-Xen target!\n");
551  return EINVAL;
552  }
553  }
554 
555  if (spec)
556  xspec = (struct xen_vm_spec *)spec->backend_spec;
557  else
558  xspec = NULL;
559 
560  switch (key) {
561  case ARGP_KEY_ARG:
562  case ARGP_KEY_ARGS:
563  /* Only handle these if you need arguments. */
564  return ARGP_ERR_UNKNOWN;
565  case ARGP_KEY_INIT:
566  case ARGP_KEY_END:
567  case ARGP_KEY_NO_ARGS:
568  /* Nothing to do unless you malloc something in _INIT. */
569  return 0;
570  case ARGP_KEY_SUCCESS:
571  case ARGP_KEY_ERROR:
572  case ARGP_KEY_FINI:
573  /* Check spec for sanity if necessary. */
574  return 0;
575 
576  case 'm':
577  xspec->domain = strdup(arg);
578  break;
579  case 'K':
580  xspec->kernel_filename = strdup(arg);
581  break;
582  case 'H':
583  xspec->no_hw_debug_reg_clear = 1;
584  break;
585  case 'V':
586  xspec->no_hvm_setcontext = 1;
587  break;
588  case 'c':
589  xspec->config_file = strdup(arg);
590  break;
591  case XV_ARGP_CLEAR_MEM_CACHES:
592  xspec->clear_mem_caches_each_exception = 1;
593  break;
594 #ifdef ENABLE_LIBVMI
595  case XV_ARGP_USE_LIBVMI:
596  xspec->use_libvmi = 1;
597  break;
598 #endif
599 #ifdef ENABLE_XENACCESS
600  case XV_ARGP_USE_XENACCESS:
601  xspec->use_xenaccess = 1;
602  break;
603 #endif
604  case XV_ARGP_MEMCACHE_MMAP_SIZE:
605  xspec->memcache_mmap_size = atoi(arg);
606  break;
607  case XV_ARGP_REPLAYDIR:
608  xspec->replay_dir = strdup(arg);
609  break;
610  case 'M':
611  xspec->no_use_multiplexer = 1;
612  break;
613  case 'T':
614  xspec->dominfo_timeout = atoi(arg);
615  break;
616  case XV_ARGP_HIUE:
617  xspec->hypervisor_ignores_userspace_exceptions = 1;
618  break;
619  default:
620  return ARGP_ERR_UNKNOWN;
621  }
622 
623  return 0;
624 }
625 
626 struct argp xen_vm_argp = {
627  xen_vm_argp_opts,xen_vm_argp_parse_opt,NULL,NULL,NULL,NULL,NULL
628 };
629 char *xen_vm_argp_header = "Xen Backend Options";
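/*
 * Sketch of a standalone driver for the parser above (hypothetical; real
 * frontends go through the target API's argp glue). Passing @tstate as
 * argp_parse()'s input makes it visible as state->input inside
 * xen_vm_argp_parse_opt, which requires it to be non-NULL.
 */
static error_t example_parse_xen_args(int argc,char **argv,
                                      struct target_argp_parser_state *tstate) {
    return argp_parse(&xen_vm_argp,argc,argv,0,NULL,tstate);
}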
630 
635 static struct target *xen_vm_instantiate(struct target_spec *spec,
636  struct evloop *evloop) {
637  return xen_vm_attach(spec,evloop);
638 }
639 
640 struct xen_vm_spec *xen_vm_build_spec(void) {
641  struct xen_vm_spec *xspec;
642 
643  xspec = calloc(1,sizeof(*xspec));
644  /* default to 128MB. */
645  xspec->memcache_mmap_size = 128 * 1024 * 1024;
646 
647  return xspec;
648 }
649 
650 void xen_vm_free_spec(struct xen_vm_spec *xspec) {
651  if (xspec->domain)
652  free(xspec->domain);
653  if (xspec->config_file)
654  free(xspec->config_file);
655  if(xspec->replay_dir)
656  free(xspec->replay_dir);
657 
658  free(xspec);
659 }
660 
661 /*
662  * Attaches to domid. We basically check the xenstore to figure out
663  * what kernel the domain is running, and try to find vmlinux based on
664  * that. We also read how much mem the domain has; if it is
665  * PAE-enabled;
666  */
667 struct target *xen_vm_attach(struct target_spec *spec,
668  struct evloop *evloop) {
669  struct xen_vm_spec *xspec = (struct xen_vm_spec *)spec->backend_spec;
670  struct target *target = NULL;
671  struct xen_vm_state *xstate = NULL;
672  struct xs_handle *xsh = NULL;
673  xs_transaction_t xth = XBT_NULL;
674  char *buf = NULL;
675  char *tmp = NULL;
676  char **domains = NULL;
677  unsigned int size;
678  unsigned int i;
679  int have_id = 0;
680  char *domain;
681 
682  domain = xspec->domain;
683 
684  if (geteuid() != 0) {
685  verror("must be root!\n");
686  errno = EPERM;
687  return NULL;
688  }
689 
690  vdebug(5,LA_TARGET,LF_XV,"attaching to domain %s\n",domain);
691 
692  if (!(target = target_create("xen_vm",spec)))
693  return NULL;
694 
695  if (!(xstate = (struct xen_vm_state *)malloc(sizeof(*xstate)))) {
696  free(target);
697  return NULL;
698  }
699  memset(xstate,0,sizeof(*xstate));
700 
701  target->state = xstate;
702 
703  if (!(buf = malloc(PATH_MAX))) {
704  verror("could not allocate tmp path buffer: %s\n",strerror(errno));
705  goto errout;
706  }
707 
708  if (!(xsh = xs_domain_open())) {
709  verror("could not open xenstore!\n");
710  goto errout;
711  }
712 
713  xstate->evloop_fd = -1;
714 
715  /* First figure out whether we need to resolve the ID, or the name. */
716  errno = 0;
717  xstate->id = (domid_t)strtol(domain,&tmp,10);
718  if (errno == ERANGE) {
719  verror("bad domain id: %s\n",strerror(errno));
720  goto errout;
721  }
722  else if (errno == EINVAL || tmp == domain)
723  have_id = 0;
724  else {
725  vdebug(4,LA_TARGET,LF_XV,"found id %d (from %s)\n",xstate->id,domain);
726  have_id = 1;
727  }
728  tmp = NULL;
729 
730  /* We have to try to find the ID first. */
731  if (!have_id) {
732  domains = xs_directory(xsh,xth,"/local/domain",&size);
733  for (i = 0; i < size; ++i) {
734  /* read in name */
735  snprintf(buf,PATH_MAX,"/local/domain/%s/name",domains[i]);
736  tmp = xs_read(xsh,xth,buf,NULL);
737 
738  if (tmp && strcmp(domain,tmp) == 0) {
739  vdebug(9,LA_TARGET,LF_XV,"dom %s (from %s) matches\n",tmp,domain);
740  errno = 0;
741  xstate->id = (domid_t)strtol(domains[i],NULL,10);
742  if (errno) {
743  if (have_id) {
744  free(tmp);
745  tmp = NULL;
746  break;
747  }
748  else {
749  verror("matching domain name for %s; but bad"
750  " domain id %s: %s\n",
751  tmp,domains[i],strerror(errno));
752  free(tmp);
753  tmp = NULL;
754  goto errout;
755  }
756  }
757  else {
758  if (have_id)
759  free(xstate->name);
760  xstate->name = strdup(tmp);
761  have_id = 1;
762  vdebug(4,LA_TARGET,LF_XV,"dom %d (from %s) matches id\n",
763  xstate->id,domain);
764  }
765  }
766  else if (tmp) {
767  free(tmp);
768  tmp = NULL;
769  }
770  }
771 
772  free(domains);
773  domains = NULL;
774 
775  if (!have_id) {
776  verror("could not find domain id for %s!\n",domain);
777  errno = EINVAL;
778  goto errout;
779  }
780  }
781 
782  /* Once we have an ID, try that to find the name if we need. */
783  if (!xstate->name) {
784  snprintf(buf,PATH_MAX,"/local/domain/%d/name",xstate->id);
785  xstate->name = xs_read(xsh,xth,buf,NULL);
786  if (!xstate->name)
787  vwarn("could not read name for dom %d; may cause problems!\n",
788  xstate->id);
789  }
790 
791  /* Now try to find vmpath. */
792  snprintf(buf,PATH_MAX,"/local/domain/%d/vm",xstate->id);
793  xstate->vmpath = xs_read(xsh,xth,buf,NULL);
794  if (!xstate->vmpath)
795  vwarn("could not read vmpath for dom %d; may cause problems!\n",
796  xstate->id);
797  else {
798  snprintf(buf,PATH_MAX,"%s/image/ostype",xstate->vmpath);
799  xstate->ostype = xs_read(xsh,xth,buf,NULL);
800  if (!xstate->ostype) {
801  vwarn("could not read ostype for dom %d; may cause problems!\n",
802  xstate->id);
803  g_hash_table_insert(target->config,strdup("VM_TYPE"),
804  strdup("paravirt"));
805  }
806  else if (strcmp(xstate->ostype,"hvm") == 0) {
807  xstate->hvm = 1;
808  g_hash_table_insert(target->config,strdup("VM_TYPE"),
809  strdup("hvm"));
810  }
811  else {
812  g_hash_table_insert(target->config,strdup("OS_TYPE"),
813  strdup(xstate->ostype));
814  g_hash_table_insert(target->config,strdup("VM_TYPE"),
815  strdup("paravirt"));
816  }
817 
818  snprintf(buf,PATH_MAX,"%s/image/kernel",xstate->vmpath);
819  xstate->kernel_filename = xs_read(xsh,xth,buf,NULL);
820  if (!xstate->kernel_filename)
821  vwarn("could not read kernel for dom %d; may cause problems!\n",
822  xstate->id);
823  else {
824  g_hash_table_insert(target->config,strdup("OS_KERNEL_FILENAME"),
825  strdup(xstate->kernel_filename));
826  }
827  }
828 
829  if (xspec->kernel_filename) {
831  "using kernel filename %s (overrides %s from xenstore)\n",
832  xspec->kernel_filename,xstate->kernel_filename ? xstate->kernel_filename : "''");
833 
834  if (xstate->kernel_filename)
835  free(xstate->kernel_filename);
836 
837  xstate->kernel_filename = strdup(xspec->kernel_filename);
838 
839  g_hash_table_remove(target->config,"OS_KERNEL_FILENAME");
840  g_hash_table_insert(target->config,strdup("OS_KERNEL_FILENAME"),
841  strdup(xstate->kernel_filename));
842  }
843 
844  if (xsh) {
845  xs_daemon_close(xsh);
846  xsh = NULL;
847  }
848 
849  free(buf);
850  buf = NULL;
851 
852  /*
853  * Try to infer the personality.
854  */
855  if (!target->personality_ops
856  && xstate->kernel_filename
857  && (strstr(xstate->kernel_filename,"inux")
858  || strstr(xstate->kernel_filename,"inuz"))) {
859  if (target_personality_attach(target,"os_linux_generic",NULL) == 0) {
861  "autoinitialized the os_linux_generic personality!\n");
862  }
863  else {
864  verror("failed to autoinitialize the os_linux_generic personality!\n");
865  goto errout;
866  }
867  }
868  else {
869  vwarn("cannot initialize a personality!\n");
870  }
871 
872  target->live = 1;
873  target->mmapable = 0; /* XXX: change this once we get mmap API
874  worked out. */
875 
876  /*
877  * Now load up our {xa|vmi}_instance as much as we can now; we'll
878  * try to do more when we load the debuginfo file for the kernel.
879  */
880  xstate->memops = NULL;
881 #ifdef ENABLE_LIBVMI
882  if (!xstate->memops && xspec->use_libvmi)
883  xstate->memops = &xen_vm_mem_ops_libvmi;
884 #endif
885 #ifdef ENABLE_XENACCESS
886  if (!xstate->memops && xspec->use_xenaccess)
887  xstate->memops = &xen_vm_mem_ops_xenaccess;
888 #endif
889  if (!xstate->memops)
890  xstate->memops = &xen_vm_mem_ops_builtin;
891 
892  if (xstate->memops->init) {
893  if (xstate->memops->init(target)) {
894  verror("failed to init memops!\n");
895  goto errout;
896  }
897  }
898 
899  /* Our threads can have two contexts -- kernel and user spaces. */
900  target->max_thread_ctxt = THREAD_CTXT_USER;
901 
902  if (evloop && xstate->evloop_fd < 0) {
903  /*
904  * Just save it off; we can't use it until in xen_vm_attach_internal.
905  */
906  target->evloop = evloop;
907  }
908 
909  vdebug(5,LA_TARGET,LF_XV,"opened dom %d\n",xstate->id);
910 
911  return target;
912 
913  errout:
914  if (domains) {
915  for (i = 0; i < size; ++i) {
916  free(domains[i]);
917  }
918  free(domains);
919  }
920  if (xstate->vmpath) {
921  free(xstate->vmpath);
922  xstate->vmpath = NULL;
923  }
924  if (xstate->ostype) {
925  free(xstate->ostype);
926  xstate->ostype = NULL;
927  }
928  if (xstate->name) {
929  free(xstate->name);
930  xstate->name = NULL;
931  }
932  if (xsh)
933  xs_daemon_close(xsh);
934  if (xstate) {
935  free(xstate);
936  if (target)
937  target->state = NULL;
938  }
939  if (target)
940  free(target);
941 
942  return NULL;
943 }
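/*
 * A minimal sketch of attaching to a domain by name (hypothetical driver;
 * error handling elided, and a real frontend would build the spec through
 * the target API rather than calloc()ing it directly). Must run as root.
 */
static struct target *example_attach(char *domain) {
    struct target_spec *spec;
    struct xen_vm_spec *xspec;

    spec = calloc(1,sizeof(*spec));
    spec->target_type = TARGET_TYPE_XEN;

    xspec = xen_vm_build_spec();
    xspec->domain = strdup(domain);
    spec->backend_spec = xspec;

    /* NULL evloop: xen_vm_attach just stashes one if provided. */
    return xen_vm_attach(spec,NULL);
}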
944 
949 static int xen_vm_load_dominfo(struct target *target) {
950  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
951  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
952  long total,waited;
953  /*
954  * Wait for 10us repeatedly if dominfo doesn't return what we think
955  * it should. 10us is arbitrary, but a mid-granularity compromise.
956  */
957  long interval = 10;
958  struct timeval itv = { 0,0 };
959  int rc;
960 
961  if (!xstate->dominfo_valid) {
963  "load dominfo; current dominfo is invalid\n");
964  memset(&xstate->dominfo,0,sizeof(xstate->dominfo));
965  if (xc_domain_getinfo(xc_handle,xstate->id,1,
966  &xstate->dominfo) <= 0) {
967  verror("could not get dominfo for %d\n",xstate->id);
968  errno = EINVAL;
969  return -1;
970  }
971 
972  waited = 0;
973  total = (xspec->dominfo_timeout > 0) ? xspec->dominfo_timeout : 0;
974 
975  while (!xstate->dominfo.dying && !xstate->dominfo.crashed
976  && !xstate->dominfo.shutdown && !xstate->dominfo.paused
977  && !xstate->dominfo.blocked && !xstate->dominfo.running
978  && (total - waited) > 0) {
979  vwarnopt(5,LA_TARGET,LF_XV,"domain %d has no status!\n",xstate->id);
980 
981  itv.tv_sec = 0;
982  itv.tv_usec = (interval > (total - waited)) \
983  ? (total - waited) : interval;
984 
985  rc = select(0,NULL,NULL,NULL,&itv);
986  if (rc < 0) {
987  if (errno != EINTR) {
988  verror("select(dominfo retry): %s\n",strerror(errno));
989  return -1;
990  }
991  else {
992  /* Assume itv timer has expired -- even though it
993  * may not have, of course, since select() errored
994  * and we can't trust the timer value.
995  */
996  itv.tv_usec = 0;
997  }
998  }
999 
1000  waited += (interval - itv.tv_usec);
1001 
1003  "waited %d of %d total microseconds to retry dominfo...\n",
1004  waited,total);
1005 
1006  if (xc_domain_getinfo(xc_handle,xstate->id,1,
1007  &xstate->dominfo) <= 0) {
1008  verror("could not get dominfo for %d\n",xstate->id);
1009  errno = EINVAL;
1010  return -1;
1011  }
1012  }
1013 
1014  /*
1015  * Only do this once, and use libxc directly.
1016  */
1017  if (unlikely(!xstate->live_shinfo)) {
1018  xstate->live_shinfo =
1019  xc_map_foreign_range(xc_handle,xstate->id,__PAGE_SIZE,PROT_READ,
1020  xstate->dominfo.shared_info_frame);
1021  if (!xstate->live_shinfo) {
1022  verror("could not mmap shared_info frame 0x%lx!\n",
1023  xstate->dominfo.shared_info_frame);
1024  errno = EFAULT;
1025  return -1;
1026  }
1027  }
1028 
1029  /*
1030  * Have to grab vcpuinfo out of shared frame, argh! This can't
1031  * be the only way to access the tsc, but I can't find a better
1032  * libxc way to do it!
1033  *
1034  * XXX: Do we really have to do this every time the domain is
1035  * interrupted?
1036  */
1037  memcpy(&xstate->vcpuinfo,&xstate->live_shinfo->vcpu_info[0],
1038  sizeof(xstate->vcpuinfo));
1039 
1040  xstate->dominfo_valid = 1;
1041  }
1042  else {
1044  "did not need to load dominfo; current dominfo is valid\n");
1045  }
1046 
1047  return 0;
1048 }
1049 
1050 static struct target_thread *__xen_vm_load_cached_thread(struct target *target,
1051  tid_t tid) {
1052  struct target_thread *tthread;
1053 
1054  tthread = target_lookup_thread(target,tid);
1055  if (!tthread)
1056  return NULL;
1057 
1058  if (!OBJVALID(tthread))
1059  return xen_vm_load_thread(target,tid,0);
1060 
1061  return tthread;
1062 }
1063 
1064 static int __xen_vm_in_userspace(struct target *target,int cpl,REGVAL ipval) {
1065  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1066 
1067  /*
1068  * This is a real pain. We have to use kernel_start_addr because on
1069  * at least some Xen hypervisors, %cs is zeroed out, so we cannot
1070  * extract the CPL. From my reading of the x86 and amd64 manuals,
1071  * it should not be zeroed out -- the segment selector registers are
1072  * only used for privilege levels in long mode.
1073  */
1074  if (xstate->kernel_start_addr && xstate->kernel_start_addr < ADDRMAX) {
1075  if (ipval < xstate->kernel_start_addr)
1076  return 1;
1077  else
1078  return 0;
1079  }
1080  else {
1081  if (cpl == 3)
1082  return 1;
1083  else
1084  return 0;
1085  }
1086 }
1087 
1088 static int __xen_get_cpl_thread(struct target *target,
1089  struct target_thread *tthread) {
1090  REG csr = -1;
1091  REGVAL cs;
1092 
1093  if (target->arch->type == ARCH_X86)
1094  csr = REG_X86_CS;
1095  else if (target->arch->type == ARCH_X86_64)
1096  csr = REG_X86_64_CS;
1097 
1098  /* Load the CPL. */
1099  errno = 0;
1100  cs = 0x3 & target_read_reg(target,tthread->tid,csr);
1101  if (errno) {
1102  verror("could not read CS register to find CPL!\n");
1103  return -1;
1104  }
1105 
1106  return (int)cs;
1107 }
1108 
1109 static int __xen_get_cpl(struct target *target,tid_t tid) {
1110  struct target_thread *tthread;
1111 
1112  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
1113  if (!errno)
1114  errno = EINVAL;
1115  verror("could not load cached thread %"PRIiTID"\n",tid);
1116  return 0;
1117  }
1118 
1119  return __xen_get_cpl_thread(target,tthread);
1120 }
1121 
1122 static struct target_thread *xen_vm_load_thread(struct target *target,
1123  tid_t tid,int force) {
1124  struct target_thread *tthread = NULL;
1125 
1126  /*
1127  * If we are asking for the global thread (TID_GLOBAL), do that
1128  * right away.
1129  */
1130  if (tid == TID_GLOBAL) {
1131  /*
1132  * We have to *not* call _load_current_thread if the global
1133  * thread is valid. This is part of a hack (chicken and egg)
1134  * problem where to "fully" load the global thread, we must have
1135  * its registers. Our register read functions try to load the
1136  * current thread if it's not loaded. So... see
1137  * _load_current_thread for more...
1138  */
1139  if (OBJVALID(target->global_thread))
1140  return target->global_thread;
1141  else {
1142  xen_vm_load_current_thread(target,force);
1143  return target->global_thread;
1144  }
1145  }
1146 
1147  /*
1148  * If we haven't loaded current_thread yet, we really should load it
1149  * because otherwise we don't know if current_thread->tid == @tid.
1150  * If it does, we don't want to do the below stuff, which only
1151  * applies to non-running threads.
1152  */
1153  if (!xen_vm_load_current_thread(target,force)) {
1154  vwarn("could not load current thread to compare with"
1155  " tid %"PRIiTID"!\n",tid);
1156  }
1157 
1158  /*
1159  * If the thread tid we are asking for is the current thread and is
1160  * valid, or if the thread is in our cache and is valid.
1161  */
1162  else if (target->current_thread
1163  && OBJVALID(target->current_thread)
1164  && target->current_thread->tid == tid) {
1165  return xen_vm_load_current_thread(target,force);
1166  }
1167  /*
1168  * Otherwise, try to lookup thread @tid.
1169  */
1170  else if ((tthread = target_lookup_thread(target,tid))) {
1171  if (OBJVALID(tthread) && !force) {
1172  vdebug(4,LA_TARGET,LF_XV,"did not need to load thread; copy is valid\n");
1173  return tthread;
1174  }
1175  }
1176 
1177  /*
1178  * Note:
1179  *
1180  * At this point, we can be sure that we are loading a thread that
1181  * is not running; thus, its CPU state is on the kernel stack.
1182  *
1183  * This means we must ask the personality to do it, because only the
1184  * personality can interpret the kernel stack.
1185  */
1186  SAFE_PERSONALITY_OP_WARN_NORET(load_thread,tthread,NULL,target,tid,force);
1187 
1188  return tthread;
1189 }
1190 
1191 #ifdef __x86_64__
1192 /*
1193  * NB: these functions do *NOT* zero out the destination's contents;
1194  * they just copy what they can into the destination.
1195  */
1196 static int __xen_vm_hvm_cpu_to_vcpu_context(HVM_SAVE_TYPE(CPU) *hvm,
1197  vcpu_guest_context_t *svm) {
1198  assert(sizeof(svm->fpu_ctxt.x) == sizeof(hvm->fpu_regs));
1199 
1200  memcpy(svm->fpu_ctxt.x,hvm->fpu_regs,sizeof(svm->fpu_ctxt.x));
1201 
1202  svm->user_regs.rax = hvm->rax;
1203  svm->user_regs.rbx = hvm->rbx;
1204  svm->user_regs.rcx = hvm->rcx;
1205  svm->user_regs.rdx = hvm->rdx;
1206  svm->user_regs.rbp = hvm->rbp;
1207  svm->user_regs.rsi = hvm->rsi;
1208  svm->user_regs.rdi = hvm->rdi;
1209  svm->user_regs.rsp = hvm->rsp;
1210  svm->user_regs.r8 = hvm->r8;
1211  svm->user_regs.r9 = hvm->r9;
1212  svm->user_regs.r10 = hvm->r10;
1213  svm->user_regs.r11 = hvm->r11;
1214  svm->user_regs.r12 = hvm->r12;
1215  svm->user_regs.r13 = hvm->r13;
1216  svm->user_regs.r14 = hvm->r14;
1217  svm->user_regs.r15 = hvm->r15;
1218 
1219  svm->user_regs.rip = hvm->rip;
1220  svm->user_regs.rflags = hvm->rflags;
1221 
1222  svm->user_regs.error_code = hvm->error_code;
1223 
1224  /* XXX: cs, ds, es, fs, gs */
1225 
1226  if (hvm->gs_base)
1227  svm->gs_base_kernel = hvm->gs_base;
1228  else
1229  svm->gs_base_kernel = hvm->shadow_gs;
1230 
1231  /* XXX: ldt/gdt stuff */
1232 
1233  /* XXX: kernel_ss, kernel_sp */
1234 
1235  svm->ctrlreg[0] = hvm->cr0;
1236  svm->ctrlreg[2] = hvm->cr2;
1237  svm->ctrlreg[3] = hvm->cr3;
1238  svm->ctrlreg[4] = hvm->cr4;
1239 
1240  svm->debugreg[0] = hvm->dr0;
1241  svm->debugreg[1] = hvm->dr1;
1242  svm->debugreg[2] = hvm->dr2;
1243  svm->debugreg[3] = hvm->dr3;
1244  svm->debugreg[6] = hvm->dr6;
1245  svm->debugreg[7] = hvm->dr7;
1246 
1247  /* XXX: fs_base, gs_base_kernel, gs_base_user */
1248 
1249  return 0;
1250 }
1251 
1252 static int __xen_vm_vcpu_to_hvm_cpu_context(vcpu_guest_context_t *svm,
1253  HVM_SAVE_TYPE(CPU) *hvm) {
1254  assert(sizeof(svm->fpu_ctxt.x) == sizeof(hvm->fpu_regs));
1255 
1256  memcpy(hvm->fpu_regs,svm->fpu_ctxt.x,sizeof(hvm->fpu_regs));
1257 
1258  if (hvm->rax != svm->user_regs.rax) {
1259  vdebug(9,LA_TARGET,LF_XV,"setting rax = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1260  svm->user_regs.rax,hvm->rax);
1261  hvm->rax = svm->user_regs.rax;
1262  }
1263  if (hvm->rbx != svm->user_regs.rbx) {
1264  vdebug(9,LA_TARGET,LF_XV,"setting rbx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1265  svm->user_regs.rbx,hvm->rbx);
1266  hvm->rbx = svm->user_regs.rbx;
1267  }
1268  if (hvm->rcx != svm->user_regs.rcx) {
1269  vdebug(9,LA_TARGET,LF_XV,"setting rcx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1270  svm->user_regs.rcx,hvm->rcx);
1271  hvm->rcx = svm->user_regs.rcx;
1272  }
1273  if (hvm->rdx != svm->user_regs.rdx) {
1274  vdebug(9,LA_TARGET,LF_XV,"setting rdx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1275  svm->user_regs.rdx,hvm->rdx);
1276  hvm->rdx = svm->user_regs.rdx;
1277  }
1278  if (hvm->rbp != svm->user_regs.rbp) {
1279  vdebug(9,LA_TARGET,LF_XV,"setting rbp = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1280  svm->user_regs.rbp,hvm->rbp);
1281  hvm->rbp = svm->user_regs.rbp;
1282  }
1283  if (hvm->rsi != svm->user_regs.rsi) {
1284  vdebug(9,LA_TARGET,LF_XV,"setting rsi = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1285  svm->user_regs.rsi,hvm->rsi);
1286  hvm->rsi = svm->user_regs.rsi;
1287  }
1288  if (hvm->rdi != svm->user_regs.rdi) {
1289  vdebug(9,LA_TARGET,LF_XV,"setting rdi = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1290  svm->user_regs.rdi,hvm->rdi);
1291  hvm->rdi = svm->user_regs.rdi;
1292  }
1293  if (hvm->rsp != svm->user_regs.rsp) {
1294  vdebug(9,LA_TARGET,LF_XV,"setting rsp = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1295  svm->user_regs.rsp,hvm->rsp);
1296  hvm->rsp = svm->user_regs.rsp;
1297  }
1298  if (hvm->r8 != svm->user_regs.r8) {
1299  vdebug(9,LA_TARGET,LF_XV,"setting r8 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1300  svm->user_regs.r8,hvm->r8);
1301  hvm->r8 = svm->user_regs.r8;
1302  }
1303  if (hvm->r9 != svm->user_regs.r9) {
1304  vdebug(9,LA_TARGET,LF_XV,"setting r9 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1305  svm->user_regs.r9,hvm->r9);
1306  hvm->r9 = svm->user_regs.r9;
1307  }
1308  if (hvm->r10 != svm->user_regs.r10) {
1309  vdebug(9,LA_TARGET,LF_XV,"setting r10 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1310  svm->user_regs.r10,hvm->r10);
1311  hvm->r10 = svm->user_regs.r10;
1312  }
1313  if (hvm->r11 != svm->user_regs.r11) {
1314  vdebug(9,LA_TARGET,LF_XV,"setting r11 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1315  svm->user_regs.r11,hvm->r11);
1316  hvm->r11 = svm->user_regs.r11;
1317  }
1318  if (hvm->r12 != svm->user_regs.r12) {
1319  vdebug(9,LA_TARGET,LF_XV,"setting r12 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1320  svm->user_regs.r12,hvm->r12);
1321  hvm->r12 = svm->user_regs.r12;
1322  }
1323  if (hvm->r13 != svm->user_regs.r13) {
1324  vdebug(9,LA_TARGET,LF_XV,"setting r13 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1325  svm->user_regs.r13,hvm->r13);
1326  hvm->r13 = svm->user_regs.r13;
1327  }
1328  if (hvm->r14 != svm->user_regs.r14) {
1329  vdebug(9,LA_TARGET,LF_XV,"setting r14 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1330  svm->user_regs.r14,hvm->r14);
1331  hvm->r14 = svm->user_regs.r14;
1332  }
1333  if (hvm->r15 != svm->user_regs.r15) {
1334  vdebug(9,LA_TARGET,LF_XV,"setting r15 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1335  svm->user_regs.r15,hvm->r15);
1336  hvm->r15 = svm->user_regs.r15;
1337  }
1338 
1339  if (hvm->rip != svm->user_regs.rip) {
1340  vdebug(9,LA_TARGET,LF_XV,"setting rip = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1341  svm->user_regs.rip,hvm->rip);
1342  hvm->rip = svm->user_regs.rip;
1343  }
1344  if (hvm->rflags != svm->user_regs.rflags) {
1345  vdebug(9,LA_TARGET,LF_XV,"setting rflags = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1346  svm->user_regs.rflags,hvm->rflags);
1347  hvm->rflags = svm->user_regs.rflags;
1348  }
1349 
1350  if (hvm->error_code != svm->user_regs.error_code) {
1351  vdebug(9,LA_TARGET,LF_XV,"setting cr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1352  svm->user_regs.error_code,hvm->error_code);
1353  hvm->error_code = svm->user_regs.error_code;
1354  }
1355 
1356  /* XXX: cs, ds, es, fs, gs */
1357 
1358  /* XXX: ldt/gdt stuff */
1359 
1360  /* XXX: kernel_ss, kernel_sp */
1361 
1362  if (hvm->cr0 != svm->ctrlreg[0]) {
1363  vdebug(9,LA_TARGET,LF_XV,"setting cr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1364  svm->ctrlreg[0],hvm->cr0);
1365  hvm->cr0 = svm->ctrlreg[0];
1366  }
1367  if (hvm->cr2 != svm->ctrlreg[2]) {
1368  vdebug(9,LA_TARGET,LF_XV,"setting cr2 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1369  svm->ctrlreg[2],hvm->cr2);
1370  hvm->cr2 = svm->ctrlreg[2];
1371  }
1372  if (hvm->cr3 != svm->ctrlreg[3]) {
1373  vdebug(9,LA_TARGET,LF_XV,"setting cr3 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1374  svm->ctrlreg[3],hvm->cr3);
1375  hvm->cr3 = svm->ctrlreg[3];
1376  }
1377  if (hvm->cr4 != svm->ctrlreg[4]) {
1378  vdebug(9,LA_TARGET,LF_XV,"setting cr4 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1379  svm->ctrlreg[4],hvm->cr4);
1380  hvm->cr4 = svm->ctrlreg[4];
1381  }
1382 
1383  if (hvm->dr0 != svm->debugreg[0]) {
1384  vdebug(9,LA_TARGET,LF_XV,"setting dr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1385  svm->debugreg[0],hvm->dr0);
1386  hvm->dr0 = svm->debugreg[0];
1387  }
1388  if (hvm->dr1 != svm->debugreg[1]) {
1389  vdebug(9,LA_TARGET,LF_XV,"setting dr1 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1390  svm->debugreg[1],hvm->dr1);
1391  hvm->dr1 = svm->debugreg[1];
1392  }
1393  if (hvm->dr2 != svm->debugreg[2]) {
1394  vdebug(9,LA_TARGET,LF_XV,"setting dr2 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1395  svm->debugreg[2],hvm->dr2);
1396  hvm->dr2 = svm->debugreg[2];
1397  }
1398  if (hvm->dr3 != svm->debugreg[3]) {
1399  vdebug(9,LA_TARGET,LF_XV,"setting dr3 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1400  svm->debugreg[3],hvm->dr3);
1401  hvm->dr3 = svm->debugreg[3];
1402  }
1403  if (hvm->dr6 != svm->debugreg[6]) {
1404  vdebug(9,LA_TARGET,LF_XV,"setting dr6 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1405  svm->debugreg[6],hvm->dr6);
1406  hvm->dr6 = svm->debugreg[6];
1407  }
1408  if (hvm->dr7 != svm->debugreg[7]) {
1409  vdebug(9,LA_TARGET,LF_XV,"setting dr7 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1410  svm->debugreg[7],hvm->dr7);
1411  hvm->dr7 = svm->debugreg[7];
1412  }
1413 
1414  /* XXX: fs_base, gs_base_kernel, gs_base_user */
1415 
1416  return 0;
1417 }
1418 #endif
1419 
1420 /*
1421  * Simple wrapper around xc_vcpu_getcontext and the HVM stuff.
1422  *
1423  * NB: it appears that the only reason to use the HVM-specific stuff
1424  * (for CPU info) is to get correct segment register info, the VMCS/VMCB
1425  * stuff, LDT stuff; pretty much everything else is already in
1426  * vcpu_guest_context for the VCPU in question (see
1427  * xen/xen/arch/x86/hvm/hvm.c:hvm_save_cpu_ctxt()).
1428  *
1429  * If the domain is HVM, it populates a vcpu_guest_context as best as
1430  * possible from HVM info. It keeps the HVM data around for a later
1431  * setcontext operation.
1432  *
1433  * XXX: notice that we only load the highest-number VCPU. Initially we
1434  * focused on single-core VMs; that assumption is built into the code.
1435  * We can relax it sometime; but that's the reason for the code being
1436  * like it is.
1437  */
1438 static int __xen_vm_cpu_getcontext(struct target *target,
1439  vcpu_guest_context_t *context) {
1440  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1441  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1442 #ifdef __x86_64__
1443  uint32_t size = 0;
1444  uint32_t offset = 0;
1445  struct hvm_save_descriptor *sdesc = NULL;
1446 #endif
1447 #ifdef XC_HAVE_CONTEXT_ANY
1448  vcpu_guest_context_any_t context_any;
1449 #endif
1450  int ret;
1451 
1452  if (!xstate->hvm || xspec->no_hvm_setcontext) {
1453 #ifdef XC_HAVE_CONTEXT_ANY
1454  ret = xc_vcpu_getcontext(xc_handle,xstate->id,
1455  xstate->dominfo.max_vcpu_id,&context_any);
1456 #else
1457  ret = xc_vcpu_getcontext(xc_handle,xstate->id,
1458  xstate->dominfo.max_vcpu_id,context);
1459 #endif
1460  if (ret < 0) {
1461  verror("could not get vcpu context for %d\n",xstate->id);
1462  return -1;
1463  }
1464 #ifdef XC_HAVE_CONTEXT_ANY
1465  else
1466  memcpy(context,&context_any.c,sizeof(*context));
1467 #endif
1468  }
1469  else {
1470 #ifdef __x86_64__
1471  if ((size = xc_domain_hvm_getcontext(xc_handle,xstate->id,0,0)) <= 0) {
1472  verror("Could not get HVM context buf size!\n");
1473  return -1;
1474  }
1475 
1476  /* Handle increasing size; this should not happen. */
1477  if (unlikely(!xstate->hvm_context_buf)) {
1478  xstate->hvm_context_bufsiz = size;
1479  xstate->hvm_context_buf = malloc(size);
1480  }
1481  else if (size >= xstate->hvm_context_bufsiz) {
1482  free(xstate->hvm_context_buf);
1483  xstate->hvm_context_bufsiz = size;
1484  xstate->hvm_context_buf = malloc(size);
1485  }
1486 
1487  xstate->hvm_cpu = NULL;
1488 
1489  if (xc_domain_hvm_getcontext(xc_handle,xstate->id,xstate->hvm_context_buf,
1490  xstate->hvm_context_bufsiz) < 0) {
1491  verror("Could not load HVM context buf!\n");
1492  return -1;
1493  }
1494 
1495  offset = 0;
1496  while (offset < size) {
1497  sdesc = (struct hvm_save_descriptor *) \
1498  (xstate->hvm_context_buf + offset);
1499 
1500  offset += sizeof(*sdesc);
1501 
1502  if (sdesc->typecode == HVM_SAVE_CODE(CPU)
1503  && sdesc->instance == xstate->dominfo.max_vcpu_id) {
1504  xstate->hvm_cpu = (HVM_SAVE_TYPE(CPU) *) \
1505  (xstate->hvm_context_buf + offset);
1506  break;
1507  }
1508 
1509  offset += sdesc->length;
1510  }
1511 
1512  if (!xstate->hvm_cpu) {
1513  verror("Could not find HVM context for VCPU %d!\n",
1514  xstate->dominfo.max_vcpu_id);
1515  return -1;
1516  }
1517 
1518  if (__xen_vm_hvm_cpu_to_vcpu_context(xstate->hvm_cpu,context)) {
1519  verror("Could not translate HVM vcpu info to software vcpu info!\n");
1520  return -1;
1521  }
1522 #else
1523  /* Impossible. */
1524  verror("HVM unsupported on 32-bit platform!\n");
1525  errno = EINVAL;
1526  return -1;
1527 #endif
1528  }
1529 
1530  return 0;
1531 }
1532 
1533 static int __xen_vm_cpu_setcontext(struct target *target,
1534  vcpu_guest_context_t *context) {
1535  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1536  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1537 #ifdef XC_HAVE_CONTEXT_ANY
1538  vcpu_guest_context_any_t context_any;
1539 #endif
1540  int ret;
1541 
1542  if (!target->writeable) {
1543  verror("target %s not writeable!\n",target->name);
1544  errno = EROFS;
1545  return -1;
1546  }
1547 
1548  if (!xstate->hvm || xspec->no_hvm_setcontext) {
1549 #ifdef XC_HAVE_CONTEXT_ANY
1550  memcpy(&context_any.c,context,sizeof(*context));
1551  ret = xc_vcpu_setcontext(xc_handle,xstate->id,
1552  xstate->dominfo.max_vcpu_id,&context_any);
1553 #else
1554  ret = xc_vcpu_setcontext(xc_handle,xstate->id,
1555  xstate->dominfo.max_vcpu_id,context);
1556 #endif
1557  if (ret < 0) {
1558  verror("could not set vcpu context for dom %d\n",xstate->id);
1559  errno = EINVAL;
1560  return -1;
1561  }
1562  }
1563  else {
1564 #ifdef __x86_64__
1565  if (__xen_vm_vcpu_to_hvm_cpu_context(context,xstate->hvm_cpu)) {
1566  verror("Could not translate software vcpu info to HVM vcpu info!\n");
1567  return -1;
1568  }
1569 
1570  if (xc_domain_hvm_setcontext(xc_handle,xstate->id,
1571  xstate->hvm_context_buf,
1572  xstate->hvm_context_bufsiz)) {
1573  verror("Could not store HVM context buf!\n");
1574  return -1;
1575  }
1576 #else
1577  /* Impossible. */
1578  verror("HVM unsupported on 32-bit platform!\n");
1579  errno = EINVAL;
1580  return -1;
1581 #endif
1582  }
1583 
1584  return 0;
1585 }
1586 
1587 static struct target_thread *__xen_vm_load_current_thread(struct target *target,
1588  int force,
1589  int globalonly) {
1590  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1591  struct target_thread *tthread = NULL;
1592  struct xen_vm_thread_state *tstate = NULL;
1593  struct xen_vm_thread_state *gtstate;
1594  REGVAL ipval;
1595  uint64_t pgd = 0;
1596  int cpl;
1597 
1598  /*
1599  * If the global thread has been loaded, and that's all the caller
1600  * wants, and they don't want to force a reload, give them that.
1601  */
1602  if (globalonly && !force
1603  && target->global_thread && OBJVALID(target->global_thread))
1604  return target->global_thread;
1605  /*
1606  * Otherwise, if the current thread is valid, and we're not forcing
1607  * a reload, give them the current thread.
1608  */
1609  else if (!globalonly && !force
1610  && target->current_thread && OBJVALID(target->current_thread))
1611  return target->current_thread;
1612 
1613  if (target_status(target) != TSTATUS_PAUSED) {
1614  verror("target not paused; cannot load current task!\n");
1615  errno = EBUSY;
1616  return NULL;
1617  }
1618 
1619  /*
1620  * The first thing to do is load the machine state into the global
1621  * thread, and set it as valid -- EVEN THOUGH we have not loaded
1622  * thread_info for it! We must do this so that a whole bunch of
1623  * register reads can work via the API.
1624  */
1625  if (xen_vm_load_dominfo(target)) {
1626  verror("could not load dominfo!\n");
1627  errno = EFAULT;
1628  return NULL;
1629  }
1630 
1631  gtstate = (struct xen_vm_thread_state *)target->global_thread->state;
1632 
1633  /*
1634  * Only need to call xc if we haven't loaded this thread.
1635  */
1636  if (!OBJVALID(target->global_thread)) {
1637  if (__xen_vm_cpu_getcontext(target,&gtstate->context) < 0) {
1638  verror("could not get vcpu context for %d\n",xstate->id);
1639  goto errout;
1640  }
1641  }
1642 
1643  /*
1644  * Load EIP for info, and CPL for user-mode check.
1645  *
1646  * NB: note that these two calls do *not* go through the target
1647  * API. They cannot, because the global thread has not been loaded
1648  * yet. And we can't finish loading the global thread yet, even
1649  * though we have the machine state, because we don't know which
1650  * thread context's regcache to put the machine state into (kernel
1651  * or userspace).
1652  */
1653  errno = 0;
1654 #ifdef __x86_64__
1655  ipval = gtstate->context.user_regs.rip;
1656 #else
1657  ipval = gtstate->context.user_regs.eip;
1658 #endif
1659 
1660  cpl = 0x3 & gtstate->context.user_regs.cs;
1661 
1662  /* Keep loading the global thread... */
1663  if (!OBJVALID(target->global_thread)) {
1664  if (__xen_vm_in_userspace(target,cpl,ipval))
1665  target->global_thread->tidctxt = THREAD_CTXT_USER;
1666  else
1667  target->global_thread->tidctxt = THREAD_CTXT_KERNEL;
1668 
1669  /*
1670  * Push the registers into the regcache!
1671  */
1672  __xen_vm_vcpu_to_thread_regcache(target,&gtstate->context,
1673  target->global_thread,
1674  target->global_thread->tidctxt);
1675 
1676  /*
1677  * Very important. If thread is in userspace, we need to get
1678  * Xen's special kernel_sp register and set it as SP for the
1679  * kernel context so that personalities can load kernel threads
1680  * on i386 because they need kernel_sp to find the stack. On
1681  * x86_64 this is not necessary.
1682  */
1683  if (target->global_thread->tidctxt == THREAD_CTXT_USER) {
1684  target_regcache_init_reg_tidctxt(target,target->global_thread,
1685  THREAD_CTXT_KERNEL,target->spregno,
1686  gtstate->context.kernel_sp);
1687  }
1688 
1689  /*
1690  * NB: we must set the thread as valid now, because the next few
1691  * function calls are going to try to use the target API to read
1692  * registers from the global thread. So even through we're
1693  * technically still loading it, mark it as valid now... it'll
1694  * be fully valid shortly!
1695  */
1696  OBJSVALID(target->global_thread);
1698  }
1699 
1700  /*
1701  * Load CR3 for debug purposes.
1702  */
1703  __xen_vm_pgd(target,TID_GLOBAL,&pgd);
1704 
1706  "loading current thread (ip = 0x%"PRIxADDR",pgd = 0x%"PRIxADDR","
1707  "cpl = %d,tidctxt = %d)\n",ipval,pgd,cpl,
1708  target->global_thread->tidctxt);
1709 
1710  /*
1711  * If only loading the global thread, stop here.
1712  */
1713  if (globalonly)
1714  return target->global_thread;
1715 
1716  /*
1717  * Ask the personality to detect our current thread.
1718  */
1719  SAFE_PERSONALITY_OP(load_current_thread,tthread,NULL,target,force);
1720 
1721  /*
1722  * Set the current thread (might be a real thread, or the global
1723  * thread). If the personality detects a current thread, use it;
1724  * otherwise we have to just use the global thread!
1725  */
1726  if (tthread) {
1727  target->current_thread = tthread;
1728 
1729  /*
1730  * We want to set the current thread's context to whatever the
1731  * global thread was detected to be in. Enforce our will, no
1732  * matter what the personality does!
1733  */
1734  if (tthread->tidctxt != target->global_thread->tidctxt) {
1735  vwarn("personality set current thread context to %d; global thread"
1736  " context is %d; forcing current to global!\n",
1737  tthread->tidctxt,target->global_thread->tidctxt);
1738  tthread->tidctxt = target->global_thread->tidctxt;
1739  }
1740 
1741  /*
1742  * Now, copy in the machine state. Be careful -- if we have not
1743  * allocated tthread->state yet, allocate it now!
1744  */
1745  tstate = (struct xen_vm_thread_state *)tthread->state;
1746  if (!tstate)
1747  tthread->state = tstate = \
1748  (struct xen_vm_thread_state *)calloc(1,sizeof(*tstate));
1749 
1750  memcpy(&tstate->context,&gtstate->context,sizeof(gtstate->context));
1751 
1752  /* Also update the regcache for the current thread. */
1753  target_regcache_copy_all(target->global_thread,
1754  target->global_thread->tidctxt,
1755  tthread,tthread->tidctxt);
1756  }
1757  else
1758  target->current_thread = target->global_thread;
1759 
1761 
1762  vdebug(4,LA_TARGET,LF_XV,
1763  "debug registers (vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
1764  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
1765  gtstate->context.debugreg[0],gtstate->context.debugreg[1],
1766  gtstate->context.debugreg[2],gtstate->context.debugreg[3],
1767  gtstate->context.debugreg[6],gtstate->context.debugreg[7]);
1768 
1769  /* Mark its state as valid in our cache. */
1770  OBJSVALID(tthread);
1771 
1772  return tthread;
1773 
1774  errout:
1775  /* XXX: should we really set this here? */
1776  target->current_thread = target->global_thread;
1777 
1778  vwarn("error loading current thread; trying to use default thread\n");
1779  errno = 0;
1780 
1781  return target->global_thread;
1782 }
1783 
1784 static struct target_thread *xen_vm_load_current_thread(struct target *target,
1785  int force) {
1786  return __xen_vm_load_current_thread(target,force,0);
1787 }
1788 
1793 /*
1794  * If the target is not paused, the result of this function is
1795  * undefined.
1796  *
1797  * Otherwise, first we get the CPL out of the lower two bits of the CS
1798  * register. Then we grab the current task and its pid.
1799  */
1800 tid_t xen_vm_gettid(struct target *target) {
1801  struct target_thread *tthread;
1802 
1803  if (target->current_thread && OBJVALID(target->current_thread))
1804  return target->current_thread->tid;
1805 
1806  tthread = xen_vm_load_current_thread(target,0);
1807  if (!tthread) {
1808  verror("could not load current thread to get TID!\n");
1809  return 0;
1810  }
1811 
1812  return tthread->tid;
1813 }
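 /*
  * For example (illustrative only): with a typical flat x86 GDT, the
  * low two bits of CS hold the current privilege level, so
  *
  *   int cpl = cs & 0x3;   // 0 = ring 0 (kernel), 3 = ring 3 (user)
  *
  * which is exactly the "0x3 & ...cs" computation the loader above
  * performs before deciding kernel vs. userspace context.
  */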
1814 
1815 void xen_vm_free_thread_state(struct target *target,void *state) {
1816  free(state);
1817 }
1818 
1819 static int xen_vm_snprintf(struct target *target,char *buf,int bufsiz) {
1820  struct xen_vm_spec *xspec = \
1821  (struct xen_vm_spec *)target->spec->backend_spec;
1822 
1823  return snprintf(buf,bufsiz,"domain(%s)",xspec->domain);
1824 }
1825 
1826 static int xen_vm_init(struct target *target) {
1827  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1828  struct xen_vm_thread_state *tstate;
1829  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1830 
1831  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
1832 
1833  if (target->spec->bpmode == THREAD_BPMODE_STRICT) {
1834  vwarn("auto-enabling SEMI_STRICT bpmode on Xen target.\n");
1835  target->spec->bpmode = THREAD_BPMODE_SEMI_STRICT;
1836  }
1837 
1838  if (xspec && xspec->hypervisor_ignores_userspace_exceptions)
1839  g_hash_table_insert(target->config,
1840  strdup("OS_EMULATE_USERSPACE_EXCEPTIONS"),
1841  strdup("1"));
1842 
1843  /*
1844  * We can use the RF flag to temporarily disable the hw breakpoint
1845  * if we don't need to single step the instruction at the breakpoint
1846  * (i.e., because there are no post handlers nor actions). This saves
1847  * us from disabling the hw breakpoint in this situation.
1848  */
1849  target->nodisablehwbponss = 1;
1850  target->threadctl = 0;
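 /*
  * Sketch of the RF technique described above (assumes an x86
  * EFLAGS-style register image; not code from this backend):
  *
  *   regs->eflags |= (1 << 16);   // EFLAGS.RF: the CPU suppresses
  *                                // instruction-breakpoint faults for
  *                                // the next instruction, then clears
  *                                // the flag itself.
  */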
1851 
1852  xstate->dominfo_valid = 0;
1853  xstate->dominfo.domid = 0;
1854  xstate->dominfo.dying = 0;
1855  xstate->dominfo.crashed = 0;
1856  xstate->dominfo.shutdown = 0;
1857  xstate->dominfo.paused = 0;
1858  xstate->dominfo.blocked = 0;
1859  xstate->dominfo.running = 0;
1860  xstate->dominfo.hvm = 0;
1861  xstate->dominfo.debugged = 0;
1862  xstate->dominfo.shutdown_reason = 0;
1863  xstate->dominfo.max_vcpu_id = 0;
1864  xstate->dominfo.shared_info_frame = 0;
1865 
1866  xstate->xen_vm_vmp_client_fd = -1;
1867  xstate->xen_vm_vmp_client_path = NULL;
1868 
1869  /* Create the default thread. */
1870  tstate = (struct xen_vm_thread_state *)calloc(1,sizeof(*tstate));
1871 
1872  target->global_thread = target_create_thread(target,TID_GLOBAL,tstate,NULL);
1873  /* Default thread is always running. */
1874  target_thread_set_status(target->global_thread,THREAD_STATUS_RUNNING);
1875 
1876  /* Create our default context now; update its region later. */
1877  target->global_tlctxt =
1878  target_location_ctxt_create(target,TID_GLOBAL,NULL);
1879 
1880  return 0;
1881 }
1882 
1883 #ifdef XENCTRL_HAS_XC_INTERFACE
1884 int xen_vm_xc_attach(xc_interface **xc_handle,xc_interface **xce_handle) {
1885 #else
1886 int xen_vm_xc_attach(int *xc_handle,int *xce_handle) {
1887 #endif
1888 
1889  if (xc_handle && *xc_handle == XC_IF_INVALID) {
1890 #ifdef XENCTRL_HAS_XC_INTERFACE
1891  *xc_handle = xc_interface_open(NULL,NULL,0);
1892 #else
1893  *xc_handle = xc_interface_open();
1894 #endif
1895  if (*xc_handle == XC_IF_INVALID) {
1896  verror("failed to open xc interface: %s\n",strerror(errno));
1897  return -1;
1898  }
1899  }
1900 
1901  if (xce_handle && *xce_handle == XC_IF_INVALID) {
1902 #ifdef XENCTRL_HAS_XC_INTERFACE
1903  *xce_handle = xc_evtchn_open(NULL,0);
1904 #else
1905  *xce_handle = xc_evtchn_open();
1906 #endif
1907  if (*xce_handle == XC_IF_INVALID) {
1908  verror("failed to open event channel: %s\n",strerror(errno));
1909  return -1;
1910  }
1911  }
1912 
1913  return 0;
1914 }
1915 
1916 #ifdef XENCTRL_HAS_XC_INTERFACE
1917 int xen_vm_xc_detach(xc_interface **xc_handle,xc_interface **xce_handle)
1918 #else
1919 int xen_vm_xc_detach(int *xc_handle,int *xce_handle)
1920 #endif
1921 {
1922  if (xc_handle && *xc_handle != XC_IF_INVALID) {
1923  xc_interface_close(*xc_handle);
1924  *xc_handle = XC_IF_INVALID;
1925  }
1926 
1927  if (xce_handle && *xce_handle != XC_IF_INVALID) {
1928  xc_evtchn_close(*xce_handle);
1929  *xce_handle = XC_IF_INVALID;
1930  }
1931 
1932  return 0;
1933 }
1934 
1935 #ifdef XENCTRL_HAS_XC_INTERFACE
1936 int xen_vm_virq_attach(xc_interface *xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1937 #else
1938 int xen_vm_virq_attach(int xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1939 #endif
1940 {
1941  if (dbg_port && *dbg_port == -1) {
1942  *dbg_port = xc_evtchn_bind_virq(xce_handle,VIRQ_DEBUGGER);
1943  /* Try to cast dbg_port to something signed. Old xc versions
1944  * have a bug in that evtchn_port_t is declared as uint32_t, but
1945  * the function prototypes that return them can theoretically
1946  * return -1. So, try to test for that...
1947  */
1948  if ((int32_t)*dbg_port < 0) {
1949  verror("failed to bind debug virq port: %s",strerror(errno));
1950  return -1;
1951  }
1952  }
1953 
1954  return 0;
1955 }
1956 
1957 #ifdef XENCTRL_HAS_XC_INTERFACE
1958 int xen_vm_virq_detach(xc_interface *xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1959 #else
1960 int xen_vm_virq_detach(int xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1961 #endif
1962 {
1963  if (dbg_port && *dbg_port != -1) {
1964  if (xc_evtchn_unbind(xce_handle,(evtchn_port_t)*dbg_port)) {
1965  verror("failed to unbind debug virq port\n");
1966  return -1;
1967  }
1968 
1969  *dbg_port = -1;
1970  }
1971 
1972  return 0;
1973 }
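 /*
  * Hypothetical usage of the attach/detach pair above:
  *
  *   XC_EVTCHN_PORT_T dbg_port = -1;
  *   if (xen_vm_virq_attach(xce_handle,&dbg_port) == 0) {
  *       // ... select() on the evtchn fd; handle VIRQ_DEBUGGER ...
  *       xen_vm_virq_detach(xce_handle,&dbg_port);
  *   }
  */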
1974 
1975 int xen_vm_vmp_attach(char *path,int *cfd,char **cpath) {
1976  struct stat sbuf;
1977  struct sockaddr_un sun,sun_client;
1978  char *tmpdir;
1979  char *spath;
1980  int spath_len,cpath_len;
1981  int len;
1982 
1983  assert(cfd);
1984  assert(cpath);
1985 
1986  if (cfd && *cfd != -1)
1987  return 0;
1988 
1989  if (!path) {
1990  /*
1991  * Just try /var/run or TMPDIR or /tmp or .
1992  */
1993  if (stat("/var/run",&sbuf) == 0
1994  && S_ISDIR(sbuf.st_mode) && access("/var/run",W_OK) == 0) {
1995  spath_len = strlen("/var/run") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
1996  spath = malloc(spath_len);
1997  snprintf(spath,spath_len,"%s/%s","/var/run",TARGET_XV_VMP_SOCKET_FILENAME);
1998  }
1999  else if ((tmpdir = getenv("TMPDIR"))
2000  && stat(tmpdir,&sbuf) == 0 && access(tmpdir,W_OK) == 0) {
2001  spath_len = strlen(tmpdir) + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
2002  spath = malloc(spath_len);
2003  snprintf(spath,spath_len,"%s/%s",tmpdir,TARGET_XV_VMP_SOCKET_FILENAME);
2004  }
2005  else if (stat("/tmp",&sbuf) == 0
2006  && S_ISDIR(sbuf.st_mode) && access("/tmp",W_OK) == 0) {
2007  spath_len = strlen("/tmp") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
2008  spath = malloc(spath_len);
2009  snprintf(spath,spath_len,"%s/%s","/tmp",TARGET_XV_VMP_SOCKET_FILENAME);
2010  }
2011  else {
2012  spath_len = strlen(".") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
2013  spath = malloc(spath_len);
2014  snprintf(spath,spath_len,"%s/%s",".",TARGET_XV_VMP_SOCKET_FILENAME);
2015  }
2016  }
2017  else
2018  spath = strdup(path);
2019 
2020  memset(&sun,0,sizeof(sun));
2021  sun.sun_family = AF_UNIX;
2022  snprintf(sun.sun_path,UNIX_PATH_MAX,"%s",spath);
2023 
2024  /*
2025  * The server only accepts path-bound unix domain socket
2026  * connections, so bind one and do it. Try to use the same basedir
2027  * as in @spath; else use TMPDIR or /tmp or .
2028  */
2029  if (1) {
2030  dirname(spath);
2031 
2032  cpath_len = strlen(spath) + 1
2033  + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2034  + 16 + 1;
2035  *cpath = malloc(cpath_len);
2036 
2037  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2038  spath,getpid());
2039  if (open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR) < 0) {
2040  vwarn(
2041  "could not open client VMP socket file %s: %s\n",
2042  *cpath,strerror(errno));
2043  free(*cpath);
2044  *cpath = NULL;
2045  }
2046  else unlink(*cpath);  /* only unlink the file we just created */
2047  }
2048 
2049  if (cpath[0] == '\0' && (tmpdir = getenv("TMPDIR"))) {
2050  cpath_len = strlen(tmpdir) + 1
2051  + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2052  + 16 + 1;
2053  *cpath = malloc(cpath_len);
2054 
2055  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2056  tmpdir,getpid());
2057  if (open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR) < 0) {
2058  vwarn(
2059  "could not open client VMP socket file %s: %s\n",
2060  *cpath,strerror(errno));
2061  free(*cpath);
2062  *cpath = NULL;
2063  }
2064  else unlink(*cpath);
2065  }
2066 
2067  if (cpath[0] == '\0') {
2068  cpath_len = strlen("/tmp") + 1
2069  + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2070  + 16 + 1;
2071  *cpath = malloc(cpath_len);
2072 
2073  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2074  "/tmp",getpid());
2075  if (open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR) < 0) {
2076  vwarn(
2077  "could not open client VMP socket file %s: %s\n",
2078  *cpath,strerror(errno));
2079  free(*cpath);
2080  *cpath = NULL;
2081  }
2082  else unlink(*cpath);
2083  }
2084 
2085  if (cpath[0] == '\0') {
2086  cpath_len = strlen(".") + 1
2087  + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2088  + 16 + 1;
2089  *cpath = malloc(cpath_len);
2090 
2091  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2092  ".",getpid());
2093  if (open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR) < 0) {
2094  vwarn(
2095  "could not open client VMP socket file %s: %s\n",
2096  *cpath,strerror(errno));
2097  free(*cpath);
2098  *cpath = NULL;
2099  }
2100  else unlink(*cpath);
2101  }
2102 
2103  if (!*cpath) {
2104  verror("could not open a client VMP socket file; aborting!\n");
2105  goto err;
2106  }
2107 
2108  memset(&sun_client,0,sizeof(sun_client));
2109  sun_client.sun_family = AF_UNIX;
2110  snprintf(sun_client.sun_path,UNIX_PATH_MAX,"%s",*cpath);
2111 
2112  *cfd = socket(AF_UNIX,SOCK_STREAM,0);
2113  if (*cfd < 0) {
2114  verror("socket(): %s\n",strerror(errno));
2115  goto err;
2116  }
2117  len = offsetof(struct sockaddr_un,sun_path) + strlen(sun_client.sun_path);
2118  if (bind(*cfd,(struct sockaddr *)&sun_client,len) < 0) {
2119  verror("bind(%s): %s\n",sun_client.sun_path,strerror(errno));
2120  goto err;
2121  }
2122  if (fchmod(*cfd,S_IRUSR | S_IWUSR) < 0) {
2123  verror("chmod(%s): %s\n",sun_client.sun_path,strerror(errno));
2124  goto err;
2125  }
2126 
2127  len = offsetof(struct sockaddr_un,sun_path) + strlen(sun.sun_path);
2128  if (connect(*cfd,(struct sockaddr *)&sun,len) < 0) {
2129  verror("connect(%s): %s\n",sun.sun_path,strerror(errno));
2130  goto err;
2131  }
2132 
2133  free(spath);
2134 
2135  return 0;
2136 
2137  err:
2138  if (*cfd > -1) close(*cfd); *cfd = -1;  /* don't leak the socket fd */
2139  if (*cpath)
2140  free(*cpath);
2141  *cpath = NULL;
2142  free(spath);
2143 
2144  return -1;
2145 }
2146 
2147 int xen_vm_vmp_detach(int *cfd,char **cpath) {
2148  if (cfd && *cfd != -1) {
2149  close(*cfd);
2150  *cfd = -1;
2151  if (cpath && *cpath) {
2152  unlink(*cpath);
2153  free(*cpath);
2154  *cpath = NULL;
2155  }
2156  }
2157 
2158  return 0;
2159 }
2160 
2161 int xen_vm_vmp_launch() {
2162  int rc;
2163 
2164  rc = system(TARGET_XV_VMP_BIN_PATH);
2165  if (rc) {
2166  verror("system(%s): %s\n",TARGET_XV_VMP_BIN_PATH,strerror(errno));
2167  return -1;
2168  }
2169 
2170  return 0;
2171 }
2172 
2173 int xen_vm_virq_or_vmp_attach_or_launch(struct target *target) {
2174  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2175  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
2176  int i;
2177  int rc = -1;
2178 
2179  if (xspec->no_use_multiplexer) {
2180  if (dbg_port > -1) {
2181  verror("cannot connect for multiple domains without multiplexer!\n");
2182  errno = EINVAL;
2183  return -1;
2184  }
2185  else
2186  return xen_vm_virq_attach(xce_handle,&dbg_port);
2187  }
2188 
2189  /* Try to connect. If we can't, then launch, wait, and try again. */
2190  if (xen_vm_vmp_attach(NULL,&xstate->xen_vm_vmp_client_fd,&xstate->xen_vm_vmp_client_path)) {
2191  if (xen_vm_vmp_launch()) {
2192  verror("could not launch Xen VIRQ_DEBUGGER multiplexer!\n");
2193  return -1;
2194  }
2195  else {
2196  vdebug(6,LA_TARGET,LF_XV,"launched Xen VIRQ_DEBUGGER multiplexer!\n");
2197  }
2198 
2199  for (i = 0; i < 5; ++i) {
2200  rc = xen_vm_vmp_attach(NULL,&xstate->xen_vm_vmp_client_fd,
2201  &xstate->xen_vm_vmp_client_path);
2202  if (rc == 0)
2203  break;
2204  else
2205  sleep(1);
2206  }
2207 
2208  if (rc) {
2209  verror("could not connect to launched Xen VIRQ_DEBUGGER multiplexer!\n");
2210  return -1;
2211  }
2212  }
2213 
2214  vdebug(6,LA_TARGET,LF_XV,"connected to Xen VIRQ_DEBUGGER multiplexer!\n");
2215 
2216  return 0;
2217 }
2218 
2219 int xen_vm_virq_or_vmp_detach(struct target *target) {
2220  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2221 
2222  if (dbg_port != -1) {
2223  xce_handle_fd = -1;
2224  return xen_vm_virq_detach(xce_handle,&dbg_port);
2225  }
2226  else
2227  return xen_vm_vmp_detach(&xstate->xen_vm_vmp_client_fd,
2228  &xstate->xen_vm_vmp_client_path);
2229 }
2230 
2231 int xen_vm_virq_or_vmp_get_fd(struct target *target) {
2232  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2233 
2234  if (dbg_port != -1) {
2235  if (xce_handle_fd == -1)
2236  xce_handle_fd = xc_evtchn_fd(xce_handle);
2237  return xce_handle_fd;
2238  }
2239  else
2240  return xstate->xen_vm_vmp_client_fd;
2241 }
2242 
2243 int xen_vm_virq_or_vmp_read(struct target *target,int *vmid) {
2244  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2245  XC_EVTCHN_PORT_T port = -1;
2246  struct target_xen_vm_vmp_client_response resp = { 0 };
2247  int retval;
2248  int rc;
2249 
2250  if (dbg_port != -1) {
2251  /* we've got something from eventchn. let's see what it is! */
2252  port = xc_evtchn_pending(xce_handle);
2253 
2254  /* unmask the event channel BEFORE doing anything else,
2255  * like unpausing the target!
2256  */
2257  retval = xc_evtchn_unmask(xce_handle, port);
2258  if (retval == -1) {
2259  verror("failed to unmask event channel\n");
2260  return -1;
2261  }
2262 
2263  if (port != dbg_port) {
2264  *vmid = -1;
2265  return 0;
2266  }
2267  else {
2268  /* XXX: don't try to figure out which VM; must check them
2269  * all; no infallible way to find out which one(s).
2270  */
2271  *vmid = 0;
2272  return 0;
2273  }
2274  }
2275  else {
2276  rc = read(xstate->xen_vm_vmp_client_fd,&resp,sizeof(resp));
2277  if (rc < 0) {
2278  if (errno == EINTR) {
2279  *vmid = -1;
2280  return 0;
2281  }
2282  return -1;
2283  }
2284  else if (rc == 0) {
2285  return -1;
2286  }
2287  else if (rc != sizeof(resp)) {
2288  return -1;
2289  }
2290  else {
2291  *vmid = resp.vmid;
2292  return 0;
2293  }
2294  }
2295 
2296  /* Not reached, despite what gcc thinks! */
2297  return -1;
2298 }
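 /*
  * Sketch of how a caller might drive the two functions above
  * (hypothetical; the real event loop lives elsewhere):
  *
  *   int vmid;
  *   int fd = xen_vm_virq_or_vmp_get_fd(target);
  *   // ... select()/poll() until fd is readable ...
  *   if (xen_vm_virq_or_vmp_read(target,&vmid) == 0 && vmid > -1) {
  *       // vmid == 0 on a raw VIRQ (must check all domains); else
  *       // the multiplexer told us exactly which domain fired.
  *   }
  */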
2299 
2300 static int xen_vm_attach_internal(struct target *target) {
2301  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2302  struct xen_domctl domctl;
2303  struct target_thread *tthread;
2304  struct xen_vm_thread_state *xtstate;
2305  struct xen_vm_spec *xspec;
2306 
2307  xspec = (struct xen_vm_spec *)target->spec->backend_spec;
2308 
2309  domctl.cmd = XEN_DOMCTL_setdebugging;
2310  domctl.domain = xstate->id;
2311  domctl.u.setdebugging.enable = true;
2312 
2313  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2314 
2315  /*
2316  * Always attach to XC.
2317  */
2318  if (xen_vm_xc_attach(&xc_handle,&xce_handle))
2319  return -1;
2320 
2321  /*
2322  * Connect to VIRQ_DEBUGGER, either through demultiplexer daemon, or
2323  * directly. If daemon, launch or connect...
2324  */
2325  if (xen_vm_virq_or_vmp_attach_or_launch(target))
2326  return -1;
2327 
2328  /* NOT thread-safe! */
2329  ++xc_refcnt;
2330 
2331  if (xc_domctl(xc_handle,&domctl)) {
2332  verror("could not enable debugging of dom %d!\n",xstate->id);
2333  return -1;
2334  }
2335 
2336  /* Null out current state so we reload and see that it's paused! */
2337  xstate->dominfo_valid = 0;
2338  if (xen_vm_load_dominfo(target)) {
2339  verror("could not load dominfo for dom %d\n",xstate->id);
2340  return -1;
2341  }
2342 
2343  if (xen_vm_pause(target,0)) {
2344  verror("could not pause target before attaching; letting user handle!\n");
2345  }
2346 
2347  /*
2348  * Make sure memops is setup to read from memory.
2349  */
2350  if (xstate->memops && xstate->memops->attach) {
2351  if (xstate->memops->attach(target)) {
2352  verror("could not attach memops!\n");
2353  return -1;
2354  }
2355  }
2356 
2357  if (target->evloop && xstate->evloop_fd < 0) {
2358  xen_vm_attach_evloop(target,target->evloop);
2359  }
2360 
2361  if (target->writeable && !xspec->no_hw_debug_reg_clear) {
2362  /*
2363  * Null out hardware breakpoints, so that we don't try to infer that
2364  * one was set, only to error because it's a software BP, not a
2365  * hardware BP (even if the ip matches). This can happen if you do
2366  * one run with hw bps, then breakpoint the same ip with a sw bp.
2367  * Good practice anyway!
2368  */
2369 
2370  if (!(tthread = __xen_vm_load_cached_thread(target,TID_GLOBAL))) {
2371  if (!errno)
2372  errno = EINVAL;
2373  verror("could not load cached thread %"PRIiTID"\n",TID_GLOBAL);
2374  return -1;
2375  }
2376  xtstate = (struct xen_vm_thread_state *)tthread->state;
2377 
2378  xtstate->dr[0] = 0;
2379  xtstate->dr[1] = 0;
2380  xtstate->dr[2] = 0;
2381  xtstate->dr[3] = 0;
2382  /* Clear the status bits */
2383  xtstate->dr[6] = 0;
2384  /* Clear the control bit. */
2385  xtstate->dr[7] = 0;
2386 
2387  /* Now save these values for later write in flush_context! */
2388  xtstate->context.debugreg[0] = 0;
2389  xtstate->context.debugreg[1] = 0;
2390  xtstate->context.debugreg[2] = 0;
2391  xtstate->context.debugreg[3] = 0;
2392  xtstate->context.debugreg[6] = 0;
2393  xtstate->context.debugreg[7] = 0;
2394 
2395  OBJSDIRTY(tthread);
2396 
2397  if (target->current_thread) {
2398  tthread = target->current_thread;
2399  xtstate = (struct xen_vm_thread_state *)tthread->state;
2400 
2401  xtstate->dr[0] = 0;
2402  xtstate->dr[1] = 0;
2403  xtstate->dr[2] = 0;
2404  xtstate->dr[3] = 0;
2405  /* Clear the status bits */
2406  xtstate->dr[6] = 0;
2407  /* Clear the control bit. */
2408  xtstate->dr[7] = 0;
2409 
2410  /* Now save these values for later write in flush_context! */
2411  xtstate->context.debugreg[0] = 0;
2412  xtstate->context.debugreg[1] = 0;
2413  xtstate->context.debugreg[2] = 0;
2414  xtstate->context.debugreg[3] = 0;
2415  xtstate->context.debugreg[6] = 0;
2416  xtstate->context.debugreg[7] = 0;
2417 
2418  OBJSDIRTY(target->current_thread);
2419  }
2420  }
2421 
2422  return 0;
2423 }
2424 
2425 static int xen_vm_detach(struct target *target,int stay_paused) {
2426  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2427  struct xen_domctl domctl;
2428 
2429  domctl.cmd = XEN_DOMCTL_setdebugging;
2430  domctl.domain = xstate->id;
2431  domctl.u.setdebugging.enable = false;
2432 
2433  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2434 
2435  if (!target->opened)
2436  return 0;
2437 
2438  if (xen_vm_status(target) == TSTATUS_PAUSED
2439  && (g_hash_table_size(target->threads) || target->global_thread)) {
2440  /* Flush back registers if they're dirty, but if we don't have
2441  * any threads (i.e. because we're closing/detaching), don't
2442  * flush all, which would load the global thread!
2443  */
2444  target_flush_all_threads(target);
2445  }
2446 
2447  if (target->evloop && xstate->evloop_fd > -1)
2448  xen_vm_detach_evloop(target);
2449 
2450  if (xstate->memops->fini) {
2451  if (xstate->memops->fini(target)) {
2452  verror("failed to fini memops; continuing anyway!\n");
2453  return 0;
2454  }
2455  }
2456 
2457  if (xstate->live_shinfo)
2458  munmap(xstate->live_shinfo,__PAGE_SIZE);
2459 
2460  if (xc_domctl(xc_handle,&domctl)) {
2461  verror("could not disable debugging of dom %d!\n",xstate->id);
2462  return -1;
2463  }
2464 
2465  if (!stay_paused && xen_vm_status(target) == TSTATUS_PAUSED) {
2466  __xen_vm_resume(target,1);
2467  }
2468 
2469  --xc_refcnt;
2470 
2471  if (!xc_refcnt) {
2472  /* Close all the xc stuff; we're the last one. */
2473  vdebug(4,LA_TARGET,LF_XV,"last domain; closing xc/xce interfaces.\n");
2474 
2475  if (xen_vm_virq_or_vmp_detach(target))
2476  verror("failed to unbind debug virq port\n");
2477 
2478  if (xen_vm_xc_detach(&xc_handle,&xce_handle))
2479  verror("failed to close xc interfaces\n");
2480  }
2481 
2482  vdebug(3,LA_TARGET,LF_XV,"detach dom %d succeeded.\n",xstate->id);
2483 
2484  return 0;
2485 }
2486 
2487 static int xen_vm_fini(struct target *target) {
2488  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2489 
2490  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2491 
2492  if (xstate->vmpath)
2493  free(xstate->vmpath);
2494  if (xstate->kernel_filename)
2495  free(xstate->kernel_filename);
2496  if (xstate->name)
2497  free(xstate->name);
2498  if (xstate)
2499  free(xstate);
2500 
2501  return 0;
2502 }
2503 
2504 static int xen_vm_kill(struct target *target,int sig) {
2505  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2506 
2507  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2508 
2509  /* XXX: fill in! */
2510  return 0;
2511 }
2512 
2513 /*
2514  * For now, just one big address space.
2515  */
2516 static int xen_vm_loadspaces(struct target *target) {
2517  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2518 
2519  addrspace_create(target,"kernel",xstate->id);
2520 
2521  return 0;
2522 }
2523 
2524 /*
2525  * For now, just find our kernel binary path from xen store, as well as
2526  * the max amount of mem, and create a single region (with a single
2527  * range that is R/W/X) covering either all 32 or 64 bits.
2528  *
2529  * The immediate reason to do this is that figuring out which memory is
2530  * currently mapped to kernel or user address space is going to be slow
2531  * because it involves lots of list traverses. Plus, even if we had an
2532  * efficient data structure for searching address ranges, we would have
2533  * to reload the ranges/regions *every* time the domain runs. We do not
2534  * want to do this!
2535  *
2536  * So, XXX: come back to it later.
2537  */
2538 static int xen_vm_loadregions(struct target *target,struct addrspace *space) {
2539  struct memregion *region;
2540  struct memrange *range;
2541  char *kernel_filename;
2542 
2543  kernel_filename =
2544  (char *)g_hash_table_lookup(target->config,"OS_KERNEL_FILENAME");
2545 
2546  region = memregion_create(space,REGION_TYPE_MAIN,kernel_filename);
2547  if (!region)
2548  return -1;
2549  range = memrange_create(region,0,ADDRMAX,0,
2550  PROT_READ | PROT_WRITE | PROT_EXEC);
2551  if (!range)
2552  return -1;
2553 
2554  target->global_tlctxt->region = region;
2555 
2556  return 0;
2557 }
2558 
2559 /*
2560  * For now, just try to find the debuginfo for our kernel, unless the
2561  * user told us about it in xstate.
2562  *
2563  * We need to look for gnu_debuglink first, and then look in
2564  * /usr*lib/debug for a match. Actually, we prefer the buildid because
2565  * for fedora kernel modules, we don't necessarily know the path to the
2566  * module in /lib/modules/VERSION/.../module.ko in the fs, so we can't
2567  * duplicate ... in the /usr/lib/debug search... so build id is the way
2568  * to go.
2569  *
2570  * But for just the kernel itself, this is easier. If we have buildid
2571  * or debuglink, we use /usr*lib/debug. Else, we look in /boot for a
2572  * file that replaces the vmlinuz part with vmlinux.
2573  */
2574 static int xen_vm_loaddebugfiles(struct target *target,
2575  struct addrspace *space,
2576  struct memregion *region) {
2577  int retval = -1;
2578  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2579  struct debugfile *debugfile;
2580  int bfn = 0;
2581  int bfpn = 0;
2582 
2583  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2584 
2585  /*
2586  * Open up the actual ELF binary and look for three sections to inform
2587  * our search. First, if there is a nonzero .debug_info section,
2588  * load that. Second, if there is a .note.gnu.build-id section,
2589  * read the build id and decompose it into a two-byte dir/file.debug
2590  * string that we look for in our search path (i.e., we look for
2591  * $PATH/.build-id/b1/b2..bX.debug). Otherwise, if there is a
2592  * .gnu_debuglink section, we read that section and try to find a
2593  * matching debug file.
2594  */
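 /*
  * For example (hypothetical build id): an id of d3a1...9f is looked
  * up as $PATH/.build-id/d3/a1...9f.debug -- the first byte names the
  * directory, and the remaining bytes (plus ".debug") name the file.
  */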
2595  if (!region->name || strlen(region->name) == 0)
2596  return -1;
2597 
2598  debugfile = debugfile_from_file(region->name,
2599  target->spec->debugfile_root_prefix,
2600  target->spec->debugfile_load_opts_list);
2601  if (!debugfile)
2602  goto out;
2603 
2604  if (target_associate_debugfile(target,region,debugfile)) {
2605  goto out;
2606  }
2607 
2608  /*
2609  * Try to figure out which binfile has the info we need. On
2610  * different distros, they're stripped different ways.
2611  */
2612  if (debugfile->binfile_pointing) {
2613  binfile_get_root_scope_sizes(debugfile->binfile,&bfn,NULL,NULL,NULL);
2614  binfile_get_root_scope_sizes(debugfile->binfile_pointing,&bfpn,
2615  NULL,NULL,NULL);
2616  if (bfpn > bfn) {
2617  RHOLD(debugfile->binfile_pointing,region);
2618  region->binfile = debugfile->binfile_pointing;
2619  }
2620  }
2621 
2622  if (!region->binfile) {
2623  RHOLD(debugfile->binfile,region);
2624  region->binfile = debugfile->binfile;
2625  }
2626 
2627  /*
2628  * With Xen VMs, we can't always know what the vCPU is running as
2629  * from the xenstore. For instance, with an HVM, we can't seem to
2630  * figure out whether it's running x86_64, x32, or i386 at all; we
2631  * have to load the kernel debuginfo binary to know.
2632  */
2633  if (!target->arch) {
2634  target->arch = debugfile->binfile->arch;
2635  }
2636 
2637  /*
2638  * Propagate some binfile info...
2639  */
2640  region->base_phys_addr = region->binfile->base_phys_addr;
2641  region->base_virt_addr = region->binfile->base_virt_addr;
2642 
2643  retval = 0;
2644 
2645  out:
2646  return retval;
2647 }
2648 
2649 static int xen_vm_postloadinit(struct target *target) {
2650  int rc;
2651  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2652 
2653  /*
2654  * We might not know this until now! Which register is the fbreg is
2655  * dependent on host cpu type, not target cpu type.
2656  */
2657  if (target->arch->type == ARCH_X86_64) {
2658  target->fbregno = REG_X86_64_RBP;
2659  target->spregno = REG_X86_64_RSP;
2660  target->ipregno = REG_X86_64_RIP;
2661  }
2662  else {
2663  target->fbregno = REG_X86_EBP;
2664  target->spregno = REG_X86_ESP;
2665  target->ipregno = REG_X86_EIP;
2666  }
2667 
2668  SAFE_PERSONALITY_OP_WARN(init,rc,0,target);
2669  SAFE_PERSONALITY_OP_WARN(postloadinit,rc,0,target);
2670 
2671  char *start = (char *)g_hash_table_lookup(target->config,
2672  "OS_KERNEL_START_ADDR");
2673  if (start)
2674  xstate->kernel_start_addr = strtoul(start,NULL,0);
2675 
2676  char *hpage = (char *)g_hash_table_lookup(target->config,
2677  "OS_KERNEL_HYPERCALL_PAGE");
2678  if (hpage)
2679  xstate->hypercall_page = strtoul(hpage,NULL,0);
2680 
2681  return 0;
2682 }
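 /*
  * E.g. (hypothetical value): if a personality stored
  * OS_KERNEL_START_ADDR = "0xffffffff81000000", the strtoul(...,0)
  * calls above auto-detect the 0x prefix and parse it as base 16.
  */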
2683 
2684 static int xen_vm_postopened(struct target *target) {
2685  int rc;
2686  SAFE_PERSONALITY_OP_WARN(postopened,rc,0,target);
2687  return rc;
2688 }
2689 
2690 static int xen_vm_set_active_probing(struct target *target,
2691  active_probe_flags_t flags) {
2692  int rc;
2693  SAFE_PERSONALITY_OP_WARN(set_active_probing,rc,0,target,flags);
2694  return rc;
2695 }
2696 
2697 static struct target_spec *
2698 xen_vm_build_default_overlay_spec(struct target *target,tid_t tid) {
2699  return target_build_spec(TARGET_TYPE_OS_PROCESS,TARGET_MODE_NONE);
2700 }
2701 
2702 static struct target *
2703 xen_vm_instantiate_overlay(struct target *target,
2704  struct target_thread *tthread,
2705  struct target_spec *spec,
2706  struct target_thread **ntthread) {
2707  struct target *overlay;
2708  REGVAL thip;
2709  tid_t ltid;
2710  struct target_thread *leader;
2711 
2712  if (!spec)
2713  spec = xen_vm_build_default_overlay_spec(target,tthread->tid);
2714 
2715  if (spec->target_type != TARGET_TYPE_OS_PROCESS) {
2716  errno = EINVAL;
2717  return NULL;
2718  }
2719 
2720  errno = 0;
2721  thip = target_read_reg_ctxt(target,tthread->tid,THREAD_CTXT_USER,
2722  target->ipregno);
2723  if (errno) {
2724  verror("could not read IP for tid %"PRIiTID"!!\n",tthread->tid);
2725  return NULL;
2726  }
2727  if (target_os_thread_is_user(target,tthread->tid) != 1) {
2728  errno = EINVAL;
2729  verror("tid %"PRIiTID" IP 0x%"PRIxADDR" is not a user thread!\n",
2730  tthread->tid,thip);
2731  return NULL;
2732  }
2733 
2734  /*
2735  * Flip to the group leader if it is not this thread itself.
2736  */
2737  ltid = target_os_thread_get_leader(target,tthread->tid);
2738  leader = target_lookup_thread(target,ltid);
2739  if (!leader) {
2740  verror("could not load group_leader for thread %d; BUG?!\n",tthread->tid);
2741  return NULL;
2742  }
2743  else if (leader != tthread) {
2744  vdebug(5,LA_TARGET,LF_XV,
2745  "using group_leader %d instead of user-supplied overlay thread %d\n",
2746  leader->tid,tthread->tid);
2747  *ntthread = leader;
2748  }
2749 
2750  /*
2751  * All we want to do here is create the overlay target.
2752  */
2753  overlay = target_create("os_process",spec);
2754 
2755  return overlay;
2756 }
2757 
2758 static struct target_thread *
2759 xen_vm_lookup_overlay_thread_by_id(struct target *target,int id) {
2760  struct target_thread *retval;
2761 
2762  retval = xen_vm_load_thread(target,id,0);
2763  if (!retval) {
2764  if (!errno)
2765  errno = ESRCH;
2766  return NULL;
2767  }
2768 
2769  if (target_os_thread_is_user(target,retval->tid) == 1) {
2770  vdebug(9,LA_TARGET,LF_XV,
2771  "found overlay thread %d\n",id);
2772  return retval;
2773  }
2774  else {
2775  verror("tid %d matched %d, but is a kernel thread!\n",retval->tid,id);
2776  errno = EINVAL;
2777  return NULL;
2778  }
2779 }
2780 
2781 static struct target_thread *
2782 xen_vm_lookup_overlay_thread_by_name(struct target *target,char *name) {
2783  struct target_thread *retval = NULL;
2784  struct target_thread *tthread;
2785  int slen;
2786  int rc;
2787  GHashTableIter iter;
2788 
2789  if ((rc = xen_vm_load_available_threads(target,0)))
2790  vwarn("could not load %d threads; continuing anyway!\n",-rc);
2791 
2792  g_hash_table_iter_init(&iter,target->threads);
2793  while (g_hash_table_iter_next(&iter,NULL,(gpointer)&tthread)) {
2794  if (tthread == target->global_thread)
2795  continue;
2796 
2797  if (!tthread->name) {
2798  vwarn("tid %d does not have a name; continuing!\n",
2799  tthread->tid);
2800  continue;
2801  }
2802 
2803  slen = strlen(tthread->name);
2804  vdebug(8,LA_TARGET,LF_XV,
2805  "checking task with name '%*s' against '%s'\n",
2806  slen,tthread->name,name);
2807  if (strncmp(name,tthread->name,slen) == 0) {
2808  retval = tthread;
2809  break;
2810  }
2811  }
2812 
2813  if (retval) {
2814  if (target_os_thread_is_user(target,retval->tid) != 1) {
2815  verror("tid %d matched '%s', but is a kernel thread!\n",
2816  retval->tid,name);
2817  errno = EINVAL;
2818  return NULL;
2819  }
2820  else {
2821  vdebug(9,LA_TARGET,LF_XV,
2822  "found overlay thread %"PRIiTID"\n",retval->tid);
2823  return tthread;
2824  }
2825  }
2826  else {
2827  errno = ESRCH;
2828  return NULL;
2829  }
2830 }
2831 
2832 int xen_vm_attach_overlay_thread(struct target *base,struct target *overlay,
2833  tid_t newtid) {
2834  tid_t cltid,nltid;
2835 
2836  nltid = target_os_thread_get_leader(base,newtid);
2837  cltid = target_os_thread_get_leader(base,overlay->base_thread->tid);
2838 
2839  if (nltid == -1 || cltid == -1)
2840  return -1;
2841 
2842  if (nltid == cltid)
2843  return 0;
2844 
2845  errno = EINVAL;
2846  return 1;
2847 }
2848 
2849 int xen_vm_detach_overlay_thread(struct target *base,struct target *overlay,
2850  tid_t tid) {
2851  return 0;
2852 }
2853 
2854 static target_status_t xen_vm_status(struct target *target) {
2855  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2856  target_status_t retval = TSTATUS_UNKNOWN;
2857 
2858  if (xen_vm_load_dominfo(target)) {
2859  verror("could not load dominfo for dom %d\n",xstate->id);
2860  return retval;
2861  }
2862 
2863  if (xstate->dominfo.paused)
2864  retval = TSTATUS_PAUSED;
2865  else if (xstate->dominfo.running || xstate->dominfo.blocked)
2866  /* XXX: is this right? i.e., is "blocked" from the hypervisor
2867  perspective? */
2868  retval = TSTATUS_RUNNING;
2869  else if (xstate->dominfo.dying || xstate->dominfo.crashed)
2870  retval = TSTATUS_DEAD;
2871  else if (xstate->dominfo.shutdown)
2872  retval = TSTATUS_STOPPED;
2873  else
2874  retval = TSTATUS_ERROR;
2875 
2876  vdebug(9,LA_TARGET,LF_XV,"dom %d status %d\n",xstate->id,retval);
2877 
2878  return retval;
2879 }
2880 
2881 static int xen_vm_pause(struct target *target,int nowait) {
2882  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2883  struct timeval check_tv = { 0,0};
2884  target_poll_outcome_t outcome;
2885  int pstatus;
2886 
2887  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2888 
2889  if (xen_vm_load_dominfo(target))
2890  vwarn("could not load dominfo for dom %d, trying to pause anyway!\n",xstate->id);
2891 
2892  if (xstate->dominfo.paused) {
2893  if (target_get_status(target) != TSTATUS_PAUSED)
2894  target_set_status(target,TSTATUS_PAUSED);
2895  else
2896  return 0;
2897  }
2898  else if (xc_domain_pause(xc_handle,xstate->id)) {
2899  verror("could not pause dom %d!\n",xstate->id);
2900  return -1;
2901  }
2902 
2903  /*
2904  * Give the memops a chance to handle pause.
2905  */
2906  if (xstate->memops && xstate->memops->handle_pause) {
2907  xstate->memops->handle_pause(target);
2908  }
2909 
2911 
2912  xstate->dominfo_valid = 0;
2913  if (xen_vm_load_dominfo(target))
2914  vwarn("could not reload dominfo for dom %d after pause!\n",xstate->id);
2915 
2916  /*
2917  * NB: very important.
2918  *
2919  * Since we allow pauses to be commanded asynchronously
2920  * w.r.t. target vm execution state, we have to check if there is
2921  * something to handle once we successfully pause it, and handle it
2922  * if so. Otherwise if a target_pause() and debug exception happen
2923  * at the "same" time relative to the user, we might leave a debug
2924  * event unhandled, and this could whack the target.
2925  *
2926  * We pass in a 0,0 timeval so that the select() in xen_vm_poll
2927  * truly polls.
2928  *
2929  * Also note that we don't care what the outcome is.
2930  */
2931  xen_vm_poll(target,&check_tv,&outcome,&pstatus);
2932 
2933  return 0;
2934 }
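 /*
  * Reminder of why check_tv works (illustrative): select(2) with a
  * zeroed timeout returns immediately, e.g.
  *
  *   struct timeval tv = { 0, 0 };
  *   select(nfds,&rfds,NULL,NULL,&tv);   // pure poll; never blocks
  *
  * so the xen_vm_poll() call above cannot stall a paused target.
  */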
2935 
2936 static int xen_vm_flush_current_thread(struct target *target) {
2937  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2938  struct target_thread *tthread;
2939  struct xen_vm_thread_state *tstate;
2940  tid_t tid;
2941  int rc;
2942 
2943  if (!target->current_thread) {
2944  verror("current thread not loaded!\n");
2945  errno = EINVAL;
2946  return -1;
2947  }
2948 
2949  tthread = target->current_thread;
2950  tid = tthread->tid;
2951  tstate = (struct xen_vm_thread_state *)tthread->state;
2952 
2953  vdebug(5,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID"\n",xstate->id,tid);
2954 
2955  if (!OBJVALID(tthread) || !OBJDIRTY(tthread)) {
2956  vdebug(8,LA_TARGET,LF_XV,
2957  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
2958  xstate->id,tid,OBJVALID(tthread),OBJDIRTY(tthread));
2959  return 0;
2960  }
2961 
2962  vdebug(9,LA_TARGET,LF_XV,
2963  "EIP is 0x%"PRIxREGVAL" before flush (dom %d tid %"PRIiTID")\n",
2964  target_read_reg(target,TID_GLOBAL,target->ipregno),
2965  xstate->id,tid);
2966 
2967  if (!target->writeable) {
2968  verror("target %s not writeable!\n",target->name);
2969  errno = EROFS;
2970  return -1;
2971  }
2972 
2973  if (__xen_vm_thread_regcache_to_vcpu(target,tthread,tthread->tidctxt,
2974  &tstate->context)) {
2975  verror("could not convert regcache to vcpu context(dom %d tid %"PRIiTID")\n",
2976  xstate->id,tid);
2977  errno = EINVAL;
2978  return -1;
2979  }
2980 
2981  /*
2982  * Flush Xen machine context.
2983  */
2984  if (__xen_vm_cpu_setcontext(target,&tstate->context) < 0) {
2985  verror("could not set vcpu context (dom %d tid %"PRIiTID")\n",
2986  xstate->id,tid);
2987  errno = EINVAL;
2988  return -1;
2989  }
2990 
2991 #if __WORDSIZE == 32
2992  vdebug(9,LA_TARGET,LF_XV,
2993  "eflags (vcpu context): 0x%"PRIxADDR"\n",
2994  tstate->context.user_regs.eflags);
2995 #else
2996  vdebug(9,LA_TARGET,LF_XV,
2997  "rflags (vcpu context): 0x%"PRIxADDR"\n",
2998  tstate->context.user_regs.rflags);
2999 #endif
3000  vdebug(9,LA_TARGET,LF_XV,
3001  "debug registers (vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
3002  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3003  tstate->context.debugreg[0],tstate->context.debugreg[1],
3004  tstate->context.debugreg[2],tstate->context.debugreg[3],
3005  tstate->context.debugreg[6],tstate->context.debugreg[7]);
3006 
3007  vdebug(9,LA_TARGET,LF_XV,
3008  "debug registers (our copy): 0x%"PRIxADDR",0x%"PRIxADDR
3009  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3010  tstate->dr[0],tstate->dr[1],tstate->dr[2],tstate->dr[3],
3011  tstate->dr[6],tstate->dr[7]);
3012 
3013  SAFE_PERSONALITY_OP(flush_current_thread,rc,0,target);
3014  return rc;
3015 }
3016 
3017 /*
3018  * Very similar to flush_current_thread -- BUT it doesn't flush anything
3019  * but CPU context.
3020  *
3021  * Also, if @current_thread is not NULL, we do a funny thing -- we use
3022  * the cpu context from @current_thread as our base, and overlay ONLY the
3023  * debug registers from the global thread -- and set the context to
3024  * that. If @current_thread is NULL, we upload the full CPU context we
3025  * have. @current_thread must not be the global thread itself.
3026  *
3027  * We do things this way because the only time we use the global thread
3028  * to pass to bp/ss handlers in the probe library is when Xen is in
3029  * interrupt context. In that case, there is no current_thread -- the
3030  * current_thread is the global thread. So in reality, the only thing
3031  * that gets stored in the global thread is hardware probepoints that
3032  * were set for TID_GLOBAL. However, when the bp/ss handlers handle
3033  * those probepoints, they do so in the context of the thread -- which
3034  * is either current_thread (if in task context) or global_thread (if in
3035  * interrupt context, because there is no task thread, just an interrupt
3036  * stack). So, even when a TID_GLOBAL hardware probepoint is being
3037  * handled, all the non-debug-register modifications to it happen in the
3038  * current_thread CPU state.
3039  */
3040 static int xen_vm_flush_global_thread(struct target *target,
3041  struct target_thread *current_thread) {
3042  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
3043  struct target_thread *gthread;
3044  struct xen_vm_thread_state *tstate;
3045  struct xen_vm_thread_state *gtstate;
3046  vcpu_guest_context_t *ctxp;
3047  vcpu_guest_context_t context;
3048  int i;
3049 
3050  if (!target->global_thread) {
3051  verror("BUG: no global thread loaded!!!\n");
3052  errno = EINVAL;
3053  return -1;
3054  }
3055  if (current_thread == target->global_thread)
3056  current_thread = NULL;
3057 
3058  gthread = target->global_thread;
3059  gtstate = (struct xen_vm_thread_state *)gthread->state;
3060  if (current_thread)
3061  tstate = (struct xen_vm_thread_state *)current_thread->state;
3062  else
3063  tstate = NULL;
3064 
3065  if (!OBJVALID(gthread) || !OBJDIRTY(gthread)) {
3066  vdebug(8,LA_TARGET,LF_XV,
3067  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
3068  xstate->id,gthread->tid,OBJVALID(gthread),OBJDIRTY(gthread));
3069  return 0;
3070  }
3071  else {
3072  /*
3073  * Always have to convert the global thread.
3074  */
3075  if (__xen_vm_thread_regcache_to_vcpu(target,target->global_thread,
3076  target->global_thread->tidctxt,
3077  &gtstate->context)) {
3078  verror("could not convert regcache to vcpu context"
3079  " (dom %d tid %"PRIiTID") ctxt %d\n",
3080  xstate->id,target->global_thread->tid,
3081  target->global_thread->tidctxt);
3082  errno = EINVAL;
3083  return -1;
3084  }
3085  }
3086 
3087  if (!current_thread) {
3088  /* Flush the global thread's CPU context directly. */
3089 
3090  vdebug(5,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID" (full global vCPU flush)\n",
3091  xstate->id,gthread->tid);
3092 
3093  ctxp = &gtstate->context;
3094 
3095  }
3096  else {
3097  if (__xen_vm_thread_regcache_to_vcpu(target,target->current_thread,
3098  target->current_thread->tidctxt,
3099  &tstate->context)) {
3100  verror("could not convert regcache to vcpu context"
3101  " (dom %d tid %"PRIiTID") ctxt %d\n",
3102  xstate->id,target->current_thread->tid,
3103  target->current_thread->tidctxt);
3104  errno = EINVAL;
3105  return -1;
3106  }
3107 
3108  /* We have to merge the hardware debug register state from the
3109  * current thread with the state for the global thread.
3110  */
3111  ctxp = &context;
3112 
3113  /* Copy the current_thread's whole context in; then overlay the
3114  * global thread's debugreg values *that are in use*.
3115  */
3116  memcpy(ctxp,&tstate->context,sizeof(tstate->context));
3117 
3118  /* Unilaterally NULL status register out; we're about to flush. */
3119  ctxp->debugreg[6] = 0;
3120 
3121  /* For any TID_GLOBAL debugreg that is in use, copy the register
3122  * and its control bits into the merged ctxp.
3123  */
3124  for (i = 0; i < 4; ++i) {
3125  if (gtstate->context.debugreg[i] == 0)
3126  continue;
3127 
3128  vdebug(5,LA_TARGET,LF_XV,"merging global debug reg %d in!\n",i);
3129  /* Copy in the break address */
3130  ctxp->debugreg[i] = gtstate->context.debugreg[i];
3131  /* Overwrite the control bits; unset them first, then set. */
3132  ctxp->debugreg[7] &= ~(0x3 << (i * 2));
3133  ctxp->debugreg[7] |= ((0x3 << (i * 2)) & gtstate->context.debugreg[7]);
3134  /* Overwrite the break-on bits; unset them first, then set. */
3135  ctxp->debugreg[7] &= ~(0x3 << (16 + (i * 4)));
3136  ctxp->debugreg[7] |= ((0x3 << (16 + (i * 4))) & gtstate->context.debugreg[7]);
3137  /* Overwrite the break-on size bits (watchpoint size) */
3138  ctxp->debugreg[7] &= ~(0x3 << (18 + (i * 4)));
3139  ctxp->debugreg[7] |= ((0x3 << (18 + (i * 4))) & gtstate->context.debugreg[7]);
3140  }
3141 
3142  /* Unilaterally set the break-exact bits. */
3143  //ctxp->debugreg[7] |= 0x3 << 8;
3144 
3145  }
3146 
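 /*
  * DR7 refresher for the merge loop above (standard x86 layout):
  * bits 0-7 hold the L<i>/G<i> enables (two bits per register at
  * i*2), and each register's R/W and LEN fields sit at 16+i*4 and
  * 18+i*4 respectively. So enabling, say, a 4-byte data-write
  * watchpoint in slot i looks like:
  *
  *   dr7 |= 0x1 << (i * 2);          // L<i>: local enable
  *   dr7 |= 0x1 << (16 + (i * 4));   // R/W<i> = 01b: break on write
  *   dr7 |= 0x3 << (18 + (i * 4));   // LEN<i> = 11b: 4-byte watch
  */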
3147  if (!current_thread) {
3148  vdebug(9,LA_TARGET,LF_XV,
3149  "EIP is 0x%"PRIxREGVAL" before flush (dom %d tid %"PRIiTID")\n",
3150  target_read_reg(target,TID_GLOBAL,target->ipregno),
3151  xstate->id,gthread->tid);
3152  }
3153  else {
3154  vdebug(9,LA_TARGET,LF_XV,
3155  "EIP is 0x%"PRIxREGVAL" (in thread %"PRIiTID") before flush (dom %d tid %"PRIiTID")\n",
3156 #ifdef __x86_64__
3157  gtstate->context.user_regs.rip,
3158 #else
3159  gtstate->context.user_regs.eip,
3160 #endif
3161  current_thread->tid,
3162  xstate->id,gthread->tid);
3163  }
3164 
3165  if (!target->writeable) {
3166  verror("target %s not writeable!\n",target->name);
3167  errno = EROFS;
3168  return -1;
3169  }
3170 
3171  /*
3172  * Flush Xen machine context.
3173  */
3174  if (__xen_vm_cpu_setcontext(target,ctxp) < 0) {
3175  verror("could not set vcpu context (dom %d tid %"PRIiTID")\n",
3176  xstate->id,gthread->tid);
3177  errno = EINVAL;
3178  return -1;
3179  }
3180 
3181  /* Mark cached copy as clean. */
3182  OBJSCLEAN(gthread);
3183 
3184  if (!current_thread)
3185  vdebug(4,LA_TARGET,LF_XV,
3186  "debug registers (setting full vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
3187  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3188  gtstate->context.debugreg[0],gtstate->context.debugreg[1],
3189  gtstate->context.debugreg[2],gtstate->context.debugreg[3],
3190  gtstate->context.debugreg[6],gtstate->context.debugreg[7]);
3191  else
3192  vdebug(4,LA_TARGET,LF_XV,
3193  "debug registers (setting MERGED!!! vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
3194  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3195  ctxp->debugreg[0],ctxp->debugreg[1],
3196  ctxp->debugreg[2],ctxp->debugreg[3],
3197  ctxp->debugreg[6],ctxp->debugreg[7]);
3198 
3199  if (!current_thread)
3200  vdebug(4,LA_TARGET,LF_XV,
3201  "debug registers (our copy): 0x%"PRIxADDR",0x%"PRIxADDR
3202  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3203  gtstate->dr[0],gtstate->dr[1],gtstate->dr[2],gtstate->dr[3],
3204  gtstate->dr[6],gtstate->dr[7]);
3205 
3206  return 0;
3207 }
3208 
3209 static int xen_vm_pause_thread(struct target *target,tid_t tid,int nowait) {
3210  verror("cannot pause individual threads in guests!\n");
3211  errno = EINVAL;
3212  return -1;
3213 }
3214 
3215 static int xen_vm_flush_thread(struct target *target,tid_t tid) {
3216  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3217  struct target_thread *tthread;
3218  int rc;
3219 
3220  vdebug(16,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID"\n",xstate->id,tid);
3221 
3222  /*
3223  * If we are flushing the global thread (TID_GLOBAL), do it right
3224  * away.
3225  */
3226  if (tid == TID_GLOBAL)
3227  return xen_vm_flush_current_thread(target);
3228 
3229  /*
3230  * If we haven't loaded current_thread yet, we really should load it
3231  * because otherwise we don't know if current_thread->tid == @tid.
3232  * If it does, we don't want to do the below stuff, which only
3233  * applies to non-running threads -- in this case, we want to flush
3234  * to the hardware directly.
3235  *
3236  * BUT -- we can't load a thread in the flush code; we might be
3237  * iterating over the threads hashtable, so a load might result in
3238  * a thread create which would result in the hashtable being
3239  * modified.
3240  */
3241  if (!target->current_thread) {
3242  vdebug(9,LA_TARGET,LF_XV,
3243  "current thread not loaded to compare with"
3244  " tid %"PRIiTID"; exiting, user-mode EIP, or BUG?\n",
3245  tid);
3246  }
3247  else if (!OBJVALID(target->current_thread)) {
3248  vdebug(9,LA_TARGET,LF_XV,
3249  "current thread not valid to compare with"
3250  " tid %"PRIiTID"; exiting, user-mode EIP, or BUG?\n",
3251  tid);
3252  }
3253 
3254  /*
3255  * If the thread tid we are asking for is the current thread and is
3256  * valid, or if the thread is in our cache and is valid.
3257  */
3258  if (target->current_thread && target->current_thread->tid == tid) {
3259  return xen_vm_flush_current_thread(target);
3260  }
3261  /*
3262  * Otherwise, try to lookup thread @tid.
3263  */
3264  tthread = target_lookup_thread(target,tid);
3265 
3266  if (!tthread) {
3267  verror("cannot flush unknown thread %"PRIiTID"; you forgot to load?\n",
3268  tid);
3269  errno = EINVAL;
3270  return -1;
3271  }
3272 
3273  if (tthread == target->current_thread)
3274  return xen_vm_flush_current_thread(target);
3275 
3276  if (!OBJVALID(tthread) || !OBJDIRTY(tthread)) {
3277  vdebug(8,LA_TARGET,LF_XV,
3278  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
3279  xstate->id,tthread->tid,OBJVALID(tthread),OBJDIRTY(tthread));
3280  return 0;
3281  }
3282 
3283  SAFE_PERSONALITY_OP(flush_thread,rc,0,target,tid);
3284  if (rc)
3285  goto errout;
3286 
3287  OBJSCLEAN(tthread);
3288 
3289  return 0;
3290 
3291  errout:
3292  return -1;
3293 }
3294 
3295 static int xen_vm_flush_all_threads(struct target *target) {
3296  int rc, retval = 0;
3297  GHashTableIter iter;
3298  struct target_thread *tthread;
3299  struct target_thread *current_thread = NULL;
3300 
3301  g_hash_table_iter_init(&iter,target->threads);
3302  while (g_hash_table_iter_next(&iter,NULL,(gpointer)&tthread)) {
3303  if (tthread == target->current_thread
3304  || tthread == target->global_thread)
3305  continue;
3306  else
3307  rc = xen_vm_flush_thread(target,tthread->tid);
3308  if (rc) {
3309  verror("could not flush thread %"PRIiTID"\n",tthread->tid);
3310  ++retval;
3311  }
3312  }
3313 
3314  /*
3315  * If the current thread is not the global thread, we have to try to
3316  * flush it.
3317  */
3318  if (target->current_thread
3319  && target->current_thread != target->global_thread) {
3320  /* Save this off to tell flush_global_thread below that
3321  * it must merge its state with this thread's state.
3322  *
3323  * So if the current thread is not the global thread itself, and
3324  * its state is valid (whether it is dirty or not!!), we must
3325  * merge.
3326  */
3327  if (OBJVALID(target->current_thread))
3328  current_thread = target->current_thread;
3329 
3330  rc = xen_vm_flush_current_thread(target);
3331  if (rc) {
3332  verror("could not flush current thread %"PRIiTID"\n",
3333  target->current_thread->tid);
3334  ++retval;
3335  }
3336  }
3337 
3338  /*
3339  * Also, we always have to try to flush the "global" thread.
3340  * Remember, the global thread is a fake thread; it never maps to
3341  * anything real; it is just the current CPU registers. If the user
3342  * sets any probes or modifies registers with TID_GLOBAL, they only
3343  * get flushed if we flush the global thread.
3344  *
3345  * OF COURSE, this means that if you mix per-thread probing/register
3346  * modification and global thread modification, your changes to the
3347  * current hardware state will almost certainly stomp on each
3348  * other. OK, this is no longer permitted; get_unused_debug_reg now
3349  * makes sure this cannot happen.
3350  *
3351  * If we were handling a software breakpoint, we would have modified
3352  * cpu context in the current thread; if we were handling a hardware
3353  * probe or modifying a hardware probe, we would have written the
3354  * global thread's cpu state (AND the current thread's CPU state
3355  * too, like EIP, etc). So what we need to do is arbitrate between the
3356  * two contexts depending on what we're doing. For instance, if we
3357  * handled a hardware probepoint, we'll always need to flush the
3358  * global thread -- see monitor() and flush_global_thread().
3359  */
3360  rc = xen_vm_flush_global_thread(target,current_thread);
3361  if (rc) {
3362  verror("could not flush global thread %"PRIiTID"\n",TID_GLOBAL);
3363  ++retval;
3364  }
3365 
3366  return retval;
3367 }
3368 
3369 static int __value_get_append_tid(struct target *target,struct value *value,
3370  void *data) {
3371  struct array_list *list = (struct array_list *)data;
3372  struct value *v;
3373 
3374  v = target_load_value_member(target,target->global_tlctxt,
3375  value,"pid",NULL,LOAD_FLAG_NONE);
3376  if (!v) {
3377  verror("could not load pid in task; BUG?\n");
3378  /* errno should be set for us. */
3379  return -1;
3380  }
3381  array_list_append(list,(void *)(uintptr_t)v_i32(v));
3382  value_free(v);
3383 
3384  return 0;
3385 }
3386 
3387 static struct array_list *xen_vm_list_available_tids(struct target *target) {
3388  struct array_list *retval;
3389  SAFE_PERSONALITY_OP(list_available_tids,retval,NULL,target);
3390  return retval;
3391 }
3392 
3393 static int xen_vm_load_all_threads(struct target *target,int force) {
3394  struct array_list *cthreads;
3395  int rc = 0;
3396  int i;
3397  struct target_thread *tthread;
3398 
3399  cthreads = target_list_threads(target);
3400 
3401  for (i = 0; i < array_list_len(cthreads); ++i) {
3402  tthread = (struct target_thread *)array_list_item(cthreads,i);
3403 
3404  vdebug(8,LA_TARGET,LF_XV,
3405  "tid %"PRIiTID" (%p)\n",tthread->tid,tthread);
3406 
3407  if (!xen_vm_load_thread(target,tthread->tid,force)) {
3408  if (target_lookup_thread(target,tthread->tid)) {
3409  verror("could not load thread %"PRIiTID"\n",tthread->tid);
3410  --rc;
3411  continue;
3412  }
3413  /*
3414  * If it's no longer in the cache, we evicted it because it
3415  * no longer exists... so this is not an error.
3416  */
3417  }
3418  }
3419 
3420  return rc;
3421 }
3422 
3423 static int xen_vm_load_available_threads(struct target *target,int force) {
3424  int rc;
3425 
3426  /*
3427  * Load the current thread first to load the global thread. The
3428  * current thread will get loaded again in the loop below if @force
3429  * is set...
3430  */
3431  if (!__xen_vm_load_current_thread(target,force,1)) {
3432  verror("could not load current thread!\n");
3433  return -1;
3434  }
3435 
3436  SAFE_PERSONALITY_OP(load_available_threads,rc,0,target,force);
3437  return rc;
3438 }
3439 
3440 static int xen_vm_thread_snprintf(struct target *target,
3441  struct target_thread *tthread,
3442  char *buf,int bufsiz,
3443  int detail,char *sep,char *kvsep) {
3444  int rc = 0;
3445  int nrc;
3446 
3447  if (tthread == target->current_thread || tthread == target->global_thread) {
3448  rc = target_regcache_snprintf(target,tthread,tthread->tidctxt,
3449  buf,bufsiz,detail,sep,kvsep,0);
3450  if (rc < 0)
3451  return rc;
3452  }
3453 
3454  SAFE_PERSONALITY_OP(thread_snprintf,nrc,0,target,tthread,
3455  (rc >= bufsiz) ? NULL : buf + rc,
3456  (rc >= bufsiz) ? 0 : bufsiz - rc,
3457  detail,sep,kvsep);
3458  if (nrc < 0) {
3459  verror("could not snprintf personality info for thread %d!\n",
3460  tthread->tid);
3461  return nrc;
3462  }
3463 
3464  return rc + nrc;
3465 }
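 /*
  * The (rc >= bufsiz) ? NULL : buf + rc pattern above leans on C99
  * snprintf semantics: given a zero size it writes nothing but still
  * returns the would-be length, e.g.
  *
  *   int need = snprintf(NULL,0,"%d",x);   // measure only
  *   char *p = malloc(need + 1);
  *   snprintf(p,need + 1,"%d",x);          // actually format
  *
  * so concatenated writers can keep accumulating rc safely past the
  * end of the buffer.
  */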
3466 
3471 #if 0
3472 static int xen_vm_thread_snprintf(struct target_thread *tthread,
3473  char *buf,int bufsiz,
3474  int detail,char *sep,char *kvsep) {
3475  struct xen_vm_thread_state *tstate;
3476  struct cpu_user_regs *r;
3477  int rc = 0;
3478  int nrc;
3479 
3480  if (detail < 0)
3481  goto personality_out;
3482 
3483  tstate = (struct xen_vm_thread_state *)tthread->state;
3484  if (!tstate)
3485  goto personality_out;
3486 
3487  r = &tstate->context.user_regs;
3488 
3489  if (detail >= 0) {
3490  ;
3491  }
3492 
3493  if (detail >= 1)
3494  rc += snprintf((rc >= bufsiz) ? NULL : buf + rc,
3495  (rc >= bufsiz) ? 0 :bufsiz - rc,
3496  "%s" "ip%s%"RF "%s" "bp%s%"RF "%s" "sp%s%"RF "%s"
3497  "flags%s%"RF "%s" "ax%s%"RF "%s" "bx%s%"RF "%s"
3498  "cx%s%"RF "%s" "dx%s%"RF "%s" "di%s%"RF "%s"
3499  "si%s%"RF "%s" "cs%s%d" "%s" "ss%s%d" "%s"
3500  "ds%s%d" "%s" "es%s%d" "%s"
3501  "fs%s%d" "%s" "gs%s%d",
3502 #if __WORDSIZE == 64
3503  sep,kvsep,r->rip,sep,kvsep,r->rbp,sep,kvsep,r->rsp,sep,
3504  kvsep,r->eflags,sep,kvsep,r->rax,sep,kvsep,r->rbx,sep,
3505  kvsep,r->rcx,sep,kvsep,r->rdx,sep,kvsep,r->rdi,sep,
3506  kvsep,r->rsi,sep,kvsep,r->cs,sep,kvsep,r->ss,sep,
3507  kvsep,r->ds,sep,kvsep,r->es,sep,
3508  kvsep,r->fs,sep,kvsep,r->gs
3509 #else
3510  sep,kvsep,r->eip,sep,kvsep,r->ebp,sep,kvsep,r->esp,sep,
3511  kvsep,r->eflags,sep,kvsep,r->eax,sep,kvsep,r->ebx,sep,
3512  kvsep,r->ecx,sep,kvsep,r->edx,sep,kvsep,r->edi,sep,
3513  kvsep,r->esi,sep,kvsep,r->cs,sep,kvsep,r->ss,sep,
3514  kvsep,r->ds,sep,kvsep,r->es,sep,
3515  kvsep,r->fs,sep,kvsep,r->gs
3516 #endif
3517  );
3518  if (detail >= 2)
3519  rc += snprintf((rc >= bufsiz) ? NULL : buf + rc,
3520  (rc >= bufsiz) ? 0 :bufsiz - rc,
3521  "%s" "dr0%s%"DRF "%s" "dr1%s%"DRF
3522  "%s" "dr2%s%"DRF "%s" "dr3%s%"DRF
3523  "%s" "dr6%s%"DRF "%s" "dr7%s%"DRF,
3524  sep,kvsep,tstate->dr[0],sep,kvsep,tstate->dr[1],
3525  sep,kvsep,tstate->dr[2],sep,kvsep,tstate->dr[3],
3526  sep,kvsep,tstate->dr[6],sep,kvsep,tstate->dr[7]);
3527 
3528  personality_out:
3529  nrc = target_personality_thread_snprintf(tthread,
3530  (rc >= bufsiz) ? NULL : buf + rc,
3531  (rc >= bufsiz) ? 0 : bufsiz - rc,
3532  detail,sep,kvsep);
3533  if (nrc < 0) {
3534  verror("could not snprintf personality info for thread %d!\n",
3535  tthread->tid);
3536  return rc;
3537  }
3538 
3539  return rc + nrc;
3540 }
3541 #endif /* 0 */
3542 
3543 static int xen_vm_invalidate_thread(struct target *target,
3544  struct target_thread *tthread) {
3545  struct xen_vm_thread_state *xtstate;
3546  int rc;
3547 
3548  xtstate = (struct xen_vm_thread_state *)tthread->state;
3549  if (xtstate)
3550  xtstate->pgd_phys = 0;
3551 
3552  SAFE_PERSONALITY_OP(invalidate_thread,rc,0,target,tthread);
3553 
3554  return rc;
3555 }
3556 
3557 static int __xen_vm_resume(struct target *target,int detaching) {
3558  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3559  int rc;
3560 
3561  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
3562 
3563  if (xen_vm_load_dominfo(target))
3564  vwarn("could not load dominfo for dom %d, trying to resume anyway!\n",xstate->id);
3565 
3566  if (!xstate->dominfo.paused) {
3567  vwarn("dom %d not paused; not invalidating and resuming; bug?\n",
3568  xstate->id);
3569  return -1;
3570  }
3571 
3572  /*
3573  * Only call this if we have threads still, or we are not detaching;
3574  * if we're detaching and the target_api has already deleted our
3575  * threads, flush_all_threads will end up loading at least the
3576  * global thread... which is counterproductive.
3577  */
3578  if (!detaching
3579  || g_hash_table_size(target->threads) || target->global_thread) {
3580  /* Flush back registers if they're dirty! */
3581  target_flush_all_threads(target);
3582 
3583  /* Invalidate our cached copies of threads. */
3584  target_invalidate_all_threads(target);
3585  }
3586 
3587  /* flush_context will not have done this necessarily! */
3588  xstate->dominfo_valid = 0;
3589 
3590  rc = xc_domain_unpause(xc_handle,xstate->id);
3591 
3593 
3594  return rc;
3595 }
3596 
3597 static int xen_vm_resume(struct target *target) {
3598  return __xen_vm_resume(target,0);
3599 }
3600 
3601 /*
3602  * If again is not NULL, we set again
3603  * to -1 if there was an error, but the caller should try again;
3604  * to 0 if the caller should not try again;
3605  * to 1 if we just handled a bp and the caller should try again;
3606  * to 2 if we just handled an ss and the caller should try again.
3607  */
3608 static target_status_t xen_vm_handle_exception(struct target *target,
3609  target_exception_flags_t flags,
3610  int *again,void *priv) {
3611  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3612  int cpl;
3613  REGVAL ipval;
3614  int dreg = -1;
3615  struct probepoint *dpp;
3616  struct target_thread *tthread;
3617  tid_t overlay_leader_tid;
3618  struct xen_vm_thread_state *gtstate;
3619  struct xen_vm_thread_state *xtstate;
3620  tid_t tid;
3621  struct probepoint *spp;
3622  struct target_thread *sstep_thread;
3623  struct target_thread *bogus_sstep_thread;
3624  ADDR bogus_sstep_probepoint_addr;
3625  struct target *overlay;
3626  struct target_memmod *pmmod;
3627  ADDR paddr;
3628  REGVAL tmp_ipval;
3629  int rc;
3630  target_status_t tstatus;
3631 
3632  /* Reload our dominfo */
3633  xstate->dominfo_valid = 0;
3634  if (xen_vm_load_dominfo(target)) {
3635  verror("could not load dominfo; returning to user!\n");
3636  goto out_err;
3637  }
3638 
3639  tstatus = target_status(target);
3640 
3641  if (tstatus == TSTATUS_RUNNING) {
3642  vdebug(5,LA_TARGET,LF_XV,
3643  "ignoring \"exception\" in our running VM %d; not for us\n",
3644  xstate->id);
3645  if (again)
3646  *again = 0;
3647  return tstatus;
3648  }
3649  else if (tstatus == TSTATUS_PAUSED) {
3650  vdebug(3,LA_TARGET,LF_XV,
3651  "new debug event (brctr = %"PRIu64", tsc = %"PRIx64")\n",
3652  xen_vm_get_counter(target),xen_vm_get_tsc(target));
3653 
3654  target->monitorhandling = 1;
3655 
3656  /* Force the current thread to be reloaded. */
3657  target->current_thread = NULL;
3658 
3659  /*
3660  * Load the global thread (machine state) very first... we have
3661  * to be able to read some register state!
3662  */
3663  if (!__xen_vm_load_current_thread(target,0,1)) {
3664  verror("could not load global thread!\n");
3665  goto out_err;
3666  }
3667 
3668  /*
3669  * Grab EIP and CPL first so we can see if we're in user or
3670  * kernel space and print better messages.
3671  */
3672  errno = 0;
3673  cpl = __xen_get_cpl(target,TID_GLOBAL);
3674  if (errno) {
3675  verror("could not read CPL while checking debug event: %s\n",
3676  strerror(errno));
3677  goto out_err;
3678  }
3679  ipval = target_read_reg(target,TID_GLOBAL,target->ipregno);
3680  if (errno) {
3681  verror("could not read EIP while checking debug event: %s\n",
3682  strerror(errno));
3683  goto out_err;
3684  }
3685 
3686  /*
3687  * Give the personality a chance to update its state.
3688  */
3689  SAFE_PERSONALITY_OP_WARN_NORET(handle_exception,rc,0,target,flags);
3690 
3691  /*
3692  * Give the memops a chance to update.
3693  */
3694  if (xstate->memops && xstate->memops->handle_exception_ours) {
3695  xstate->memops->handle_exception_ours(target);
3696  }
3697 
3698  /*
3699  * Reload the current thread. We don't force it because we
3700  * flush all threads before continuing the loop via again:,
3701  * or in target_resume/target_singlestep.
3702  */
3703  xen_vm_load_current_thread(target,0);
3704 
3705  if (__xen_vm_in_userspace(target,cpl,ipval)) {
3706  tthread = target->current_thread;
3707 
3708  if (!tthread) {
3709  verror("could not load current userspace thread at 0x%"PRIxADDR"!\n",
3710  ipval);
3711  goto out_err;
3712  }
3713 
3714  gtstate = (struct xen_vm_thread_state *) \
3715  target->global_thread->state;
3716  xtstate = (struct xen_vm_thread_state *) \
3717  target->current_thread->state;
3718  tid = target->current_thread->tid;
3719 
3720  vdebug(5,LA_TARGET,LF_XV,
3721  "user-mode debug event at EIP 0x%"PRIxADDR" in tid %"PRIiTID";"
3722  " will try to handle it if it is single step!\n",
3723  ipval,tid);
3724  }
3725  else {
3726  /*
3727  * First, we check the current thread's state/registers to
3728  * try to handle the exception in the current thread. If
3729  * there is no information (and the current thread was not
3730  * the global thread), we try the global thread.
3731  */
3732  if (!(tthread = target->current_thread)) {
3733  verror("could not read current thread!\n");
3734  goto out_err_again;
3735  }
3736 
3737  /*
3738  * Next, if auto garbage collection is enabled, do it.
3739  *
3740  * We need to only do this every N interrupts, or something,
3741  * but what we really want is something that is related to
3742  * how many cycles have elapsed in the target -- i.e., if
3743  * more than one second's worth of wallclock time has
3744  * elapsed in the target, we should garbage collect.
3745  *
3746  * But I don't know how to grab the current cycle counter
3747  * off the top of my head, so just do it when we accumulate
3748  * at least 32 threads.
3749  */
3750  /*
3751  if (g_hash_table_size(target->threads) > 32) {
3752  target_gc_threads(target);
3753  }
3754  */
3755 
3756  gtstate = (struct xen_vm_thread_state *)target->global_thread->state;
3757  xtstate = (struct xen_vm_thread_state *)tthread->state;
3758  tid = tthread->tid;
3759  }
3760 
3761  vdebug(5,LA_TARGET,LF_XV,
3762  "thread %d at EIP 0x%"PRIxADDR": "
3763  "dbgreg[6]=0x%"DRF", eflags=0x%"RF"\n",
3764  tid, ipval, xtstate->context.debugreg[6],
3765  xtstate->context.user_regs.eflags);
3766 
3767  /* handle the triggered probe based on its event type */
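      /*
       * NB: in DR6, bits 0-3 (B0-B3) record which hw debug register
       * fired, and bit 14 (0x4000, the BS bit) flags a single-step
       * trap; those are the bits the dispatch below keys on.
       */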
3768  if (xtstate->context.debugreg[6] & 0x4000
3769  || (xstate->hvm && xstate->hvm_monitor_trap_flag_set)
3770  || (tthread->emulating_debug_mmod)) {
3771  vdebug(3,LA_TARGET,LF_XV,"new single step debug event (MTF %d)\n",
3772  xstate->hvm_monitor_trap_flag_set);
3773 
3774  /*
3775  * Three cases:
3776  * 1) We had to emulate a breakpoint/singlestep for a shared
3777  * page breakpoint; or
3778  * 2) we single-stepped an instruction that could have taken
3779  * us to a userspace EIP; or
3780  * 3) somehow the kernel jumped to one!
3781  */
3782  if (tthread->emulating_debug_mmod) {
3783  //&& __xen_vm_in_userspace(target,cpl,ipval)) {
3784  /* This is a shared-page singlestep. */
3785  tmp_ipval = ipval - target->arch->breakpoint_instrs_len;
3786 
3787  vdebug(5,LA_TARGET,LF_XV,
3788  "emulating debug memmod at ss for tid %"PRIiTID
3789  " at paddr 0x%"PRIxADDR" (vaddr 0x%"PRIxADDR")\n",
3790  tid,tthread->emulating_debug_mmod->addr,tmp_ipval);
3791 
3792  target_os_emulate_ss_handler(target,tid,tthread->tidctxt,
3793  tthread->emulating_debug_mmod);
3794 
3795  /* Clear the status bits right now. */
3796  /*
3797  xtstate->context.debugreg[6] = 0;
3798  OBJSDIRTY(tthread);
3799 
3800  gtstate->context.debugreg[6] = 0;
3801  OBJSDIRTY(target->global_thread);
3802  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3803  */
3804 
3805  goto out_ss_again;
3806  }
3807  else if (target->sstep_thread
3808  && ((target->sstep_thread->tpc
3809  && target->sstep_thread->tpc->probepoint->can_switch_context)
3810  || (__xen_vm_in_userspace(target,cpl,ipval)
3811  && !target->sstep_thread_overlay))) {
3812  sstep_thread = target->sstep_thread;
3813  }
3814  else if (target->sstep_thread
3815  && target->sstep_thread_overlay) {
3816  if (__xen_vm_in_userspace(target,cpl,ipval)) {
3817  vdebug(5,LA_TARGET,LF_XV,
3818  "single step event in overlay tid %"PRIiTID
3819  " (tgid %"PRIiTID"); notifying overlay\n",
3820  tid,target->sstep_thread_overlay->base_tid);
3821 
3822  /* Clear the status bits right now. */
3823  xtstate->context.debugreg[6] = 0;
3824  OBJSDIRTY(tthread);
3825 
3826  return target_notify_overlay(target->sstep_thread_overlay,
3827  EXCEPTION_SINGLESTEP,
3828  tid,ipval,again);
3829  }
3830  else {
3831  /*
3832  * This is a thread that was stepping in userspace,
3833  * and found itself in the kernel. This can happen
3834  * if we have to use HVM global monitor trap flag
3835  * instead of EFLAGS TF. Even if we had setup the
3836  * MTF to single step the guest in userspace, that
3837  * may not be what happens. For instance, suppose
3838  * the instruction causes a page fault, or that a
3839  * clock interrupt happened. We'll find ourselves
3840  * stepping in the kernel, in its handlers, I
3841  * believe.
3842  *
3843  * We assume the thread did *not* do its singlestep
3844  * of the breakpoint's original instruction. If it
3845  * had, the EIP for the MTF event would still be in
3846  * userspace -- because single step debug exceptions
3847  * are traps following an instruction's execution.
3848  * Thus, we need to put the breakpoint back into
3849  * place and remove all state setup to handle it,
3850  * EXCEPT to note down that this thread's overlay SS
3851  * was interrupted at probepoint X, but that the
3852  * prehandler was already run. This way, we won't
3853  * run the prehandler again.
3854  *
3855  * This of course is somewhat bogus, because it
3856  * might affect vCPU state (we hit the BP twice
3857  * instead of just once)... but whatever.
3858  *
3859  * See target_thread::interrupted_ss_probepoint .
3860  */
3861  vdebug(5,LA_TARGET,LF_XV,
3862  "single step event in overlay tid %"PRIiTID
3863  " (tgid %"PRIiTID") INTO KERNEL (at 0x%"PRIxADDR")"
3864  " notifying overlay\n",
3865  tid,target->sstep_thread_overlay->base_tid,ipval);
3866 
3867  /* Clear the status bits right now. */
3868  xtstate->context.debugreg[6] = 0;
3869  OBJSDIRTY(tthread);
3870 
3871  /*
3872  * Notify the overlay that a "bogus" singlestep
3873  * happened.
3874  */
3875  return target_notify_overlay(target->sstep_thread_overlay,
3876  EXCEPTION_SINGLESTEP_CMD,
3877  tid,ipval,again);
3879  }
3880  }
3881  else
3882  sstep_thread = NULL;
3883 
3884  target->sstep_thread = NULL;
3885 
3886  if (xtstate->context.user_regs.eflags & X86_EF_TF
3887  || (xstate->hvm && xstate->hvm_monitor_trap_flag_set)) {
3888  handle_inferred_sstep:
3889  if (!tthread->tpc) {
3890  if (sstep_thread && __xen_vm_in_userspace(target,cpl,ipval)) {
3891  vwarn("single step event (status reg and eflags) into"
3892  " userspace; trying to handle in sstep thread"
3893  " %"PRIiTID"!\n",sstep_thread->tid);
3894  goto handle_sstep_thread;
3895  }
3896  else {
3897  target->ops->handle_step(target,tthread,NULL);
3898 
3899  /* Clear the status bits right now. */
3900  xtstate->context.debugreg[6] = 0;
3901  OBJSDIRTY(tthread);
3902  /*
3903  * MUST DO THIS. If we are going to modify both the
3904  * current thread's CPU state possibly, and possibly
3905  * operate on the global thread's CPU state, we need
3906  * to clear the global thread's debug reg status
3907  * here; this also has the important side effect of
3908  * forcing a merge of the global thread's debug reg
3909  * state; see flush_global_thread !
3910  */
3911  gtstate->context.debugreg[6] = 0;
3912  OBJSDIRTY(target->global_thread);
3913  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3914 
3915  goto out_ss_again;
3916  /*
3917  verror("single step event (status reg and eflags), but"
3918  " no handling context in thread %"PRIiTID"!"
3919  " letting user handle.\n",tthread->tid);
3920  goto out_paused;
3921  */
3922  }
3923  }
3924 
3925  /* Save the probepoint we are currently handling;
3926  * handle_step may clear tpc.
3927  */
3928  spp = tthread->tpc->probepoint;
3929 
3930  target->ops->handle_step(target,tthread,tthread->tpc->probepoint);
3931 
3932  /* Clear the status bits right now. */
3933  xtstate->context.debugreg[6] = 0;
3934  OBJSDIRTY(tthread);
3935  /*
3936  * MUST DO THIS. If we are going to modify both the
3937  * current thread's CPU state possibly, and possibly
3938  * operate on the global thread's CPU state, we need
3939  * to clear the global thread's debug reg status
3940  * here; this also has the important side effect of
3941  * forcing a merge of the global thread's debug reg
3942  * state; see flush_global_thread !
3943  */
3944  if (spp->style == PROBEPOINT_HW) {
3945  gtstate->context.debugreg[6] = 0;
3946  OBJSDIRTY(target->global_thread);
3947  }
3948  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3949 
3950  goto out_ss_again;
3951  }
3952  else if (sstep_thread) {
3953  vdebug(5,LA_TARGET,LF_XV,
3954  "thread %"PRIiTID" single stepped can_context_switch"
3955  " instr; trying to handle exception in old thread!\n",
3956  sstep_thread->tid);
3957 
3958  handle_sstep_thread:
3959  target->ops->handle_step(target,sstep_thread,
3960  sstep_thread->tpc->probepoint);
3961 
3962  /* Clear the status bits right now. */
3963  xtstate->context.debugreg[6] = 0;
3964  OBJSDIRTY(tthread);
3965  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3966 
3967  goto out_ss_again;
3968  }
3969  else if (__xen_vm_in_userspace(target,cpl,ipval)) {
3970  verror("user-mode debug event (single step) at 0x%"PRIxADDR
3971  "; debug status reg 0x%"DRF"; eflags 0x%"RF
3972  "; skipping handling!\n",
3973  ipval,xtstate->context.debugreg[6],
3974  xtstate->context.user_regs.eflags);
3975  goto out_err_again;
3976  }
3977  else {
3978  target->ops->handle_step(target,tthread,NULL);
3979 
3980  /* Clear the status bits right now. */
3981  xtstate->context.debugreg[6] = 0;
3982  OBJSDIRTY(tthread);
3983  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3984 
3985  goto out_ss_again;
3986  }
3987  }
3988  else {
3989  vdebug(3,LA_TARGET,LF_XV,"new (breakpoint?) debug event\n");
3990  /*
3991  * Some Xen kernels send us a debug event after a successful
3992  * singlestep, but they do not set the right flag to notify
3993  * us. So, if the TF flag is set, and we were expecting a
3994  * singlestep to happen, and there is not a breakpoint
3995  * exception instead -- assume that it is a singlestep
3996  * event.
3997  *
3998  * So, save it off in a special variable and handle below.
3999  */
4000  bogus_sstep_thread = target->sstep_thread;
4001  target->sstep_thread = NULL;
4002 
4003  dreg = -1;
4004 
4005  /* Check the hw debug status reg first */
4006 
4007  /* Only check the 4 low-order bits */
4008  if (xtstate->context.debugreg[6] & 15) {
4009  if (xtstate->context.debugreg[6] & 0x1)
4010  dreg = 0;
4011  else if (xtstate->context.debugreg[6] & 0x2)
4012  dreg = 1;
4013  else if (xtstate->context.debugreg[6] & 0x4)
4014  dreg = 2;
4015  else if (xtstate->context.debugreg[6] & 0x8)
4016  dreg = 3;
4017  }
4018 
4019  /*
4020  * More hypervisor bugs: some Xens don't appropriately
4021  * signal us for hw debug exceptions, and leave a stale
4022  * value in DR6. So, even if the debugreg[6] status
4023  * indicated that an HW debug reg was hit, check (if the HW
4024  * debug reg is for a breakpoint) that EIP is the same as
4025  * that debug reg! If it is not, don't believe DR6, and
4026  * look for soft breakpoints.
4027  */
4028  if (dreg > -1) {
4029  dpp = (struct probepoint *) \
4030  g_hash_table_lookup(tthread->hard_probepoints,
4031  (gpointer)ipval);
4032  if (!dpp) {
4033  dpp = (struct probepoint *) \
4034  g_hash_table_lookup(target->global_thread->hard_probepoints,
4035  (gpointer)ipval);
4036  if (!dpp) {
4037  verror("DR6 said hw dbg reg %d at 0x%"DRF" was hit;"
4038  " but EIP 0x%"PRIxADDR" in tid %"PRIiTID" does not"
4039  " match! ignoring hw dbg status; continuing"
4040  " other checks!\n",
4041  dreg,xtstate->context.debugreg[dreg],ipval,
4042  tthread->tid);
4043  dreg = -1;
4044 
4045  /*
4046  * Clear DR6 in global thread; it is clearly wrong!
4047  *
4048  * MUST DO THIS. If we are going to modify both the
4049  * current thread's CPU state possibly, and possibly
4050  * operate on the global thread's CPU state, we need to
4051  * clear the global thread's debug reg status here; this
4052  * also has the important side effect of forcing a merge
4053  * of the global thread's debug reg state; see
4054  * flush_global_thread !
4055  */
4056  gtstate->context.debugreg[6] = 0;
4057  OBJSDIRTY(target->global_thread);
4058  }
4059  }
4060  }
4061 
4062  if (dreg > -1) {
4063  if (__xen_vm_in_userspace(target,cpl,ipval)) {
4064  vwarn("user-mode debug event (hw dbg reg)"
4065  " at 0x%"PRIxADDR"; debug status reg 0x%"DRF"; eflags"
4066  " 0x%"RF"; trying to handle in global thread!\n",
4067  ipval,xtstate->context.debugreg[6],
4068  xtstate->context.user_regs.eflags);
4069  }
4070 
4071  /* If we are relying on the status reg to tell us,
4072  * then also read the actual hw debug reg to get the
4073  * address we broke on.
4074  */
4075  errno = 0;
4076  ipval = xtstate->context.debugreg[dreg];
4077 
4078  vdebug(4,LA_TARGET,LF_XV,
4079  "found hw break (status) in dreg %d on 0x%"PRIxADDR"\n",
4080  dreg,ipval);
4081  }
4082  else if (__xen_vm_in_userspace(target,cpl,ipval)) {
4083  overlay = target_lookup_overlay(target,tid);
4084 
4085  /* If we didn't find one, try to find its leader as an overlay. */
4086  if (!overlay) {
4087  overlay_leader_tid =
4088  target_os_thread_get_leader(target,tthread->tid);
4089  overlay = target_lookup_overlay(target,overlay_leader_tid);
4090  if (overlay) {
4091  vdebug(5,LA_TARGET,LF_XV,
4092  "found yet-unknown thread %d with"
4093  " overlay leader %d; will notify!\n",
4094  tthread->tid,overlay_leader_tid);
4095  }
4096  }
4097 
4098  if (overlay) {
4099  /*
4100  * Try to notify the overlay!
4101  */
4102  vdebug(5,LA_TARGET,LF_XV,
4103  "user-mode debug event in overlay tid %"PRIiTID
4104  " (tgid %"PRIiTID") (not single step, not hw dbg reg)"
4105  " at 0x%"PRIxADDR"; debug status reg 0x%"DRF"; eflags"
4106  " 0x%"RF"; passing to overlay!\n",
4107  tid,overlay->base_tid,ipval,
4108  xtstate->context.debugreg[6],
4109  xtstate->context.user_regs.eflags);
4110 
4111  /*
4112  * We don't really know what kind of exception it
4113  * is. We can only assume it might be a breakpoint,
4114  * and set additional singlestep indicator flags if
4115  * possible. Some Xens don't get dr6 set
4116  * appropriately, it seems, so we have to catch the
4117  * inferred single step. But just cause we
4118  * commanded a single step doesn't mean it happened
4119  * appropriately, etc... the overlay has to handle
4120  * it.
4121  */
4122  target_exception_flags_t bp_ef = EXCEPTION_BREAKPOINT;
4123  if (xtstate->context.debugreg[6] & 0x4000) {
4124  bp_ef |= EXCEPTION_SINGLESTEP;
4125  vdebug(5,LA_TARGET,LF_XV,
4126  "single step debug event in overlay\n");
4127  }
4128  else if (bogus_sstep_thread
4129  && target->sstep_thread_overlay == overlay
4130  && xtstate->context.user_regs.eflags & X86_EF_TF) {
4131  vdebug(5,LA_TARGET,LF_XV,
4132  "inferred single step debug event in overlay\n");
4133  bp_ef |= EXCEPTION_SINGLESTEP_CMD;
4134  }
4135 
4136  /* Clear the status bits right now. */
4137  xtstate->context.debugreg[6] = 0;
4138  OBJSDIRTY(tthread);
4139 
4140  return target_notify_overlay(overlay,bp_ef,
4141  tid,ipval,again);
4142  }
4143  else {
4144  /*
4145  * Try to lookup paddr for ipval; if it matches and
4146  * hits as a memmod... then emulate a breakpoint.
4147  *
4148  * To do this, we must mark this kthread as
4149  * emulating a breakpoint at a memmod; flip the
4150  * memmod; then catch its singlestep above; and
4151  * flip the memmod back.
4152  */
4153 
4154  /* XXX: this is bad. We use the base target's
4155  * breakpoint_instr_len to try to detect an overlay!
4156  * It's ok for Xen and the Xen-process overlay, but
4157  * it's a definite abstraction breakdown.
4158  */
4159  tmp_ipval = ipval - target->arch->breakpoint_instrs_len;
4160  rc = xen_vm_addr_v2p(target,TID_GLOBAL,tmp_ipval,&paddr);
4161  if (!rc)
4162  pmmod = target_memmod_lookup(target,TID_GLOBAL,paddr,1);
4163  if (!rc && pmmod) {
4164  /*
4165  * Emulate it!
4166  */
4167  vdebug(5,LA_TARGET,LF_XV,
4168  "emulating debug memmod at bp for tid %"PRIiTID
4169  " at paddr 0x%"PRIxADDR" (vaddr 0x%"PRIxADDR")\n",
4170  tid,pmmod->addr,tmp_ipval);
4171 
4172  if (target_os_emulate_bp_handler(target,tid,
4173  tthread->tidctxt,
4174  pmmod)) {
4175  verror("could not emulate debug memmod for"
4176  " tid %"PRIiTID" at paddr 0x%"PRIxADDR"\n",
4177  tid,pmmod->addr);
4178  goto out_err_again;
4179  }
4180  else {
4181  /* Clear the status bits right now. */
4182  xtstate->context.debugreg[6] = 0;
4183  OBJSDIRTY(tthread);
4184 
4185  gtstate->context.debugreg[6] = 0;
4186  OBJSDIRTY(target->global_thread);
4187  vdebug(5,LA_TARGET,LF_XV,
4188  "cleared status debug reg 6\n");
4189 
4190  goto out_bp_again;
4191  }
4192  }
4193  else {
4194  verror("user-mode debug event (not single step, not"
4195  " hw dbg reg) at 0x%"PRIxADDR"; debug status reg"
4196  " 0x%"DRF"; eflags 0x%"RF"; skipping handling!\n",
4197  tmp_ipval,xtstate->context.debugreg[6],
4198  xtstate->context.user_regs.eflags);
4199  goto out_err_again;
4200  }
4201  }
4202  }
4203  else {
4204  vdebug(5,LA_TARGET,LF_XV,
4205  "dreg status was 0x%"PRIxREGVAL"; trying eip method\n",
4206  (ADDR)xtstate->context.debugreg[6]);
4207 
4208  if (xtstate->dr[0] == ipval)
4209  dreg = 0;
4210  else if (xtstate->dr[1] == ipval)
4211  dreg = 1;
4212  else if (xtstate->dr[2] == ipval)
4213  dreg = 2;
4214  else if (xtstate->dr[3] == ipval)
4215  dreg = 3;
4216 
4217  if (dreg > -1)
4218  vdebug(4,LA_TARGET,LF_XV,
4219  "found hw break (eip) in dreg %d on 0x%"PRIxADDR"\n",
4220  dreg,ipval);
4221  else {
4222  if (xtstate != gtstate) {
4223  /*
4224  * Check the global thread too; might be a
4225  * global breakpoint/watchpoint.
4226  */
4227  if (gtstate->dr[0] == ipval)
4228  dreg = 0;
4229  else if (gtstate->dr[1] == ipval)
4230  dreg = 1;
4231  else if (gtstate->dr[2] == ipval)
4232  dreg = 2;
4233  else if (gtstate->dr[3] == ipval)
4234  dreg = 3;
4235 
4236  if (dreg > -1)
4237  vdebug(4,LA_TARGET,LF_XV,
4238  "found hw break (eip) in GLOBAL dreg %d on 0x%"PRIxADDR"\n",
4239  dreg,ipval);
4240  else
4241  vdebug(4,LA_TARGET,LF_XV,
4242  "did NOT find hw break (eip) on 0x%"PRIxADDR
4243  " (neither global nor per-thread!)\n",
4244  ipval);
4245  }
4246  else {
4247  vdebug(4,LA_TARGET,LF_XV,
4248  "did NOT find hw break (eip) on 0x%"PRIxADDR"\n",
4249  ipval);
4250  }
4251  }
4252  }
4253 
4254  if (dreg > -1) {
4255  /* Found HW breakpoint! */
4256  dpp = (struct probepoint *) \
4257  g_hash_table_lookup(tthread->hard_probepoints,
4258  (gpointer)ipval);
4259 
4260  if (dpp) {
4261  vdebug(4,LA_TARGET,LF_XV,
4262  "found hw break in thread %"PRIiTID"\n",
4263  tthread->tid);
4264  }
4265  else {
4266  /* Check the global thread if not already checking it! */
4267  dpp = (struct probepoint *) \
4268  g_hash_table_lookup(target->global_thread->hard_probepoints,
4269  (gpointer)ipval);
4270  if (!dpp) {
4271  verror("could not find probepoint for hw dbg reg %d"
4272  " in current or global threads!\n",dreg);
4273  goto out_err;
4274  }
4275  else {
4276  vdebug(4,LA_TARGET,LF_XV,
4277  "found hw break in global thread!\n");
4278 
4279  /*
4280  * MUST DO THIS. If we are going to modify both
4281  * the current thread's CPU state possibly, and
4282  * possibly operate on the global thread's CPU
4283  * state, we need to clear the global thread's
4284  * debug reg status here; this also has the
4285  * important side effect of forcing a merge of
4286  * the global thread's debug reg state; see
4287  * flush_global_thread !
4288  */
4289  gtstate->context.debugreg[6] = 0;
4290  OBJSDIRTY(target->global_thread);
4291  }
4292  }
4293 
4294  /* BEFORE we run the bp handler:
4295  *
4296  * If the domain happens to be in singlestep mode, and
4297  * we are hitting a breakpoint anyway... we have to
4298  * handle the breakpoint, singlestep ourselves, AND
4299  * THEN leave the processor in single step mode.
4300  */
4301  if (0 && xtstate->context.user_regs.eflags & X86_EF_TF) {
4302  //target->sstep_leave_enabled = 1;
4303  }
4304 
4305  /* Run the breakpoint handler. */
4306  target->ops->handle_break(target,tthread,dpp,
4307  xtstate->context.debugreg[6] & 0x4000);
4308 
4309  /* Clear the status bits right now. */
4310  xtstate->context.debugreg[6] = 0;
4311  OBJSDIRTY(tthread);
4312  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
4313 
4314  goto out_bp_again;
4315  }
4316  else if ((dpp = (struct probepoint *) \
4317  g_hash_table_lookup(target->soft_probepoints,
4318  (gpointer)(ipval - target->arch->breakpoint_instrs_len)))) {
4319  /* Run the breakpoint handler. */
4320  target->ops->handle_break(target,tthread,dpp,
4321  xtstate->context.debugreg[6] & 0x4000);
4322 
4323  /* Clear the status bits right now. */
4324  xtstate->context.debugreg[6] = 0;
4325  OBJSDIRTY(tthread);
4326  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
4327 
4328  goto out_bp_again;
4329  }
4330  else if (xtstate->context.user_regs.eflags & X86_EF_TF
4331  && tthread
4332  && tthread->tpc
4333  && tthread->tpc->probepoint) {
4334  vdebug(5,LA_TARGET,LF_XV,
4335  "thread-inferred single step for dom %d (TF set, but not"
4336  " dreg status!) at 0x%"PRIxADDR" (stepped %lu bytes"
4337  " from probepoint)!\n",
4338  xstate->id,ipval,ipval - tthread->tpc->probepoint->addr);
4339  sstep_thread = tthread;
4340  goto handle_inferred_sstep;
4341  }
4342  else if (xtstate->context.user_regs.eflags & X86_EF_TF
4343  && bogus_sstep_thread
4344  && bogus_sstep_thread->tpc
4345  && bogus_sstep_thread->tpc->probepoint) {
4346  bogus_sstep_probepoint_addr =
4347  bogus_sstep_thread->tpc->probepoint->addr;
4348 
4349  /*
4350  * We have to assume it's valid. We can't do expensive
4351  * stuff and see if it could have gotten here validly;
4352  * we could have stepped a RET, IRET, anything.
4353  */
4354  vdebug(5,LA_TARGET,LF_XV,
4355  "inferred single step for dom %d (TF set, but not"
4356  " dreg status!) at 0x%"PRIxADDR" (stepped %lu bytes"
4357  " from probepoint)!\n",
4358  xstate->id,ipval,ipval - bogus_sstep_probepoint_addr);
4359  sstep_thread = bogus_sstep_thread;
4360  goto handle_inferred_sstep;
4361  }
4362  else if (xtstate->context.user_regs.eflags & X86_EF_TF) {
4363  //phantom:
4364  vwarn("phantom single step for dom %d (no breakpoint"
4365  " set either!); letting user handle fault at"
4366  " 0x%"PRIxADDR"!\n",xstate->id,ipval);
4367  goto out_paused;
4368  }
4369  else {
4370  vwarn("could not find hardware bp and not sstep'ing;"
4371  " letting user handle fault at 0x%"PRIxADDR"!\n",
4372  ipval);
4373  goto out_paused;
4374  }
4375  }
4376  }
4377 
4378  out_err:
4379  target->monitorhandling = 0;
4380  if (again)
4381  *again = 0;
4382  return TSTATUS_ERROR;
4383 
4384  out_err_again:
4385  target->monitorhandling = 0;
4386  if (again)
4387  *again = -1;
4388  return TSTATUS_ERROR;
4389 
4390  out_paused:
4391  target->monitorhandling = 0;
4392  if (again)
4393  *again = 0;
4394  return TSTATUS_PAUSED;
4395 
4396  out_bp_again:
4397  target->monitorhandling = 0;
4398  if (again)
4399  *again = 1;
4400  return TSTATUS_PAUSED;
4401 
4402  out_ss_again:
4403  target->monitorhandling = 0;
4404  if (again)
4405  *again = 2;
4406  return TSTATUS_PAUSED;
4407 }
4408 
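 /*
  * A minimal sketch (not compiled), assuming only the "again" protocol
  * documented above xen_vm_handle_exception(); the real consumers are
  * xen_vm_evloop_handler() and xen_vm_monitor() below.
  */
 #if 0
 {
     int again = 0;
     target_status_t st = xen_vm_handle_exception(target,0,&again,NULL);

     if (st == TSTATUS_ERROR && again == 0)
         ;   /* fatal error; abort to the user */
     else if (st == TSTATUS_ERROR && again == -1)
         ;   /* error, but retry the wait loop */
     else if (st == TSTATUS_PAUSED && (again == 1 || again == 2))
         __xen_vm_resume(target,0);   /* handled a bp (1) or ss (2) */
 }
 #endif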
4409 int xen_vm_evloop_handler(int readfd,int fdtype,void *state) {
4410  struct target *target = (struct target *)state;
4411  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4412  int again;
4413  target_status_t retval;
4414  int vmid = -1;
4415 
4416  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4417  return EVLOOP_HRET_BADERROR;
4418  }
4419 
4420  if (vmid == -1)
4421  return EVLOOP_HRET_SUCCESS;
4422 
4423  if (vmid != 0 && vmid != xstate->id)
4424  return EVLOOP_HRET_SUCCESS;
4425 
4426  again = 0;
4427  retval = xen_vm_handle_exception(target,0,&again,NULL);
4428  if (retval == TSTATUS_ERROR && again == 0)
4429  return EVLOOP_HRET_ERROR;
4430  /*
4431  * XXX: this is the "abort to user handler" case -- but in this
4432  * case, we have no user, basically. Fix this.
4433  */
4434  //else if (retval == TSTATUS_PAUSED && again == 0)
4435  // return EVLOOP_HRET_SUCCESS;
4436 
4437  if (retval != TSTATUS_RUNNING)
4438  __xen_vm_resume(target,0);
4439 
4440  return EVLOOP_HRET_SUCCESS;
4441 }
4442 
4443 int xen_vm_attach_evloop(struct target *target,struct evloop *evloop) {
4444  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4445 
4446  if (!target->evloop) {
4447  verror("no evloop attached!\n");
4448  return -1;
4449  }
4450 
4451  /* get a select()able file descriptor of the event channel */
4452  xstate->evloop_fd = xen_vm_virq_or_vmp_get_fd(target);
4453  if (xstate->evloop_fd == -1) {
4454  verror("event channel not initialized\n");
4455  return -1;
4456  }
4457 
4458  evloop_set_fd(target->evloop,xstate->evloop_fd,EVLOOP_FDTYPE_R,
4459  xen_vm_evloop_handler,target);
4460 
4461  vdebug(5,LA_TARGET,LF_XV,
4462  "added evloop readfd %d event channel\n",xstate->evloop_fd);
4463 
4464  return 0;
4465 }
4466 
4467 int xen_vm_detach_evloop(struct target *target) {
4468  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4469 
4470  if (xstate->evloop_fd < 0)
4471  return 0;
4472 
4473  evloop_unset_fd(target->evloop,xstate->evloop_fd,EVLOOP_FDTYPE_A);
4474 
4475  xstate->evloop_fd = -1;
4476 
4477  return 0;
4478 }
4479 
4480 static target_status_t xen_vm_monitor(struct target *target) {
4481  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4482  int ret, fd;
4483  struct timeval tv;
4484  fd_set inset;
4485  int again;
4486  target_status_t retval;
4487  int vmid = -1;
4488 
4489  /* get a select()able file descriptor of the event channel */
4490  fd = xen_vm_virq_or_vmp_get_fd(target);
4491  if (fd == -1) {
4492  verror("event channel not initialized\n");
4493  return TSTATUS_ERROR;
4494  }
4495 
4496  while (1) {
4497  tv.tv_sec = 0;
4498  tv.tv_usec = 50;
4499  FD_ZERO(&inset);
4500  FD_SET(fd,&inset);
4501 
4502  /* wait for a domain to trigger the VIRQ */
4503  ret = select(fd+1,&inset,NULL,NULL,&tv);
4504  if (ret == -1) // error (e.g., EINTR); just retry
4505  continue;
4506 
4507  if (!FD_ISSET(fd, &inset))
4508  continue; // nothing in eventchn
4509 
4510  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4511  verror("failed to unmask event channel\n");
4512  break;
4513  }
4514 
4515  /* we've got something from eventchn. let's see what it is! */
4516  if (vmid != 0 && vmid != xstate->id)
4517  continue; // not the event that we are looking for
4518 
4519  again = 0;
4520  retval = xen_vm_handle_exception(target,0,&again,NULL);
4521  if (retval == TSTATUS_ERROR && again == 0) {
4522  target->needmonitorinterrupt = 0;
4523  return retval;
4524  }
4525  else if (target->needmonitorinterrupt) {
4526  target->needmonitorinterrupt = 0;
4527  return TSTATUS_INTERRUPTED;
4528  }
4529 
4530  //else if (retval == TSTATUS_PAUSED && again == 0)
4531  // return retval;
4532 
4533  if (xen_vm_load_dominfo(target)) {
4534  vwarn("could not load dominfo for dom %d, trying to unpause anyway!\n",
4535  xstate->id);
4536  __xen_vm_resume(target,0);
4537  }
4538  else if (xstate->dominfo.paused) {
4539  __xen_vm_resume(target,0);
4540  }
4541  }
4542 
4543  return TSTATUS_ERROR; /* Never hit, just compiler foo */
4544 }
4545 
4546 static target_status_t xen_vm_poll(struct target *target,struct timeval *tv,
4547  target_poll_outcome_t *outcome,int *pstatus) {
4548  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4549  int ret, fd;
4550  struct timeval itv;
4551  fd_set inset;
4552  int again;
4553  target_status_t retval;
4554  int vmid = -1;
4555 
4556  /* get a select()able file descriptor of the event channel */
4557  fd = xen_vm_virq_or_vmp_get_fd(target);
4558  if (fd == -1) {
4559  verror("event channel not initialized\n");
4560  return TSTATUS_ERROR;
4561  }
4562 
4563  if (!tv) {
4564  itv.tv_sec = 0;
4565  itv.tv_usec = 0;
4566  tv = &itv;
4567  }
4568  FD_ZERO(&inset);
4569  FD_SET(fd,&inset);
4570 
4571  /* see if the VIRQ is lit for this domain */
4572  ret = select(fd+1,&inset,NULL,NULL,tv);
4573  if (ret == 0) {
4574  if (outcome)
4575  *outcome = POLL_NOTHING;
4576  return TSTATUS_RUNNING;
4577  }
4578 
4579  if (!FD_ISSET(fd, &inset)) {
4580  if (outcome)
4581  *outcome = POLL_NOTHING;
4582  return TSTATUS_RUNNING;
4583  }
4584 
4585  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4586  verror("failed to unmask event channel\n");
4587  if (outcome)
4588  *outcome = POLL_ERROR;
4589  return TSTATUS_ERROR;
4590  }
4591 
4592  /* we've got something from eventchn. let's see what it is! */
4593  if (vmid != 0 && vmid != xstate->id) {
4594  if (outcome)
4595  *outcome = POLL_NOTHING;
4596  return TSTATUS_RUNNING; // not the event that we are looking for
4597  }
4598 
4599  again = 0;
4600  retval = xen_vm_handle_exception(target,0,&again,NULL);
4601  if (pstatus)
4602  *pstatus = again;
4603 
4604  return retval;
4605 }
4606 
4607 static unsigned char *xen_vm_read(struct target *target,ADDR addr,
4608  unsigned long target_length,
4609  unsigned char *buf) {
4610  return xen_vm_read_pid(target,TID_GLOBAL,addr,target_length,buf);
4611 }
4612 
4613 static unsigned long xen_vm_write(struct target *target,ADDR addr,
4614  unsigned long length,unsigned char *buf) {
4615  if (!target->writeable) {
4616  verror("target %s not writeable!\n",target->name);
4617  errno = EROFS;
4618  return 0;
4619  }
4620 
4621  return xen_vm_write_pid(target,TID_GLOBAL,addr,length,buf);
4622 }
4623 
4624 /*
4625  * We have to either load pgd from vcpu context (for a running task), or
4626  * from the task struct (for a swapped out task).
4627  *
4628  * NB: @cr3 will be a physical address, not a kernel virtual address.
4629  * The mm_struct contains a virtual address; but the CR3 register of
4630  * course contains a physical one. And the CR3 content is not quite a
4631  * physical address, sometimes, it seems.
4632  */
4633 static int __xen_vm_pgd(struct target *target,tid_t tid,uint64_t *pgd) {
4634  struct xen_vm_state *xstate;
4635  struct target_thread *tthread;
4636  struct xen_vm_thread_state *xtstate;
4637  REGVAL cr0 = 0,cr4 = 0,msr_efer = 0,cpuid_edx = 0;
4638 
4639  xstate = (struct xen_vm_state *)target->state;
4640 
4641  if (tid == TID_GLOBAL) {
4642  tthread = __xen_vm_load_current_thread(target,0,1);
4643  if (!tthread) {
4644  verror("could not load global thread!\n");
4645  return -1;
4646  }
4647  xtstate = (struct xen_vm_thread_state *)tthread->state;
4648 
4649  /*
4650  * Use cached pgd if possible.
4651  */
4652  if (OBJVALID(tthread) && xtstate->pgd_phys > 0) {
4653  *pgd = xtstate->pgd_phys;
4654 
4655  vdebug(12,LA_TARGET,LF_XV,
4656  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64" (cached)\n",tid,*pgd);
4657 
4658  return 0;
4659  }
4660 
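  /*
   * For PV guests with VMASST_TYPE_pae_extended_cr3 in effect,
   * ctrlreg[3] holds a PFN encoded by xen_cr3_to_pfn(), so shift it
   * back into a physical address; otherwise just mask off CR3's low
   * flag bits.
   */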
4661  if (xtstate->context.vm_assist & (1 << VMASST_TYPE_pae_extended_cr3)) {
4662  *pgd = ((uint64_t)xen_cr3_to_pfn(xtstate->context.ctrlreg[3])) \
4663  << XC_PAGE_SHIFT;
4664  }
4665  else {
4666  *pgd = xtstate->context.ctrlreg[3] & ~(__PAGE_SIZE - 1);
4667  }
4668 
4669  /*
4670  * XXX NB: Also load the current paging flags! This seems to be
4671  * the right place to do it... realistically, the flags are not
4672  * going to change much except during boot... or in the future
4673  * where there are nested HVMs! I suppose, in the future, we'll
4674  * have to have these set on a per-thread basis...
4675  *
4676  * (Pass cpuid_edx=ADDRMAX for now to make sure the NOPSE*
4677  * bits don't get set -- until we actually bother to find the
4678  * cpuid info.)
4679  */
4680  cr0 = xtstate->context.ctrlreg[0];
4681  cr4 = xtstate->context.ctrlreg[4];
4682  if (xstate->hvm && xstate->hvm_cpu)
4683  msr_efer = xstate->hvm_cpu->msr_efer;
4684  cpuid_edx = ADDRMAX;
4685 
4686  if (target_arch_x86_v2p_get_flags(target,cr0,cr4,msr_efer,
4687  cpuid_edx,&xstate->v2p_flags)) {
4688  if (target->arch->type == ARCH_X86_64) {
4689  verror("could not determine v2p_flags! pgd walks might fail;"
4690  " assuming 64-bit long mode and paging!\n");
4691  xstate->v2p_flags = ARCH_X86_V2P_LMA;
4692  }
4693  else {
4694  verror("could not determine v2p_flags! pgd walks might fail;"
4695  " assuming 32-bit mode and PAE (and auto-PSE)!\n");
4696  xstate->v2p_flags = ARCH_X86_V2P_PAE;
4697  }
4698  }
4699 
4700  /* Also quickly set the V2P_PV flag if this domain is paravirt. */
4701  if (!xstate->hvm)
4702  xstate->v2p_flags |= ARCH_X86_V2P_PV;
4703 
4704  if (vdebug_is_on(8,LA_TARGET,LF_XV)) {
4705  char buf[256];
4706  buf[0] = '\0';
4707  target_arch_x86_v2p_flags_snprintf(target,xstate->v2p_flags,
4708  buf,sizeof(buf));
4709  vdebug(8,LA_TARGET,LF_TARGET,"v2p_flags = %s\n",buf);
4710  }
4711 
4712  xtstate->pgd_phys = *pgd;
4713  }
4714  else {
4715  tthread = xen_vm_load_thread(target,tid,0);
4716  if (!tthread) {
4717  verror("could not load tid %"PRIiTID"!\n",tid);
4718  return -1;
4719  }
4720  xtstate = (struct xen_vm_thread_state *)tthread->state;
4721  if (!xtstate) {
4722  xtstate = (struct xen_vm_thread_state *)calloc(1,sizeof(*xtstate));
4723  tthread->state = xtstate;
4724  }
4725 
4726  /*
4727  * Use cached pgd if possible.
4728  */
4729  if (OBJVALID(tthread) && xtstate->pgd_phys > 0) {
4730  *pgd = xtstate->pgd_phys;
4731 
4732  vdebug(12,LA_TARGET,LF_XV,
4733  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64" (cached)\n",tid,*pgd);
4734 
4735  return 0;
4736  }
4737 
4738  /*
4739  if (target->wordsize == 8) {
4740  if (xtstate->pgd >= xstate->kernel_start_addr)
4741  *pgd = xtstate->pgd - xstate->kernel_start_addr;
4742  else
4743 #if __WORDSIZE == 64
4744  *pgd = xtstate->pgd - 0xffff810000000000UL;
4745 #else
4746  *pgd = xtstate->pgd - 0xffff810000000000ULL;
4747 #endif
4748  }
4749  else {
4750  *pgd = xtstate->pgd - xstate->kernel_start_addr;
4751  }
4752  */
4753 
4754  if (target_os_thread_get_pgd_phys(target,tid,pgd)) {
4755  verror("could not get phys pgd for tid %"PRIiTID": %s!\n",
4756  tid,strerror(errno));
4757  return -1;
4758  }
4759 
4760  xtstate->pgd_phys = *pgd;
4761  }
4762 
4763  vdebug(12,LA_TARGET,LF_XV,
4764  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64"\n",tid,*pgd);
4765 
4766  return 0;
4767 }
4768 
4769 static int xen_vm_addr_v2p(struct target *target,tid_t tid,
4770  ADDR vaddr,ADDR *paddr) {
4771  struct xen_vm_state *xstate;
4772  uint64_t pgd = 0;
4773 
4774  xstate = (struct xen_vm_state *)target->state;
4775 
4776  if (__xen_vm_pgd(target,tid,&pgd)) {
4777  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4778  return -1;
4779  }
4780 
4781  if (!xstate->memops || !xstate->memops->addr_v2p) {
4782  errno = EINVAL;
4783  return -1;
4784  }
4785 
4786  return xstate->memops->addr_v2p(target,tid,pgd,vaddr,paddr);
4787 }
4788 
4789 static unsigned char *xen_vm_read_phys(struct target *target,ADDR paddr,
4790  unsigned long length,unsigned char *buf) {
4791  struct xen_vm_state *xstate;
4792 
4793  xstate = (struct xen_vm_state *)target->state;
4794 
4795  if (!xstate->memops || !xstate->memops->read_phys) {
4796  errno = EINVAL;
4797  return NULL;
4798  }
4799 
4800  return xstate->memops->read_phys(target,paddr,length,buf);
4801 }
4802 
4803 static unsigned long xen_vm_write_phys(struct target *target,ADDR paddr,
4804  unsigned long length,unsigned char *buf) {
4805  struct xen_vm_state *xstate;
4806 
4807  xstate = (struct xen_vm_state *)target->state;
4808 
4809  if (!xstate->memops || !xstate->memops->write_phys) {
4810  errno = EINVAL;
4811  return 0;
4812  }
4813 
4814  if (!target->writeable) {
4815  verror("target %s not writeable!\n",target->name);
4816  errno = EROFS;
4817  return 0;
4818  }
4819 
4820  return xstate->memops->write_phys(target,paddr,length,buf);
4821 }
4822 
4823 unsigned char *xen_vm_read_pid(struct target *target,tid_t tid,ADDR vaddr,
4824  unsigned long length,unsigned char *buf) {
4825  struct xen_vm_state *xstate;
4826  uint64_t pgd = 0;
4827 
4828  xstate = (struct xen_vm_state *)target->state;
4829 
4830  if (!xstate->memops || !xstate->memops->read_tid) {
4831  errno = EINVAL;
4832  return 0;
4833  }
4834 
4835  if (__xen_vm_pgd(target,tid,&pgd)) {
4836  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4837  return NULL;
4838  }
4839 
4840  return xstate->memops->read_tid(target,tid,pgd,vaddr,length,buf);
4841 }
4842 
4843 unsigned long xen_vm_write_pid(struct target *target,tid_t tid,ADDR vaddr,
4844  unsigned long length,unsigned char *buf) {
4845  struct xen_vm_state *xstate;
4846  uint64_t pgd = 0;
4847 
4848  xstate = (struct xen_vm_state *)target->state;
4849 
4850  if (!xstate->memops || !xstate->memops->write_tid) {
4851  errno = EINVAL;
4852  return 0;
4853  }
4854 
4855  if (__xen_vm_pgd(target,tid,&pgd)) {
4856  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4857  return 0;
4858  }
4859 
4860  if (!target->writeable) {
4861  verror("target %s not writeable!\n",target->name);
4862  errno = EROFS;
4863  return 0;
4864  }
4865 
4866  return xstate->memops->write_tid(target,tid,pgd,vaddr,length,buf);
4867 }
4868 
4869 /* Register mapping.
4870  *
4871  * First, be aware that our host bit size (64/32) *does* influence which
4872  * registers we can access -- i.e., 64-bit host tracing a
4873  * 32-bit process still gets the 64-bit registers -- but even then, we
4874  * want the 32-bit mapping for DWARF reg num to i386 reg.
4875  *
4876  * XXX XXX XXX
4877  * If structs in xen/xen.h (and arch-specific includes containing
4878  * cpu_user_regs) change, ever, these mappings will be wrong.
4879  */
4880 #ifdef __x86_64__
4881 static int dreg_to_offset64[ARCH_X86_64_REG_COUNT] = {
4882  offsetof(struct vcpu_guest_context,user_regs.rax),
4883  offsetof(struct vcpu_guest_context,user_regs.rdx),
4884  offsetof(struct vcpu_guest_context,user_regs.rcx),
4885  offsetof(struct vcpu_guest_context,user_regs.rbx),
4886  offsetof(struct vcpu_guest_context,user_regs.rsi),
4887  offsetof(struct vcpu_guest_context,user_regs.rdi),
4888  offsetof(struct vcpu_guest_context,user_regs.rbp),
4889  offsetof(struct vcpu_guest_context,user_regs.rsp),
4890  offsetof(struct vcpu_guest_context,user_regs.r8),
4891  offsetof(struct vcpu_guest_context,user_regs.r9),
4892  offsetof(struct vcpu_guest_context,user_regs.r10),
4893  offsetof(struct vcpu_guest_context,user_regs.r11),
4894  offsetof(struct vcpu_guest_context,user_regs.r12),
4895  offsetof(struct vcpu_guest_context,user_regs.r13),
4896  offsetof(struct vcpu_guest_context,user_regs.r14),
4897  offsetof(struct vcpu_guest_context,user_regs.r15),
4898  offsetof(struct vcpu_guest_context,user_regs.rip),
4899  -1, -1, -1, -1, -1, -1, -1, -1,
4900  -1, -1, -1, -1, -1, -1, -1, -1,
4901  -1, -1, -1, -1, -1, -1, -1, -1,
4902  -1, -1, -1, -1, -1, -1, -1, -1,
4903  offsetof(struct vcpu_guest_context,user_regs.rflags),
4904  offsetof(struct vcpu_guest_context,user_regs.es),
4905  offsetof(struct vcpu_guest_context,user_regs.cs),
4906  offsetof(struct vcpu_guest_context,user_regs.ss),
4907  offsetof(struct vcpu_guest_context,user_regs.ds),
4908  offsetof(struct vcpu_guest_context,user_regs.fs),
4909  offsetof(struct vcpu_guest_context,user_regs.gs),
4910  -1, -1,
4911  /* What about fs_base, gs_base, gs_base_kernel; that's what these are. */
4912  offsetof(struct vcpu_guest_context,fs_base),
4913  offsetof(struct vcpu_guest_context,gs_base_kernel), /* XXX: reuse kernel */
4914  offsetof(struct vcpu_guest_context,gs_base_kernel),
4915  offsetof(struct vcpu_guest_context,gs_base_user),
4916  -1, -1, -1, -1, -1, -1,
4917  -1, -1,
4918  offsetof(struct vcpu_guest_context,ctrlreg[0]),
4919  offsetof(struct vcpu_guest_context,ctrlreg[1]),
4920  offsetof(struct vcpu_guest_context,ctrlreg[2]),
4921  offsetof(struct vcpu_guest_context,ctrlreg[3]),
4922  offsetof(struct vcpu_guest_context,ctrlreg[4]),
4923  -1, -1, -1, -1, -1,
4924  offsetof(struct vcpu_guest_context,debugreg[0]),
4925  offsetof(struct vcpu_guest_context,debugreg[1]),
4926  offsetof(struct vcpu_guest_context,debugreg[2]),
4927  offsetof(struct vcpu_guest_context,debugreg[3]),
4928  -1,-1,
4929  offsetof(struct vcpu_guest_context,debugreg[6]),
4930  offsetof(struct vcpu_guest_context,debugreg[7]),
4931  -1,
4932 };
4933 #endif
4934 static int dreg_to_offset32[ARCH_X86_REG_COUNT] = {
4935  offsetof(struct vcpu_guest_context,user_regs.eax),
4936  offsetof(struct vcpu_guest_context,user_regs.ecx),
4937  offsetof(struct vcpu_guest_context,user_regs.edx),
4938  offsetof(struct vcpu_guest_context,user_regs.ebx),
4939  offsetof(struct vcpu_guest_context,user_regs.esp),
4940  offsetof(struct vcpu_guest_context,user_regs.ebp),
4941  offsetof(struct vcpu_guest_context,user_regs.esi),
4942  offsetof(struct vcpu_guest_context,user_regs.edi),
4943  offsetof(struct vcpu_guest_context,user_regs.eip),
4944  offsetof(struct vcpu_guest_context,user_regs.eflags),
4945  -1, -1, -1, -1, -1, -1, -1, -1,
4946  -1, -1,
4947  -1, -1, -1, -1, -1, -1, -1, -1,
4948  -1, -1, -1, -1, -1, -1, -1, -1,
4949  -1, -1, -1,
4950  /* These are "fake" DWARF regs. */
4951  offsetof(struct vcpu_guest_context,user_regs.es),
4952  offsetof(struct vcpu_guest_context,user_regs.cs),
4953  offsetof(struct vcpu_guest_context,user_regs.ss),
4954  offsetof(struct vcpu_guest_context,user_regs.ds),
4955  offsetof(struct vcpu_guest_context,user_regs.fs),
4956  offsetof(struct vcpu_guest_context,user_regs.gs),
4957  offsetof(struct vcpu_guest_context,ctrlreg[0]),
4958  offsetof(struct vcpu_guest_context,ctrlreg[1]),
4959  offsetof(struct vcpu_guest_context,ctrlreg[2]),
4960  offsetof(struct vcpu_guest_context,ctrlreg[3]),
4961  offsetof(struct vcpu_guest_context,ctrlreg[4]),
4962  offsetof(struct vcpu_guest_context,debugreg[0]),
4963  offsetof(struct vcpu_guest_context,debugreg[1]),
4964  offsetof(struct vcpu_guest_context,debugreg[2]),
4965  offsetof(struct vcpu_guest_context,debugreg[3]),
4966  -1,-1,
4967  offsetof(struct vcpu_guest_context,debugreg[6]),
4968  offsetof(struct vcpu_guest_context,debugreg[7]),
4969  -1,
4970 };
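 /*
  * Illustrative sketch (not compiled): the tables above flatten a
  * struct vcpu_guest_context into a DWARF-indexed register file.  A
  * hypothetical 64-bit read helper -- not part of this file -- would
  * look like this:
  */
 #if 0
 static REGVAL example_read_dwarf_reg64(struct vcpu_guest_context *ctx,
                                        int dwreg) {
     int offset;

     if (dwreg < 0 || dwreg >= ARCH_X86_64_REG_COUNT)
         return 0;
     offset = dreg_to_offset64[dwreg];
     if (offset < 0)
         return 0;  /* DWARF reg with no vcpu_guest_context slot */

     /* Segment selectors are 16-bit fields; everything else is 64-bit. */
     if (dwreg >= REG_X86_64_ES && dwreg <= REG_X86_64_GS)
         return (REGVAL)*(uint16_t *)(((char *)ctx) + offset);
     else
         return (REGVAL)*(uint64_t *)(((char *)ctx) + offset);
 }
 #endif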
4971 
4972 /*
4973  * Register functions.
4974  */
4975 int __xen_vm_vcpu_to_thread_regcache(struct target *target,
4976  struct vcpu_guest_context *context,
4977  struct target_thread *tthread,
4978  thread_ctxt_t tctxt) {
4979  int offset;
4980  int i;
4981  int count = 0;
4982  REGVAL regval;
4983 
4984  vdebug(9,LA_TARGET,LF_XV,"translating vcpu to thid %d tctxt %d\n",
4985  tthread->tid,tctxt);
4986 
4987  /*
4988  * NB: we need to read 64-bit numbers from the vcpu structs if the
4989  * host is 64-bit, even if the target is 32-bit, I think...
4990  */
4991  if (arch_wordsize(target->arch) == 8 || __WORDSIZE == 64) {
4992  for (i = 0; i < ARCH_X86_64_REG_COUNT; ++i) {
4993  offset = dreg_to_offset64[i];
4994  if (offset < 0)
4995  continue;
4996 
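  /* Segment selectors (es..gs) are 16-bit fields in the vcpu
   * context; every other mapped register is a full 64-bit word,
   * which is why the two loads below differ. */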
4997  if (likely(i < REG_X86_64_ES) || likely(i > REG_X86_64_GS))
4998  regval = (REGVAL)*(uint64_t *)(((char *)context) + offset);
4999  else
5000  regval = (REGVAL)*(uint16_t *)(((char *)context) + offset);
5001 
5002  if (target_regcache_init_reg_tidctxt(target,tthread,tctxt,
5003  i,regval)) {
5004  vwarn("could not set reg %d thid %d tctxt %d\n",
5005  i,tthread->tid,tctxt);
5006  }
5007  else
5008  ++count;
5009  }
5010  }
5011  else if (arch_wordsize(target->arch) == 4) {
5012  for (i = 0; i < ARCH_X86_REG_COUNT; ++i) {
5013  offset = dreg_to_offset32[i];
5014  if (offset < 0)
5015  continue;
5016 
5017  regval = (REGVAL)*(uint32_t *)(((char *)context) + offset);
5018 
5019  if (target_regcache_init_reg_tidctxt(target,tthread,tctxt,
5020  i,regval)) {
5021  vwarn("could not set reg %d thid %d tctxt %d\n",
5022  i,tthread->tid,tctxt);
5023  }
5024  else
5025  ++count;
5026  }
5027  }
5028 
5029  vdebug(9,LA_TARGET,LF_XV,
5030  "translated %d vcpu regs to thid %d tctxt %d regcache\n",
5031  count,tthread->tid,tctxt);
5032 
5033  return 0;
5034 }
5035 
5036 static int __xen_vm_thread_regcache_to_vcpu_64_reg(struct target *target,
5037  struct target_thread *tthread,
5038  thread_ctxt_t tctxt,
5039  REG reg,REGVAL regval,void *priv) {
5040  struct vcpu_guest_context *context;
5041  int offset;
5042 
5043  if (reg >= ARCH_X86_64_REG_COUNT) {
5044  vwarn("unsupported reg %d!\n",reg);
5045  errno = EINVAL;
5046  return -1;
5047  }
5048 
5049  context = (struct vcpu_guest_context *)priv;
5050  offset = dreg_to_offset64[reg];
5051 
5052  if (offset < 0) {
5053  vwarn("unsupported reg %d!\n",reg);
5054  errno = EINVAL;
5055  return -1;
5056  }
5057 
5058  vdebug(16,LA_TARGET,LF_XV,
5059  "tid %d thid %d tctxt %d regcache->vcpu %d 0x%"PRIxREGVAL"\n",
5060  target->id,tthread->tid,tctxt,reg,regval);
5061 
5062  if (likely(reg < REG_X86_64_ES) || likely(reg > REG_X86_64_GS))
5063  *(uint64_t *)(((char *)context) + offset) =
5064  (uint64_t)regval;
5065  else
5066  *(uint16_t *)(((char *)context) + offset) =
5067  (uint16_t)regval;
5068 
5069  return 0;
5070 }
5071 
5072 static int __xen_vm_thread_regcache_to_vcpu_64_rawreg(struct target *target,
5073  struct target_thread *tthread,
5074  thread_ctxt_t tctxt,
5075  REG reg,void *rawval,int rawlen,
5076  void *priv) {
5077  //struct vcpu_guest_context *context;
5078  int offset;
5079 
5080  if (reg >= ARCH_X86_64_REG_COUNT) {
5081  vwarn("unsupported reg %d!\n",reg);
5082  errno = EINVAL;
5083  return -1;
5084  }
5085 
5086  //context = (struct vcpu_guest_context *)priv;
5087  offset = dreg_to_offset64[reg];
5088 
5089  if (offset < 0) {
5090  vwarn("unsupported reg %d!\n",reg);
5091  errno = EINVAL;
5092  return -1;
5093  }
5094 
5095  vwarn("tid %d thid %d tctxt %d regcache->vcpu %d"
5096  " -- unsupported rawval len %d\n",
5097  target->id,tthread->tid,tctxt,reg,rawlen);
5098 
5099  return -1;
5100 }
5101 
5102 static int __xen_vm_thread_regcache_to_vcpu_32_reg(struct target *target,
5103  struct target_thread *tthread,
5104  thread_ctxt_t tctxt,
5105  REG reg,REGVAL regval,void *priv) {
5106  struct vcpu_guest_context *context;
5107  int offset;
5108 
5109  if (reg >= ARCH_X86_REG_COUNT) {
5110  vwarn("unsupported reg %d!\n",reg);
5111  errno = EINVAL;
5112  return -1;
5113  }
5114 
5115  context = (struct vcpu_guest_context *)priv;
5116  offset = dreg_to_offset32[reg];
5117 
5118  if (offset < 0) {
5119  vwarn("unsupported reg %d!\n",reg);
5120  errno = EINVAL;
5121  return -1;
5122  }
5123 
5124  vdebug(16,LA_TARGET,LF_XV,
5125  "tid %d thid %d tctxt %d regcache->vcpu %d 0x%"PRIxREGVAL"\n",
5126  target->id,tthread->tid,tctxt,reg,regval);
5127 
5128  *(uint32_t *)(((char *)context) + offset) = (uint32_t)regval;
5129 
5130  return 0;
5131 }
5132 
5133 static int __xen_vm_thread_regcache_to_vcpu_32_rawreg(struct target *target,
5134  struct target_thread *tthread,
5135  thread_ctxt_t tctxt,
5136  REG reg,void *rawval,int rawlen,
5137  void *priv) {
5138  //struct vcpu_guest_context *context;
5139  int offset;
5140 
5141  if (reg >= ARCH_X86_REG_COUNT) {
5142  vwarn("unsupported reg %d!\n",reg);
5143  errno = EINVAL;
5144  return -1;
5145  }
5146 
5147  //context = (struct vcpu_guest_context *)priv;
5148  offset = dreg_to_offset32[reg];
5149 
5150  if (offset < 0) {
5151  vwarn("unsupported reg %d!\n",reg);
5152  errno = EINVAL;
5153  return -1;
5154  }
5155 
5156  vwarn("tid %d thid %d tctxt %d regcache->vcpu %d"
5157  " -- unsupported rawval len %d\n",
5158  target->id,tthread->tid,tctxt,reg,rawlen);
5159 
5160  return -1;
5161 }
5162 
5163 int __xen_vm_thread_regcache_to_vcpu(struct target *target,
5164  struct target_thread *tthread,
5165  thread_ctxt_t tctxt,
5166  struct vcpu_guest_context *context) {
5167  vdebug(9,LA_TARGET,LF_XV,"translating thid %d tctxt %d to vcpu\n",
5168  tthread->tid,tctxt);
5169 
5170  /*
5171  * NB: we need to write 64-bit numbers into the vcpu structs if the
5172  * host is 64-bit, even if the target is 32-bit, I think...
5173  */
5174  if (arch_wordsize(target->arch) == 8 || __WORDSIZE == 64) {
5175  target_regcache_foreach_dirty(target,tthread,tctxt,
5176  __xen_vm_thread_regcache_to_vcpu_64_reg,
5177  __xen_vm_thread_regcache_to_vcpu_64_rawreg,
5178  context);
5179  }
5180  else if (arch_wordsize(target->arch) == 4) {
5181  target_regcache_foreach_dirty(target,tthread,tctxt,
5182  __xen_vm_thread_regcache_to_vcpu_32_reg,
5183  __xen_vm_thread_regcache_to_vcpu_32_rawreg,
5184  context);
5185  }
5186 
5187  return 0;
5188 }
5189 
5190 /*
5191  * Hardware breakpoint support.
5192  */
5193 static REG xen_vm_get_unused_debug_reg(struct target *target,tid_t tid) {
5194  REG retval = -1;
5195  struct target_thread *tthread;
5196  struct xen_vm_thread_state *xtstate;
5197 
5198  if (tid != TID_GLOBAL) {
5199  verror("currently must use TID_GLOBAL for hardware probepoints!\n");
5200  return -1;
5201  }
5202 
5203  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5204  if (!errno)
5205  errno = EINVAL;
5206  verror("could not load cached thread %"PRIiTID"\n",tid);
5207  return -1;
5208  }
5209  xtstate = (struct xen_vm_thread_state *)tthread->state;
5210 
5211  if (!xtstate->dr[0]) { retval = 0; }
5212  else if (!xtstate->dr[1]) { retval = 1; }
5213  else if (!xtstate->dr[2]) { retval = 2; }
5214  else if (!xtstate->dr[3]) { retval = 3; }
5215 
5216  vdebug(5,LA_TARGET,LF_XV,"returning unused debug reg %d\n",retval);
5217 
5218  return retval;
5219 }
5220 
5221 /*
5222  * struct x86_dr_format {
5223  * int dr0_l:1;
5224  * int dr0_g:1;
5225  * int dr1_l:1;
5226  * int dr1_g:1;
5227  * int dr2_l:1;
5228  * int dr2_g:1;
5229  * int dr3_l:1;
5230  * int dr3_g:1;
5231  * int exact_l:1;
5232  * int exact_g:1;
5233  * int reserved:6;
5234  * probepoint_whence_t dr0_break:2;
5235  * probepoint_watchsize_t dr0_len:2;
5236  * probepoint_whence_t dr1_break:2;
5237  * probepoint_watchsize_t dr1_len:2;
5238  * probepoint_whence_t dr2_break:2;
5239  * probepoint_watchsize_t dr2_len:2;
5240  * probepoint_whence_t dr3_break:2;
5241  * probepoint_watchsize_t dr3_len:2;
5242  * };
5243 */
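 /*
  * Worked example (not compiled) of the DR7 layout above: arming an
  * execute breakpoint in debug reg r, as the code below does.  For
  * reg r, the local-enable bit is (r*2), the global-enable bit is
  * (r*2 + 1), the break-condition field sits at bits (16 + r*4), and
  * the watch-length field at bits (18 + r*4).
  */
 #if 0
 unsigned long dr7 = 0;
 int r = 0;                      /* use DR0 */

 dr7 |= (1UL << (r * 2));        /* set local enable */
 dr7 &= ~(1UL << (r * 2 + 1));   /* clear global enable */
 dr7 &= ~(3UL << (16 + r * 4));  /* condition 00b: break on execution */
 dr7 &= ~(3UL << (18 + r * 4));  /* length 00b: required for execute */
 #endif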
5244 
5245 static int xen_vm_set_hw_breakpoint(struct target *target,tid_t tid,
5246  REG reg,ADDR addr) {
5247  struct target_thread *tthread;
5248  struct xen_vm_thread_state *xtstate;
5249 
5250  if (reg < 0 || reg > 3) {
5251  errno = EINVAL;
5252  return -1;
5253  }
5254 
5255  if (!target->writeable) {
5256  verror("target %s not writeable!\n",target->name);
5257  errno = EROFS;
5258  return -1;
5259  }
5260 
5261  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5262  if (!errno)
5263  errno = EINVAL;
5264  verror("could not load cached thread %"PRIiTID"\n",tid);
5265  return -1;
5266  }
5267  xtstate = (struct xen_vm_thread_state *)tthread->state;
5268 
5269  if (xtstate->context.debugreg[reg] != 0) {
5270  vwarn("debug reg %"PRIiREG" already has an address, overwriting (0x%lx)!\n",
5271  reg,xtstate->context.debugreg[reg]);
5272  //errno = EBUSY;
5273  //return -1;
5274  }
5275 
5276  /* Set the address, then the control bits. */
5277  xtstate->dr[reg] = (unsigned long)addr;
5278 
5279  /* Clear the status bits */
5280  xtstate->dr[6] = 0; //&= ~(1 << reg);
5281 
5282  /* Set the local control bit, and unset the global bit. */
5283  xtstate->dr[7] |= (1 << (reg * 2));
5284  xtstate->dr[7] &= ~(1 << (reg * 2 + 1));
5285  /* Set the break to be on execution (00b). */
5286  xtstate->dr[7] &= ~(3 << (16 + (reg * 4)));
5287 
5288  /* Now save these values for later write in flush_context! */
5289  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5290  xtstate->context.debugreg[6] = xtstate->dr[6];
5291  xtstate->context.debugreg[7] = xtstate->dr[7];
5292 
5293  OBJSDIRTY(tthread);
5294 
5295 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5296  struct xen_vm_state *xstate;
5297  xstate = (struct xen_vm_state *)(target->state);
5298  assert(xstate->dominfo_valid);
5299  if (xstate->dominfo.ttd_replay_flag) {
5300  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5301 
5302  if (ret) {
5303  verror("failed to register probe [dom%d:%"PRIxADDR"] (%d)\n",
5304  xstate->id,addr,ret);
5305  return ret;
5306  }
5307  vdebug(5,LA_TARGET,LF_XV,
5308  "registered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5309  xstate->id,addr);
5310  }
5311 #endif
5312 
5313  return 0;
5314 }
5315 
5316 static int xen_vm_set_hw_watchpoint(struct target *target,tid_t tid,
5317  REG reg,ADDR addr,
5318  probepoint_whence_t whence,
5319  probepoint_watchsize_t watchsize) {
5320  struct target_thread *tthread;
5321  struct xen_vm_thread_state *xtstate;
5322 
5323  if (reg < 0 || reg > 3) {
5324  errno = EINVAL;
5325  return -1;
5326  }
5327 
5328  if (!target->writeable) {
5329  verror("target %s not writeable!\n",target->name);
5330  errno = EROFS;
5331  return -1;
5332  }
5333 
5334  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5335  if (!errno)
5336  errno = EINVAL;
5337  verror("could not load cached thread %"PRIiTID"\n",tid);
5338  return -1;
5339  }
5340  xtstate = (struct xen_vm_thread_state *)tthread->state;
5341 
5342  if (xtstate->context.debugreg[reg] != 0) {
5343  vwarn("debug reg %"PRIiREG" already has an address, overwriting (0x%lx)!\n",
5344  reg,xtstate->context.debugreg[reg]);
5345  //errno = EBUSY;
5346  //return -1;
5347  }
5348 
5349  /* Set the address, then the control bits. */
5350  xtstate->dr[reg] = addr;
5351 
5352  /* Clear the status bits */
5353  xtstate->dr[6] = 0; //&= ~(1 << reg);
5354 
5355  /* Set the local control bit, and unset the global bit. */
5356  xtstate->dr[7] |= (1 << (reg * 2));
5357  xtstate->dr[7] &= ~(1 << (reg * 2 + 1));
5358  /* Set the break to be on whatever whence was (clear the bits first!). */
5359  xtstate->dr[7] &= ~(3 << (16 + (reg * 4)));
5360  xtstate->dr[7] |= (whence << (16 + (reg * 4)));
5361  /* Set the watchsize to be whatever watchsize was. */
5362  xtstate->dr[7] &= ~(3 << (18 + (reg * 4)));
5363  xtstate->dr[7] |= (watchsize << (18 + (reg * 4)));
5364 
5365  vdebug(5,LA_TARGET,LF_XV,
5366  "dreg6 = 0x%"PRIxADDR"; dreg7 = 0x%"PRIxADDR", w = %d, ws = 0x%x\n",
5367  xtstate->dr[6],xtstate->dr[7],whence,watchsize);
5368 
5369  /* Now save these values for later write in flush_context! */
5370  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5371  xtstate->context.debugreg[6] = xtstate->dr[6];
5372  xtstate->context.debugreg[7] = xtstate->dr[7];
5373 
5374  OBJSDIRTY(tthread);
5375 
5376 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5377  struct xen_vm_state *xstate;
5378  xstate = (struct xen_vm_state *)(target->state);
5379  assert(xstate->dominfo_valid);
5380  if (xstate->dominfo.ttd_replay_flag) {
5381  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5382 
5383  if (ret) {
5384  verror("failed to register probe [dom%d:%"PRIxADDR"] (%d)\n",
5385  xstate->id,addr,ret);
5386  return ret;
5387  }
5388  vdebug(5,LA_TARGET,LF_XV,
5389  "registered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5390  xstate->id,addr);
5391  }
5392 #endif
5393 
5394  return 0;
5395 }
5396 
5397 static int xen_vm_unset_hw_breakpoint(struct target *target,tid_t tid,REG reg) {
5398 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5399  ADDR addr;
5400 #endif
5401  struct target_thread *tthread;
5402  struct xen_vm_thread_state *xtstate;
5403 
5404  if (reg < 0 || reg > 3) {
5405  errno = EINVAL;
5406  return -1;
5407  }
5408 
5409  if (!target->writeable) {
5410  verror("target %s not writeable!\n",target->name);
5411  errno = EROFS;
5412  return -1;
5413  }
5414 
5415  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5416  if (!errno)
5417  errno = EINVAL;
5418  verror("could not load cached thread %"PRIiTID"\n",tid);
5419  return -1;
5420  }
5421  xtstate = (struct xen_vm_thread_state *)tthread->state;
5422 
5423 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5424  addr = xtstate->dr[reg];
5425 #endif
5426 
5427  /* Set the address, then the control bits. */
5428  xtstate->dr[reg] = 0;
5429 
5430  /* Clear the status bits */
5431  xtstate->dr[6] = 0; //&= ~(1 << reg);
5432 
5433  /* Unset the local control bit, and unset the global bit. */
5434  xtstate->dr[7] &= ~(3 << (reg * 2));
5435 
5436  /* Now save these values for later write in flush_context! */
5437  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5438  xtstate->context.debugreg[6] = xtstate->dr[6];
5439  xtstate->context.debugreg[7] = xtstate->dr[7];
5440 
5441  OBJSDIRTY(tthread);
5442 
5443 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5444  struct xen_vm_state *xstate;
5445  xstate = (struct xen_vm_state *)(target->state);
5446  assert(xstate->dominfo_valid);
5447  if (xstate->dominfo.ttd_replay_flag) {
5448  int ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,addr);
5449 
5450  if (ret) {
5451  verror("failed to unregister probe [dom%d:%"PRIxADDR"] (%d)\n",
5452  xstate->id,addr,ret);
5453  return ret;
5454  }
5455  vdebug(4,LA_TARGET,LF_XV,
5456  "unregistered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5457  xstate->id,addr);
5458  }
5459 #endif
5460 
5461  return 0;
5462 }
5463 
5464 static int xen_vm_unset_hw_watchpoint(struct target *target,tid_t tid,REG reg) {
5465  /* It's the exact same thing, yay! */
5466  return xen_vm_unset_hw_breakpoint(target,tid,reg);
5467 }
5468 
5469 int xen_vm_disable_hw_breakpoints(struct target *target,tid_t tid) {
5470  struct target_thread *tthread;
5471  struct xen_vm_thread_state *xtstate;
5472 
5473  if (!target->writeable) {
5474  verror("target %s not writeable!\n",target->name);
5475  errno = EROFS;
5476  return -1;
5477  }
5478 
5479  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5480  if (!errno)
5481  errno = EINVAL;
5482  verror("could not load cached thread %"PRIiTID"\n",tid);
5483  return -1;
5484  }
5485  xtstate = (struct xen_vm_thread_state *)tthread->state;
5486 
5487  xtstate->context.debugreg[7] = 0;
5488 
5489  OBJSDIRTY(tthread);
5490 
5491  return 0;
5492 }
5493 
5494 int xen_vm_enable_hw_breakpoints(struct target *target,tid_t tid) {
5495  struct target_thread *tthread;
5496  struct xen_vm_thread_state *xtstate;
5497 
5498  if (!target->writeable) {
5499  verror("target %s not writeable!\n",target->name);
5500  errno = EROFS;
5501  return -1;
5502  }
5503 
5504  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5505  if (!errno)
5506  errno = EINVAL;
5507  verror("could not load cached thread %"PRIiTID"\n",tid);
5508  return -1;
5509  }
5510  xtstate = (struct xen_vm_thread_state *)tthread->state;
5511 
5512  xtstate->context.debugreg[7] = xtstate->dr[7];
5513 
5514  OBJSDIRTY(tthread);
5515 
5516  return 0;
5517 }
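/*
 * Note the pairing with xen_vm_disable_hw_breakpoints() above: both only
 * touch context.debugreg[7], the copy that gets flushed to the VCPU, while
 * xtstate->dr[7] retains the full configuration, so a disable/enable cycle
 * restores every breakpoint without re-registering them.
 */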
5518 
5519 int xen_vm_disable_hw_breakpoint(struct target *target,tid_t tid,REG dreg) {
5520  struct target_thread *tthread;
5521  struct xen_vm_thread_state *xtstate;
5522 
5523  if (dreg < 0 || dreg > 3) {
5524  errno = EINVAL;
5525  return -1;
5526  }
5527 
5528  if (!target->writeable) {
5529  verror("target %s not writeable!\n",target->name);
5530  errno = EROFS;
5531  return -1;
5532  }
5533 
5534  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5535  if (!errno)
5536  errno = EINVAL;
5537  verror("could not load cached thread %"PRIiTID"\n",tid);
5538  return -1;
5539  }
5540  xtstate = (struct xen_vm_thread_state *)tthread->state;
5541 
5542  /* Clear the status bits */
5543  xtstate->dr[6] = 0; //&= ~(1 << reg);
5544 
5545  /* Unset the local control bit, and unset the global bit. */
5546  xtstate->dr[7] &= ~(3 << (dreg * 2));
5547 
5548  /* Now save these values for later write in flush_context! */
5549  xtstate->context.debugreg[6] = xtstate->dr[6];
5550  xtstate->context.debugreg[7] = xtstate->dr[7];
5551 
5552  OBJSDIRTY(tthread);
5553 
5554 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5555  struct xen_vm_state *xstate;
5556  xstate = (struct xen_vm_state *)(target->state);
5557  assert(xstate->dominfo_valid);
5558  if (xstate->dominfo.ttd_replay_flag) {
5559  int ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,xtstate->dr[dreg]);
5560 
5561  if (ret) {
5562  verror("failed to unregister probe [dom%d:%lx] (%d)\n",
5563  xstate->id,xtstate->dr[dreg],ret);
5564  return ret;
5565  }
5566  vdebug(4,LA_TARGET,LF_XV,
5567  "unregistered probe in replay domain [dom%d:%lx]\n",
5568  xstate->id,xtstate->dr[dreg]);
5569  }
5570 #endif
5571 
5572  return 0;
5573 }
5574 
5575 int xen_vm_enable_hw_breakpoint(struct target *target,tid_t tid,REG dreg) {
5576  struct target_thread *tthread;
5577  struct xen_vm_thread_state *xtstate;
5578 
5579  if (dreg < 0 || dreg > 3) {
5580  errno = EINVAL;
5581  return -1;
5582  }
5583 
5584  if (!target->writeable) {
5585  verror("target %s not writeable!\n",target->name);
5586  errno = EROFS;
5587  return -1;
5588  }
5589 
5590  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5591  if (!errno)
5592  errno = EINVAL;
5593  verror("could not load cached thread %"PRIiTID"\n",tid);
5594  return -1;
5595  }
5596  xtstate = (struct xen_vm_thread_state *)tthread->state;
5597 
5598  /* Clear the status bits */
5599  xtstate->dr[6] = 0; //&= ~(1 << reg);
5600 
5601  /* Set the local control bit, and unset the global bit. */
5602  xtstate->dr[7] |= (1 << (dreg * 2));
5603  xtstate->dr[7] &= ~(1 << (dreg * 2 + 1));
5604 
5605  /* Now save these values for later write in flush_context! */
5606  xtstate->context.debugreg[6] = xtstate->dr[6];
5607  xtstate->context.debugreg[7] = xtstate->dr[7];
5608 
5609  OBJSDIRTY(tthread);
5610 
5611 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5612  struct xen_vm_state *xstate;
5613  xstate = (struct xen_vm_state *)(target->state);
5614  assert(xstate->dominfo_valid);
5615  if (xstate->dominfo.ttd_replay_flag) {
5616  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,xtstate->dr[dreg]);
5617 
5618  if (ret) {
5619  verror("failed to register probe [dom%d:%lx] (%d)\n",
5620  xstate->id,xtstate->dr[dreg],ret);
5621  return ret;
5622  }
5623  vdebug(4,LA_TARGET,LF_XV,
5624  "registered probe in replay domain [dom%d:%lx]\n",
5625  xstate->id,xtstate->dr[dreg]);
5626  }
5627 #endif
5628 
5629  return 0;
5630 }
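/*
 * Contrast with set/unset above: set_hw_breakpoint/watchpoint program the
 * address into DRn and the condition bits into DR7, while the
 * enable/disable_hw_breakpoint pair only flip the DR7 enable bits for an
 * address that is already loaded, so they can be toggled cheaply, e.g.
 * around a single step.
 */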
5631 
5632 int xen_vm_notify_sw_breakpoint(struct target *target,ADDR addr,
5633  int notification) {
5634 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5635  struct xen_vm_state *xstate;
5636  int ret = -1;
5637  char *msg = "unregister";
5638 
5639  xstate = (struct xen_vm_state *)(target->state);
5640 
5641  /* SW breakpoints are only implemented for replay domains right now */
5642  assert(xstate->dominfo_valid);
5643  if (!xstate->dominfo.ttd_replay_flag)
5644  return 0;
5645 
5646  if (notification) {
5647  msg = "register";
5648  ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5649  }
5650  else {
5651  ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,addr);
5652  }
5653 
5654  if (ret) {
5655  verror("failed to %s probe [dom%d:%"PRIxADDR"] (%d)\n",
5656  msg,xstate->id,addr,ret);
5657  return ret;
5658  }
5659  vdebug(4,LA_TARGET,LF_XV,
5660  "%sed probe in replay domain [dom%d:%"PRIxADDR"]\n",
5661  msg,xstate->id,addr);
5662 #endif
5663  return 0;
5664 }
5665 
5666 int xen_vm_singlestep(struct target *target,tid_t tid,int isbp,
5667  struct target *overlay) {
5668  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5669  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
5670  struct target_thread *tthread;
5671  struct xen_vm_thread_state *xtstate;
5672 
5673  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5674  if (!errno)
5675  errno = EINVAL;
5676  verror("could not load cached thread %"PRIiTID"\n",tid);
5677  return -1;
5678  }
5679  xtstate = (struct xen_vm_thread_state *)tthread->state;
5680 
5681  /*
5682  * Try to use xc_domain_debug_control for HVM domains; but if it
5683  * fails, fall back to the old way.
5684  *
5685  * NB: it had better not fail. HVM Xen looks to see if EFLAGS_TF is
5686  * set, and if it is, it will reinject the debug trap into the guest
5687  * after we see it... which we don't want! Maybe I can find a way
5688  * around that too.
5689  *
5690  * NB: this uses the CPU's monitor trap flag. Xen's VMX HVM support
5691  * doesn't give us a way to figure out that the monitor trap flag is
5692  * what was triggered... so for the hvm case, we keep a special bit
5693  * (we only need one because we only support one VCPU).
5694  *
5695  * XXX: in the future, only use HVM trap monitor flag if the thread
5696  * is the current or global thread. Otherwise obviously we won't
5697  * get what we want. Ugh, this is all crazy.
5698  *
5699  * We also can't use the MTF if this is an overlay thread and the
5700  * hypervisor is not patched to handle userspace debug exceptions.
5701  */
5702  if (xstate->hvm
5703  && (!overlay
5704  || (overlay && !xspec->hypervisor_ignores_userspace_exceptions))) {
5705 #ifdef XC_HAVE_DOMAIN_DEBUG_CONTROL
5706  if (xc_domain_debug_control(xc_handle,xstate->id,
5707  XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON,
5708  xstate->dominfo.max_vcpu_id)) {
5709  vwarn("xc_domain_debug_control failed! falling back to eflags!\n");
5710  goto nohvm;
5711  }
5712  else
5713  xstate->hvm_monitor_trap_flag_set = 1;
5714 #else
5715  vwarn("xc_domain_debug_control does not exist; falling back to eflags!\n");
5716  goto nohvm;
5717 #endif
5718  }
5719  else if (overlay && xspec->hypervisor_ignores_userspace_exceptions) {
5720  /*
5721  * We have to emulate the exception in the userspace part of the
5722  * target's thread.
5723  */
5724  verror("BUG: overlay process driver should call"
5725  " target_os_thread_singlestep()!\n");
5726  errno = EINVAL;
5727  return -1;
5728  }
5729  else {
5730  nohvm:
5731  if (!target->writeable) {
5732  verror("target %s not writeable!\n",target->name);
5733  errno = EROFS;
5734  return -1;
5735  }
5736 
5737 #if __WORDSIZE == 32
5738  xtstate->context.user_regs.eflags |= X86_EF_TF;
5739  /*
5740  * If this is a single step of an instruction for which a breakpoint
5741  * is set, set the RF flag. Why? Because then we don't have to
5742  * disable the hw breakpoint at this instruction if there is one.
5743  * The x86 clears it after one instruction anyway, so it's safe.
5744  */
5745  if (isbp)
5746  xtstate->context.user_regs.eflags |= X86_EF_RF;
5747  xtstate->context.user_regs.eflags &= ~X86_EF_IF;
5748 #else
5749  xtstate->context.user_regs.rflags |= X86_EF_TF;
5750  if (isbp)
5751  xtstate->context.user_regs.rflags |= X86_EF_RF;
5752  xtstate->context.user_regs.rflags &= ~X86_EF_IF;
5753 #endif
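    /*
     * Clearing X86_EF_IF above keeps the guest from taking an interrupt
     * on the stepped instruction; otherwise the trap flag would
     * single-step into an interrupt handler instead of the next
     * instruction in the flow being debugged.
     */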
5754  OBJSDIRTY(tthread);
5755  }
5756 
5757  target->sstep_thread = tthread;
5758  if (overlay)
5759  target->sstep_thread_overlay = overlay;
5760  else
5761  target->sstep_thread_overlay = NULL;
5762 
5763  return 0;
5764 }
5765 
5766 int xen_vm_singlestep_end(struct target *target,tid_t tid,
5767  struct target *overlay) {
5768  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5769  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
5770  struct target_thread *tthread;
5771  struct xen_vm_thread_state *xtstate;
5772 
5773  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5774  if (!errno)
5775  errno = EINVAL;
5776  verror("could not load cached thread %"PRIiTID"\n",tid);
5777  return -1;
5778  }
5779  xtstate = (struct xen_vm_thread_state *)tthread->state;
5780 
5781  /*
5782  * Try to use xc_domain_debug_control for HVM domains; but if it
5783  * fails, fall back to the old way.
5784  */
5785  if (xstate->hvm
5786  && (!overlay
5787  || (overlay && !xspec->hypervisor_ignores_userspace_exceptions))) {
5788 #ifdef XC_HAVE_DOMAIN_DEBUG_CONTROL
5789  if (xc_domain_debug_control(xc_handle,xstate->id,
5790  XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF,
5791  xstate->dominfo.max_vcpu_id)) {
5792  vwarn("xc_domain_debug_control failed! falling back to eflags!\n");
5793  goto nohvm;
5794  }
5795  else
5796  xstate->hvm_monitor_trap_flag_set = 0;
5797 #else
5798  vwarn("xc_domain_debug_control does not exist; falling back to eflags!\n");
5799  goto nohvm;
5800 #endif
5801  }
5802  else if (overlay && xspec->hypervisor_ignores_userspace_exceptions) {
5803  /*
5804  * We have to emulate the exception in the userspace part of the
5805  * target's thread.
5806  */
5807  verror("BUG: overlay process driver should call"
5808  " target_os_thread_singlestep_end()!\n");
5809  errno = EINVAL;
5810  return -1;
5811  }
5812  else {
5813  nohvm:
5814  if (!target->writeable) {
5815  verror("target %s not writeable!\n",target->name);
5816  errno = EROFS;
5817  return -1;
5818  }
5819 
5820 #if __WORDSIZE == 32
5821  xtstate->context.user_regs.eflags &= ~X86_EF_TF;
5822 #else
5823  xtstate->context.user_regs.rflags &= ~X86_EF_TF;
5824 #endif
5825  OBJSDIRTY(tthread);
5826  }
5827 
5828  target->sstep_thread = NULL;
5829  target->sstep_thread_overlay = NULL;
5830 
5831  return 0;
5832 }
5833 
5834 int xen_vm_instr_can_switch_context(struct target *target,ADDR addr) {
5835  unsigned char buf[2];
5836 
5837  if (!target_read_addr(target,addr,2,buf)) {
5838  verror("could not read 2 bytes at 0x%"PRIxADDR"!\n",addr);
5839  return -1;
5840  }
5841 
5842  /* For now, if it's an IRET (0xcf), INT3 (0xcc), INT imm8 (0xcd), or INTO (0xce), return the opcode; otherwise, return 0. */
5843  if (buf[0] == 0xcf)
5844  return (int)buf[0];
5845  else if (buf[0] == 0xcc || buf[0] == 0xcd || buf[0] == 0xce)
5846  return (int)buf[0];
5847 
5848  return 0;
5849 }
5850 
5851 uint64_t xen_vm_get_tsc(struct target *target) {
5852  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5853 
5854  assert(xstate->dominfo_valid);
5855 
5856 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5857  struct target_thread *gthread;
5858  struct xen_vm_thread_state *gtstate;
5859  if (xstate->dominfo.ttd_guest) {
5860  if (target->global_thread && OBJVALID(target->global_thread))
5861  gthread = target->global_thread;
5862  else if (!(gthread = __xen_vm_load_current_thread(target,0,1))) {
5863  verror("could not load global thread!\n");
5864  return UINT64_MAX;
5865  }
5866 
5867  gtstate = (struct xen_vm_thread_state *)gthread->state;
5868 
5869  return gtstate->context.ttd_perf.tsc;
5870  }
5871  else {
5872 #endif
5873  if (xstate->vcpuinfo.time.version & 0x1)
5874  vwarn("tsc update in progress; tsc may be wrong?!\n");
5875 
5876  return xstate->vcpuinfo.time.tsc_timestamp;
5877 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5878  }
5879 #endif
5880 }
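/*
 * The version check above only warns. A reader that must not race the
 * hypervisor's update follows the pvclock seqlock protocol instead; a
 * minimal sketch, assuming the usual vcpu_time_info semantics (version is
 * odd while an update is in flight) and a hypothetical rmb() read barrier:
 *
 *     uint32_t v;
 *     uint64_t tsc;
 *     do {
 *         v = xstate->vcpuinfo.time.version;
 *         rmb();
 *         tsc = xstate->vcpuinfo.time.tsc_timestamp;
 *         rmb();
 *     } while ((v & 0x1) || v != xstate->vcpuinfo.time.version);
 */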
5881 
5882 uint64_t xen_vm_get_time(struct target *target) {
5883  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5884 
5885  assert(xstate->dominfo_valid);
5886 
5887  if (xstate->vcpuinfo.time.version & 0x1)
5888  vwarn("tsc update in progress; time may be wrong?!\n");
5889 
5890  return xstate->vcpuinfo.time.system_time;
5891 }
5892 
5893 uint64_t xen_vm_get_counter(struct target *target) {
5894  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5895 
5896  assert(xstate->dominfo_valid);
5897 
5898 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5899  struct target_thread *gthread;
5900  struct xen_vm_thread_state *gtstate;
5901  if (xstate->dominfo.ttd_guest) {
5902  if (target->global_thread && OBJVALID(target->global_thread))
5903  gthread = target->global_thread;
5904  else if (!(gthread = __xen_vm_load_current_thread(target,0,1))) {
5905  verror("could not load global thread!\n");
5906  return UINT64_MAX;
5907  }
5908 
5909  gtstate = (struct xen_vm_thread_state *)gthread->state;
5910 
5911  return gtstate->context.ttd_perf.brctr;
5912  }
5913  else {
5914 #endif
5915  if (xstate->vcpuinfo.time.version & 0x1)
5916  vwarn("time (subbing for counter) update in progress; time/counter"
5917  " may be wrong?!\n");
5918 
5919  return xstate->vcpuinfo.time.system_time;
5920 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5921  }
5922 #endif
5923 }
5924 
5925 int xen_vm_enable_feature(struct target *target,int feature,void *arg) {
5926  if (feature != XV_FEATURE_BTS)
5927  return -1;
5928 
5929 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5930  struct xen_vm_state *xstate;
5931 
5932  xstate = (struct xen_vm_state *)(target->state);
5933 
5934  assert(xstate->dominfo_valid);
5935  if (!xstate->dominfo.ttd_replay_flag)
5936  return 0;
5937 
5938  return xc_ttd_set_bts_on(xc_handle,xstate->id);
5939 #else
5940  return -1;
5941 #endif
5942 }
5943 
5944 int xen_vm_disable_feature(struct target *target,int feature) {
5945  if (feature != XV_FEATURE_BTS)
5946  return -1;
5947 
5948 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5949  struct xen_vm_state *xstate;
5950 
5951  xstate = (struct xen_vm_state *)(target->state);
5952 
5953  assert(xstate->dominfo_valid);
5954  if (!xstate->dominfo.ttd_replay_flag)
5955  return 0;
5956 
5957  return xc_ttd_set_bts_off(xc_handle,xstate->id);
5958 #else
5959  return -1;
5960 #endif
5961 }
5962 
5963 /*
5964  * Local variables:
5965  * mode: C
5966  * c-set-style: "BSD"
5967  * c-basic-offset: 4
5968  * End:
5969  */