Stackdb
Stackdb is a stackable, multi-target and -level source debugger and memory forensics library.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
proxyreq.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2012, 2013 The University of Utah
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of
7  * the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18 
19 #ifndef __PROXYREQ_H__
20 #define __PROXYREQ_H__
21 
22 #include <stdsoap2.h>
23 #include <stdlib.h>
24 
25 #include "evloop.h"
26 #include "monitor.h"
27 
28 /*
29  * Our multithreaded, gSOAP-based servers sometimes need to "proxy"
30  * request handling. Why? If the RPC request is to pause a target
31  * (monitored by the server via a server thread attached to the target
32  * via ptrace), or to pause an analysis (monitored by the server thread
33  * that created the analysis, and interacted with by incoming
34  * request-handler threads), we need to make sure the request is handled
35  * in the thread that is attached to the target/analysis. This is
36  * necessary for targets that are built on the ptrace API, and also
37  * allows us to enforce serialization of request handling... but it is
38  * primarily driven by the ptrace API. With ptrace, only one thread can
39  * attach to another thread, and only that thread can interact with the
40  * monitored thread's CPU/memory state or control it via ptrace. Not
41  * all debugged entities may have a similar requirement, but
42  * fundamentally, only a single control thread should access a debugged
43  * entity at a single time -- all debugging actions must be coordinated
44  * through a common controller -- so overall, this model is reasonable.
45  *
46  * (The other way to have constructed our server would have been to have
47  * created a new server, on a different port, for each target and
48  * analysis we launch. This model seemed unattractive earlier on
49  * because it requires basically a launchpad/registry server that
50  * redirects new target/analysis requests to another server that does
51  * the work on a different port, and because it completely prohibits
52  * debuginfo sharing (until we solve that problem elsewhere). However,
53  * this need to proxy RPCs to specific handler threads turned out to be
54  * a real pain, and I wish I had done the new-server-per-target/analysis
55  * thing even though it's less elegant.)
56  *
57  * Anyway, this means that some RPCs must be "redirected" to the
58  * target/analysis control thread that is attached to the
59  * target/analysis. We have to have gsoap process the incoming request
60  * enough to obtain the target/analysis id, and because of the way gsoap
61  * is built, the best we can do is buffer the whole incoming request,
62  * see what object (target/analysis) it is destined for, stop serving
63  * the request in the request handling thread, and pass the original
64  * request bytes to that entity and *reprocess* the request. Yes, this
65  * sucks; more details later.
66  *
67  * A request-handler thread in a server handles incoming SOAP requests,
68  * "proxies" them (i.e., the gSOAP code parses the request and invokes
69  * the RPC function), and either passes them to another thread/process
70  * for handling, OR responds directly.
71  *
72  * Some requests don't end up getting proxied; they are handled simply in
73  * the request handling thread. If soap->error is set to SOAP_STOP,
74  * that means that the RPC function proxied the request, and the request
75  * needs to be passed to the thread/process (i.e., target or analysis)
76  * that it is destined for. All proxied requests are synchronous (from
77  * the perspective of the client, at least), and are handled serially in
78  * order of arrival.
79  *
80  * In the case where the destination is a thread, the request and soap
81  * object is queued for the handling thread, and the request thread
82  * returns, leaving its gsoap state intact. The monitor thread
83  * receives notification of the monitor msg over a monitor pipe, and
84  * handles the request, responds directly to the client, and
85  * terminates the gsoap state and client connection.
86  *
87  * In the case where the destination is a process, the request is
88  * marshalled into a monitor msg, and the request thread forwards that
89  * to the monitor and returns, leaving its gsoap state intact. The
90  * monitor services it using a dummy gsoap object, and responds with
91  * the XML response as a monitor msg. Then, the monitor thread
92  * "coerces" the gsoap object into just sending a response directly.
93  *
94  * This does mean that any requests that *might* need to be proxied must
95  * be buffered and processed twice by gsoap, because gsoap does not
96  * expose its internal demux-to-RPC function mechanism (and we can't
97  * just hack up a call to the RPC function dynamically, with
98  * already-processed arguments, without some function interface -- well,
99  * we *could*, maybe, but it's not worth it).
100  *
101  * But worse, for processes, we must buffer the response XML *fully*,
102  * then let the request thread send the response. In the future, we
103  * might be able to make it zero-copy via mmap, but not yet.
104  */
105 
/*
 * Maximum size (in bytes) of a buffered proxy request/response.
 *
 * BUGFIX: the previous expression, 1024 * 1024 * 4096, computes 4GiB
 * and overflows a 32-bit int (undefined behavior in a constant
 * expression) -- while its own comment claimed 4MB. Use the documented
 * 4MB, and parenthesize so the macro composes safely in expressions.
 */
#define PROXYREQ_MAXSIZE (1024 * 1024 * 4) /* 4MB */
107 
108 typedef enum {
115 
/*
 * A single proxied (or potentially-proxied) RPC request: the buffered
 * request bytes, the soap connection state, and the monitor/object the
 * request is destined for. Stored in soap->user (see
 * PROXY_REQUEST_LOCKED below).
 */
struct proxyreq {
    /*
     * The request-handling thread that initiated the request. We use
     * this as a unique ID for the request, even though the monitor
     * thread will handle the response.
     *
     * (We should also point out that we don't need a unique ID for
     * threads at the moment, since proxied request RPCs are handled
     * serially. But monitor messages have IDs, so we make requests
     * have them too, since requests are sent as monitor messages.)
     */
    unsigned long tid;

    /* NOTE(review): the generated cross-references show a
     * `proxyreq_state_t state;` member at this position (header line
     * 129); it is missing from this extraction -- confirm against the
     * real header. The PROXY_REQUEST_LOCKED macro below reads and
     * writes it. */

    /*
     * Each proxied request is associated with a monitor. RPCs that
     * instantiate a new monitored object (target or analysis), or are
     * destined to an existing object, must set the @monitor field.
     */
    struct monitor *monitor;
    /*
     * We save off the object id separately in case the monitor
     * disappears asynchronously w.r.t. our functions; this helps us
     * avoid taking the monitor's lock (and the global monitor lock).
     */
    int objid;
    /* Monitor-message ID for this request (requests travel as monitor
     * msgs). */
    int msg_id;

    /*
     * Since sometimes we have to unlock the per-objtype mutex if we
     * error out, save it here when we set objid above, IF we locked
     * it. Then, later on, if it is non-NULL, we unlock it. See
     * PROXY_REQUEST_LOCKED and PROXY_REQUEST_HANDLE_STOP below.
     */
    pthread_mutex_t *objtype_mutex;

    /*
     * If this request instantiated the monitor, @monitor_is_new is
     * set.
     */
    uint8_t monitor_is_new;

    /* The soap struct associated with this request. */
    struct soap *soap;

    /*
     * Various processing state as we proxy the request. These save
     * the original gsoap I/O callbacks so they can be restored after
     * we temporarily override them to capture/replay the request.
     */

    /* Set to the gsoap frecv to get the real data. */
    size_t (*orig_frecv)(struct soap *soap,char *s,size_t n);
    /* Set to the gsoap fclose to temporarily prevent sock close. */
    int (*orig_fclose)(struct soap *soap);
    /* Set to the gsoap fsend so output can be redirected to @buf. */
    int (*orig_fsend)(struct soap *soap,const char *s,size_t n);

    /*
     * Request/response buffer info: the backing buffer, the number of
     * valid bytes, the allocated size, and the current read/write
     * index.
     */
    char *buf;
    int len;
    int bufsiz;
    int bufidx;
};
181 
/*
 * Top-level request handler; presumably drives soap_serve() over a
 * captured request for service @svc_name -- TODO confirm against
 * proxyreq.c.
 */
int proxyreq_handle_request(struct soap *soap,char *svc_name);

/*
 * Creates a proxy request associated with @soap, and prepares @soap
 * to capture the request.
 *
 * (Must be called before a soap_serve() call so it can capture the whole
 * incoming request.)
 */
struct proxyreq *proxyreq_create(struct soap *soap);

/*
 * Creates a proxy request associated with @buf that was passed to a
 * monitored child from a monitor, and creates a soap struct set up to
 * replay the forwarded request.
 *
 * (Must be called before a soap_serve() call so it can replay the whole
 * incoming request.)
 */
struct proxyreq *proxyreq_create_proxied(int objid,char *buf,int buflen);

/*
 * Attaches @objid's monitor to @pr (@objid must be a monitored object).
 * Must be called before actually proxying a request!
 */
int proxyreq_attach_objid(struct proxyreq *pr,int objid);

/*
 * Attaches a new @monitor to @pr (@objid should be the object created
 * by/for @monitor). Must be called before actually proxying a request!
 */
int proxyreq_attach_new_objid(struct proxyreq *pr,int objid,
                              struct monitor *monitor);

/*
 * Frees a proxy request.
 */
void proxyreq_free(struct proxyreq *pr);

/*
 * Detaches (and cleans up!) a soap struct from a proxyreq.
 *
 * (User should not have to call this; proxyreq_free calls it.)
 */
void proxyreq_detach_soap(struct proxyreq *pr);

/*
 * Frees a proxy request *buffer*. Only call when you want the buffer
 * freed (i.e., so it doesn't hang around during request processing)
 * without freeing the proxy request itself (because it still needs to
 * be processed and completed).
 */
void proxyreq_free_buffer(struct proxyreq *pr);

/*
 * Receives and handles the request from @pr->monitor. For threads, can
 * just pull the msg out of the monitor's hashtable; for processes, has
 * to read it from the monitor msg.
 *
 * Receiving/handling/response is synchronous w.r.t. requests, so it all
 * happens here.
 *
 * For threads, we have to adjust the soap struct's receive functions to
 * use some of ours, to replay back the request during soap_serve().
 *
 * For processes, we have to create a fake proxyreq and soap struct,
 * setup the state to replay the request AND setup the output functions
 * to write only to our buffer, then call soap_serve, then send the
 * resulting buffer as a response msg to the monitor.
 */
int proxyreq_recv_request(struct monitor *monitor,struct monitor_msg *msg);

/*
 * Receives and handles an incoming response from @pr->monitor.
 *
 * For threads, this function is not currently used (because the thread
 * monitor can respond directly via soap_serve()).
 *
 * For processes, the buffered XML response arrives as a monitor msg,
 * and the saved gsoap object is coerced into sending it directly to
 * the client (see the file header comment above).
 */
int proxyreq_recv_response(struct monitor *monitor,struct monitor_msg *msg);

/*
 * Sends the request to @pr->monitor via monitor_sendfor().
 */
int proxyreq_send_request(struct proxyreq *pr);

/*
 * Sends a response to @pr->monitor as a monitor_msg from a process via
 * monitor_child_sendfor().
 */
int proxyreq_send_response(struct proxyreq *pr);
280 
/*
 * "Call" this in the RPC once you know if you would proxy or not.
 * Either arranges for the request to be forwarded (by returning
 * SOAP_STOP from the enclosing RPC) or allows the RPC to execute
 * normally.
 *
 * Also unlocks @mutex if non-NULL if it returns an error.
 *
 * NB: this macro `return`s from the *enclosing function* on error, on
 * proxying (SOAP_STOP), and on unexpected state; only the
 * PROXYREQ_STATE_PROCESSING path falls through to the caller's code.
 *
 * (Wrapped in do { } while (0) so it expands safely inside unbraced
 * if/else; the statements are unchanged.)
 */
#define PROXY_REQUEST_LOCKED(soap,mobjid,mutex) do {		\
	struct proxyreq *_pr;					\
	int _rc;						\
	_pr = (struct proxyreq *)(soap)->user;			\
	if (!_pr) {						\
	    verror("no proxyreq state!\n");			\
	    pthread_mutex_unlock(mutex);			\
	    return SOAP_ERR;					\
	}							\
	if (_pr->state == PROXYREQ_STATE_NEW) {			\
	    _pr->state = PROXYREQ_STATE_BUFFERED;		\
	    _rc = proxyreq_attach_objid(_pr,mobjid);		\
	    if (_rc != SOAP_OK) {				\
		pthread_mutex_unlock(mutex);			\
		return _rc;					\
	    }							\
	    _pr->objtype_mutex = mutex;				\
	    /*							\
	     * WARNING: the thing that handles SOAP_STOP must	\
	     * 1) call proxyreq_send_request; and		\
	     * 2) unlock the mutex!				\
	     */							\
	    /* pthread_mutex_unlock(mutex); */			\
	    return SOAP_STOP;					\
	}							\
	else if (_pr->state == PROXYREQ_STATE_PROCESSING) {	\
	    _pr->state = PROXYREQ_STATE_SERVING;		\
	    /* Free the request buffer, allowing us to create a	\
	     * response buf in its place if necessary.		\
	     */							\
	    _pr->objtype_mutex = mutex;				\
	    if (_pr->buf) {					\
		free(_pr->buf);					\
		_pr->buf = NULL;				\
		_pr->len = _pr->bufsiz = _pr->bufidx = 0;	\
	    }							\
	    if (_pr->orig_fclose) {				\
		_pr->soap->fclose = _pr->orig_fclose;		\
		_pr->orig_fclose = NULL;			\
	    }							\
	    /* Caller continues to normally execute RPC here... */ \
	}							\
	else {							\
	    verror("unexpected proxyreq state %d; bug?!\n",_pr->state); \
	    pthread_mutex_unlock(mutex);			\
	    return SOAP_ERR;					\
	}							\
} while (0)
336 
/*
 * Must be invoked by the code that sees SOAP_STOP after soap_serve():
 * forwards the buffered request to @pr->monitor via
 * proxyreq_send_request(), and unlocks the per-objtype mutex that
 * PROXY_REQUEST_LOCKED saved in _pr->objtype_mutex (if any).
 *
 * BUGFIX: the error message previously printed `retval`, an
 * identifier this macro never declares (a compile error -- or a
 * wrong, unrelated value -- at the expansion site); print the
 * just-assigned (soap)->error instead. Also wrapped in
 * do { } while (0) and dropped a stray trailing line-continuation
 * after the closing brace.
 */
#define PROXY_REQUEST_HANDLE_STOP(soap) do {			\
	struct proxyreq *_pr;					\
	_pr = (struct proxyreq *)(soap)->user;			\
								\
	if (((soap)->error = proxyreq_send_request(_pr)) != SOAP_OK) { \
	    verror("proxyreq_send_request error %d\n",(soap)->error); \
	}							\
	if (_pr->objtype_mutex)					\
	    pthread_mutex_unlock(_pr->objtype_mutex);		\
} while (0)
#endif /* __PROXYREQ_H__ */
proxyreq_state_t
Definition: proxyreq.h:108
proxyreq_state_t state
Definition: proxyreq.h:129
struct proxyreq * proxyreq_create(struct soap *soap)
Definition: proxyreq.c:298
void proxyreq_detach_soap(struct proxyreq *pr)
Definition: proxyreq.c:692
int bufidx
Definition: proxyreq.h:179
struct monitor * monitor
Definition: proxyreq.h:136
int proxyreq_attach_objid(struct proxyreq *pr, int objid)
Definition: proxyreq.c:400
struct soap * soap
Definition: proxyreq.h:160
uint8_t monitor_is_new
Definition: proxyreq.h:157
int proxyreq_send_response(struct proxyreq *pr)
Definition: proxyreq.c:507
int len
Definition: proxyreq.h:177
int objid
Definition: proxyreq.h:142
int(* orig_fsend)(struct soap *soap, const char *s, size_t n)
Definition: proxyreq.h:171
int bufsiz
Definition: proxyreq.h:178
int(* orig_fclose)(struct soap *soap)
Definition: proxyreq.h:169
unsigned long tid
Definition: proxyreq.h:127
int proxyreq_send_request(struct proxyreq *pr)
Definition: proxyreq.c:447
pthread_mutex_t * objtype_mutex
Definition: proxyreq.h:151
int proxyreq_attach_new_objid(struct proxyreq *pr, int objid, struct monitor *monitor)
Definition: proxyreq.c:421
int msg_id
Definition: proxyreq.h:143
size_t(* orig_frecv)(struct soap *soap, char *s, size_t n)
Definition: proxyreq.h:167
int proxyreq_recv_request(struct monitor *monitor, struct monitor_msg *msg)
Definition: proxyreq.c:552
int proxyreq_recv_response(struct monitor *monitor, struct monitor_msg *msg)
Definition: proxyreq.c:621
struct proxyreq * proxyreq_create_proxied(int objid, char *buf, int buflen)
Definition: proxyreq.c:332
void proxyreq_free_buffer(struct proxyreq *pr)
Definition: proxyreq.c:707
void proxyreq_free(struct proxyreq *pr)
Definition: proxyreq.c:717
char * buf
Definition: proxyreq.h:176
int proxyreq_handle_request(struct soap *soap, char *svc_name)
Definition: proxyreq.c:30