/* Target operations for the remote server for GDB.
   Copyright (C) 2002, 2004, 2005, 2011
   Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.
   It has been modified to integrate it in valgrind

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "target.h"
#include "regdef.h"
#include "regcache.h"
#include "valgrind_low.h"
#include "gdb/signals.h"
#include "pub_core_aspacemgr.h"
#include "pub_tool_machine.h"
#include "pub_core_threadstate.h"
#include "pub_core_transtab.h"
#include "pub_core_gdbserver.h"
#include "pub_tool_debuginfo.h"


/* the_low_target defines the architecture specific aspects depending
   on the cpu */
static struct valgrind_target_ops the_low_target;

static
char *image_ptid(unsigned long ptid)
{
   static char result[100];
   VG_(sprintf) (result, "id %ld", ptid);
   return result;
}
#define get_thread(inf) ((struct thread_info *)(inf))
static
void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
{
   struct thread_info *thread = get_thread (inf);
   if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
      dlog(1, "removing gdb ptid %s\n",
           image_ptid(thread_to_gdb_id(thread)));
      remove_thread (thread);
   }
}

/* synchronize threads known by valgrind and threads known by gdbserver */
static
void valgrind_update_threads (int pid)
{
   ThreadId tid;
   ThreadState *ts;
   unsigned long ptid;
   struct thread_info *ti;

   /* call remove_thread for all gdb threads not in valgrind threads */
   for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

   /* call add_thread for all valgrind threads not known in gdb all_threads */
   for (tid = 1; tid < VG_N_THREADS; tid++) {

#define LOCAL_THREAD_TRACE " ti* %p vgtid %d status %s as gdb ptid %s lwpid %d\n", \
        ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
        image_ptid (ptid), ts->os_state.lwpid

      if (VG_(is_valid_tid) (tid)) {
         ts = VG_(get_ThreadState) (tid);
         ptid = ts->os_state.lwpid;
         ti = gdb_id_to_thread (ptid);
         if (!ti) {
            /* we do not report the threads which are not yet fully
               initialized otherwise this creates duplicated threads
               in gdb: once with pid xxx lwpid 0, then after that
               with pid xxx lwpid yyy. */
            if (ts->status != VgTs_Init) {
               dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
               add_thread (ptid, ts, ptid);
            }
         } else {
            dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
         }
      }
#undef LOCAL_THREAD_TRACE
   }
}

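/* Builds a register definition array containing 3 copies of the given
   reg_defs: the guest registers followed by the two shadow register
   sets, appending "s1"/"s2" to the shadow register names and shifting
   their offsets by the size of one register set. */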
static
struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
   int i, r;
   static char *postfix[3] = { "", "s1", "s2" };
   struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
   int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;

   for (i = 0; i < 3; i++) {
      for (r = 0; r < n; r++) {
         new_regs[i*n + r].name = malloc(strlen(reg_defs[r].name)
                                         + strlen (postfix[i]) + 1);
         strcpy (new_regs[i*n + r].name, reg_defs[r].name);
         strcat (new_regs[i*n + r].name, postfix[i]);
         new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
         new_regs[i*n + r].size = reg_defs[r].size;
         dlog(1,
              "%10s Nr %d offset(bit) %d offset(byte) %d size(bit) %d\n",
              new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
              (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
      }
   }

   return new_regs;
}


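/* Address that caused the last watchpoint stop, or 0 if the last stop
   was not due to a watchpoint. */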
static CORE_ADDR stopped_data_address = 0;
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}

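/* Returns non zero if the last stop was due to a watchpoint. */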
int valgrind_stopped_by_watchpoint (void)
{
   return stopped_data_address != 0;
}

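/* Returns the data address that triggered the watchpoint stop
   (0 if none). */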
CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}

/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means
   gdb/gdbserver has changed the pc so as to have either
   a "continue by jumping at that address"
   or a "continue at that address to call some code from gdb".
*/
static CORE_ADDR resume_pc;

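/* Signal encountered by the inferior, to be reported to GDB at the
   next stop (0 when there is nothing to report). */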
static int vki_signal_to_report;

void gdbserver_signal_encountered (Int vki_sigNo)
{
   vki_signal_to_report = vki_sigNo;
}

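/* Signal that GDB asked to deliver with the last resume request.
   gdbserver_deliver_signal reports whether a given signal matches it. */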
static int vki_signal_to_deliver;
Bool gdbserver_deliver_signal (Int vki_sigNo)
{
   return vki_sigNo == vki_signal_to_deliver;
}

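/* Returns a textual description (in a static buffer) of the code
   address addr, using VG_(describe_IP). */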
static
char* sym (Addr addr)
{
   static char buf[200];
   VG_(describe_IP) (addr, buf, 200);
   return buf;
}

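/* ThreadId of the thread on behalf of which vgdb interrupted execution
   (0 if none); when set, valgrind_wait reports the stop for that thread. */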
ThreadId vgdb_interrupted_tid = 0;

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;

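/* When single stepping, returns the address (resume_pc) at which a
   breakpoint must be ignored once; returns 0 when not single stepping. */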
Addr valgrind_get_ignore_break_once(void)
{
   if (valgrind_single_stepping())
      return resume_pc;
   else
      return 0;
}

void valgrind_set_single_stepping(Bool set)
{
   if (set)
      stepping = 2;
   else
      stepping = 0;
}

Bool valgrind_single_stepping(void)
{
   if (stepping)
      return True;
   else
      return False;
}

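/* Returns 1 if the thread identified by the gdb id tid is known to
   Valgrind and is not a zombie, 0 otherwise. */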
int valgrind_thread_alive (unsigned long tid)
{
   struct thread_info *ti = gdb_id_to_thread(tid);
   ThreadState *tst;

   if (ti != NULL) {
      tst = (ThreadState *) inferior_target_data (ti);
      return tst->status != VgTs_Zombie;
   }
   else {
      return 0;
   }
}

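/* Prepares the resumption of the inferior as requested by GDB: clears a
   pending watchpoint stop, records the signal to deliver and the single
   stepping mode, and notes the pc from which execution will resume. */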
void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info step %d sig %d stepping %d\n",
        resume_info->step,
        resume_info->sig,
        stepping);
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   vki_signal_to_deliver = resume_info->sig;

   stepping = resume_info->step;
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
   regcache_invalidate();
}

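/* Reports a stop of the inferior to gdbserver: synchronises the thread
   lists, sets *ourstatus to 'T' (stopped) and returns the signal to
   report (SIGTRAP unless another signal was encountered). */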
unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   regcache_invalidate();
   valgrind_update_threads(pid);

   /* in valgrind, we consider that a wait always succeeds with STOPPED 'T'
      and with a signal TRAP (i.e. a breakpoint), unless there is
      a signal to report. */
   *ourstatus = 'T';
   if (vki_signal_to_report == 0)
      sig = TARGET_SIGNAL_TRAP;
   else {
      sig = target_signal_from_host(vki_signal_to_report);
      vki_signal_to_report = 0;
   }

   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* we can only change the current_inferior when the wptid references
      an existing thread. Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait returns ptid %s stop_pc %s signal %d\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}

/* Fetch one register from valgrind VEX guest state.  */
static
void fetch_register (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      char buf [size];
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the *mod received from transfer_register is not interesting.
      // What matters is whether the register data in the register cache
      // is modified.
      supply_register (regno, buf, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf, size);
         dlog(2, "fetched register %d size %d name %s value %s tid %d status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}

/* Fetch all registers, or just one, from the child process.  */
static
void usr_fetch_inferior_registers (int regno)
{
   if (regno == -1 || regno == 0)
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         fetch_register (regno);
   else
      fetch_register (regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static
void usr_store_inferior_registers (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= 0) {

      if (regno >= the_low_target.num_regs) {
         dlog(0, "error store_register regno %d max %d\n",
              regno, the_low_target.num_regs);
         return;
      }

      size = register_size (regno);
      if (size > 0) {
         Bool mod;
         Addr old_SP, new_SP;
         char buf[size];

         if (regno == the_low_target.stack_pointer_regno) {
            /* When the stack pointer register is changed such that
               the stack is extended, we had better inform the tool of
               the stack increase.  This is needed in particular to
               avoid spurious Memcheck errors during Inferior calls.
               So, we save in old_SP the SP before the change.  A
               change of stack pointer is also assumed to have
               initialised this new stack space.  For the typical
               example of an inferior call, gdb writes arguments on
               the stack, and then changes the stack pointer.  As the
               stack increase tool function might mark it as
               undefined, we have to call it at the right moment. */
            VG_(memset) ((void *) &old_SP, 0, size);
            (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                                 valgrind_to_gdbserver, size, &mod);
         }

         VG_(memset) (buf, 0, size);
         collect_register (regno, buf);
         (*the_low_target.transfer_register) (tid, regno, buf,
                                              gdbserver_to_valgrind, size, &mod);
         if (mod && VG_(debugLog_getLevel)() > 1) {
            char bufimage [2*size + 1];
            heximage (bufimage, buf, size);
            dlog(2,
                 "stored register %d size %d name %s value %s "
                 "tid %d status %s\n",
                 regno, size, the_low_target.reg_defs[regno].name, bufimage,
                 tid, VG_(name_of_ThreadStatus) (tst->status));
         }
         if (regno == the_low_target.stack_pointer_regno) {
            VG_(memcpy) (&new_SP, buf, size);
            if (old_SP > new_SP) {
               Word delta = (Word)new_SP - (Word)old_SP;
               dlog(1,
                    "   stack increase by stack pointer changed from %p to %p "
                    "delta %ld\n",
                    (void*) old_SP, (void *) new_SP,
                    delta);
               VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
               VG_TRACK( new_mem_stack,       new_SP, -delta );
               VG_TRACK( post_mem_write, Vg_CoreClientReq, tid,
                         new_SP, -delta);
            }
         }
      }
   }
   else {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         usr_store_inferior_registers (regno);
   }
}

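/* Fetch (resp. store) the inferior registers from (resp. to) the
   valgrind guest state; regno -1 means all registers. */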
void valgrind_fetch_registers (int regno)
{
   usr_fetch_inferior_registers (regno);
}

void valgrind_store_registers (int regno)
{
   usr_store_inferior_registers (regno);
}

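/* Copies len bytes of inferior memory at memaddr into myaddr.
   Returns 0 on success, -1 if the address range cannot be read. */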
int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
   const void *sourceaddr = C2v (memaddr);
   dlog(2, "reading memory %p size %d\n", sourceaddr, len);
   if (!VG_(am_is_valid_for_client_or_free_or_resvn) ((Addr) sourceaddr,
                                                      len, VKI_PROT_READ)) {
      dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
      return -1;
   }
   VG_(memcpy) (myaddr, sourceaddr, len);
   return 0;
}

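/* Copies len bytes from myaddr into inferior memory at memaddr and
   informs the tool of the write.  Returns 0 on success, -1 if the
   address range cannot be written. */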
int valgrind_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
   void *targetaddr = C2v (memaddr);
   dlog(2, "writing memory %p size %d\n", targetaddr, len);
   if (!VG_(am_is_valid_for_client_or_free_or_resvn) ((Addr)targetaddr,
                                                      len, VKI_PROT_WRITE)) {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
   if (len > 0) {
      VG_(memcpy) (targetaddr, myaddr, len);
      if (VG_(tdict).track_post_mem_write) {
         /* Inform the tool of the post memwrite.  Note that we do the
            minimum necessary to avoid complaints from e.g.
            memcheck.  The idea is that the debugger is as unintrusive
            as possible.  So, we do not inform of the pre mem write
            (and in any case, this would cause problems with memcheck,
            which does not like our CorePart in pre_mem_write). */
         ThreadState *tst =
            (ThreadState *) inferior_target_data (current_inferior);
         ThreadId tid = tst->tid;
         VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                          (Addr) targetaddr, len );
      }
   }
   return 0;
}

/* insert or remove a breakpoint */
static
int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
{
   PointKind kind;
   switch (type) {
   case '0': /* implemented by inserting checks at each instruction in sb */
      kind = software_breakpoint;
      break;
   case '1': /* hw breakpoint, same implementation as sw breakpoint */
      kind = hardware_breakpoint;
      break;
   case '2':
      kind = write_watchpoint;
      break;
   case '3':
      kind = read_watchpoint;
      break;
   case '4':
      kind = access_watchpoint;
      break;
   default:
      vg_assert (0);
   }

   /* Attention: gdbserver convention differs: 0 means ok; 1 means not ok */
   if (VG_(gdbserver_point) (kind, insert, addr, len))
      return 0;
   else
      return 1; /* error or unsupported */
}

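/* Returns the architecture specific target xml description provided by
   the low target (shadow_mode selects the description including the
   shadow registers). */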
char* valgrind_target_xml (Bool shadow_mode)
{
   return (*the_low_target.target_xml) (shadow_mode);
}

int valgrind_insert_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}

int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ False, type, addr, len);
}

/* returns a pointer to the architecture state corresponding to
   the provided register set: 0 => normal guest registers,
                              1 => shadow1
                              2 => shadow2
*/
VexGuestArchState* get_arch (int set, ThreadState* tst)
{
   switch (set) {
   case 0: return &tst->arch.vex;
   case 1: return &tst->arch.vex_shadow1;
   case 2: return &tst->arch.vex_shadow2;
   default: vg_assert(0);
   }
}

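/* Register definitions and count of the normal (non shadow) register
   set, saved so that initialize_shadow_low can switch between the
   normal and the shadow register layouts. */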
static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
void initialize_shadow_low(Bool shadow_mode)
{
   if (non_shadow_reg_defs == NULL) {
      non_shadow_reg_defs = the_low_target.reg_defs;
      non_shadow_num_regs = the_low_target.num_regs;
   }

   regcache_invalidate();
   if (the_low_target.reg_defs != non_shadow_reg_defs) {
      free (the_low_target.reg_defs);
   }
   if (shadow_mode) {
      the_low_target.num_regs = 3 * non_shadow_num_regs;
      the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs,
                                                   non_shadow_num_regs);
   } else {
      the_low_target.num_regs = non_shadow_num_regs;
      the_low_target.reg_defs = non_shadow_reg_defs;
   }
   set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}

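/* Sets current_inferior to the thread chosen by GDB: the general_thread
   when use_general is 1, otherwise the step/continue thread, falling
   back to the first known thread. */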
void set_desired_inferior (int use_general)
{
   struct thread_info *found;

   if (use_general == 1) {
      found = (struct thread_info *) find_inferior_id (&all_threads,
                                                       general_thread);
   } else {
      found = NULL;

      /* If we are continuing any (all) thread(s), use step_thread
         to decide which thread to step and/or send the specified
         signal to.  */
      if ((step_thread != 0 && step_thread != -1)
          && (cont_thread == 0 || cont_thread == -1))
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          step_thread);

      if (found == NULL)
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          cont_thread);
   }

   if (found == NULL)
      current_inferior = (struct thread_info *) all_threads.head;
   else
      current_inferior = found;
   {
      ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
      ThreadId tid = tst->tid;
      dlog(1, "set_desired_inferior use_general %d found %p tid %d lwpid %d\n",
           use_general, found, tid, tst->os_state.lwpid);
   }
}

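/* memcpy variant that only copies when source and destination differ,
   reporting in *mod whether the destination was modified. */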
void* VG_(dmemcpy) ( void *d, const void *s, SizeT sz, Bool *mod )
{
   if (VG_(memcmp) (d, s, sz)) {
      *mod = True;
      return VG_(memcpy) (d, s, sz);
   } else {
      *mod = False;
      return d;
   }
}

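/* Transfers sz bytes between the valgrind and gdbserver representations,
   in the direction given by dir; *mod is set to True if the destination
   was changed. */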
void VG_(transfer) (void *valgrind,
                    void *gdbserver,
                    transfer_direction dir,
                    SizeT sz,
                    Bool *mod)
{
   if (dir == valgrind_to_gdbserver)
      VG_(dmemcpy) (gdbserver, valgrind, sz, mod);
   else if (dir == gdbserver_to_valgrind)
      VG_(dmemcpy) (valgrind, gdbserver, sz, mod);
   else
      vg_assert (0);
}

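/* Installs the architecture specific operations in the_low_target for
   the architecture valgrind is built for. */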
void valgrind_initialize_target(void)
{
#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#elif defined(VGA_mips32)
   mips32_init_architecture(&the_low_target);
#else
   architecture missing in target.c valgrind_initialize_target
#endif
}