/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#ifndef _ASM_IA64_THREAD_INFO_H
#define _ASM_IA64_THREAD_INFO_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <asm/processor.h>
#include <asm/ptrace.h>

#define PREEMPT_ACTIVE_BIT 30
#define PREEMPT_ACTIVE	(1 << PREEMPT_ACTIVE_BIT)

#ifndef __ASSEMBLY__

/*
 * On IA-64, we want to keep the task structure and kernel stack together, so they can be
 * mapped by a single TLB entry and so they can be addressed by the "current" pointer
 * without having to do pointer masking.
 */
struct thread_info {
	struct task_struct *task;	/* XXX not really needed, except for dup_task_struct() */
	struct exec_domain *exec_domain;/* execution domain */
	__u32 flags;			/* thread_info flags (see TIF_*) */
	__u32 cpu;			/* current CPU */
	__u32 last_cpu;			/* Last CPU thread ran on */
	__u32 status;			/* Thread synchronous flags */
	mm_segment_t addr_limit;	/* user-level address space limit */
	int preempt_count;		/* 0=preemptable, <0=BUG; will also serve as bh-counter */
	struct restart_block restart_block;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	__u64 ac_stamp;
	__u64 ac_leave;
	__u64 ac_stime;
	__u64 ac_utime;
#endif
};

#define THREAD_SIZE			KERNEL_STACK_SIZE

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.addr_limit	= KERNEL_DS,		\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_info_node(tsk, node)	\
		((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info()	((struct thread_info *) 0)
#define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
#define task_thread_info(tsk)	((struct thread_info *) 0)
#endif
#define free_thread_info(ti)	/* nothing */
#define task_stack_page(tsk)	((void *)(tsk))
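
/*
 * Illustrative sketch, not part of the original header: because the
 * task_struct and the kernel stack live in one contiguous region, the
 * thread_info of any task is reached by adding a fixed offset to the
 * task pointer -- no pointer masking required.  The helper name below
 * is hypothetical and exists only to show the accessor in use.
 */
static inline __u32 example_task_last_cpu(struct task_struct *tsk)
{
	/* thread_info sits IA64_TASK_SIZE bytes above the task_struct */
	return task_thread_info(tsk)->last_cpu;
}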

#define __HAVE_THREAD_FUNCTIONS
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->ac_stime = 0;		\
	task_thread_info(p)->ac_utime = 0;		\
	task_thread_info(p)->task = (p);
#else
#define setup_thread_stack(p, org) \
	*task_thread_info(p) = *task_thread_info(org); \
	task_thread_info(p)->task = (p);
#endif
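
/*
 * Illustrative sketch, not part of the original header: at fork time the
 * parent's thread_info is copied wholesale into the child's stack area
 * and ->task is then repointed at the child; with CONFIG_VIRT_CPU_ACCOUNTING
 * the per-thread time counters restart from zero.  The function name below
 * is hypothetical.
 */
static inline void example_copy_thread_info(struct task_struct *child,
					    struct task_struct *parent)
{
	setup_thread_stack(child, parent);
}
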
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)

#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct_node(node)						\
({										\
	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
					     KERNEL_STACK_SIZE_ORDER);		\
	struct task_struct *ret = page ? page_address(page) : NULL;		\
										\
	ret;									\
})
#define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)

#endif /* !__ASSEMBLY__ */

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to access
 * - pending work-to-be-done flags are in least-significant 16 bits, other flags
 *   in top 16 bits
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SYSCALL_TRACE	2	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
#define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
#define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
#define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)

/* "work to do on user-return" bits */
#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))

#define TS_POLLING		1	/* true if in idle loop and not sleeping */
#define TS_RESTORE_SIGMASK	2	/* restore signal mask in do_signal() */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	set_bit(TIF_SIGPENDING, &ti->flags);
}
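
/*
 * Illustrative sketch, not part of the original header: the low-order
 * "work to do" bits are typically tested as one mask on the return-to-user
 * path.  The helper name below is hypothetical.
 */
static inline int example_user_work_pending(void)
{
	return (current_thread_info()->flags & TIF_ALLWORK_MASK) != 0;
}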
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_IA64_THREAD_INFO_H */