/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc.  It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_

#include <linux/timer.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>

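/*
 * The parameters of a single RPC call: which procedure to invoke, the
 * caller-supplied argument and result buffers, and the credential used
 * for the request.
 */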
struct rpc_procinfo;
struct rpc_message {
 struct rpc_procinfo * rpc_proc; /* procedure information */
 void * rpc_argp; /* call arguments */
 void * rpc_resp; /* reply buffer */
 struct rpc_cred * rpc_cred; /* credentials */
};

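/*
 * Per-task state used while a task sleeps on an rpc_wait_queue: the
 * queue linkage, links to related tasks, and a back pointer to the
 * queue the task is waiting on.
 */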
struct rpc_call_ops;
struct rpc_wait_queue;
struct rpc_wait {
 struct list_head list; /* wait queue links */
 struct list_head links; /* links to related tasks */
 struct rpc_wait_queue * rpc_waitq; /* the RPC wait queue we're on */
};

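/*
 * An RPC task represents one RPC call as it moves through the
 * scheduler's state machine, either synchronously or asynchronously
 * on a workqueue (normally rpciod).
 */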
struct rpc_task {
#ifdef RPC_DEBUG
 unsigned long tk_magic; /* debug magic value */
#endif
 atomic_t tk_count; /* reference count */
 struct list_head tk_task; /* list of all tasks */
 struct rpc_clnt * tk_client; /* RPC client */
 struct rpc_rqst * tk_rqstp; /* RPC request */
 int tk_status; /* result of last operation */

 struct rpc_message tk_msg; /* RPC call info */
 __u8 tk_garb_retry; /* retries on garbled replies */
 __u8 tk_cred_retry; /* retries on credential errors */

 unsigned long tk_cookie; /* cookie for batching tasks */

 void (*tk_timeout_fn)(struct rpc_task *); /* run on timeout */
 void (*tk_callback)(struct rpc_task *); /* run after waking up */
 void (*tk_action)(struct rpc_task *); /* next step in the state machine */
 const struct rpc_call_ops *tk_ops; /* caller callbacks */
 void * tk_calldata; /* argument for tk_ops callbacks */

 struct timer_list tk_timer; /* timer backing rpc_sleep timeouts */
 unsigned long tk_timeout; /* timeout for rpc_sleep() */
 unsigned short tk_flags; /* RPC_TASK_* flags */
 unsigned char tk_priority : 2; /* task priority */
 unsigned long tk_runstate; /* run state bits */
 struct workqueue_struct *tk_workqueue; /* workqueue running async tasks */
 union {
 struct work_struct tk_work; /* async task work entry */
 struct rpc_wait tk_wait; /* wait queue linkage */
 } u;

 unsigned short tk_timeouts; /* major timeouts */
 size_t tk_bytes_sent; /* total bytes sent */
 unsigned long tk_start; /* task start timestamp */
 long tk_rtt; /* round-trip time (jiffies) */

#ifdef RPC_DEBUG
 unsigned short tk_pid; /* debug task id */
#endif
};
#define tk_auth tk_client->cl_auth
#define tk_xprt tk_client->cl_xprt

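/*
 * Iterators: task_for_each()/task_for_first() walk tasks linked on a
 * wait queue list via u.tk_wait.list; alltask_for_each() walks tasks
 * linked via tk_task.
 */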
#define task_for_each(task, pos, head) \
 list_for_each(pos, head) \
  if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)

#define task_for_first(task, head) \
 if (!list_empty(head) && \
  ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))

#define alltask_for_each(task, pos, head) \
 list_for_each(pos, head) \
  if ((task=list_entry(pos, struct rpc_task, tk_task)),1)

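/*
 * A minimal usage sketch (illustrative only; assumes the caller already
 * holds queue->lock, "queue" is a struct rpc_wait_queue *, and
 * handle_task() is a hypothetical helper):
 *
 *  struct rpc_task *task;
 *  struct list_head *pos;
 *
 *  task_for_each(task, pos, &queue->tasks[0])
 *   handle_task(task);
 */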
typedef void (*rpc_action)(struct rpc_task *);

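/*
 * Per-call callbacks supplied by the caller: rpc_call_prepare() runs
 * before the call is transmitted, rpc_call_done() when the call
 * completes, and rpc_release() when the task is freed and calldata
 * can be released.
 */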
struct rpc_call_ops {
 void (*rpc_call_prepare)(struct rpc_task *, void *); /* prepare the call */
 void (*rpc_call_done)(struct rpc_task *, void *); /* call has completed */
 void (*rpc_release)(void *); /* release calldata */
};

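/* Values for tk_flags. */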
#define RPC_TASK_ASYNC 0x0001
#define RPC_TASK_SWAPPER 0x0002
#define RPC_TASK_CHILD 0x0008
#define RPC_CALL_MAJORSEEN 0x0020
#define RPC_TASK_ROOTCREDS 0x0040
#define RPC_TASK_DYNAMIC 0x0080
#define RPC_TASK_KILLED 0x0100
#define RPC_TASK_SOFT 0x0200
#define RPC_TASK_NOINTR 0x0400

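/* Convenience tests on tk_flags and the pending tk_callback. */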
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_CHILD(t) ((t)->tk_flags & RPC_TASK_CHILD)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)

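/* Bit numbers within tk_runstate, manipulated atomically by the helpers below. */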
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_WAKEUP 2
#define RPC_TASK_HAS_TIMER 3
#define RPC_TASK_ACTIVE 4

#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_set_running(t) (set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_test_and_set_running(t) \
 (test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_clear_running(t) \
 do { \
  smp_mb__before_clear_bit(); \
  clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
  smp_mb__after_clear_bit(); \
 } while (0)

#define RPC_IS_QUEUED(t) (test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_set_queued(t) (set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_clear_queued(t) \
 do { \
  smp_mb__before_clear_bit(); \
  clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
  smp_mb__after_clear_bit(); \
 } while (0)

#define rpc_start_wakeup(t) \
 (test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
 do { \
  smp_mb__before_clear_bit(); \
  clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
  smp_mb__after_clear_bit(); \
 } while (0)

#define RPC_IS_ACTIVATED(t) (test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
#define rpc_set_active(t) (set_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
#define rpc_clear_active(t) \
 do { \
  smp_mb__before_clear_bit(); \
  clear_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate); \
  smp_mb__after_clear_bit(); \
 } while (0)

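/* Wait queue priority levels. */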
#define RPC_PRIORITY_LOW 0
#define RPC_PRIORITY_NORMAL 1
#define RPC_PRIORITY_HIGH 2
#define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1)

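/*
 * A queue of sleeping RPC tasks, with one task list per priority level.
 * The cookie/count/nr fields implement batched, round-robin servicing
 * of owners on priority queues.
 */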
struct rpc_wait_queue {
 spinlock_t lock;
 struct list_head tasks[RPC_NR_PRIORITY]; /* task list per priority level */
 unsigned long cookie; /* cookie of last task serviced */
 unsigned char maxpriority; /* maximum priority (0 if not a priority queue) */
 unsigned char priority; /* current priority */
 unsigned char count; /* # task groups left at this priority */
 unsigned char nr; /* # tasks left in current batch */
 unsigned short qlen; /* total # tasks waiting in queue */
#ifdef RPC_DEBUG
 const char * name; /* queue name, for debugging */
#endif
};

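/* Number of requests serviced consecutively from one cookie before the queue moves on. */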
#define RPC_BATCH_COUNT 16

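/* Static initializers; the RPC_DEBUG variant also records the queue name. */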
#ifndef RPC_DEBUG
#define RPC_WAITQ_INIT(var,qname) { \
 .lock = SPIN_LOCK_UNLOCKED, \
 .tasks = { \
  [0] = LIST_HEAD_INIT(var.tasks[0]), \
  [1] = LIST_HEAD_INIT(var.tasks[1]), \
  [2] = LIST_HEAD_INIT(var.tasks[2]), \
  }, \
 }
#else
#define RPC_WAITQ_INIT(var,qname) { \
 .lock = SPIN_LOCK_UNLOCKED, \
 .tasks = { \
  [0] = LIST_HEAD_INIT(var.tasks[0]), \
  [1] = LIST_HEAD_INIT(var.tasks[1]), \
  [2] = LIST_HEAD_INIT(var.tasks[2]), \
  }, \
 .name = qname, \
 }
#endif
#define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)

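/*
 * Example (illustrative only; "delay_queue" and "delayq" are arbitrary names):
 *
 *  static RPC_WAITQ(delay_queue, "delayq");
 *
 * defines and statically initializes a wait queue.
 */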
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)

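/* Task creation and wake-up entry points. */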
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
 const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);

struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);

#ifdef RPC_DEBUG

#endif

#ifdef RPC_DEBUG
#endif
#endif /* _LINUX_SUNRPC_SCHED_H_ */