/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "protocol.h"

#if 0
#define CHECK_SLAB_OKAY(X)				     \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;

/*
 * Mark applied to socket buffers.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_DATA,		/* data message */
	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
	RXRPC_SKB_MARK_BUSY,		/* server busy message */
	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
	RXRPC_SKB_MARK_NEW_CALL,	/* new incoming call notification */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
	u32			epoch;		/* Local epoch for detecting local-end reset */
	struct list_head	calls;		/* List of calls active in this namespace */
	rwlock_t		call_lock;	/* Lock for ->calls */

	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head	service_conns;	/* Service conns in this namespace */
	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct delayed_work	service_conn_reaper;

	unsigned int		nr_client_conns;
	unsigned int		nr_active_client_conns;
	bool			kill_all_client_conns;
	bool			live;
	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
	struct list_head	waiting_client_conns;
	struct list_head	active_client_conns;
	struct list_head	idle_client_conns;
	struct delayed_work	client_conn_reaper;

	struct list_head	local_endpoints;
	struct mutex		local_mutex;	/* Lock for ->local_endpoints */

	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */
	DECLARE_HASHTABLE	(peer_hash, 10);
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers. This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
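
/*
 * Illustrative sketch (not part of the original header): each backlog ring
 * above is used modulo RXRPC_BACKLOG_MAX, so the number of preallocated
 * entries currently held in a ring can be derived from its head and tail
 * indices with a hypothetical helper along these lines.
 */
static inline unsigned int rxrpc_backlog_count(unsigned short head,
					       unsigned short tail)
{
	/* Works like CIRC_CNT() for a power-of-two ring size. */
	return (head - tail) & (RXRPC_BACKLOG_MAX - 1);
}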

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	u16			second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16		from;		/* Service ID to upgrade (if not 0) */
		u16		to;		/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* Primary Service/local addresses */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	union {
		u8		nr_jumbo;	/* Number of jumbo subpackets */
	};
	union {
		int		remain;		/* amount of space remaining for next write */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
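
/*
 * Illustrative note (not part of the original header): rxrpc_skb() simply
 * overlays the private state on sk_buff::cb, so struct rxrpc_skb_priv must
 * stay within the 48-byte cb area.  If a build-time check were wanted, it
 * could look roughly like:
 *
 *	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */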

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;
	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root		client_conns;	/* Client connections by socket params */
	spinlock_t		client_conns_lock; /* Lock for client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* Service socket closed */
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct work_struct	error_distributor;
	struct rb_root		service_conns;	/* Service connections */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	u64			rtt;		/* Current RTT estimate (in ns) */
	u64			rtt_sum;	/* Sum of cache contents */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
	u8			rtt_cursor;	/* next entry at which to insert */
	u8			rtt_usage;	/* amount of cache actually used */

	u8			cong_cwnd;	/* Congestion window size */
};
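
/*
 * Illustrative note (not part of the original header): the rtt* fields above
 * form a small moving-average filter.  rtt_cache holds the most recent
 * samples (up to RXRPC_RTT_CACHE_SIZE of them, rtt_usage currently valid),
 * rtt_sum is their running total, and rtt is approximately
 * rtt_sum / rtt_usage, expressed in nanoseconds.
 */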

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64		index_key;
	};
};
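
/*
 * Illustrative note (not part of the original header): the anonymous union
 * lets the { epoch, cid } pair double as a single 64-bit index_key, so a
 * connection lookup (for instance in a peer's service connection tree) can
 * compare one u64 rather than two separate u32 fields.
 */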

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
};

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_UPGRADE,	/* Conn is on active list, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	atomic_t		usage;
	struct rcu_head		rcu;
	struct list_head	cache_link;

	spinlock_t		channel_lock;
	unsigned char		active_chans;	/* Mask of active channels */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	struct rxrpc_channel {
		struct rxrpc_call __rcu	*call;		/* Active call */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* Node in local->client_conns */
		struct rb_node	service_node;	/* Node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			abort_code;	/* Abort code of connection abort */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
	u16			service_id;	/* Service ID, possibly upgraded */
	u8			size_align;	/* data size alignment (for security) */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	short			error;		/* Local error code */
};
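
/*
 * Illustrative sketch (not part of the original header, and assuming the
 * RXRPC_CHANNELMASK definition from protocol.h): the low bits of a call's
 * connection ID select one of the four channels above, so a hypothetical
 * channel lookup would look roughly like this.
 */
static inline struct rxrpc_channel *rxrpc_channel_for_cid(struct rxrpc_connection *conn,
							  u32 cid)
{
	return &conn->channels[cid & RXRPC_CHANNELMASK];
}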

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_TX_LASTQ,		/* Last packet has been queued */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_PINGING,		/* Ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_TIMER,		/* Timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping send required */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	struct mutex		user_mutex;	/* User access mutex */
	ktime_t			ack_at;		/* When deferred ACK needs to happen */
	ktime_t			resend_at;	/* When next resend needs to happen */
	ktime_t			ping_at;	/* When next to send a ping */
	ktime_t			expire_at;	/* When the call times out */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	processor;	/* Event processor */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	s64			tx_total_len;	/* Total length left to be transmitted (or -1) */
	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	atomic_t		usage;
	u16			service_id;	/* service ID */
	u8			security_ix;	/* Security type */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 32
	struct sk_buff		**rxtx_buffer;
	u8			*rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08

#define RXRPC_RX_ANNO_JUMBO	0x3f		/* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST	0x40		/* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED	0x80		/* Set if verified and decrypted */
	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
						 * not hard-ACK'd packet follows this.
						 */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received but not
						 * consumed packet follows this.
						 */
	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated. */
	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
	u8			rx_winsize;	/* Size of Rx window */
	u8			tx_winsize;	/* Maximum size of Tx window */
	bool			tx_phase;	/* T if transmission phase, F if receive phase */
	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */

	/* receive-phase ACK management */
	u8			ackr_reason;	/* reason to ACK */
	u16			ackr_skew;	/* skew on packet being ACK'd */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */

	/* ping management */
	rxrpc_serial_t		ping_serial;	/* Last ping sent */
	ktime_t			ping_time;	/* Time last ping sent */

	/* transmission-phase ACK management */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};
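
/*
 * Illustrative sketch (not part of the original header): slots in the Rx/Tx
 * ring are addressed by masking the sequence number with
 * RXRPC_RXTX_BUFF_MASK, so a hypothetical accessor for a packet's annotation
 * byte would look roughly like this.
 */
static inline u8 rxrpc_rxtx_annotation(const struct rxrpc_call *call,
				       rxrpc_seq_t seq)
{
	return call->rxtx_annotations[seq & RXRPC_RXTX_BUFF_MASK];
}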

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;
	u8			nr_acks;		/* Number of ACKs in packet */
	u8			nr_nacks;		/* Number of NACKs in packet */
	u8			nr_new_acks;		/* Number of new ACKs in packet */
	u8			nr_new_nacks;		/* Number of new NACKs in packet */
	u8			nr_rot_new_acks;	/* Number of rotated new ACKs */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_connection *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 unsigned long, s64, gfp_t);
int rxrpc_retry_client_call(struct rxrpc_sock *,
			    struct rxrpc_call *,
			    struct rxrpc_conn_parameters *,
			    struct sockaddr_rxrpc *,
			    gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * Transition a call to the complete state.
 */
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
					       enum rxrpc_call_completion compl,
					       u32 abort_code,
					       int error)
{
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->error = error;
		call->completion = compl;
		call->state = RXRPC_CALL_COMPLETE;
		wake_up(&call->waitq);
		return true;
	}
	return false;
}

static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
					     enum rxrpc_call_completion compl,
					     u32 abort_code,
					     int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call successfully completed.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_call_completed(call);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call is locally aborted.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				    rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Abort a call due to a protocol error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))

/*
 * conn_client.c
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned int rxrpc_conn_idle_client_expiry;
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *,
				   struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
	if (local && atomic_dec_and_test(&local->usage))
		__rxrpc_put_local(local);
}

static inline void rxrpc_queue_local(struct rxrpc_local *local)
{
	rxrpc_queue_work(&local->processor);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned int rxrpc_resend_timeout;

extern const s8 rxrpc_ack_priority[];

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);

static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
	return peer;
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	if (peer && atomic_dec_and_test(&peer->usage))
		__rxrpc_put_peer(peer);
}

/*
 * proc.c
 */
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
				struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
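
/*
 * Illustrative note (not part of the original header): these comparisons are
 * wrap-safe because they operate on the signed difference of the sequence
 * numbers.  For example, before(0xfffffffe, 0x00000001) is true, since
 * (s32)(0xfffffffe - 0x00000001) is negative even though the first value is
 * numerically larger.
 */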

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))		\
		kenter(FMT,##__VA_ARGS__);			\
} while (0)

#define _leave(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))		\
		kleave(FMT,##__VA_ARGS__);			\
} while (0)

#define _debug(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))		\
		kdebug(FMT,##__VA_ARGS__);			\
} while (0)

#define _proto(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))		\
		kproto(FMT,##__VA_ARGS__);			\
} while (0)

#define _net(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))		\
		knet(FMT,##__VA_ARGS__);			\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */