Lines matching refs:qp (net/ipv4/ip_fragment.c)

78 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
84 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init() local
90 qp->ecn = 0; in ip4_frag_init()
91 qp->peer = q->fqdir->max_dist ? in ip4_frag_init()
98 struct ipq *qp; in ip4_frag_free() local
100 qp = container_of(q, struct ipq, q); in ip4_frag_free()
101 if (qp->peer) in ip4_frag_free()
102 inet_putpeer(qp->peer); in ip4_frag_free()
139 struct ipq *qp; in ip_expire() local
142 qp = container_of(frag, struct ipq, q); in ip_expire()
143 net = qp->q.fqdir->net; in ip_expire()
148 if (READ_ONCE(qp->q.fqdir->dead)) in ip_expire()
151 spin_lock(&qp->q.lock); in ip_expire()
153 if (qp->q.flags & INET_FRAG_COMPLETE) in ip_expire()
156 ipq_kill(qp); in ip_expire()
160 if (!(qp->q.flags & INET_FRAG_FIRST_IN)) in ip_expire()
167 head = inet_frag_pull_head(&qp->q); in ip_expire()
170 head->dev = dev_get_by_index_rcu(net, qp->iif); in ip_expire()
185 if (frag_expire_skip_icmp(qp->q.key.v4.user) && in ip_expire()
189 spin_unlock(&qp->q.lock); in ip_expire()
194 spin_unlock(&qp->q.lock); in ip_expire()
198 ipq_put(qp); in ip_expire()
225 static int ip_frag_too_far(struct ipq *qp) in ip_frag_too_far() argument
227 struct inet_peer *peer = qp->peer; in ip_frag_too_far()
228 unsigned int max = qp->q.fqdir->max_dist; in ip_frag_too_far()
236 start = qp->rid; in ip_frag_too_far()
238 qp->rid = end; in ip_frag_too_far()
240 rc = qp->q.fragments_tail && (end - start) > max; in ip_frag_too_far()
243 __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS); in ip_frag_too_far()
248 static int ip_frag_reinit(struct ipq *qp) in ip_frag_reinit() argument
252 if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) { in ip_frag_reinit()
253 refcount_inc(&qp->q.refcnt); in ip_frag_reinit()
257 sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments); in ip_frag_reinit()
258 sub_frag_mem_limit(qp->q.fqdir, sum_truesize); in ip_frag_reinit()
260 qp->q.flags = 0; in ip_frag_reinit()
261 qp->q.len = 0; in ip_frag_reinit()
262 qp->q.meat = 0; in ip_frag_reinit()
263 qp->q.rb_fragments = RB_ROOT; in ip_frag_reinit()
264 qp->q.fragments_tail = NULL; in ip_frag_reinit()
265 qp->q.last_run_head = NULL; in ip_frag_reinit()
266 qp->iif = 0; in ip_frag_reinit()
267 qp->ecn = 0; in ip_frag_reinit()
273 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) in ip_frag_queue() argument
275 struct net *net = qp->q.fqdir->net; in ip_frag_queue()
283 if (qp->q.flags & INET_FRAG_COMPLETE) in ip_frag_queue()
287 unlikely(ip_frag_too_far(qp)) && in ip_frag_queue()
288 unlikely(err = ip_frag_reinit(qp))) { in ip_frag_queue()
289 ipq_kill(qp); in ip_frag_queue()
309 if (end < qp->q.len || in ip_frag_queue()
310 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) in ip_frag_queue()
312 qp->q.flags |= INET_FRAG_LAST_IN; in ip_frag_queue()
313 qp->q.len = end; in ip_frag_queue()
320 if (end > qp->q.len) { in ip_frag_queue()
322 if (qp->q.flags & INET_FRAG_LAST_IN) in ip_frag_queue()
324 qp->q.len = end; in ip_frag_queue()
343 prev_tail = qp->q.fragments_tail; in ip_frag_queue()
344 err = inet_frag_queue_insert(&qp->q, skb, offset, end); in ip_frag_queue()
349 qp->iif = dev->ifindex; in ip_frag_queue()
351 qp->q.stamp = skb->tstamp; in ip_frag_queue()
352 qp->q.meat += skb->len; in ip_frag_queue()
353 qp->ecn |= ecn; in ip_frag_queue()
354 add_frag_mem_limit(qp->q.fqdir, skb->truesize); in ip_frag_queue()
356 qp->q.flags |= INET_FRAG_FIRST_IN; in ip_frag_queue()
360 if (fragsize > qp->q.max_size) in ip_frag_queue()
361 qp->q.max_size = fragsize; in ip_frag_queue()
364 fragsize > qp->max_df_size) in ip_frag_queue()
365 qp->max_df_size = fragsize; in ip_frag_queue()
367 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && in ip_frag_queue()
368 qp->q.meat == qp->q.len) { in ip_frag_queue()
372 err = ip_frag_reasm(qp, skb, prev_tail, dev); in ip_frag_queue()
375 inet_frag_kill(&qp->q); in ip_frag_queue()
390 inet_frag_kill(&qp->q); in ip_frag_queue()
397 static bool ip_frag_coalesce_ok(const struct ipq *qp) in ip_frag_coalesce_ok() argument
399 return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER; in ip_frag_coalesce_ok()
403 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, in ip_frag_reasm() argument
406 struct net *net = qp->q.fqdir->net; in ip_frag_reasm()
412 ipq_kill(qp); in ip_frag_reasm()
414 ecn = ip_frag_ecn_table[qp->ecn]; in ip_frag_reasm()
421 reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail); in ip_frag_reasm()
425 len = ip_hdrlen(skb) + qp->q.len; in ip_frag_reasm()
430 inet_frag_reasm_finish(&qp->q, skb, reasm_data, in ip_frag_reasm()
431 ip_frag_coalesce_ok(qp)); in ip_frag_reasm()
434 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); in ip_frag_reasm()
448 if (qp->max_df_size == qp->q.max_size) { in ip_frag_reasm()
458 qp->q.rb_fragments = RB_ROOT; in ip_frag_reasm()
459 qp->q.fragments_tail = NULL; in ip_frag_reasm()
460 qp->q.last_run_head = NULL; in ip_frag_reasm()
464 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); in ip_frag_reasm()
468 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); in ip_frag_reasm()
479 struct ipq *qp; in ip_defrag() local
485 qp = ip_find(net, ip_hdr(skb), user, vif); in ip_defrag()
486 if (qp) { in ip_defrag()
489 spin_lock(&qp->q.lock); in ip_defrag()
491 ret = ip_frag_queue(qp, skb); in ip_defrag()
493 spin_unlock(&qp->q.lock); in ip_defrag()
494 ipq_put(qp); in ip_defrag()
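
The densest cluster of matches above sits in ip_frag_queue(), where lines 309-324 do the total-length bookkeeping for the datagram being reassembled: a fragment may only extend qp->q.len until the final fragment (INET_FRAG_LAST_IN) pins it, after which any disagreeing end offset is treated as corruption. The following self-contained C sketch models just that decision; the struct and function names are hypothetical stand-ins rather than the kernel's, and only the logic mirrors the matched lines.

#include <stdbool.h>

#define FRAG_LAST_IN 0x1               /* models INET_FRAG_LAST_IN */

struct frag_queue_model {              /* hypothetical stand-in for the qp->q fields used here */
	unsigned int flags;
	unsigned int len;              /* known total length; fixed once the last fragment arrives */
};

/* Mirror of the end/len checks matched at lines 309-324: returns false when
 * a fragment's end offset contradicts what earlier fragments (or an
 * already-seen last fragment) established.
 */
bool frag_update_len(struct frag_queue_model *q,
                     unsigned int end, bool more_fragments)
{
	if (!more_fragments) {
		/* Final fragment: it must not end before data we already
		 * hold, and must agree with any previous final fragment. */
		if (end < q->len ||
		    ((q->flags & FRAG_LAST_IN) && end != q->len))
			return false;
		q->flags |= FRAG_LAST_IN;
		q->len = end;
	} else if (end > q->len) {
		/* Data beyond the recorded end is only acceptable while the
		 * last fragment has not been seen yet. */
		if (q->flags & FRAG_LAST_IN)
			return false;
		q->len = end;
	}
	return true;
}

In the kernel, the corresponding failure path discards the whole queue, which is what the inet_frag_kill(&qp->q) matches at lines 375 and 390 appear to serve; the model above simply reports the verdict to its caller.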