Searched refs: softnet_data (Results 1 – 7 of 7) sorted by relevance
net/core/net-procfs.c
  119  static u32 softnet_backlog_len(struct softnet_data *sd)  in softnet_backlog_len()
  125  static struct softnet_data *softnet_get_online(loff_t *pos)  in softnet_get_online()
  127  struct softnet_data *sd = NULL;  in softnet_get_online()
  131  sd = &per_cpu(softnet_data, *pos);  in softnet_get_online()
  155  struct softnet_data *sd = v;  in softnet_seq_show()
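These hits are the /proc/net seq_file iterator: softnet_get_online() maps a file position to the matching online CPU's softnet_data instance via per_cpu(). A minimal sketch of that lookup pattern, assuming the standard per-CPU and cpumask helpers; my_get_online() is a hypothetical stand-in for the kernel's static function:

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for softnet_get_online(): advance *pos to the
 * next online CPU and return that CPU's softnet_data, or NULL once the
 * position runs past the last possible CPU. */
static struct softnet_data *my_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids) {
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		}
		++*pos;
	}
	return sd;
}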
net/core/dev.c
  221   static inline void rps_lock(struct softnet_data *sd)  in rps_lock()
  228   static inline void rps_unlock(struct softnet_data *sd)  in rps_unlock()
  402   DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  403   EXPORT_PER_CPU_SYMBOL(softnet_data);
  3045  struct softnet_data *sd;  in __netif_reschedule()
  3049  sd = this_cpu_ptr(&softnet_data);  in __netif_reschedule()
  3113  skb->next = __this_cpu_read(softnet_data.completion_queue);  in __dev_kfree_skb_irq()
  3114  __this_cpu_write(softnet_data.completion_queue, skb);  in __dev_kfree_skb_irq()
  4276  static inline void ____napi_schedule(struct softnet_data *sd,  in ____napi_schedule()
  4334  per_cpu(softnet_data, next_cpu).input_queue_head;  in set_rps_cpu()
  [all …]
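net/core/dev.c both defines the per-CPU variable (lines 402–403) and shows the two local-CPU access idioms visible in the hits: this_cpu_ptr() to take a pointer to this CPU's instance, and __this_cpu_read()/__this_cpu_write() for single-field access from contexts where IRQs are already disabled. A condensed sketch of both, assuming the caller has IRQs off as __dev_kfree_skb_irq() does; push_completion() is a hypothetical name:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

/* One cache-aligned instance per CPU; EXPORT_PER_CPU_SYMBOL() makes it
 * visible to modules (mirrors lines 402-403 above). */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* Hypothetical helper modeled on __dev_kfree_skb_irq(): link the skb
 * onto this CPU's completion_queue. The caller must hold IRQs off so
 * the read-then-write pair cannot be split across CPUs. */
static void push_completion(struct sk_buff *skb)
{
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
}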
net/core/sysctl_net_core.c
  123  struct softnet_data *sd;  in flow_limit_cpu_sysctl()
  138  sd = &per_cpu(softnet_data, i);  in flow_limit_cpu_sysctl()
  170  sd = &per_cpu(softnet_data, i);  in flow_limit_cpu_sysctl()
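flow_limit_cpu_sysctl() is the cross-CPU view: per_cpu(softnet_data, i) inside a CPU loop reaches every CPU's instance from one place. A sketch of that traversal, assuming for_each_possible_cpu(); visit_all_softnet() is a hypothetical helper:

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Hypothetical traversal in the style of flow_limit_cpu_sysctl():
 * apply fn to each CPU's softnet_data. Remote instances may be in
 * active use by their owning CPU, so fn must only touch fields that
 * carry their own synchronization (RCU, locks, or atomics). */
static void visit_all_softnet(void (*fn)(struct softnet_data *sd, int cpu))
{
	int i;

	for_each_possible_cpu(i)
		fn(&per_cpu(softnet_data, i), i);
}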
net/core/netpoll.c
  263  struct softnet_data *sd = &get_cpu_var(softnet_data);  in zap_completion_queue()
  285  put_cpu_var(softnet_data);  in zap_completion_queue()
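zap_completion_queue() uses a third idiom: get_cpu_var() disables preemption and returns this CPU's copy, and put_cpu_var() re-enables it, so the task cannot migrate in between. A sketch of detaching the list inside that window and walking it afterwards, loosely following the netpoll code; drain_completion_queue() is a hypothetical name:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

/* Hypothetical drain in the style of zap_completion_queue(): detach
 * this CPU's completion_queue under the get/put_cpu_var() preemption
 * guard, then walk the detached list with preemption enabled again. */
static void drain_completion_queue(void)
{
	struct sk_buff *clist = NULL;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
	}
	put_cpu_var(softnet_data);  /* sd must not be used past this point */

	while (clist) {
		struct sk_buff *skb = clist;

		clist = clist->next;
		/* completion-queue skbs already have a zero refcount,
		 * so free them directly */
		__kfree_skb(skb);
	}
}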
net/xfrm/xfrm_device.c
  117  struct softnet_data *sd;  in validate_xmit_xfrm()
  140  sd = this_cpu_ptr(&softnet_data);  in validate_xmit_xfrm()
  334  struct softnet_data *sd;  in xfrm_dev_resume()
  347  sd = this_cpu_ptr(&softnet_data);  in xfrm_dev_resume()
  356  void xfrm_dev_backlog(struct softnet_data *sd)  in xfrm_dev_backlog()
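The xfrm hits are the deferred-transmit backlog: when a crypto-offloaded packet cannot finish transmission inline, it is parked on this CPU's sd->xfrm_backlog queue and NET_TX_SOFTIRQ is raised; xfrm_dev_backlog(sd) later drains the queue from the softirq. A sketch of the enqueue half, assuming the xfrm_backlog skb queue is present in this tree; defer_to_xfrm_backlog() is a hypothetical name:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical enqueue modeled on xfrm_dev_resume(): with IRQs off the
 * task cannot migrate, so the this_cpu_ptr() result stays valid while
 * we queue the skb and kick the TX softirq that will later call
 * xfrm_dev_backlog() on this CPU. */
static void defer_to_xfrm_backlog(struct sk_buff *skb)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	skb_queue_tail(&sd->xfrm_backlog, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}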
include/linux/netdevice.h
  3235  struct softnet_data {  struct
  3244  struct softnet_data *rps_ipi_list;  argument
  3268  struct softnet_data *rps_ipi_next;  argument
  3278  static inline void input_queue_head_incr(struct softnet_data *sd)  in input_queue_head_incr() argument
  3285  static inline void input_queue_tail_incr_save(struct softnet_data *sd,  in input_queue_tail_incr_save()
  3293  DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  3297  return this_cpu_read(softnet_data.xmit.recursion);  in dev_recursion_level()
  3303  return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >  in dev_xmit_recursion()
  3309  __this_cpu_inc(softnet_data.xmit.recursion);  in dev_xmit_recursion_inc()
  3314  __this_cpu_dec(softnet_data.xmit.recursion);  in dev_xmit_recursion_dec()
  [all …]
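netdevice.h carries the struct definition, the DECLARE_PER_CPU_ALIGNED() counterpart to the DEFINE in dev.c, and the per-CPU transmit-recursion accessors (dev_xmit_recursion()/..._inc()/..._dec()) that keep stacked devices from re-entering the xmit path without bound. A sketch of how a caller combines them, assuming a tree like this one where the helpers live in netdevice.h; guarded_xmit() is hypothetical:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical wrapper showing the recursion-guard idiom: refuse to
 * transmit if this CPU is already nested too deep in dev_queue_xmit()
 * (e.g. tunnels stacked on tunnels), otherwise bump the per-CPU
 * counter around the call. */
static int guarded_xmit(struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		kfree_skb(skb);
		return -ELOOP;
	}

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();
	return ret;
}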
include/net/xfrm.h
  1881  void xfrm_dev_backlog(struct softnet_data *sd);
  1938  static inline void xfrm_dev_backlog(struct softnet_data *sd)  in xfrm_dev_backlog()
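The two xfrm.h hits are the usual config-gated pair: a real prototype when the offload code is compiled in (line 1881) and an empty static inline stub otherwise (line 1938), so net/core can call xfrm_dev_backlog() without #ifdefs. The reconstructed shape of that idiom (the guard is CONFIG_XFRM_OFFLOAD in mainline):

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_backlog(struct softnet_data *sd);
#else
/* No-op stub keeps callers free of #ifdef clutter; the compiler
 * inlines it away when offload support is not built. */
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}
#endif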