• Home
  • Raw
  • Download

Lines matching references to: qs

30 static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)  in queue_stack_map_is_empty()  argument
32 return qs->head == qs->tail; in queue_stack_map_is_empty()
35 static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) in queue_stack_map_is_full() argument
37 u32 head = qs->head + 1; in queue_stack_map_is_full()
39 if (unlikely(head >= qs->size)) in queue_stack_map_is_full()
42 return head == qs->tail; in queue_stack_map_is_full()
71 struct bpf_queue_stack *qs; in queue_stack_map_alloc() local
75 cost = queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
81 qs = bpf_map_area_alloc(queue_size, numa_node); in queue_stack_map_alloc()
82 if (!qs) { in queue_stack_map_alloc()
87 memset(qs, 0, sizeof(*qs)); in queue_stack_map_alloc()
89 bpf_map_init_from_attr(&qs->map, attr); in queue_stack_map_alloc()
91 bpf_map_charge_move(&qs->map.memory, &mem); in queue_stack_map_alloc()
92 qs->size = size; in queue_stack_map_alloc()
94 raw_spin_lock_init(&qs->lock); in queue_stack_map_alloc()
96 return &qs->map; in queue_stack_map_alloc()
102 struct bpf_queue_stack *qs = bpf_queue_stack(map); in queue_stack_map_free() local
104 bpf_map_area_free(qs); in queue_stack_map_free()
109 struct bpf_queue_stack *qs = bpf_queue_stack(map); in __queue_map_get() local
114 raw_spin_lock_irqsave(&qs->lock, flags); in __queue_map_get()
116 if (queue_stack_map_is_empty(qs)) { in __queue_map_get()
117 memset(value, 0, qs->map.value_size); in __queue_map_get()
122 ptr = &qs->elements[qs->tail * qs->map.value_size]; in __queue_map_get()
123 memcpy(value, ptr, qs->map.value_size); in __queue_map_get()
126 if (unlikely(++qs->tail >= qs->size)) in __queue_map_get()
127 qs->tail = 0; in __queue_map_get()
131 raw_spin_unlock_irqrestore(&qs->lock, flags); in __queue_map_get()
138 struct bpf_queue_stack *qs = bpf_queue_stack(map); in __stack_map_get() local
144 raw_spin_lock_irqsave(&qs->lock, flags); in __stack_map_get()
146 if (queue_stack_map_is_empty(qs)) { in __stack_map_get()
147 memset(value, 0, qs->map.value_size); in __stack_map_get()
152 index = qs->head - 1; in __stack_map_get()
153 if (unlikely(index >= qs->size)) in __stack_map_get()
154 index = qs->size - 1; in __stack_map_get()
156 ptr = &qs->elements[index * qs->map.value_size]; in __stack_map_get()
157 memcpy(value, ptr, qs->map.value_size); in __stack_map_get()
160 qs->head = index; in __stack_map_get()
163 raw_spin_unlock_irqrestore(&qs->lock, flags); in __stack_map_get()
195 struct bpf_queue_stack *qs = bpf_queue_stack(map); in queue_stack_map_push_elem() local
209 raw_spin_lock_irqsave(&qs->lock, irq_flags); in queue_stack_map_push_elem()
211 if (queue_stack_map_is_full(qs)) { in queue_stack_map_push_elem()
217 if (unlikely(++qs->tail >= qs->size)) in queue_stack_map_push_elem()
218 qs->tail = 0; in queue_stack_map_push_elem()
221 dst = &qs->elements[qs->head * qs->map.value_size]; in queue_stack_map_push_elem()
222 memcpy(dst, value, qs->map.value_size); in queue_stack_map_push_elem()
224 if (unlikely(++qs->head >= qs->size)) in queue_stack_map_push_elem()
225 qs->head = 0; in queue_stack_map_push_elem()
228 raw_spin_unlock_irqrestore(&qs->lock, irq_flags); in queue_stack_map_push_elem()