Searched refs:gfp (Results 1 – 25 of 100) sorted by relevance

/net/wireless/
nl80211.h
60 const u8 *buf, size_t len, gfp_t gfp);
67 bool reconnect, gfp_t gfp);
71 bool reconnect, gfp_t gfp);
74 const u8 *addr, gfp_t gfp);
77 const u8 *addr, gfp_t gfp);
81 gfp_t gfp);
84 struct cfg80211_roam_info *info, gfp_t gfp);
96 int key_id, const u8 *tsc, gfp_t gfp);
105 gfp_t gfp);
109 struct cfg80211_rx_info *info, gfp_t gfp);
[all …]
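The prototypes above all take a trailing gfp_t so that the caller, not the helper, decides the allocation context. The following is a minimal sketch of that convention; my_report_event is a hypothetical name, not taken from nl80211.h, and the body only illustrates why the flag is threaded through.

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Hypothetical event reporter following the cfg80211-style convention:
 * the caller passes GFP_KERNEL from process context, or GFP_ATOMIC when
 * it cannot sleep (IRQ handler, spinlock held). */
static int my_report_event(const u8 *buf, size_t len, gfp_t gfp)
{
        struct sk_buff *skb;

        skb = alloc_skb(len, gfp);      /* honours the caller's context */
        if (!skb)
                return -ENOMEM;

        skb_put_data(skb, buf, len);
        /* ... hand the skb to the notification machinery ... */
        kfree_skb(skb);                 /* placeholder: normally consumed, not freed */
        return 0;
}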
/net/sctp/
ulpevent.c
49 gfp_t gfp) in sctp_ulpevent_new() argument
54 skb = alloc_skb(size, gfp); in sctp_ulpevent_new()
117 __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) in sctp_ulpevent_make_assoc_change() argument
131 sizeof(struct sctp_assoc_change), 0, gfp); in sctp_ulpevent_make_assoc_change()
149 MSG_NOTIFICATION, gfp); in sctp_ulpevent_make_assoc_change()
244 int flags, int state, int error, gfp_t gfp) in sctp_ulpevent_make_peer_addr_change() argument
251 MSG_NOTIFICATION, gfp); in sctp_ulpevent_make_peer_addr_change()
376 gfp_t gfp) in sctp_ulpevent_make_remote_error() argument
395 skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp); in sctp_ulpevent_make_remote_error()
432 __u16 flags, __u32 error, gfp_t gfp) in sctp_ulpevent_make_send_failed() argument
[all …]
auth.c
58 static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) in sctp_auth_create_key() argument
67 key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp); in sctp_auth_create_key()
79 struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) in sctp_auth_shkey_create() argument
84 new = kzalloc(sizeof(struct sctp_shared_key), gfp); in sctp_auth_shkey_create()
188 gfp_t gfp) in sctp_auth_make_key_vector() argument
202 new = sctp_auth_create_key(len, gfp); in sctp_auth_make_key_vector()
223 gfp_t gfp) in sctp_auth_make_local_vector() argument
228 (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs, gfp); in sctp_auth_make_local_vector()
234 gfp_t gfp) in sctp_auth_make_peer_vector() argument
239 gfp); in sctp_auth_make_peer_vector()
[all …]
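sctp_auth_create_key() above sizes a single allocation for the header plus the key bytes. A generic sketch of that trailing-buffer pattern follows; struct my_key and my_key_create are hypothetical, assuming a flexible array member carries the payload.

#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_key {
        refcount_t refcnt;
        __u32 len;
        __u8 data[];                    /* key bytes follow the header */
};

static struct my_key *my_key_create(__u32 key_len, gfp_t gfp)
{
        struct my_key *key;

        /* one allocation covers header and payload, in the caller's context */
        key = kmalloc(sizeof(*key) + key_len, gfp);
        if (!key)
                return NULL;

        refcount_set(&key->refcnt, 1);
        key->len = key_len;
        return key;
}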
bind_addr.c
35 gfp_t gfp, int flags);
45 enum sctp_scope scope, gfp_t gfp, in sctp_bind_addr_copy() argument
57 gfp, flags); in sctp_bind_addr_copy()
69 SCTP_SCOPE_LINK, gfp, in sctp_bind_addr_copy()
96 gfp_t gfp) in sctp_bind_addr_dup() argument
106 1, gfp); in sctp_bind_addr_dup()
145 int new_size, __u8 addr_state, gfp_t gfp) in sctp_add_bind_addr() argument
150 addr = kzalloc(sizeof(*addr), gfp); in sctp_add_bind_addr()
213 gfp_t gfp) in sctp_bind_addrs_to_raw() argument
240 retval.v = kmalloc(len, gfp); in sctp_bind_addrs_to_raw()
[all …]
stream_sched_prio.c
41 struct sctp_stream *stream, int prio, gfp_t gfp) in sctp_sched_prio_new_head() argument
45 p = kmalloc(sizeof(*p), gfp); in sctp_sched_prio_new_head()
59 struct sctp_stream *stream, int prio, gfp_t gfp) in sctp_sched_prio_get_head() argument
90 return sctp_sched_prio_new_head(stream, prio, gfp); in sctp_sched_prio_get_head()
164 __u16 prio, gfp_t gfp) in sctp_sched_prio_set() argument
175 prio_head = sctp_sched_prio_get_head(stream, prio, gfp); in sctp_sched_prio_set()
203 gfp_t gfp) in sctp_sched_prio_init_sid() argument
206 return sctp_sched_prio_set(stream, sid, 0, gfp); in sctp_sched_prio_init_sid()
sm_sideeffect.c
48 gfp_t gfp);
57 gfp_t gfp);
697 gfp_t gfp) in sctp_cmd_process_init() argument
706 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) in sctp_cmd_process_init()
1109 struct sctp_datamsg *msg, gfp_t gfp) in sctp_cmd_send_msg() argument
1114 sctp_outq_tail(&asoc->outqueue, chunk, gfp); in sctp_cmd_send_msg()
1147 void *event_arg, gfp_t gfp) in sctp_do_sm() argument
1172 &commands, gfp); in sctp_do_sm()
1189 gfp_t gfp) in sctp_side_effects() argument
1202 commands, gfp))) in sctp_side_effects()
[all …]
outqueue.c
56 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
281 void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp) in sctp_outq_tail() argument
313 sctp_outq_flush(q, 0, gfp); in sctp_outq_tail()
596 int rtx_timeout, int *start_timer, gfp_t gfp) in __sctp_outq_flush_rtx() argument
672 sctp_packet_transmit(pkt, gfp); in __sctp_outq_flush_rtx()
677 error = sctp_packet_transmit(pkt, gfp); in __sctp_outq_flush_rtx()
693 error = sctp_packet_transmit(pkt, gfp); in __sctp_outq_flush_rtx()
703 error = sctp_packet_transmit(pkt, gfp); in __sctp_outq_flush_rtx()
756 void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp) in sctp_outq_uncork() argument
761 sctp_outq_flush(q, 0, gfp); in sctp_outq_uncork()
[all …]
stream_sched.c
26 __u16 value, gfp_t gfp) in sctp_sched_fcfs_set() argument
44 gfp_t gfp) in sctp_sched_fcfs_init_sid() argument
205 __u16 value, gfp_t gfp) in sctp_sched_set_value() argument
218 return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp); in sctp_sched_set_value()
262 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) in sctp_sched_init_sid() argument
268 return sched->init_sid(stream, sid, gfp); in sctp_sched_init_sid()
endpointola.c
43 gfp_t gfp) in sctp_endpoint_init() argument
48 ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); in sctp_endpoint_init()
55 if (sctp_auth_init(ep, gfp)) in sctp_endpoint_init()
98 null_key = sctp_auth_shkey_create(0, gfp); in sctp_endpoint_init()
129 struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) in sctp_endpoint_new() argument
134 ep = kzalloc(sizeof(*ep), gfp); in sctp_endpoint_new()
138 if (!sctp_endpoint_init(ep, sk, gfp)) in sctp_endpoint_new()
sm_make_chunk.c
51 gfp_t gfp);
53 __u8 flags, int paylen, gfp_t gfp);
56 gfp_t gfp);
66 gfp_t gfp);
208 gfp_t gfp, int vparam_len) in sctp_make_init() argument
233 addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); in sctp_make_init()
319 retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize, gfp); in sctp_make_init()
381 gfp_t gfp, int unkparam_len) in sctp_make_init_ack() argument
400 addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); in sctp_make_init_ack()
472 retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp); in sctp_make_init_ack()
[all …]
associola.c
55 enum sctp_scope scope, gfp_t gfp) in sctp_association_init() argument
232 if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp)) in sctp_association_init()
256 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) in sctp_association_init()
289 enum sctp_scope scope, gfp_t gfp) in sctp_association_new() argument
293 asoc = kzalloc(sizeof(*asoc), gfp); in sctp_association_new()
297 if (!sctp_association_init(asoc, ep, sk, scope, gfp)) in sctp_association_new()
586 const gfp_t gfp, in sctp_assoc_add_peer() argument
618 peer = sctp_transport_new(asoc->base.net, addr, gfp); in sctp_assoc_add_peer()
1574 enum sctp_scope scope, gfp_t gfp) in sctp_assoc_set_bind_addr_from_ep() argument
1593 scope, gfp, flags); in sctp_assoc_set_bind_addr_from_ep()
[all …]
output.c
181 int one_packet, gfp_t gfp) in sctp_packet_transmit_chunk() argument
193 error = sctp_packet_transmit(packet, gfp); in sctp_packet_transmit_chunk()
436 struct sk_buff *head, int gso, gfp_t gfp) in sctp_packet_pack() argument
471 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp); in sctp_packet_pack()
520 packet->auth->shkey, gfp); in sctp_packet_pack()
568 int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) in sctp_packet_transmit() argument
598 MAX_HEADER, gfp); in sctp_packet_transmit()
620 pkt_count = sctp_packet_pack(packet, head, gso, gfp); in sctp_packet_transmit()
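The output.c hits show the transmit path allocating outgoing skbs with MAX_HEADER of extra room so lower layers can prepend their headers without a reallocation. Below is a hedged, generic sketch of that allocation step, not the SCTP code itself; my_alloc_tx_skb is a hypothetical name.

#include <linux/gfp.h>
#include <linux/netdevice.h>    /* MAX_HEADER */
#include <linux/skbuff.h>

static struct sk_buff *my_alloc_tx_skb(unsigned int payload, gfp_t gfp)
{
        struct sk_buff *skb;

        skb = alloc_skb(payload + MAX_HEADER, gfp);
        if (!skb)
                return NULL;

        /* leave headroom for transport/IP/link headers added further down */
        skb_reserve(skb, MAX_HEADER);
        return skb;
}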
stream_interleave.c
27 int len, __u8 flags, gfp_t gfp) in sctp_make_idatafrag_empty() argument
38 retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp); in sctp_make_idatafrag_empty()
826 struct sctp_chunk *chunk, gfp_t gfp) in sctp_ulpevent_idata() argument
832 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); in sctp_ulpevent_idata()
932 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp) in sctp_intl_start_pd() argument
961 gfp_t gfp) in sctp_renege_events() argument
980 if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) in sctp_renege_events()
981 sctp_intl_start_pd(ulpq, gfp); in sctp_renege_events()
987 __u32 mid, __u16 flags, gfp_t gfp) in sctp_intl_stream_abort_pd() argument
997 sid, mid, flags, gfp); in sctp_intl_stream_abort_pd()
[all …]
ulpqueue.c
86 gfp_t gfp) in sctp_ulpq_tail_data() argument
93 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); in sctp_ulpq_tail_data()
1026 gfp_t gfp) in sctp_ulpq_partial_delivery() argument
1076 gfp_t gfp) in sctp_ulpq_renege() argument
1093 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); in sctp_ulpq_renege()
1099 sctp_ulpq_partial_delivery(ulpq, gfp); in sctp_ulpq_renege()
1112 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) in sctp_ulpq_abort_pd() argument
1127 0, 0, 0, gfp); in sctp_ulpq_abort_pd()
/net/rxrpc/
conn_client.c
52 gfp_t gfp) in rxrpc_get_client_connection_id() argument
59 idr_preload(gfp); in rxrpc_get_client_connection_id()
119 gfp_t gfp) in rxrpc_alloc_bundle() argument
123 bundle = kzalloc(sizeof(*bundle), gfp); in rxrpc_alloc_bundle()
164 rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) in rxrpc_alloc_client_connection() argument
172 conn = rxrpc_alloc_connection(gfp); in rxrpc_alloc_client_connection()
185 ret = rxrpc_get_client_connection_id(conn, gfp); in rxrpc_alloc_client_connection()
267 gfp_t gfp) in rxrpc_look_up_bundle() argument
279 return rxrpc_alloc_bundle(cp, gfp); in rxrpc_look_up_bundle()
305 candidate = rxrpc_alloc_bundle(cp, gfp); in rxrpc_look_up_bundle()
[all …]
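rxrpc_get_client_connection_id() pairs the caller-supplied gfp with idr_preload(), since the actual ID allocation happens under a lock where sleeping is not allowed. A minimal sketch of that standard IDR pattern follows; my_idr, my_lock and my_assign_id are placeholders.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_assign_id(void *obj, gfp_t gfp)
{
        int id;

        idr_preload(gfp);               /* may sleep here if gfp allows it */
        spin_lock(&my_lock);
        /* inside the lock only GFP_NOWAIT is safe; the preload covers the need */
        id = idr_alloc_cyclic(&my_idr, obj, 1, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;                      /* >= 1 on success, negative errno on failure */
}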
call_object.c
122 struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, in rxrpc_alloc_call() argument
128 call = kmem_cache_zalloc(rxrpc_call_jar, gfp); in rxrpc_alloc_call()
134 gfp); in rxrpc_alloc_call()
138 call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp); in rxrpc_alloc_call()
196 gfp_t gfp, in rxrpc_alloc_client_call() argument
204 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_alloc_client_call()
239 static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp) in rxrpc_get_call_slot() argument
273 gfp_t gfp, in rxrpc_new_client_call() argument
287 limiter = rxrpc_get_call_slot(p, gfp); in rxrpc_new_client_call()
293 call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id); in rxrpc_new_client_call()
[all …]
call_accept.c
38 unsigned long user_call_ID, gfp_t gfp, in rxrpc_service_prealloc_one() argument
73 struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp); in rxrpc_service_prealloc_one()
86 conn = rxrpc_prealloc_service_connection(rxnet, gfp); in rxrpc_service_prealloc_one()
100 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_service_prealloc_one()
163 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) in rxrpc_service_prealloc() argument
168 b = kzalloc(sizeof(struct rxrpc_backlog), gfp); in rxrpc_service_prealloc()
478 unsigned long user_call_ID, gfp_t gfp, in rxrpc_kernel_charge_accept() argument
489 gfp, debug_id); in rxrpc_kernel_charge_accept()
peer_object.c
210 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) in rxrpc_alloc_peer() argument
217 peer = kzalloc(sizeof(struct rxrpc_peer), gfp); in rxrpc_alloc_peer()
286 gfp_t gfp) in rxrpc_create_peer() argument
292 peer = rxrpc_alloc_peer(local, gfp); in rxrpc_create_peer()
333 struct sockaddr_rxrpc *srx, gfp_t gfp) in rxrpc_lookup_peer() argument
352 candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp); in rxrpc_lookup_peer()
/net/rds/
tcp_recv.c
151 gfp_t gfp; member
174 arg->gfp); in rds_tcp_data_recv()
218 clone = pskb_extract(skb, offset, to_copy, arg->gfp); in rds_tcp_data_recv()
245 arg->gfp); in rds_tcp_data_recv()
262 static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp) in rds_tcp_read_sock() argument
271 arg.gfp = gfp; in rds_tcp_read_sock()
277 rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp, in rds_tcp_read_sock()
page.c
69 gfp_t gfp) in rds_page_remainder_alloc() argument
76 gfp |= __GFP_HIGHMEM; in rds_page_remainder_alloc()
80 page = alloc_page(gfp); in rds_page_remainder_alloc()
122 page = alloc_page(gfp); in rds_page_remainder_alloc()
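rds_page_remainder_alloc() ORs __GFP_HIGHMEM into the caller's mask because the page is only touched through temporary kernel mappings. A rough sketch of that pattern, assuming kmap_local_page() is available in the target kernel; my_alloc_data_page is a hypothetical name.

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

static struct page *my_alloc_data_page(gfp_t gfp)
{
        struct page *page;
        void *addr;

        gfp |= __GFP_HIGHMEM;           /* page need not be permanently mapped */
        page = alloc_page(gfp);
        if (!page)
                return NULL;

        addr = kmap_local_page(page);   /* short-lived mapping to initialise it */
        memset(addr, 0, PAGE_SIZE);
        kunmap_local(addr);
        return page;
}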
ib_recv.c
101 static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp) in rds_ib_recv_alloc_cache() argument
106 cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp); in rds_ib_recv_alloc_cache()
121 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp) in rds_ib_recv_alloc_caches() argument
125 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp); in rds_ib_recv_alloc_caches()
127 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp); in rds_ib_recv_alloc_caches()
308 struct rds_ib_recv_work *recv, gfp_t gfp) in rds_ib_recv_refill_one() argument
313 gfp_t slab_mask = gfp; in rds_ib_recv_refill_one()
314 gfp_t page_mask = gfp; in rds_ib_recv_refill_one()
316 if (gfp & __GFP_DIRECT_RECLAIM) { in rds_ib_recv_refill_one()
382 void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp) in rds_ib_recv_refill() argument
[all …]
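rds_ib_recv_refill_one() derives separate slab and page masks from whether the incoming gfp allows blocking. The sketch below shows that decision via gfpflags_allow_blocking(); the concrete masks and my_pick_masks are illustrative assumptions, not copied from ib_recv.c.

#include <linux/gfp.h>

static void my_pick_masks(gfp_t gfp, gfp_t *slab_mask, gfp_t *page_mask)
{
        /* default: stay as restrictive as the caller asked */
        *slab_mask = gfp;
        *page_mask = gfp;

        /* if the caller may block (__GFP_DIRECT_RECLAIM set), upgrade to
         * full-strength masks for a better chance of refilling the ring */
        if (gfpflags_allow_blocking(gfp)) {
                *slab_mask = GFP_KERNEL;
                *page_mask = GFP_HIGHUSER;
        }
}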
/net/core/
page_pool.c
252 gfp_t gfp) in __page_pool_alloc_page_order() argument
256 gfp |= __GFP_COMP; in __page_pool_alloc_page_order()
257 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
278 gfp_t gfp) in __page_pool_alloc_pages_slow() argument
288 return __page_pool_alloc_page_order(pool, gfp); in __page_pool_alloc_pages_slow()
297 nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache); in __page_pool_alloc_pages_slow()
333 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages() argument
343 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
586 unsigned int size, gfp_t gfp) in page_pool_alloc_frag() argument
605 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
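__page_pool_alloc_page_order() adds __GFP_COMP before asking for a high-order page on the pool's NUMA node, since a multi-page block handed around as a unit should be a compound page. A minimal, generic sketch of that allocation; my_alloc_order is hypothetical and the pool's real parameters are assumed away.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *my_alloc_order(int nid, unsigned int order, gfp_t gfp)
{
        if (order)
                gfp |= __GFP_COMP;      /* treat the block as one compound page */

        /* prefer the given NUMA node; fall back per the gfp policy */
        return alloc_pages_node(nid, gfp, order);
}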
xdp.c
237 static int __mem_id_cyclic_get(gfp_t gfp) in __mem_id_cyclic_get() argument
243 id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp); in __mem_id_cyclic_get()
275 gfp_t gfp = GFP_KERNEL; in __xdp_reg_mem_model() local
301 xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp); in __xdp_reg_mem_model()
306 id = __mem_id_cyclic_get(gfp); in __xdp_reg_mem_model()
552 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp) in xdp_alloc_skb_bulk() argument
554 n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, in xdp_alloc_skb_bulk()
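xdp_alloc_skb_bulk() fills an array of skb heads with a single kmem_cache_alloc_bulk() call instead of looping over individual allocations. Here is a sketch of bulk allocation against a private cache; my_cache, my_obj and my_alloc_batch are hypothetical, whereas the XDP code uses the skbuff head cache.

#include <linux/gfp.h>
#include <linux/slab.h>

struct my_obj { int val; };

static struct kmem_cache *my_cache;

static int my_alloc_batch(void **objs, int n, gfp_t gfp)
{
        if (!my_cache)
                my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                             0, 0, NULL);
        if (!my_cache)
                return -ENOMEM;

        /* returns the number of objects actually allocated (0 on failure) */
        n = kmem_cache_alloc_bulk(my_cache, gfp, n, objs);
        return n ? n : -ENOMEM;
}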
hwbm.c
23 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) in hwbm_pool_refill() argument
31 buf = kmalloc(frag_size, gfp); in hwbm_pool_refill()
/net/ceph/
buffer.c
12 struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) in ceph_buffer_new() argument
16 b = kmalloc(sizeof(*b), gfp); in ceph_buffer_new()
20 b->vec.iov_base = ceph_kvmalloc(len, gfp); in ceph_buffer_new()
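ceph_buffer_new() splits the allocation: a small kmalloc for the descriptor and a potentially large, possibly vmalloc-backed buffer for the data. A generic sketch of that split using kvmalloc()/kvfree(); struct my_buffer and my_buffer_new are hypothetical, while net/ceph uses its own ceph_kvmalloc helper.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_buffer {
        void *data;
        size_t len;
};

static struct my_buffer *my_buffer_new(size_t len, gfp_t gfp)
{
        struct my_buffer *b;

        b = kmalloc(sizeof(*b), gfp);   /* small, physically contiguous */
        if (!b)
                return NULL;

        b->data = kvmalloc(len, gfp);   /* large; may fall back to vmalloc */
        if (!b->data) {
                kfree(b);
                return NULL;
        }
        b->len = len;
        return b;
}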
