
Searched refs:ctx (Results 1 – 25 of 31) sorted by relevance


/fs/
timerfd.c
41 struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr); in timerfd_tmrproc() local
44 spin_lock_irqsave(&ctx->wqh.lock, flags); in timerfd_tmrproc()
45 ctx->expired = 1; in timerfd_tmrproc()
46 ctx->ticks++; in timerfd_tmrproc()
47 wake_up_locked(&ctx->wqh); in timerfd_tmrproc()
48 spin_unlock_irqrestore(&ctx->wqh.lock, flags); in timerfd_tmrproc()
53 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) in timerfd_get_remaining() argument
57 remaining = hrtimer_expires_remaining(&ctx->tmr); in timerfd_get_remaining()
61 static void timerfd_setup(struct timerfd_ctx *ctx, int flags, in timerfd_setup() argument
71 ctx->expired = 0; in timerfd_setup()
[all …]
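Aside (not part of the hit list): the ctx->ticks counter bumped in timerfd_tmrproc() above is what a userspace read() on a timerfd returns. A minimal, illustrative consumer; the clock and timeout values are arbitrary and error handling is trimmed:

#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };  /* one-shot, fires after 1s */
	uint64_t ticks;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0) {
		perror("timerfd_create");
		return 1;
	}
	timerfd_settime(fd, 0, &its, NULL);
	/* read() blocks until expiry and returns the expiration count,
	 * i.e. the ticks value accumulated by the kernel side above */
	read(fd, &ticks, sizeof(ticks));
	printf("expirations: %llu\n", (unsigned long long)ticks);
	close(fd);
	return 0;
}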
eventfd.c
43 struct eventfd_ctx *ctx = file->private_data; in eventfd_signal() local
48 spin_lock_irqsave(&ctx->wqh.lock, flags); in eventfd_signal()
49 if (ULLONG_MAX - ctx->count < n) in eventfd_signal()
50 n = (int) (ULLONG_MAX - ctx->count); in eventfd_signal()
51 ctx->count += n; in eventfd_signal()
52 if (waitqueue_active(&ctx->wqh)) in eventfd_signal()
53 wake_up_locked(&ctx->wqh); in eventfd_signal()
54 spin_unlock_irqrestore(&ctx->wqh.lock, flags); in eventfd_signal()
67 struct eventfd_ctx *ctx = file->private_data; in eventfd_poll() local
71 poll_wait(file, &ctx->wqh, wait); in eventfd_poll()
[all …]
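Aside (not part of the hit list): eventfd_signal() above adds n to ctx->count and wakes any waiters; userspace drives the same counter with plain write()/read() of a 64-bit value. A small illustrative sketch, error handling trimmed:

#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 3;
	int efd = eventfd(0, 0);          /* counter starts at 0 */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}
	write(efd, &val, sizeof(val));    /* adds 3, the userspace analogue of eventfd_signal() */
	read(efd, &val, sizeof(val));     /* returns 3 and resets the counter to 0 */
	printf("count read back: %llu\n", (unsigned long long)val);
	close(efd);
	return 0;
}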
aio.c
82 static void aio_free_ring(struct kioctx *ctx) in aio_free_ring() argument
84 struct aio_ring_info *info = &ctx->ring_info; in aio_free_ring()
91 down_write(&ctx->mm->mmap_sem); in aio_free_ring()
92 do_munmap(ctx->mm, info->mmap_base, info->mmap_size); in aio_free_ring()
93 up_write(&ctx->mm->mmap_sem); in aio_free_ring()
102 static int aio_setup_ring(struct kioctx *ctx) in aio_setup_ring() argument
105 struct aio_ring_info *info = &ctx->ring_info; in aio_setup_ring()
106 unsigned nr_events = ctx->max_reqs; in aio_setup_ring()
132 down_write(&ctx->mm->mmap_sem); in aio_setup_ring()
137 up_write(&ctx->mm->mmap_sem); in aio_setup_ring()
[all …]
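Aside (not part of the hit list): the kioctx whose ring is built in aio_setup_ring() above backs a kernel AIO context that userspace creates with the raw io_setup()/io_destroy() syscalls (glibc provides no wrappers, so syscall() is used directly). An illustrative sketch; the 128 in-flight slots are an arbitrary choice:

#include <linux/aio_abi.h>   /* aio_context_t */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;                       /* must be zeroed before io_setup() */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {  /* room for 128 in-flight requests */
		perror("io_setup");
		return 1;
	}
	/* ... queue iocbs with io_submit(), reap completions with io_getevents() ... */
	syscall(SYS_io_destroy, ctx);
	return 0;
}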
signalfd.c
44 struct signalfd_ctx *ctx = file->private_data; in signalfd_poll() local
50 if (next_signal(&current->pending, &ctx->sigmask) || in signalfd_poll()
52 &ctx->sigmask)) in signalfd_poll()
128 static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info, in signalfd_dequeue() argument
135 ret = dequeue_signal(current, &ctx->sigmask, info); in signalfd_dequeue()
149 ret = dequeue_signal(current, &ctx->sigmask, info); in signalfd_dequeue()
176 struct signalfd_ctx *ctx = file->private_data; in signalfd_read() local
188 ret = signalfd_dequeue(ctx, &info, nonblock); in signalfd_read()
212 struct signalfd_ctx *ctx; in SYSCALL_DEFINE4() local
228 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); in SYSCALL_DEFINE4()
[all …]
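Aside (not part of the hit list): signalfd_dequeue() above pulls a pending signal off the task using the sigmask stored in the ctx; userspace receives it as a struct signalfd_siginfo read from the descriptor. A minimal sketch that waits for SIGINT (Ctrl-C), error handling trimmed:

#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo si;
	int sfd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);  /* signal must be blocked to arrive via the fd */

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0) {
		perror("signalfd");
		return 1;
	}
	read(sfd, &si, sizeof(si));           /* dequeues one pending signal */
	printf("got signal %u\n", si.ssi_signo);
	close(sfd);
	return 0;
}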
/fs/cifs/
md5.c
48 cifs_MD5_init(struct MD5Context *ctx) in cifs_MD5_init() argument
50 ctx->buf[0] = 0x67452301; in cifs_MD5_init()
51 ctx->buf[1] = 0xefcdab89; in cifs_MD5_init()
52 ctx->buf[2] = 0x98badcfe; in cifs_MD5_init()
53 ctx->buf[3] = 0x10325476; in cifs_MD5_init()
55 ctx->bits[0] = 0; in cifs_MD5_init()
56 ctx->bits[1] = 0; in cifs_MD5_init()
64 cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len) in cifs_MD5_update() argument
70 t = ctx->bits[0]; in cifs_MD5_update()
71 if ((ctx->bits[0] = t + ((__u32) len << 3)) < t) in cifs_MD5_update()
[all …]
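Aside (not part of the hit list): the four words loaded in cifs_MD5_init() are the standard RFC 1321 initial chaining values, and the two bits[] words track the running message length. A generic sketch of the same initialization; the struct and function names here are made up, not the CIFS ones:

struct md5_state {
	unsigned int buf[4];   /* A, B, C, D chaining variables */
	unsigned int bits[2];  /* 64-bit message length, in bits */
};

void md5_state_init(struct md5_state *s)
{
	s->buf[0] = 0x67452301;   /* A */
	s->buf[1] = 0xefcdab89;   /* B */
	s->buf[2] = 0x98badcfe;   /* C */
	s->buf[3] = 0x10325476;   /* D */
	s->bits[0] = 0;
	s->bits[1] = 0;
}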
asn1.c
106 asn1_open(struct asn1_ctx *ctx, unsigned char *buf, unsigned int len) in asn1_open() argument
108 ctx->begin = buf; in asn1_open()
109 ctx->end = buf + len; in asn1_open()
110 ctx->pointer = buf; in asn1_open()
111 ctx->error = ASN1_ERR_NOERROR; in asn1_open()
115 asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) in asn1_octet_decode() argument
117 if (ctx->pointer >= ctx->end) { in asn1_octet_decode()
118 ctx->error = ASN1_ERR_DEC_EMPTY; in asn1_octet_decode()
121 *ch = *(ctx->pointer)++; in asn1_octet_decode()
126 asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) in asn1_tag_decode() argument
[all …]
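Aside (not part of the hit list): asn1_open() just records the buffer bounds, and asn1_octet_decode() is a bounds-checked byte fetch that flags an error instead of reading past the end. A simplified sketch of the pattern; the names and the error value are hypothetical:

struct asn1_cursor {
	const unsigned char *pointer;  /* next byte to read */
	const unsigned char *end;      /* one past the last valid byte */
	int error;
};

int cursor_octet(struct asn1_cursor *c, unsigned char *ch)
{
	if (c->pointer >= c->end) {
		c->error = 1;          /* ran out of input, caller must stop */
		return 0;
	}
	*ch = *(c->pointer)++;
	return 1;
}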
smbencrypt.c
185 struct HMACMD5Context ctx;
205 hmac_md5_init_limK_to_64(owf, 16, &ctx);
206 hmac_md5_update((const unsigned char *) user_u, user_l * 2, &ctx);
207 hmac_md5_update((const unsigned char *) dom_u, domain_l * 2, &ctx);
208 hmac_md5_final(kr_buf, &ctx);
265 struct HMACMD5Context ctx;
267 hmac_md5_init_limK_to_64(kr, 16, &ctx);
268 hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
269 hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
270 hmac_md5_final(resp_buf, &ctx);
[all …]
md5.h
17 struct MD5Context ctx; member
33 struct HMACMD5Context *ctx);
35 struct HMACMD5Context *ctx);
36 void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
cifsencrypt.c
229 struct HMACMD5Context ctx; in CalcNTLMv2_partial_mac_key() local
239 hmac_md5_init_limK_to_64(temp_hash, 16, &ctx); in CalcNTLMv2_partial_mac_key()
274 (user_name_len+dom_name_len)*2, &ctx); in CalcNTLMv2_partial_mac_key()
276 hmac_md5_final(ses->server->ntlmv2_hash, &ctx); in CalcNTLMv2_partial_mac_key()
/fs/ntfs/
attrib.c
83 int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx) in ntfs_map_runlist_nolock() argument
102 if (!ctx) { in ntfs_map_runlist_nolock()
107 ctx = ntfs_attr_get_search_ctx(base_ni, m); in ntfs_map_runlist_nolock()
108 if (unlikely(!ctx)) { in ntfs_map_runlist_nolock()
115 BUG_ON(IS_ERR(ctx->mrec)); in ntfs_map_runlist_nolock()
116 a = ctx->attr; in ntfs_map_runlist_nolock()
142 old_ctx = *ctx; in ntfs_map_runlist_nolock()
160 ntfs_attr_reinit_search_ctx(ctx); in ntfs_map_runlist_nolock()
166 CASE_SENSITIVE, vcn, NULL, 0, ctx); in ntfs_map_runlist_nolock()
172 BUG_ON(!ctx->attr->non_resident); in ntfs_map_runlist_nolock()
[all …]
namei.c
178 ntfs_attr_search_ctx *ctx; in ntfs_lookup() local
202 ctx = NULL; in ntfs_lookup()
205 ctx = ntfs_attr_get_search_ctx(ni, m); in ntfs_lookup()
206 if (unlikely(!ctx)) { in ntfs_lookup()
215 NULL, 0, ctx); in ntfs_lookup()
225 a = ctx->attr; in ntfs_lookup()
232 fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu( in ntfs_lookup()
233 ctx->attr->data.resident.value_offset)); in ntfs_lookup()
244 ntfs_attr_put_search_ctx(ctx); in ntfs_lookup()
248 ctx = NULL; in ntfs_lookup()
[all …]
inode.c
450 static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx) in ntfs_is_extended_system_file() argument
455 ntfs_attr_reinit_search_ctx(ctx); in ntfs_is_extended_system_file()
458 nr_links = le16_to_cpu(ctx->mrec->link_count); in ntfs_is_extended_system_file()
462 ctx))) { in ntfs_is_extended_system_file()
464 ATTR_RECORD *attr = ctx->attr; in ntfs_is_extended_system_file()
473 if (p < (u8*)ctx->mrec || (u8*)p > (u8*)ctx->mrec + in ntfs_is_extended_system_file()
474 le32_to_cpu(ctx->mrec->bytes_in_use)) { in ntfs_is_extended_system_file()
476 ntfs_error(ctx->ntfs_ino->vol->sb, "Corrupt file name " in ntfs_is_extended_system_file()
481 ntfs_error(ctx->ntfs_ino->vol->sb, "Non-resident file " in ntfs_is_extended_system_file()
486 ntfs_error(ctx->ntfs_ino->vol->sb, "File name with " in ntfs_is_extended_system_file()
[all …]
file.c
123 ntfs_attr_search_ctx *ctx = NULL; in ntfs_attr_extend_initialized() local
155 ctx = ntfs_attr_get_search_ctx(base_ni, m); in ntfs_attr_extend_initialized()
156 if (unlikely(!ctx)) { in ntfs_attr_extend_initialized()
161 CASE_SENSITIVE, 0, NULL, 0, ctx); in ntfs_attr_extend_initialized()
167 m = ctx->mrec; in ntfs_attr_extend_initialized()
168 a = ctx->attr; in ntfs_attr_extend_initialized()
199 ctx = ntfs_attr_get_search_ctx(base_ni, m); in ntfs_attr_extend_initialized()
200 if (unlikely(!ctx)) { in ntfs_attr_extend_initialized()
205 CASE_SENSITIVE, 0, NULL, 0, ctx); in ntfs_attr_extend_initialized()
211 m = ctx->mrec; in ntfs_attr_extend_initialized()
[all …]
mft.c
1286 ntfs_attr_search_ctx *ctx = NULL; in ntfs_mft_bitmap_extend_allocation_nolock() local
1394 ctx = ntfs_attr_get_search_ctx(mft_ni, mrec); in ntfs_mft_bitmap_extend_allocation_nolock()
1395 if (unlikely(!ctx)) { in ntfs_mft_bitmap_extend_allocation_nolock()
1402 0, ctx); in ntfs_mft_bitmap_extend_allocation_nolock()
1410 a = ctx->attr; in ntfs_mft_bitmap_extend_allocation_nolock()
1431 ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size + in ntfs_mft_bitmap_extend_allocation_nolock()
1471 flush_dcache_mft_record_page(ctx->ntfs_ino); in ntfs_mft_bitmap_extend_allocation_nolock()
1472 mark_mft_record_dirty(ctx->ntfs_ino); in ntfs_mft_bitmap_extend_allocation_nolock()
1473 ntfs_attr_reinit_search_ctx(ctx); in ntfs_mft_bitmap_extend_allocation_nolock()
1476 0, ctx); in ntfs_mft_bitmap_extend_allocation_nolock()
[all …]
dir.c
90 ntfs_attr_search_ctx *ctx; in ntfs_lookup_inode_by_name() local
107 ctx = ntfs_attr_get_search_ctx(dir_ni, m); in ntfs_lookup_inode_by_name()
108 if (unlikely(!ctx)) { in ntfs_lookup_inode_by_name()
114 0, ctx); in ntfs_lookup_inode_by_name()
125 ir = (INDEX_ROOT*)((u8*)ctx->attr + in ntfs_lookup_inode_by_name()
126 le16_to_cpu(ctx->attr->data.resident.value_offset)); in ntfs_lookup_inode_by_name()
137 if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie + in ntfs_lookup_inode_by_name()
189 ntfs_attr_put_search_ctx(ctx); in ntfs_lookup_inode_by_name()
288 ntfs_attr_put_search_ctx(ctx); in ntfs_lookup_inode_by_name()
310 ntfs_attr_put_search_ctx(ctx); in ntfs_lookup_inode_by_name()
[all …]
attrib.h
64 ntfs_attr_search_ctx *ctx);
71 const VCN vcn, ntfs_attr_search_ctx *ctx);
76 ntfs_attr_search_ctx *ctx);
88 extern void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx);
91 extern void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx);
aops.c
404 ntfs_attr_search_ctx *ctx; in ntfs_readpage() local
487 ctx = ntfs_attr_get_search_ctx(base_ni, mrec); in ntfs_readpage()
488 if (unlikely(!ctx)) { in ntfs_readpage()
493 CASE_SENSITIVE, 0, NULL, 0, ctx); in ntfs_readpage()
496 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); in ntfs_readpage()
508 memcpy(addr, (u8*)ctx->attr + in ntfs_readpage()
509 le16_to_cpu(ctx->attr->data.resident.value_offset), in ntfs_readpage()
516 ntfs_attr_put_search_ctx(ctx); in ntfs_readpage()
1359 ntfs_attr_search_ctx *ctx = NULL; in ntfs_writepage() local
1456 ctx = NULL; in ntfs_writepage()
[all …]
lcnalloc.h
49 s64 count, ntfs_attr_search_ctx *ctx, const bool is_rollback);
108 s64 count, ntfs_attr_search_ctx *ctx) in ntfs_cluster_free() argument
110 return __ntfs_cluster_free(ni, start_vcn, count, ctx, false); in ntfs_cluster_free()
super.c
355 ntfs_attr_search_ctx *ctx; in ntfs_write_volume_flags() local
368 ctx = ntfs_attr_get_search_ctx(ni, m); in ntfs_write_volume_flags()
369 if (!ctx) { in ntfs_write_volume_flags()
374 ctx); in ntfs_write_volume_flags()
377 vi = (VOLUME_INFORMATION*)((u8*)ctx->attr + in ntfs_write_volume_flags()
378 le16_to_cpu(ctx->attr->data.resident.value_offset)); in ntfs_write_volume_flags()
380 flush_dcache_mft_record_page(ctx->ntfs_ino); in ntfs_write_volume_flags()
381 mark_mft_record_dirty(ctx->ntfs_ino); in ntfs_write_volume_flags()
382 ntfs_attr_put_search_ctx(ctx); in ntfs_write_volume_flags()
388 if (ctx) in ntfs_write_volume_flags()
[all …]
/fs/nfs/
inode.c
519 struct nfs_open_context *ctx; in alloc_nfs_open_context() local
521 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); in alloc_nfs_open_context()
522 if (ctx != NULL) { in alloc_nfs_open_context()
523 ctx->path.dentry = dget(dentry); in alloc_nfs_open_context()
524 ctx->path.mnt = mntget(mnt); in alloc_nfs_open_context()
525 ctx->cred = get_rpccred(cred); in alloc_nfs_open_context()
526 ctx->state = NULL; in alloc_nfs_open_context()
527 ctx->lockowner = current->files; in alloc_nfs_open_context()
528 ctx->flags = 0; in alloc_nfs_open_context()
529 ctx->error = 0; in alloc_nfs_open_context()
[all …]
read.c
114 static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, in nfs_readpage_async() argument
124 new = nfs_create_request(ctx, inode, page, 0, len); in nfs_readpage_async()
479 struct nfs_open_context *ctx; in nfs_readpage() local
507 ctx = nfs_find_open_context(inode, NULL, FMODE_READ); in nfs_readpage()
508 if (ctx == NULL) in nfs_readpage()
511 ctx = get_nfs_open_context(nfs_file_open_context(file)); in nfs_readpage()
513 error = nfs_readpage_async(ctx, inode, page); in nfs_readpage()
515 put_nfs_open_context(ctx); in nfs_readpage()
524 struct nfs_open_context *ctx; member
540 new = nfs_create_request(desc->ctx, inode, page, 0, len); in readpage_async_filler()
[all …]
direct.c
70 struct nfs_open_context *ctx; /* file open context info */ member
161 dreq->ctx = NULL; in nfs_direct_req_alloc()
175 if (dreq->ctx != NULL) in nfs_direct_req_free()
176 put_nfs_open_context(dreq->ctx); in nfs_direct_req_free()
277 struct nfs_open_context *ctx = dreq->ctx; in nfs_direct_read_schedule_segment() local
278 struct inode *inode = ctx->path.dentry->d_inode; in nfs_direct_read_schedule_segment()
284 .rpc_cred = ctx->cred, in nfs_direct_read_schedule_segment()
334 data->args.context = get_nfs_open_context(ctx); in nfs_direct_read_schedule_segment()
423 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); in nfs_direct_read()
453 .rpc_cred = dreq->ctx->cred, in nfs_direct_write_reschedule()
[all …]
nfs4proc.c
646 struct nfs_open_context *ctx; in nfs4_state_find_open_context() local
649 list_for_each_entry(ctx, &nfsi->open_files, list) { in nfs4_state_find_open_context()
650 if (ctx->state != state) in nfs4_state_find_open_context()
652 get_nfs_open_context(ctx); in nfs4_state_find_open_context()
654 return ctx; in nfs4_state_find_open_context()
660 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_… in nfs4_open_recoverdata_alloc() argument
664 opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL); in nfs4_open_recoverdata_alloc()
740 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) in _nfs4_do_open_reclaim() argument
747 opendata = nfs4_open_recoverdata_alloc(ctx, state); in _nfs4_do_open_reclaim()
763 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) in nfs4_do_open_reclaim() argument
[all …]
delegation.c
67 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state) in nfs_delegation_claim_locks() argument
76 if (nfs_file_open_context(fl->fl_file) != ctx) in nfs_delegation_claim_locks()
100 struct nfs_open_context *ctx; in nfs_delegation_claim_opens() local
106 list_for_each_entry(ctx, &nfsi->open_files, list) { in nfs_delegation_claim_opens()
107 state = ctx->state; in nfs_delegation_claim_opens()
114 get_nfs_open_context(ctx); in nfs_delegation_claim_opens()
116 err = nfs4_open_delegation_recall(ctx, state, stateid); in nfs_delegation_claim_opens()
118 err = nfs_delegation_claim_locks(ctx, state); in nfs_delegation_claim_opens()
119 put_nfs_open_context(ctx); in nfs_delegation_claim_opens()
file.c
214 static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode) in nfs_do_fsync() argument
219 have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); in nfs_do_fsync()
221 have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); in nfs_do_fsync()
223 ret = xchg(&ctx->error, 0); in nfs_do_fsync()
235 struct nfs_open_context *ctx = nfs_file_open_context(file); in nfs_file_flush() local
249 status = nfs_do_fsync(ctx, inode); in nfs_file_flush()
324 struct nfs_open_context *ctx = nfs_file_open_context(file); in nfs_file_fsync() local
332 return nfs_do_fsync(ctx, inode); in nfs_file_fsync()
496 struct nfs_open_context *ctx; in nfs_need_sync_write() local
500 ctx = nfs_file_open_context(filp); in nfs_need_sync_write()
[all …]
