/fs/proc/

D | page.c
      92  u64 k;  in stable_page_flags() local
     102  k = page->flags;  in stable_page_flags()
     165  u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);  in stable_page_flags()
     167  u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);  in stable_page_flags()
     171  u |= kpf_copy_bit(k, KPF_ERROR, PG_error);  in stable_page_flags()
     172  u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);  in stable_page_flags()
     173  u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);  in stable_page_flags()
     174  u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);  in stable_page_flags()
     176  u |= kpf_copy_bit(k, KPF_LRU, PG_lru);  in stable_page_flags()
     177  u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);  in stable_page_flags()
     [all …]
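The stable_page_flags() lines above OR together single bits copied out of the raw page->flags word k into a stable, user-visible layout. A minimal user-space sketch of that copy-one-bit idiom, assuming illustrative flag positions and a helper name (copy_bit) rather than the kernel's kpf_copy_bit()/PG_*/KPF_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Copy bit 'src_bit' of 'flags' into bit 'dst_bit' of the result. */
    static inline uint64_t copy_bit(uint64_t flags, int dst_bit, int src_bit)
    {
        return ((flags >> src_bit) & 1ULL) << dst_bit;
    }

    int main(void)
    {
        /* Illustrative positions only; the real kernel values differ. */
        enum { PG_LOCKED = 0, PG_DIRTY = 4 };    /* raw flags layout       */
        enum { KPF_LOCKED = 0, KPF_DIRTY = 1 };  /* exported bitmap layout */

        uint64_t k = 1ULL << PG_DIRTY;           /* the raw flags word     */
        uint64_t u = 0;

        u |= copy_bit(k, KPF_LOCKED, PG_LOCKED);
        u |= copy_bit(k, KPF_DIRTY, PG_DIRTY);

        printf("stable flags: %#llx\n", (unsigned long long)u);
        return 0;
    }

Each exported bit keeps its position even if the in-kernel flag layout changes, which is the point of the translation.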
D | array.c
     254  struct k_sigaction *k;  in collect_sigign_sigcatch() local
     257  k = p->sighand->action;  in collect_sigign_sigcatch()
     258  for (i = 1; i <= _NSIG; ++i, ++k) {  in collect_sigign_sigcatch()
     259  if (k->sa.sa_handler == SIG_IGN)  in collect_sigign_sigcatch()
     261  else if (k->sa.sa_handler != SIG_DFL)  in collect_sigign_sigcatch()
/fs/ubifs/

D | key.h
     103  static inline void ino_key_init_flash(const struct ubifs_info *c, void *k,  in ino_key_init_flash() argument
     106  union ubifs_key *key = k;  in ino_key_init_flash()
     110  memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);  in ino_key_init_flash()
     181  static inline void dent_key_init_flash(const struct ubifs_info *c, void *k,  in dent_key_init_flash() argument
     185  union ubifs_key *key = k;  in dent_key_init_flash()
     192  memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);  in dent_key_init_flash()
     233  static inline void xent_key_init_flash(const struct ubifs_info *c, void *k,  in xent_key_init_flash() argument
     236  union ubifs_key *key = k;  in xent_key_init_flash()
     243  memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);  in xent_key_init_flash()
     333  static inline int key_type_flash(const struct ubifs_info *c, const void *k)  in key_type_flash() argument
     [all …]
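Each *_key_init_flash() hit above writes a small key at the front of a fixed-size on-media buffer passed as void *k and then memset()s the remaining bytes to zero. A hedged sketch of that fill-then-pad idiom, with an invented struct and MAX_KEY_LEN standing in for union ubifs_key and UBIFS_MAX_KEY_LEN:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_KEY_LEN 16                  /* illustrative buffer size */

    struct flash_key {                      /* stand-in for the real key union */
        uint32_t inum;
        uint32_t type_and_hash;
    };

    /* Fill the first 8 bytes of 'k' with the key, zero the rest of the buffer. */
    static void key_init_flash(void *k, uint32_t inum, uint32_t type)
    {
        struct flash_key *key = k;

        key->inum = inum;
        key->type_and_hash = type << 29;    /* keep the type in the top bits */
        memset((char *)k + sizeof(*key), 0, MAX_KEY_LEN - sizeof(*key));
    }

    int main(void)
    {
        unsigned char buf[MAX_KEY_LEN];

        key_init_flash(buf, 42, 1);
        printf("first byte %u, last byte %u\n",
               (unsigned)buf[0], (unsigned)buf[MAX_KEY_LEN - 1]);
        return 0;
    }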
/fs/squashfs/

D | zlib_wrapper.c
      56  int zlib_err, zlib_init = 0, k = 0;  in zlib_uncompress() local
      64  if (stream->avail_in == 0 && k < b) {  in zlib_uncompress()
      67  stream->next_in = bh[k]->b_data + offset;  in zlib_uncompress()
      89  if (stream->avail_in == 0 && k < b)  in zlib_uncompress()
      90  put_bh(bh[k++]);  in zlib_uncompress()
     102  if (k < b)  in zlib_uncompress()
     108  for (; k < b; k++)  in zlib_uncompress()
     109  put_bh(bh[k]);  in zlib_uncompress()
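The zlib entry above, and the zstd, xz and block.c entries that follow, all use k the same way: it walks the array of buffer heads bh[0..b) feeding the decompressor, each buffer is released with put_bh() the moment it is drained, and anything left over is released on the error path. A generic user-space sketch of that consume-and-release loop, with chunk/put_chunk standing in for buffer heads and put_bh() (this is not the squashfs code itself):

    #include <stdlib.h>
    #include <string.h>

    struct chunk { char *data; size_t len; };

    static char *chunk_dup(const char *s, size_t n)   /* heap copy for the demo */
    {
        char *p = malloc(n);
        if (p)
            memcpy(p, s, n);
        return p;
    }

    static void put_chunk(struct chunk *c)            /* stand-in for put_bh() */
    {
        free(c->data);
        c->data = NULL;
    }

    /* Copy up to 'outlen' bytes out of the 'b' input chunks, releasing each
     * chunk as soon as it has been consumed, then releasing any leftovers
     * (the error / short-output path) before returning. */
    static size_t consume_chunks(struct chunk *bh, int b, char *out, size_t outlen)
    {
        size_t used = 0;
        int k = 0;

        while (k < b && used + bh[k].len <= outlen) {
            memcpy(out + used, bh[k].data, bh[k].len);
            used += bh[k].len;
            put_chunk(&bh[k++]);                      /* this chunk is done */
        }
        for (; k < b; k++)                            /* release whatever remains */
            put_chunk(&bh[k]);
        return used;
    }

    int main(void)
    {
        struct chunk bh[2] = {
            { chunk_dup("hello ", 6), 6 },
            { chunk_dup("world", 5), 5 },
        };
        char out[16] = { 0 };

        return consume_chunks(bh, 2, out, sizeof(out)) == 11 ? 0 : 1;
    }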
D | zstd_wrapper.c
      69  int k = 0;  in zstd_uncompress() local
      84  if (in_buf.pos == in_buf.size && k < b) {  in zstd_uncompress()
      88  in_buf.src = bh[k]->b_data + offset;  in zstd_uncompress()
     111  if (in_buf.pos == in_buf.size && k < b)  in zstd_uncompress()
     112  put_bh(bh[k++]);  in zstd_uncompress()
     123  if (k < b)  in zstd_uncompress()
     129  for (; k < b; k++)  in zstd_uncompress()
     130  put_bh(bh[k]);  in zstd_uncompress()
D | xz_wrapper.c
     124  int avail, total = 0, k = 0;  in squashfs_xz_uncompress() local
     135  if (stream->buf.in_pos == stream->buf.in_size && k < b) {  in squashfs_xz_uncompress()
     138  stream->buf.in = bh[k]->b_data + offset;  in squashfs_xz_uncompress()
     154  if (stream->buf.in_pos == stream->buf.in_size && k < b)  in squashfs_xz_uncompress()
     155  put_bh(bh[k++]);  in squashfs_xz_uncompress()
     160  if (xz_err != XZ_STREAM_END || k < b)  in squashfs_xz_uncompress()
     166  for (; k < b; k++)  in squashfs_xz_uncompress()
     167  put_bh(bh[k]);  in squashfs_xz_uncompress()
D | block.c
      85  int bytes, compressed, b = 0, k = 0, avail, i;  in squashfs_read_data() local
     170  for (bytes = length; k < b; k++) {  in squashfs_read_data()
     180  memcpy(data + pg_offset, bh[k]->b_data + offset,  in squashfs_read_data()
     187  put_bh(bh[k]);  in squashfs_read_data()
     196  for (; k < b; k++)  in squashfs_read_data()
     197  put_bh(bh[k]);  in squashfs_read_data()
/fs/reiserfs/

D | prints.c
     176  char *k = fmt;  in is_there_reiserfs_struct() local
     178  while ((k = strchr(k, '%')) != NULL) {  in is_there_reiserfs_struct()
     179  if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' ||  in is_there_reiserfs_struct()
     180  k[1] == 'z' || k[1] == 'b' || k[1] == 'y' || k[1] == 'a') {  in is_there_reiserfs_struct()
     181  *what = k[1];  in is_there_reiserfs_struct()
     184  k++;  in is_there_reiserfs_struct()
     186  return k;  in is_there_reiserfs_struct()
     206  char *k;  in prepare_error_buf() local
     218  while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {  in prepare_error_buf()
     219  *k = 0;  in prepare_error_buf()
     [all …]
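is_there_reiserfs_struct() above scans a printk-style format string for a '%' that introduces one of reiserfs's private conversion characters (%k, %K, %h, ...), reports which one was found and returns its position so prepare_error_buf() can split the string there. A self-contained sketch of that scan, with an abridged character list and a function name of my own:

    #include <stdio.h>
    #include <string.h>

    /* Find the next '%' introducing one of our private conversion characters;
     * report which one in *what and return its position, or NULL if none. */
    static char *find_custom_spec(char *fmt, char *what)
    {
        char *k = fmt;

        while ((k = strchr(k, '%')) != NULL) {
            if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't') {
                *what = k[1];
                break;
            }
            k++;                    /* ordinary specifier, keep scanning */
        }
        return k;
    }

    int main(void)
    {
        char fmt[] = "block %d key %k";
        char what = 0;
        char *pos = find_custom_spec(fmt, &what);

        if (pos)
            printf("found %%%c at offset %td\n", what, pos - fmt);
        return 0;
    }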
D | ibalance.c
     819  int insert_num, n, k;  in balance_internal() local
     853  k = 0;  in balance_internal()
     891  k = tb->lnum[h] - child_pos - 1;  in balance_internal()
     898  n + child_pos + 1, k,  in balance_internal()
     901  replace_lkey(tb, h, insert_key + k);  in balance_internal()
     909  MAX_CHILD_SIZE(insert_ptr[k]) -  in balance_internal()
     910  B_FREE_SPACE(insert_ptr[k]));  in balance_internal()
     911  put_dc_block_number(dc, insert_ptr[k]->b_blocknr);  in balance_internal()
     915  k++;  in balance_internal()
     916  insert_key += k;  in balance_internal()
     [all …]
D | hashes.c
      46  u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3 };  in keyed_hash() local
      48  u32 h0 = k[0], h1 = k[1];  in keyed_hash()
D | item_ops.c
     499  int k, l;  in direntry_create_vi() local
     502  for (k = 0; k < dir_u->entry_count; k++)  in direntry_create_vi()
     503  l += dir_u->entry_sizes[k];  in direntry_create_vi()
/fs/udf/

D | partition.c
     158  int i, j, k, l;  in udf_relocate_blocks() local
     187  for (k = 0; k < reallocationTableLen; k++) {  in udf_relocate_blocks()
     188  struct sparingEntry *entry = &st->mapEntry[k];  in udf_relocate_blocks()
     228  for (l = k; l < reallocationTableLen; l++) {  in udf_relocate_blocks()
     244  memmove(&st->mapEntry[k + 1],  in udf_relocate_blocks()
     245  &st->mapEntry[k],  in udf_relocate_blocks()
     246  (l - k) *  in udf_relocate_blocks()
     248  st->mapEntry[k] = mapEntry;  in udf_relocate_blocks()
     257  st->mapEntry[k].mappedLocation) +  in udf_relocate_blocks()
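udf_relocate_blocks() above keeps the sparing table sorted: it finds the slot k where the new mapping belongs, memmove()s the later entries up by one and writes the new entry at index k. A minimal sketch of that sorted-insert-with-memmove step, using an invented map_entry type in place of struct sparingEntry and ignoring the on-disk endianness handling:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct map_entry {                   /* stand-in for struct sparingEntry */
        uint32_t orig;
        uint32_t mapped;
    };

    /* Insert 'e' into the table (kept sorted by 'orig'), shifting later
     * entries up.  'len' is the current entry count; the array must have room. */
    static void sorted_insert(struct map_entry *tab, int len, struct map_entry e)
    {
        int k;

        for (k = 0; k < len && tab[k].orig < e.orig; k++)
            ;
        memmove(&tab[k + 1], &tab[k], (len - k) * sizeof(tab[0]));
        tab[k] = e;
    }

    int main(void)
    {
        struct map_entry tab[4] = { { 10, 100 }, { 30, 300 }, { 50, 500 } };

        sorted_insert(tab, 3, (struct map_entry){ 20, 200 });
        for (int k = 0; k < 4; k++)
            printf("%u -> %u\n", (unsigned)tab[k].orig, (unsigned)tab[k].mapped);
        return 0;
    }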
/fs/hpfs/

D | alloc.c
     161  unsigned k = le32_to_cpu(bmp[i-1]);  in alloc_in_bmp() local
     162  while (k & 0x80000000) {  in alloc_in_bmp()
     163  q--; k <<= 1;  in alloc_in_bmp()
     368  unsigned k;  in hpfs_check_free_dnodes() local
     370  for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {  in hpfs_check_free_dnodes()
     389  u32 k;  in hpfs_check_free_dnodes() local
     391  for (k = 0xf; k; k <<= 4)  in hpfs_check_free_dnodes()
     392  if ((le32_to_cpu(bmp[j]) & k) == k) {  in hpfs_check_free_dnodes()
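The hpfs hits above are bit-twiddling on bitmap words: alloc_in_bmp() counts consecutive set bits from the top by shifting left while the high bit stays set, and hpfs_check_free_dnodes() walks a word nibble by nibble with a sliding 0xf mask. A user-space sketch of both idioms (the function names are mine; the real code reads the words through le32_to_cpu()):

    #include <stdint.h>
    #include <stdio.h>

    /* Count how many consecutive bits are set starting from the top of 'k'
     * (the "while (k & 0x80000000) k <<= 1" walk shown above). */
    static int leading_ones(uint32_t k)
    {
        int n = 0;

        while (k & 0x80000000u) {
            n++;
            k <<= 1;
        }
        return n;
    }

    /* Walk a 32-bit word nibble by nibble with a sliding 0xf mask and count
     * how many nibbles are fully set (the "k = 0xf; k <<= 4" scan). */
    static int full_nibbles(uint32_t word)
    {
        int n = 0;

        for (uint32_t k = 0xf; k; k <<= 4)
            if ((word & k) == k)
                n++;
        return n;
    }

    int main(void)
    {
        printf("%d %d\n", leading_ones(0xe0000000u), full_nibbles(0x0ff0f00fu));
        return 0;
    }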
/fs/minix/

D | itree_common.c
     230  int k, err;  in find_shared() local
     233  for (k = depth; k > 1 && !offsets[k-1]; k--)  in find_shared()
     235  partial = get_branch(inode, k, offsets, chain, &err);  in find_shared()
     239  partial = chain + k-1;  in find_shared()
     246  if (p == chain + k - 1 && p > chain) {  in find_shared()
     359  int k = sb->s_blocksize_bits - 10;  in nblocks() local
     361  blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);  in nblocks()
D | bitmap.c
      47  int k = sb->s_blocksize_bits + 3;  in minix_free_block() local
      55  bit = zone & ((1<<k) - 1);  in minix_free_block()
      56  zone >>= k;  in minix_free_block()
     188  int k = sb->s_blocksize_bits + 3;  in minix_free_inode() local
     196  bit = ino & ((1<<k) - 1);  in minix_free_inode()
     197  ino >>= k;  in minix_free_inode()
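In minix_free_block() and minix_free_inode() above, k = s_blocksize_bits + 3 is log2 of the number of bits held by one bitmap block, so the low k bits of a zone or inode number select the bit and the remaining high bits select which bitmap block to load. A small sketch of that split (locate_bit is an illustrative name, not a kernel helper):

    #include <stdio.h>

    /* With a block size of 1 << blocksize_bits bytes, each bitmap block holds
     * 1 << (blocksize_bits + 3) bits.  Split a zone number into the index of
     * the bitmap block and the bit offset inside it. */
    static void locate_bit(unsigned long zone, int blocksize_bits,
                           unsigned long *block, unsigned long *bit)
    {
        int k = blocksize_bits + 3;          /* log2(bits per bitmap block) */

        *bit = zone & ((1UL << k) - 1);
        *block = zone >> k;
    }

    int main(void)
    {
        unsigned long block, bit;

        locate_bit(10000, 10, &block, &bit); /* 1 KiB blocks -> 8192 bits each */
        printf("bitmap block %lu, bit %lu\n", block, bit);
        return 0;
    }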
/fs/jfs/

D | jfs_txnmgr.c
     227  int k, size;  in txInit() local
     275  for (k = 1; k < nTxBlock - 1; k++) {  in txInit()
     276  TxBlock[k].next = k + 1;  in txInit()
     277  init_waitqueue_head(&TxBlock[k].gcwait);  in txInit()
     278  init_waitqueue_head(&TxBlock[k].waitor);  in txInit()
     280  TxBlock[k].next = 0;  in txInit()
     281  init_waitqueue_head(&TxBlock[k].gcwait);  in txInit()
     282  init_waitqueue_head(&TxBlock[k].waitor);  in txInit()
     303  for (k = 1; k < nTxLock - 1; k++)  in txInit()
     304  TxLock[k].next = k + 1;  in txInit()
     [all …]
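txInit() above threads the TxBlock and TxLock arrays into free lists stored inside the arrays themselves: each slot's next field points at the following index and the last slot's next is 0, which doubles as the list terminator (index 0 stays off the list, as the loops starting at k = 1 suggest). A compact sketch of that array-embedded free list, using invented names:

    #include <stdio.h>

    #define NBLOCKS 8

    struct tblock { int next; };             /* stand-in for the TxBlock slots */

    static struct tblock blocks[NBLOCKS];
    static int free_head;

    /* Chain every slot except slot 0 into a singly linked free list kept
     * inside the array itself; 0 terminates the list. */
    static void freelist_init(void)
    {
        int k;

        for (k = 1; k < NBLOCKS - 1; k++)
            blocks[k].next = k + 1;
        blocks[k].next = 0;                  /* last slot ends the list */
        free_head = 1;
    }

    int main(void)
    {
        freelist_init();
        for (int k = free_head; k; k = blocks[k].next)
            printf("%d ", k);
        printf("\n");
        return 0;
    }

Keeping the links as small array indices means a plain 0 can serve as the end-of-list marker.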
D | jfs_extent.c
     631  u64 m, k;  in extRoundDown() local
     639  k = (u64) 1 << i;  in extRoundDown()
     640  k = ((k - 1) & nb) ? k : k >> 1;  in extRoundDown()
     642  return (k);  in extRoundDown()
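The extRoundDown() lines above compute, in effect, the largest power of two strictly below nb: k starts as 1 shifted to nb's highest set bit, and the "((k - 1) & nb) ? k : k >> 1" step halves it only when nb was exactly that power of two. The search for the highest set bit is not part of the snippet, so the sketch below writes it as a plain loop:

    #include <stdint.h>
    #include <stdio.h>

    /* Return the largest power of two strictly below nb (nb >= 2 assumed),
     * mirroring the "k = 1 << i; k = ((k - 1) & nb) ? k : k >> 1;" lines. */
    static uint64_t round_down_pow2(uint64_t nb)
    {
        int i;
        uint64_t k;

        for (i = 63; i > 0; i--)             /* find the highest set bit */
            if (nb & ((uint64_t)1 << i))
                break;

        k = (uint64_t)1 << i;
        return ((k - 1) & nb) ? k : k >> 1;
    }

    int main(void)
    {
        printf("%llu %llu\n",
               (unsigned long long)round_down_pow2(12),   /* prints 8 */
               (unsigned long long)round_down_pow2(8));   /* prints 4 */
        return 0;
    }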
D | jfs_dmap.c
    1395  int rc, ti, i, k, m, n, agperlev;  in dbAllocAG() local
    1487  for (k = bmp->db_agheight; k > 0; k--) {  in dbAllocAG()
    2928  int lp, pp, k;  in dbAdjTree() local
    2952  for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {  in dbAdjTree()
    3008  int ti, n = 0, k, x = 0;  in dbFindLeaf() local
    3023  for (k = le32_to_cpu(tp->dmt_height), ti = 1;  in dbFindLeaf()
    3024  k > 0; k--, ti = ((ti + n) << 2) + 1) {  in dbFindLeaf()
    3427  int i, i0 = true, j, j0 = true, k, n;  in dbExtendFS() local
    3476  k = 1 << (l2agsize - oldl2agsize);  in dbExtendFS()
    3482  for (j = 0; j < k && i < agno; j++, i++) {  in dbExtendFS()
     [all …]
/fs/

D | binfmt_elf.c
     582  unsigned long k, map_addr;  in load_elf_interp() local
     610  k = load_addr + eppnt->p_vaddr;  in load_elf_interp()
     611  if (BAD_ADDR(k) ||  in load_elf_interp()
     614  TASK_SIZE - eppnt->p_memsz < k) {  in load_elf_interp()
     623  k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;  in load_elf_interp()
     624  if (k > elf_bss)  in load_elf_interp()
     625  elf_bss = k;  in load_elf_interp()
     631  k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;  in load_elf_interp()
     632  if (k > last_bss) {  in load_elf_interp()
     633  last_bss = k;  in load_elf_interp()
     [all …]
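In load_elf_interp() above, k is reused for three addresses per program header: the mapped start (checked with BAD_ADDR() and against TASK_SIZE via the overflow-safe form "TASK_SIZE - p_memsz < k"), the end of the file-backed part (tracked as elf_bss) and the end of the in-memory size (tracked as last_bss). A sketch of that overflow-safe range check under an assumed user-address limit (TASK_LIMIT is illustrative and BAD_ADDR() is not reproduced):

    #include <stdint.h>
    #include <stdio.h>

    #define TASK_LIMIT 0x0000800000000000ULL   /* illustrative user-VA limit */

    /* Overflow-safe check that [addr, addr + size) fits below 'limit':
     * comparing "limit - size < addr" avoids wrapping addr + size. */
    static int segment_fits(uint64_t addr, uint64_t size, uint64_t limit)
    {
        return size <= limit && addr <= limit - size;
    }

    int main(void)
    {
        uint64_t load_addr = 0x7f0000000000ULL;
        uint64_t vaddr = 0x1000, filesz = 0x2000, memsz = 0x5000;

        uint64_t k = load_addr + vaddr;        /* mapped start       */
        uint64_t file_end = k + filesz;        /* tracks elf_bss     */
        uint64_t mem_end = k + memsz;          /* tracks last_bss    */

        if (!segment_fits(k, memsz, TASK_LIMIT))
            printf("segment rejected\n");
        else
            printf("bss %#llx..%#llx\n",
                   (unsigned long long)file_end, (unsigned long long)mem_end);
        return 0;
    }

Writing the comparison as "limit - size < addr", as the kernel snippet does, rather than "addr + size > limit" matters because addr + size can wrap around for hostile headers.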
/fs/sysv/

D | itree.c
     286  int k, err;  in find_shared() local
     289  for (k = depth; k > 1 && !offsets[k-1]; k--)  in find_shared()
     291  partial = get_branch(inode, k, offsets, chain, &err);  in find_shared()
     295  partial = chain + k-1;  in find_shared()
     312  if (p == chain + k - 1 && p > chain) {  in find_shared()
/fs/ext4/

D | indirect.c
     277  static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,  in ext4_blks_to_allocate() argument
     286  if (k > 0) {  in ext4_blks_to_allocate()
     783  int k, err;  in ext4_find_shared() local
     787  for (k = depth; k > 1 && !offsets[k-1]; k--)  in ext4_find_shared()
     789  partial = ext4_get_branch(inode, k, offsets, chain, &err);  in ext4_find_shared()
     792  partial = chain + k-1;  in ext4_find_shared()
     808  if (p == chain + k - 1 && p > chain) {  in ext4_find_shared()
D | extents.c
     683  int k, l = path->p_depth;  in ext4_ext_show_path() local
     686  for (k = 0; k <= l; k++, path++) {  in ext4_ext_show_path()
     812  int k;  in ext4_ext_binsearch_idx() local
     815  for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {  in ext4_ext_binsearch_idx()
     816  if (k != 0 &&  in ext4_ext_binsearch_idx()
     819  "first=0x%p\n", k,  in ext4_ext_binsearch_idx()
     825  BUG_ON(k && le32_to_cpu(ix->ei_block)  in ext4_ext_binsearch_idx()
     883  int k;  in ext4_ext_binsearch() local
     886  for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {  in ext4_ext_binsearch()
     887  BUG_ON(k && le32_to_cpu(ex->ee_block)  in ext4_ext_binsearch()
     [all …]
/fs/fat/

D | dir.c
     361  int chi, chl, i, j, k;  in fat_parse_short() local
     414  for (k = 8; k < MSDOS_NAME;) {  in fat_parse_short()
     415  c = work[k];  in fat_parse_short()
     418  chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k,  in fat_parse_short()
     422  k++;  in fat_parse_short()
     433  int offset = min(chl, MSDOS_NAME-k);  in fat_parse_short()
     434  k += offset;  in fat_parse_short()
     437  for (chi = 0; chi < chl && k < MSDOS_NAME;  in fat_parse_short()
     438  chi++, i++, k++) {  in fat_parse_short()
     439  ptname[i] = work[k];  in fat_parse_short()
/fs/nilfs2/

D | alloc.c
     737  int i, j, k, ret;  in nilfs_palloc_freev() local
     808  for (k = 0; k < nempties; k++) {  in nilfs_palloc_freev()
     810  last_nrs[k]);  in nilfs_palloc_freev()
     814  ret, (unsigned long long)last_nrs[k],  in nilfs_palloc_freev()
/fs/ext2/

D | inode.c
     362  ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,  in ext2_blks_to_allocate() argument
     371  if (k > 0) {  in ext2_blks_to_allocate()
    1055  int k, err;  in ext2_find_shared() local
    1058  for (k = depth; k > 1 && !offsets[k-1]; k--)  in ext2_find_shared()
    1060  partial = ext2_get_branch(inode, k, offsets, chain, &err);  in ext2_find_shared()
    1062  partial = chain + k-1;  in ext2_find_shared()
    1080  if (p == chain + k - 1 && p > chain) {  in ext2_find_shared()