/fs/ext2/

D | dir.c |
    114  char *kaddr = page_address(page);  in ext2_check_page() local
    129  p = (ext2_dirent *)(kaddr + offs);  in ext2_check_page()
    181  p = (ext2_dirent *)(kaddr + offs);  in ext2_check_page()
    300  char *kaddr, *limit;  in ext2_readdir() local
    311  kaddr = page_address(page);  in ext2_readdir()
    314  offset = ext2_validate_entry(kaddr, offset, chunk_mask);  in ext2_readdir()
    320  de = (ext2_dirent *)(kaddr+offset);  in ext2_readdir()
    321  limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);  in ext2_readdir()
    336  offset = (char *)de - kaddr;  in ext2_readdir()
    384  char *kaddr;  in ext2_find_entry() local
    [all …]

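The ext2 hits above (and the sysv, minix, and ufs dir.c hits below) all follow the same shape: take the kernel address of a directory page, derive a limit from the last valid byte, and step entry by entry until the limit. A minimal sketch of that walk, assuming a hypothetical struct demo_dirent with a little-endian rec_len field; none of these names come from the listing:

#include <linux/fs.h>
#include <linux/mm.h>

struct demo_dirent {
	__le16	rec_len;		/* on-disk record length (hypothetical layout) */
	/* inode number, name, ... elided */
};

static void walk_dir_page(struct page *page, unsigned int last_byte)
{
	/*
	 * page_address() is only safe here because these filesystems'
	 * *_get_page() helpers kmap() the page first (or it sits in lowmem).
	 */
	char *kaddr = page_address(page);
	char *limit = kaddr + last_byte - sizeof(struct demo_dirent);
	struct demo_dirent *de = (struct demo_dirent *)kaddr;

	while ((char *)de <= limit) {
		/* inspect *de here; real code also validates rec_len != 0 */
		de = (struct demo_dirent *)((char *)de +
					    le16_to_cpu(de->rec_len));
	}
}
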
D | xip.c |
    20  void **kaddr, unsigned long *pfn)  in __inode_direct_access() argument
    29  return ops->direct_access(bdev, sector, kaddr, pfn);  in __inode_direct_access()
    55  void *kaddr;  in ext2_clear_xip_target() local
    59  rc = __inode_direct_access(inode, block, &kaddr, &pfn);  in ext2_clear_xip_target()
    61  clear_page(kaddr);  in ext2_clear_xip_target()

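What the __inode_direct_access()/ext2_clear_xip_target() hits are doing: ask the block driver for a directly addressable kernel address (plus pfn) for a sector, then clear that page in place, no kmap needed. A hedged sketch of the calling side, assuming the old pre-DAX ->direct_access() prototype that the excerpt shows; the function name and error handling here are illustrative only:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/mm.h>

static int clear_xip_block(struct block_device *bdev, sector_t sector)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	unsigned long pfn;
	void *kaddr;
	int rc;

	if (!ops->direct_access)
		return -EOPNOTSUPP;

	rc = ops->direct_access(bdev, sector, &kaddr, &pfn);	/* driver hands back a mapping */
	if (rc)
		return rc;

	clear_page(kaddr);	/* backing memory is addressable directly */
	return 0;
}
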
/fs/sysv/

D | dir.c |
    84  char *kaddr, *limit;  in sysv_readdir() local
    90  kaddr = (char *)page_address(page);  in sysv_readdir()
    91  de = (struct sysv_dir_entry *)(kaddr+offset);  in sysv_readdir()
    92  limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;  in sysv_readdir()
    100  offset = (char *)de - kaddr;  in sysv_readdir()
    157  char *kaddr;  in sysv_find_entry() local
    160  kaddr = (char*)page_address(page);  in sysv_find_entry()
    161  de = (struct sysv_dir_entry *) kaddr;  in sysv_find_entry()
    162  kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;  in sysv_find_entry()
    163  for ( ; (char *) de <= kaddr ; de++) {  in sysv_find_entry()
    [all …]

/fs/minix/

D | dir.c |
    112  char *p, *kaddr, *limit;  in minix_readdir() local
    117  kaddr = (char *)page_address(page);  in minix_readdir()
    118  p = kaddr+offset;  in minix_readdir()
    119  limit = kaddr + minix_last_byte(inode, n) - chunk_size;  in minix_readdir()
    134  offset = p - kaddr;  in minix_readdir()
    186  char *kaddr, *limit;  in minix_find_entry() local
    192  kaddr = (char*)page_address(page);  in minix_find_entry()
    193  limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;  in minix_find_entry()
    194  for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {  in minix_find_entry()
    228  char *kaddr, *p;  in minix_add_link() local
    [all …]

/fs/ufs/

D | dir.c |
    116  char *kaddr = page_address(page);  in ufs_check_page() local
    131  p = (struct ufs_dir_entry *)(kaddr + offs);  in ufs_check_page()
    181  p = (struct ufs_dir_entry *)(kaddr + offs);  in ufs_check_page()
    279  char *kaddr;  in ufs_find_entry() local
    282  kaddr = page_address(page);  in ufs_find_entry()
    283  de = (struct ufs_dir_entry *) kaddr;  in ufs_find_entry()
    284  kaddr += ufs_last_byte(dir, n) - reclen;  in ufs_find_entry()
    285  while ((char *) de <= kaddr) {  in ufs_find_entry()
    326  char *kaddr;  in ufs_add_link() local
    345  kaddr = page_address(page);  in ufs_add_link()
    [all …]

/fs/udf/

D | file.c |
    46  char *kaddr;  in udf_adinicb_readpage() local
    51  kaddr = kmap(page);  in udf_adinicb_readpage()
    52  memset(kaddr, 0, PAGE_CACHE_SIZE);  in udf_adinicb_readpage()
    53  memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);  in udf_adinicb_readpage()
    66  char *kaddr;  in udf_adinicb_writepage() local
    71  kaddr = kmap(page);  in udf_adinicb_writepage()
    72  memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);  in udf_adinicb_writepage()
    88  char *kaddr;  in udf_adinicb_write_end() local
    91  kaddr = kmap_atomic(page, KM_USER0);  in udf_adinicb_write_end()
    93  kaddr + offset, copied);  in udf_adinicb_write_end()
    [all …]

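The udf_adinicb_readpage() hits (and, further down, ocfs2_read_inline_data(), gfs2's stuffed_readpage(), and vxfs_immed_readpage()) share one pattern: copy inode-embedded ("inline"/"stuffed") data into the page and zero the remainder. A minimal sketch, with inline_buf and inline_len as hypothetical stand-ins for the per-filesystem fields:

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void fill_page_from_inline(struct page *page, const void *inline_buf,
				  size_t inline_len)
{
	char *kaddr = kmap(page);	/* sleepable mapping is fine in readpage */

	memcpy(kaddr, inline_buf, inline_len);			/* embedded bytes */
	memset(kaddr + inline_len, 0, PAGE_CACHE_SIZE - inline_len);
	flush_dcache_page(page);	/* keep data caches coherent with the copy */
	kunmap(page);
	SetPageUptodate(page);
}
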
/fs/btrfs/

D | struct-funcs.c |
    57  p = (type *)(eb->kaddr + part_offset - eb->map_start); \
    63  char *kaddr; \
    70  &map_token, &kaddr, \
    77  p = (type *)(kaddr + part_offset - map_start); \
    94  p = (type *)(eb->kaddr + part_offset - eb->map_start); \
    101  char *kaddr; \
    107  &map_token, &kaddr, \
    115  p = (type *)(kaddr + part_offset - map_start); \
    130  memcpy(disk_key, eb->kaddr + ptr - eb->map_start,  in btrfs_node_key()

D | zlib.c |
    366  char *kaddr;  in btrfs_zlib_decompress_biovec() local
    451  kaddr = kmap_atomic(page_out, KM_USER0);  in btrfs_zlib_decompress_biovec()
    452  memcpy(kaddr + pg_offset, workspace->buf + buf_offset,  in btrfs_zlib_decompress_biovec()
    454  kunmap_atomic(kaddr, KM_USER0);  in btrfs_zlib_decompress_biovec()
    544  char *kaddr;  in btrfs_zlib_decompress() local
    607  kaddr = kmap_atomic(dest_page, KM_USER0);  in btrfs_zlib_decompress()
    608  memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);  in btrfs_zlib_decompress()
    609  kunmap_atomic(kaddr, KM_USER0);  in btrfs_zlib_decompress()

D | extent_io.c |
    2693  void *kaddr;  in extent_prepare_write() local
    2695  kaddr = kmap_atomic(page, KM_USER0);  in extent_prepare_write()
    2697  memset(kaddr + to, 0, block_off_end - to);  in extent_prepare_write()
    2699  memset(kaddr + block_off_start, 0,  in extent_prepare_write()
    2702  kunmap_atomic(kaddr, KM_USER0);  in extent_prepare_write()
    3418  char *kaddr;  in read_extent_buffer() local
    3432  kaddr = kmap_atomic(page, KM_USER1);  in read_extent_buffer()
    3433  memcpy(dst, kaddr + offset, cur);  in read_extent_buffer()
    3434  kunmap_atomic(kaddr, KM_USER1);  in read_extent_buffer()
    3449  char *kaddr;  in map_private_extent_buffer() local
    [all …]

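The btrfs zlib.c and extent_io.c hits show the short-lived atomic variant of the same idea: kmap_atomic() a page, memcpy()/memset() a byte range through the temporary address, kunmap_atomic() immediately. Roughly, using the old two-argument kmap_atomic() API that all of these excerpts are written against:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_to_page(struct page *page, unsigned long pg_offset,
			 const char *buf, unsigned long bytes)
{
	char *kaddr = kmap_atomic(page, KM_USER0);	/* must not sleep while mapped */

	memcpy(kaddr + pg_offset, buf, bytes);		/* e.g. decompressed output */
	kunmap_atomic(kaddr, KM_USER0);
}
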
/fs/ntfs/

D | bitmap.c |
    54  u8 *kaddr;  in __ntfs_bitmap_set_bits_in_run() local
    82  kaddr = page_address(page);  in __ntfs_bitmap_set_bits_in_run()
    92  u8 *byte = kaddr + pos;  in __ntfs_bitmap_set_bits_in_run()
    112  memset(kaddr + pos, value ? 0xff : 0, len);  in __ntfs_bitmap_set_bits_in_run()
    130  kaddr = page_address(page);  in __ntfs_bitmap_set_bits_in_run()
    136  memset(kaddr, value ? 0xff : 0, len);  in __ntfs_bitmap_set_bits_in_run()
    150  byte = kaddr + len;  in __ntfs_bitmap_set_bits_in_run()

D | dir.c |
    95  u8 *kaddr;  in ntfs_lookup_inode_by_name() local
    329  kaddr = (u8*)page_address(page);  in ntfs_lookup_inode_by_name()
    332  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<  in ntfs_lookup_inode_by_name()
    335  if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {  in ntfs_lookup_inode_by_name()
    368  if (index_end > kaddr + PAGE_CACHE_SIZE) {  in ntfs_lookup_inode_by_name()
    649  u8 *kaddr;
    807  kaddr = (u8*)page_address(page);
    810  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
    813  if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
    846  if (index_end > kaddr + PAGE_CACHE_SIZE) {
    [all …]

D | aops.c |
    90  void *kaddr;  in ntfs_end_buffer_async_read() local
    96  kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);  in ntfs_end_buffer_async_read()
    97  memset(kaddr + bh_offset(bh) + ofs, 0,  in ntfs_end_buffer_async_read()
    100  kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);  in ntfs_end_buffer_async_read()
    140  u8 *kaddr;  in ntfs_end_buffer_async_read() local
    149  kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);  in ntfs_end_buffer_async_read()
    151  post_read_mst_fixup((NTFS_RECORD*)(kaddr +  in ntfs_end_buffer_async_read()
    153  kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);  in ntfs_end_buffer_async_read()
    744  u8 *kaddr;  in ntfs_write_block() local
    748  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_write_block()
    [all …]

D | index.c |
    129  u8 *index_end, *kaddr;  in ntfs_index_lookup() local
    285  kaddr = (u8*)page_address(page);  in ntfs_index_lookup()
    288  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<  in ntfs_index_lookup()
    291  if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {  in ntfs_index_lookup()
    324  if (index_end > kaddr + PAGE_CACHE_SIZE) {  in ntfs_index_lookup()

D | logfile.c |
    490  u8 *kaddr = NULL;  in ntfs_check_logfile() local
    552  kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);  in ntfs_check_logfile()
    558  if (!ntfs_is_empty_recordp((le32*)kaddr))  in ntfs_check_logfile()
    566  if (ntfs_is_rcrd_recordp((le32*)kaddr))  in ntfs_check_logfile()
    569  if (!ntfs_is_rstr_recordp((le32*)kaddr) &&  in ntfs_check_logfile()
    570  !ntfs_is_chkd_recordp((le32*)kaddr)) {  in ntfs_check_logfile()
    581  (RESTART_PAGE_HEADER*)kaddr, pos,  in ntfs_check_logfile()

D | attrib.c |
    1545  u8 *kaddr;  in ntfs_attr_make_non_resident() local
    1658  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_attr_make_non_resident()
    1659  memcpy(kaddr, (u8*)a +  in ntfs_attr_make_non_resident()
    1662  memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);  in ntfs_attr_make_non_resident()
    1663  kunmap_atomic(kaddr, KM_USER0);  in ntfs_attr_make_non_resident()
    1808  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_attr_make_non_resident()
    1809  memcpy((u8*)a + mp_ofs, kaddr, attr_size);  in ntfs_attr_make_non_resident()
    1810  kunmap_atomic(kaddr, KM_USER0);  in ntfs_attr_make_non_resident()
    2498  u8 *kaddr;  in ntfs_attr_set() local
    2542  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_attr_set()
    [all …]

D | file.c |
    715  u8 *kaddr;  in ntfs_prepare_pages_for_non_resident_write() local
    718  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_prepare_pages_for_non_resident_write()
    721  memset(kaddr + pofs, 0, pos - bh_pos);  in ntfs_prepare_pages_for_non_resident_write()
    725  memset(kaddr + pofs, 0, bh_end - end);  in ntfs_prepare_pages_for_non_resident_write()
    727  kunmap_atomic(kaddr, KM_USER0);  in ntfs_prepare_pages_for_non_resident_write()
    1647  char *kattr, *kaddr;  in ntfs_commit_pages_after_write() local
    1706  kaddr = kmap_atomic(page, KM_USER0);  in ntfs_commit_pages_after_write()
    1708  memcpy(kattr + pos, kaddr + pos, bytes);  in ntfs_commit_pages_after_write()
    1720  memcpy(kaddr, kattr, pos);  in ntfs_commit_pages_after_write()
    1722  memcpy(kaddr + end, kattr + end, attr_len - end);  in ntfs_commit_pages_after_write()
    [all …]

D | super.c |
    1242  u32 *kaddr, *kend;  in check_windows_hibernation_status() local
    1298  kaddr = (u32*)page_address(page);  in check_windows_hibernation_status()
    1299  if (*(le32*)kaddr == const_cpu_to_le32(0x72626968)/*'hibr'*/) {  in check_windows_hibernation_status()
    1305  kend = kaddr + NTFS_HIBERFIL_HEADER_SIZE/sizeof(*kaddr);  in check_windows_hibernation_status()
    1307  if (unlikely(*kaddr)) {  in check_windows_hibernation_status()
    1316  } while (++kaddr < kend);  in check_windows_hibernation_status()
    2472  u32 *kaddr;  in get_nr_free_clusters() local
    2504  kaddr = (u32*)kmap_atomic(page, KM_USER0);  in get_nr_free_clusters()
    2513  nr_free -= (s64)hweight32(kaddr[i]);  in get_nr_free_clusters()
    2514  kunmap_atomic(kaddr, KM_USER0);  in get_nr_free_clusters()
    [all …]

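The get_nr_free_clusters() hits illustrate another use of a mapped bitmap page: count set bits word by word to track allocation. A sketch of that inner loop, with nr_words as a hypothetical bound on how many 32-bit words of the page are valid:

#include <linux/highmem.h>
#include <linux/bitops.h>

static s64 subtract_used_clusters(struct page *page, unsigned int nr_words,
				  s64 nr_free)
{
	u32 *kaddr = kmap_atomic(page, KM_USER0);
	unsigned int i;

	for (i = 0; i < nr_words; i++)
		nr_free -= (s64)hweight32(kaddr[i]);	/* each set bit = one used cluster */

	kunmap_atomic(kaddr, KM_USER0);
	return nr_free;
}
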
/fs/freevxfs/

D | vxfs_lookup.c |
    125  caddr_t kaddr;  in vxfs_find_entry() local
    131  kaddr = (caddr_t)page_address(pp);  in vxfs_find_entry()
    138  baddr = kaddr + (block * bsize);  in vxfs_find_entry()
    280  caddr_t kaddr;  in vxfs_readdir() local
    286  kaddr = (caddr_t)page_address(pp);  in vxfs_readdir()
    293  baddr = kaddr + (block * bsize);  in vxfs_readdir()
    299  (kaddr + offset) :  in vxfs_readdir()
    310  offset = (caddr_t)de - kaddr;  in vxfs_readdir()

D | vxfs_immed.c |
    104  caddr_t kaddr;  in vxfs_immed_readpage() local
    106  kaddr = kmap(pp);  in vxfs_immed_readpage()
    107  memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);  in vxfs_immed_readpage()

/fs/ocfs2/

D | aops.c |
    59  void *kaddr;  in ocfs2_symlink_get_block() local
    103  kaddr = kmap_atomic(bh_result->b_page, KM_USER0);  in ocfs2_symlink_get_block()
    104  if (!kaddr) {  in ocfs2_symlink_get_block()
    108  memcpy(kaddr + (bh_result->b_size * iblock),  in ocfs2_symlink_get_block()
    111  kunmap_atomic(kaddr, KM_USER0);  in ocfs2_symlink_get_block()
    217  void *kaddr;  in ocfs2_read_inline_data() local
    238  kaddr = kmap_atomic(page, KM_USER0);  in ocfs2_read_inline_data()
    240  memcpy(kaddr, di->id2.i_data.id_data, size);  in ocfs2_read_inline_data()
    242  memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);  in ocfs2_read_inline_data()
    244  kunmap_atomic(kaddr, KM_USER0);  in ocfs2_read_inline_data()
    [all …]

/fs/

D | bio-integrity.c |
    316  void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);  in bio_integrity_generate() local
    317  bix.data_buf = kaddr + bv->bv_offset;  in bio_integrity_generate()
    330  kunmap_atomic(kaddr, KM_USER0);  in bio_integrity_generate()
    457  void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);  in bio_integrity_verify() local
    458  bix.data_buf = kaddr + bv->bv_offset;  in bio_integrity_verify()
    466  kunmap_atomic(kaddr, KM_USER0);  in bio_integrity_verify()
    476  kunmap_atomic(kaddr, KM_USER0);  in bio_integrity_verify()

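bio_integrity_generate()/bio_integrity_verify() map each data page of a bio in turn and hand the buffer to the integrity profile. A sketch of that per-segment loop, using the bio_vec-pointer form of bio_for_each_segment() from this kernel generation; the process callback is hypothetical:

#include <linux/bio.h>
#include <linux/highmem.h>

static void for_each_bvec_buffer(struct bio *bio,
				 void (*process)(void *buf, unsigned int len))
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);

		process(kaddr + bv->bv_offset, bv->bv_len);	/* e.g. generate/verify checksums */
		kunmap_atomic(kaddr, KM_USER0);
	}
}
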
/fs/gfs2/

D | ops_address.c |
    435  void *kaddr;  in stuffed_readpage() local
    452  kaddr = kmap_atomic(page, KM_USER0);  in stuffed_readpage()
    453  memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),  in stuffed_readpage()
    455  memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);  in stuffed_readpage()
    456  kunmap_atomic(kaddr, KM_USER0);  in stuffed_readpage()
    768  void *kaddr;  in gfs2_stuffed_write_end() local
    773  kaddr = kmap_atomic(page, KM_USER0);  in gfs2_stuffed_write_end()
    774  memcpy(buf + pos, kaddr + pos, copied);  in gfs2_stuffed_write_end()
    775  memset(kaddr + pos + copied, 0, len - copied);  in gfs2_stuffed_write_end()
    777  kunmap_atomic(kaddr, KM_USER0);  in gfs2_stuffed_write_end()

D | lops.c |
    534  void *kaddr;  in gfs2_check_magic() local
    538  kaddr = kmap_atomic(bh->b_page, KM_USER0);  in gfs2_check_magic()
    539  ptr = kaddr + bh_offset(bh);  in gfs2_check_magic()
    542  kunmap_atomic(kaddr, KM_USER0);  in gfs2_check_magic()
    577  void *kaddr;  in gfs2_write_blocks() local
    579  kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);  in gfs2_write_blocks()
    580  memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),  in gfs2_write_blocks()
    582  kunmap_atomic(kaddr, KM_USER0);  in gfs2_write_blocks()

/fs/reiserfs/

D | tail_conversion.c |
    132  char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);  in direct2indirect() local
    133  memset(kaddr + pgoff, 0, n_blk_size - total_tail);  in direct2indirect()
    134  kunmap_atomic(kaddr, KM_USER0);  in direct2indirect()

/fs/nfs/

D | nfs2xdr.c |
    432  __be32 *end, *entry, *kaddr;  in nfs_xdr_readdirres() local
    452  kaddr = p = kmap_atomic(*page, KM_USER0);  in nfs_xdr_readdirres()
    486  kunmap_atomic(kaddr, KM_USER0);  in nfs_xdr_readdirres()
    603  char *kaddr;  in nfs_xdr_readlinkres() local
    631  kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);  in nfs_xdr_readlinkres()
    632  kaddr[len+rcvbuf->page_base] = '\0';  in nfs_xdr_readlinkres()
    633  kunmap_atomic(kaddr, KM_USER0);  in nfs_xdr_readlinkres()

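The nfs_xdr_readlinkres() hit at line 632 is a small but common trick: NUL-terminate a name that was received straight into a page-cache page so later code can treat it as a C string. A sketch, with offset and len as hypothetical stand-ins for the xdr_buf fields used in the real code:

#include <linux/highmem.h>

static void terminate_name_in_page(struct page *page, unsigned int offset,
				   unsigned int len)
{
	char *kaddr = kmap_atomic(page, KM_USER0);

	kaddr[offset + len] = '\0';	/* string now ends inside the page */
	kunmap_atomic(kaddr, KM_USER0);
}
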