Searched refs:num_pg (Results 1 – 10 of 10) sorted by relevance
301 static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg) in tcm_1d_limit() argument
303 if (__tcm_sizeof(a) < num_pg) in tcm_1d_limit()
305 if (!num_pg) in tcm_1d_limit()
308 a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width; in tcm_1d_limit()
309 a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width); in tcm_1d_limit()
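The two assignments at lines 308-309 convert a linear run of num_pg pages starting at p0 into its end coordinate p1, wrapping row-major across the container width. A minimal standalone sketch of that arithmetic, using simplified stand-in types rather than the kernel's tcm structures:

#include <stdio.h>

/* Illustrative stand-ins mirroring only the fields the snippet uses;
 * names and layout here are not the kernel's. */
struct pt   { unsigned x, y; };
struct cont { unsigned width; };
struct area { struct pt p0, p1; struct cont *tcm; };

/* Clamp a 1D run of num_pg pages starting at p0 to its end point p1,
 * wrapping row-major across the container width (cf. lines 308-309). */
static void limit_1d(struct area *a, unsigned num_pg)
{
        unsigned last = a->p0.x + num_pg - 1;      /* index of the last page in the run */

        a->p1.x = last % a->tcm->width;            /* column of the last page */
        a->p1.y = a->p0.y + last / a->tcm->width;  /* rows advanced past p0.y */
}

int main(void)
{
        struct cont c = { .width = 256 };
        struct area a = { .p0 = { .x = 250, .y = 3 }, .tcm = &c };

        limit_1d(&a, 10);                          /* run wraps onto the next row */
        printf("p1 = (%u, %u)\n", a.p1.x, a.p1.y); /* prints p1 = (3, 4) */
        return 0;
}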
657 for (i = 0; i < hdev->tm_info.num_pg; i++) { in hclge_tm_pg_info_init()
737 for (i = 0; i < hdev->tm_info.num_pg; i++) { in hclge_tm_pg_to_pri_map()
761 for (i = 0; i < hdev->tm_info.num_pg; i++) { in hclge_tm_pg_shaper_cfg()
803 for (i = 0; i < hdev->tm_info.num_pg; i++) { in hclge_tm_pg_dwrr_cfg()
1184 for (i = 0; i < hdev->tm_info.num_pg; i++) { in hclge_tm_lvl2_schd_mode_cfg()
1481 hdev->tm_info.num_pg != 1) in hclge_tm_schd_init()
362 u8 num_pg; /* It must be 1 if vNET-Base schd */ member
1484 hdev->tm_info.num_pg = 1; in hclge_configure()
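Read together with the member comment at line 362 and the check at line 1481, line 1484 pins an invariant: only a single PG is supported with vNET-based scheduling, so initialization hard-codes num_pg = 1 and scheduler setup rejects anything else. A small sketch of that invariant with simplified stand-in types (the real driver structures and error handling differ):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for the tm_info fields referenced above. */
struct tm_info { unsigned char num_pg; /* must be 1 for vNET-based scheduling */ };

static void tm_configure(struct tm_info *tm)
{
        tm->num_pg = 1;                 /* cf. line 1484: only one PG is supported */
}

static int tm_schd_init(const struct tm_info *tm)
{
        if (tm->num_pg != 1)            /* cf. line 1481: reject any other value */
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct tm_info tm;

        tm_configure(&tm);
        printf("schd init: %d\n", tm_schd_init(&tm));   /* prints 0 */
        return 0;
}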
117 __le16 num_pg; member
317 pg_per_blk = le16_to_cpu(src->num_pg); in nvme_nvm_setup_12()
367 geo->num_pg = le16_to_cpu(src->num_pg); in nvme_nvm_setup_12()
1066 return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg); in nvm_dev_attr_show_12()
395 u16 num_pg; member
612 if (pg == geo->num_pg) { in nvm_next_ppa_in_chk()
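The comparison at line 612 uses num_pg as the exclusive upper bound for the page index inside a block: once the index reaches it, the block is exhausted. A minimal sketch of that wrap, with illustrative stand-in types; the real iterator also steps the other address components, which is omitted here:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; only the fields needed for the page wrap. */
struct geo { unsigned short num_pg; };   /* pages per block, as at line 395 */
struct ppa { unsigned short pg; };       /* page component of an address */

/* Advance the page index inside a block; return false when the block
 * is exhausted (mirrors the pg == geo->num_pg test at line 612). */
static bool next_pg_in_blk(const struct geo *geo, struct ppa *ppa)
{
        unsigned short pg = ppa->pg + 1;

        if (pg == geo->num_pg)
                return false;            /* past the last page of the block */

        ppa->pg = pg;
        return true;
}

int main(void)
{
        struct geo g = { .num_pg = 4 };
        struct ppa p = { .pg = 0 };

        while (next_pg_in_blk(&g, &p))
                printf("pg = %u\n", p.pg);   /* prints 1, 2, 3 */
        return 0;
}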
481 unsigned int num_pg; member
1066 unsigned long max_pg, num_pg, new_pg, old_pg; in mm_account_pinned_pages() local
1072 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ in mm_account_pinned_pages()
1078 new_pg = old_pg + num_pg; in mm_account_pinned_pages()
1086 mmp->num_pg = num_pg; in mm_account_pinned_pages()
1088 mmp->num_pg += num_pg; in mm_account_pinned_pages()
1098 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
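Line 1072 charges the worst case: a buffer of size bytes may start and end mid-page, so it can touch two partial pages beyond the full pages it spans. A small sketch of that accounting, assuming PAGE_SHIFT = 12 (4 KiB pages) purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption for the example; 4 KiB pages */

/* Worst-case page count for pinning `size` bytes: the buffer may begin
 * and end in the middle of a page, so it can straddle two extra pages
 * beyond the full pages it covers (cf. line 1072). */
static unsigned long worst_case_pages(unsigned long size)
{
        return (size >> PAGE_SHIFT) + 2;
}

int main(void)
{
        /* 6000 bytes spans one full page plus partial pages on each side,
         * so the accounting charges 1 + 2 = 3 pages. */
        printf("%lu\n", worst_case_pages(6000));
        return 0;
}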
891 ppa.g.pg = geo->num_pg - 1; in nvm_bb_chunk_scan()
920 for (pg = 0; pg < geo->num_pg; pg++) { in nvm_bb_chunk_scan()
1246 ppa->g.pg < geo->num_pg && in pblk_boundary_ppa_checks()