Lines matching references to mq
867 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level) in writeback_sentinel() argument
869 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
872 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level) in demote_sentinel() argument
874 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
877 static void __update_writeback_sentinels(struct smq_policy *mq) in __update_writeback_sentinels() argument
880 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
884 sentinel = writeback_sentinel(mq, level); in __update_writeback_sentinels()
890 static void __update_demote_sentinels(struct smq_policy *mq) in __update_demote_sentinels() argument
893 struct queue *q = &mq->clean; in __update_demote_sentinels()
897 sentinel = demote_sentinel(mq, level); in __update_demote_sentinels()
903 static void update_sentinels(struct smq_policy *mq) in update_sentinels() argument
905 if (time_after(jiffies, mq->next_writeback_period)) { in update_sentinels()
906 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in update_sentinels()
907 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in update_sentinels()
908 __update_writeback_sentinels(mq); in update_sentinels()
911 if (time_after(jiffies, mq->next_demote_period)) { in update_sentinels()
912 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in update_sentinels()
913 mq->current_demote_sentinels = !mq->current_demote_sentinels; in update_sentinels()
914 __update_demote_sentinels(mq); in update_sentinels()
918 static void __sentinels_init(struct smq_policy *mq) in __sentinels_init() argument
924 sentinel = writeback_sentinel(mq, level); in __sentinels_init()
926 q_push(&mq->dirty, sentinel); in __sentinels_init()
928 sentinel = demote_sentinel(mq, level); in __sentinels_init()
930 q_push(&mq->clean, sentinel); in __sentinels_init()
934 static void sentinels_init(struct smq_policy *mq) in sentinels_init() argument
936 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in sentinels_init()
937 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in sentinels_init()
939 mq->current_writeback_sentinels = false; in sentinels_init()
940 mq->current_demote_sentinels = false; in sentinels_init()
941 __sentinels_init(mq); in sentinels_init()
943 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in sentinels_init()
944 mq->current_demote_sentinels = !mq->current_demote_sentinels; in sentinels_init()
945 __sentinels_init(mq); in sentinels_init()
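The lines above implement a double-buffered sentinel scheme: each level of the dirty and clean queues carries two sentinel generations, update_sentinels() flips current_writeback_sentinels / current_demote_sentinels once per period, and the now-current generation is re-inserted at the back of its level (the q_del/q_push pair is not visible in this listing), the intent being that entries queued behind the older generation have been there for at least one full period. Below is a minimal user-space model of just that flip; NR_LEVELS, PERIOD and struct period_state are invented for the sketch, and there is no real queue, only a record of when each generation was last re-queued.

#include <stdbool.h>
#include <stdio.h>

#define NR_LEVELS 4u
#define PERIOD    10u              /* stand-in for WRITEBACK_PERIOD (jiffies) */

struct period_state {
    unsigned long next_period;     /* like mq->next_writeback_period */
    bool current_gen;              /* like mq->current_writeback_sentinels */
    unsigned long pushed_at[NR_LEVELS][2];  /* when each generation was last re-queued */
};

/* Rough analogue of update_sentinels() + __update_writeback_sentinels(). */
static void update_sentinels_model(struct period_state *s, unsigned long now)
{
    unsigned level;

    if (now < s->next_period)
        return;                    /* time_after(jiffies, next_period) in the original */

    s->next_period = now + PERIOD;
    s->current_gen = !s->current_gen;
    for (level = 0; level < NR_LEVELS; level++)
        s->pushed_at[level][s->current_gen] = now;  /* "push sentinel to back of level" */
}

int main(void)
{
    struct period_state s = { .next_period = PERIOD };
    unsigned long t;

    for (t = 0; t <= 3 * PERIOD; t++)
        update_sentinels_model(&s, t);

    /* Anything queued behind the previous generation has sat a full period. */
    printf("level 0: current gen pushed at %lu, previous gen at %lu\n",
           s.pushed_at[0][s.current_gen], s.pushed_at[0][!s.current_gen]);
    return 0;
}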
950 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
952 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
955 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
958 q_push(&mq->dirty, e); in push_queue()
960 q_push(&mq->clean, e); in push_queue()
964 static void push(struct smq_policy *mq, struct entry *e) in push() argument
966 h_insert(&mq->table, e); in push()
968 push_queue(mq, e); in push()
971 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
974 q_push_front(&mq->dirty, e); in push_queue_front()
976 q_push_front(&mq->clean, e); in push_queue_front()
979 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
981 h_insert(&mq->table, e); in push_front()
983 push_queue_front(mq, e); in push_front()
986 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
988 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
991 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
999 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1001 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
1005 q_requeue(&mq->dirty, e, 1u, in requeue()
1006 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
1007 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
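requeue() above only moves an entry up when its bit in cache_hit_bits was not already set (the test_and_set_bit() call), so however many hits a block takes within one cache period it gets at most a single one-level bump; dirty entries additionally pass both generations of the writeback sentinel for their level to q_requeue(). A small self-contained sketch of that hit-bit gate, using a plain byte array in place of the kernel bitset (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS 8u

static unsigned char hit_bits[NR_BLOCKS];    /* stand-in for mq->cache_hit_bits */
static unsigned entry_level[NR_BLOCKS];      /* entry level per cache block */

/* Rough analogue of the test_and_set_bit() gate in requeue(). */
static bool test_and_set_hit(unsigned block)
{
    bool was_set = hit_bits[block];

    hit_bits[block] = 1;
    return was_set;
}

static void requeue_model(unsigned block)
{
    if (!test_and_set_hit(block))
        entry_level[block]++;   /* q_requeue(..., 1u, ...): one level up, once per period */
}

int main(void)
{
    requeue_model(3);
    requeue_model(3);           /* second hit in the same period: no further bump */
    printf("block 3 level after two hits: %u\n", entry_level[3]);  /* prints 1 */
    return 0;
}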
1011 static unsigned default_promote_level(struct smq_policy *mq) in default_promote_level() argument
1031 unsigned hits = mq->cache_stats.hits; in default_promote_level()
1032 unsigned misses = mq->cache_stats.misses; in default_promote_level()
1037 static void update_promote_levels(struct smq_policy *mq) in update_promote_levels() argument
1043 unsigned threshold_level = allocator_empty(&mq->cache_alloc) ? in update_promote_levels()
1044 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u); in update_promote_levels()
1053 switch (stats_assess(&mq->hotspot_stats)) { in update_promote_levels()
1066 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; in update_promote_levels()
1067 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level); in update_promote_levels()
1074 static void update_level_jump(struct smq_policy *mq) in update_level_jump() argument
1076 switch (stats_assess(&mq->hotspot_stats)) { in update_level_jump()
1078 mq->hotspot_level_jump = 4u; in update_level_jump()
1082 mq->hotspot_level_jump = 2u; in update_level_jump()
1086 mq->hotspot_level_jump = 1u; in update_level_jump()
1091 static void end_hotspot_period(struct smq_policy *mq) in end_hotspot_period() argument
1093 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in end_hotspot_period()
1094 update_promote_levels(mq); in end_hotspot_period()
1096 if (time_after(jiffies, mq->next_hotspot_period)) { in end_hotspot_period()
1097 update_level_jump(mq); in end_hotspot_period()
1098 q_redistribute(&mq->hotspot); in end_hotspot_period()
1099 stats_reset(&mq->hotspot_stats); in end_hotspot_period()
1100 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; in end_hotspot_period()
1104 static void end_cache_period(struct smq_policy *mq) in end_cache_period() argument
1106 if (time_after(jiffies, mq->next_cache_period)) { in end_cache_period()
1107 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1109 q_redistribute(&mq->dirty); in end_cache_period()
1110 q_redistribute(&mq->clean); in end_cache_period()
1111 stats_reset(&mq->cache_stats); in end_cache_period()
1113 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; in end_cache_period()
1125 static unsigned percent_to_target(struct smq_policy *mq, unsigned p) in percent_to_target() argument
1127 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1130 static bool clean_target_met(struct smq_policy *mq, bool idle) in clean_target_met() argument
1140 return q_size(&mq->dirty) == 0u; in clean_target_met()
1149 static bool free_target_met(struct smq_policy *mq) in free_target_met() argument
1153 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1154 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= in free_target_met()
1155 percent_to_target(mq, FREE_TARGET); in free_target_met()
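free_target_met() counts blocks that already have a demotion queued as effectively free: nr_free plus btracker_nr_demotions_queued() is compared against percent_to_target(mq, FREE_TARGET), a fixed percentage of the cache size. The stand-alone arithmetic below reproduces that check; FREE_TARGET's actual value is not visible in this listing, so 25% is only an example:

#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET 25u   /* percent; example value, the real constant isn't shown above */

static unsigned percent_to_target(unsigned cache_size, unsigned p)
{
    return cache_size * p / 100u;       /* same formula as percent_to_target() above */
}

static bool free_target_met(unsigned cache_size, unsigned nr_allocated,
                            unsigned demotions_queued)
{
    unsigned nr_free = cache_size - nr_allocated;

    /* Queued demotions will become free blocks once they complete. */
    return nr_free + demotions_queued >= percent_to_target(cache_size, FREE_TARGET);
}

int main(void)
{
    /* 1000-block cache, 900 allocated, 120 demotions already queued:
     * 100 free + 120 pending = 220 < 250 target, so more demotions are needed. */
    printf("target met: %d\n", free_target_met(1000, 900, 120));
    return 0;
}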
1160 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1168 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1174 static void queue_writeback(struct smq_policy *mq, bool idle) in queue_writeback() argument
1180 e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); in queue_writeback()
1182 mark_pending(mq, e); in queue_writeback()
1183 q_del(&mq->dirty, e); in queue_writeback()
1187 work.cblock = infer_cblock(mq, e); in queue_writeback()
1189 r = btracker_queue(mq->bg_work, &work, NULL); in queue_writeback()
1191 clear_pending(mq, e); in queue_writeback()
1192 q_push_front(&mq->dirty, e); in queue_writeback()
1197 static void queue_demotion(struct smq_policy *mq) in queue_demotion() argument
1203 if (WARN_ON_ONCE(!mq->migrations_allowed)) in queue_demotion()
1206 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1208 if (!clean_target_met(mq, true)) in queue_demotion()
1209 queue_writeback(mq, false); in queue_demotion()
1213 mark_pending(mq, e); in queue_demotion()
1214 q_del(&mq->clean, e); in queue_demotion()
1218 work.cblock = infer_cblock(mq, e); in queue_demotion()
1219 r = btracker_queue(mq->bg_work, &work, NULL); in queue_demotion()
1221 clear_pending(mq, e); in queue_demotion()
1222 q_push_front(&mq->clean, e); in queue_demotion()
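queue_writeback() and queue_demotion() above share one shape: peek an entry from the dirty or clean queue, mark it pending, remove it from the queue, then try to hand the work to the background tracker; if btracker_queue() fails, the pending flag is cleared and the entry is pushed back on the front so nothing is lost. A compact sketch of that commit-or-roll-back pattern, where the entry, queue and tracker are trivial stubs rather than the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct fake_entry { bool pending; bool queued; };

/* Stub for btracker_queue(): pretend the tracker is full and refuse. */
static int btracker_queue_stub(struct fake_entry *e)
{
    (void)e;
    return -1;                /* non-zero: could not queue the work */
}

/* Shape of queue_writeback()/queue_demotion(): commit, or roll back cleanly. */
static void queue_background_work(struct fake_entry *e)
{
    e->pending = true;        /* mark_pending() */
    e->queued = false;        /* q_del(): entry leaves the clean/dirty queue */

    if (btracker_queue_stub(e)) {
        e->pending = false;   /* clear_pending() */
        e->queued = true;     /* q_push_front(): put it back where it came from */
    }
}

int main(void)
{
    struct fake_entry e = { 0 };

    queue_background_work(&e);
    printf("pending=%d back_on_queue=%d\n", e.pending, e.queued);  /* 0 1: rolled back */
    return 0;
}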
1226 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock, in queue_promotion() argument
1233 if (!mq->migrations_allowed) in queue_promotion()
1236 if (allocator_empty(&mq->cache_alloc)) { in queue_promotion()
1241 if (!free_target_met(mq)) in queue_promotion()
1242 queue_demotion(mq); in queue_promotion()
1246 if (btracker_promotion_already_present(mq->bg_work, oblock)) in queue_promotion()
1253 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1258 work.cblock = infer_cblock(mq, e); in queue_promotion()
1259 r = btracker_queue(mq->bg_work, &work, workp); in queue_promotion()
1261 free_entry(&mq->cache_alloc, e); in queue_promotion()
1280 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, in should_promote() argument
1284 if (!allocator_empty(&mq->cache_alloc) && fast_promote) in should_promote()
1287 return maybe_promote(hs_e->level >= mq->write_promote_level); in should_promote()
1289 return maybe_promote(hs_e->level >= mq->read_promote_level); in should_promote()
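should_promote() compares the hotspot entry's level against write_promote_level or read_promote_level depending on the I/O direction, and update_promote_levels() sets both to NR_HOTSPOT_LEVELS - threshold_level. A boiled-down version of that comparison follows; the real function returns an enum promote_result, the model just answers yes or no, and the value of NR_HOTSPOT_LEVELS is an assumption (64 matches the upstream define but is not shown above):

#include <stdbool.h>
#include <stdio.h>

#define NR_HOTSPOT_LEVELS 64u   /* assumption: not visible in this listing */

/*
 * Cut-down should_promote(): data_dir 0 = read, 1 = write, mirroring the
 * READ/WRITE split in the original.
 */
static bool should_promote_model(unsigned hs_level, int data_dir,
                                 unsigned read_promote_level,
                                 unsigned write_promote_level)
{
    unsigned threshold = data_dir ? write_promote_level : read_promote_level;

    return hs_level >= threshold;
}

int main(void)
{
    /* update_promote_levels(): promote level = NR_HOTSPOT_LEVELS - threshold_level. */
    unsigned threshold_level = NR_HOTSPOT_LEVELS / 2u;
    unsigned promote_level = NR_HOTSPOT_LEVELS - threshold_level;   /* 32 */

    printf("hotspot level 40, read: promote=%d\n",
           should_promote_model(40, 0, promote_level, promote_level));
    printf("hotspot level 10, read: promote=%d\n",
           should_promote_model(10, 0, promote_level, promote_level));
    return 0;
}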
1292 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b) in to_hblock() argument
1295 (void) sector_div(r, mq->cache_blocks_per_hotspot_block); in to_hblock()
1299 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b) in update_hotspot_queue() argument
1302 dm_oblock_t hb = to_hblock(mq, b); in update_hotspot_queue()
1303 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue()
1306 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1308 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1309 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1310 test_and_set_bit(hi, mq->hotspot_hit_bits) ? in update_hotspot_queue()
1311 0u : mq->hotspot_level_jump, in update_hotspot_queue()
1315 stats_miss(&mq->hotspot_stats); in update_hotspot_queue()
1317 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1319 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1321 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1322 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1323 clear_bit(hi, mq->hotspot_hit_bits); in update_hotspot_queue()
1330 q_push(&mq->hotspot, e); in update_hotspot_queue()
1331 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
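to_hblock() maps an origin block to its hotspot block: sector_div() divides in place and returns the remainder, so the result is the block number divided by cache_blocks_per_hotspot_block. update_hotspot_queue() then bumps that hotspot entry by hotspot_level_jump levels, but only on its first hit per period (the hotspot_hit_bits test). The sketch below covers just the block mapping, with a ratio picked purely for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough analogue of to_hblock(): sector_div(r, n) leaves the quotient in r,
 * so the hotspot block for origin block b is b / cache_blocks_per_hotspot_block.
 */
static uint64_t to_hblock_model(uint64_t b, uint64_t cache_blocks_per_hotspot_block)
{
    return b / cache_blocks_per_hotspot_block;
}

int main(void)
{
    /* Example ratio only: say each hotspot block covers 16 cache-sized blocks. */
    uint64_t ratio = 16;
    uint64_t b;

    for (b = 0; b < 40; b += 17)
        printf("origin block %llu -> hotspot block %llu\n",
               (unsigned long long)b,
               (unsigned long long)to_hblock_model(b, ratio));
    return 0;
}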
1352 struct smq_policy *mq = to_smq_policy(p); in smq_destroy() local
1354 btracker_destroy(mq->bg_work); in smq_destroy()
1355 h_exit(&mq->hotspot_table); in smq_destroy()
1356 h_exit(&mq->table); in smq_destroy()
1357 free_bitset(mq->hotspot_hit_bits); in smq_destroy()
1358 free_bitset(mq->cache_hit_bits); in smq_destroy()
1359 space_exit(&mq->es); in smq_destroy()
1360 kfree(mq); in smq_destroy()
1365 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock, in __lookup() argument
1374 e = h_lookup(&mq->table, oblock); in __lookup()
1376 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1378 requeue(mq, e); in __lookup()
1379 *cblock = infer_cblock(mq, e); in __lookup()
1383 stats_miss(&mq->cache_stats); in __lookup()
1388 hs_e = update_hotspot_queue(mq, oblock); in __lookup()
1390 pr = should_promote(mq, hs_e, data_dir, fast_copy); in __lookup()
1392 queue_promotion(mq, oblock, work); in __lookup()
1406 struct smq_policy *mq = to_smq_policy(p); in smq_lookup() local
1408 spin_lock_irqsave(&mq->lock, flags); in smq_lookup()
1409 r = __lookup(mq, oblock, cblock, in smq_lookup()
1412 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup()
1425 struct smq_policy *mq = to_smq_policy(p); in smq_lookup_with_work() local
1427 spin_lock_irqsave(&mq->lock, flags); in smq_lookup_with_work()
1428 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued); in smq_lookup_with_work()
1429 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup_with_work()
1439 struct smq_policy *mq = to_smq_policy(p); in smq_get_background_work() local
1441 spin_lock_irqsave(&mq->lock, flags); in smq_get_background_work()
1442 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1444 if (!clean_target_met(mq, idle)) { in smq_get_background_work()
1445 queue_writeback(mq, idle); in smq_get_background_work()
1446 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1449 spin_unlock_irqrestore(&mq->lock, flags); in smq_get_background_work()
1458 static void __complete_background_work(struct smq_policy *mq, in __complete_background_work() argument
1462 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work()
1468 clear_pending(mq, e); in __complete_background_work()
1472 push(mq, e); in __complete_background_work()
1475 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1483 h_remove(&mq->table, e); in __complete_background_work()
1484 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1487 clear_pending(mq, e); in __complete_background_work()
1488 push_queue(mq, e); in __complete_background_work()
1495 clear_pending(mq, e); in __complete_background_work()
1496 push_queue(mq, e); in __complete_background_work()
1501 btracker_complete(mq->bg_work, work); in __complete_background_work()
1509 struct smq_policy *mq = to_smq_policy(p); in smq_complete_background_work() local
1511 spin_lock_irqsave(&mq->lock, flags); in smq_complete_background_work()
1512 __complete_background_work(mq, work, success); in smq_complete_background_work()
1513 spin_unlock_irqrestore(&mq->lock, flags); in smq_complete_background_work()
1517 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set) in __smq_set_clear_dirty() argument
1519 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty()
1524 del_queue(mq, e); in __smq_set_clear_dirty()
1526 push_queue(mq, e); in __smq_set_clear_dirty()
1533 struct smq_policy *mq = to_smq_policy(p); in smq_set_dirty() local
1535 spin_lock_irqsave(&mq->lock, flags); in smq_set_dirty()
1536 __smq_set_clear_dirty(mq, cblock, true); in smq_set_dirty()
1537 spin_unlock_irqrestore(&mq->lock, flags); in smq_set_dirty()
1542 struct smq_policy *mq = to_smq_policy(p); in smq_clear_dirty() local
1545 spin_lock_irqsave(&mq->lock, flags); in smq_clear_dirty()
1546 __smq_set_clear_dirty(mq, cblock, false); in smq_clear_dirty()
1547 spin_unlock_irqrestore(&mq->lock, flags); in smq_clear_dirty()
1559 struct smq_policy *mq = to_smq_policy(p); in smq_load_mapping() local
1562 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1572 push_front(mq, e); in smq_load_mapping()
1579 struct smq_policy *mq = to_smq_policy(p); in smq_invalidate_mapping() local
1580 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping()
1586 del_queue(mq, e); in smq_invalidate_mapping()
1587 h_remove(&mq->table, e); in smq_invalidate_mapping()
1588 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1594 struct smq_policy *mq = to_smq_policy(p); in smq_get_hint() local
1595 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint()
1607 struct smq_policy *mq = to_smq_policy(p); in smq_residency() local
1609 spin_lock_irqsave(&mq->lock, flags); in smq_residency()
1610 r = to_cblock(mq->cache_alloc.nr_allocated); in smq_residency()
1611 spin_unlock_irqrestore(&mq->lock, flags); in smq_residency()
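smq_residency(), like the other smq_* entry points above, is a thin locked wrapper: convert the opaque policy pointer with to_smq_policy(), take mq->lock with spin_lock_irqsave(), do the work, release the lock, return. The user-space equivalent below uses a pthread mutex in place of the kernel spinlock and a cut-down struct, purely to show the wrapper shape:

#include <pthread.h>
#include <stdio.h>

/* Invented, much smaller stand-in for struct smq_policy. */
struct smq_like {
    pthread_mutex_t lock;     /* mq->lock is a spinlock in the kernel */
    unsigned nr_allocated;    /* mq->cache_alloc.nr_allocated */
};

/* Locked entry point in the style of smq_residency(): lock, read, unlock. */
static unsigned residency(struct smq_like *mq)
{
    unsigned r;

    pthread_mutex_lock(&mq->lock);
    r = mq->nr_allocated;
    pthread_mutex_unlock(&mq->lock);
    return r;
}

int main(void)
{
    struct smq_like mq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_allocated = 7 };

    printf("residency: %u\n", residency(&mq));
    return 0;
}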
1618 struct smq_policy *mq = to_smq_policy(p); in smq_tick() local
1621 spin_lock_irqsave(&mq->lock, flags); in smq_tick()
1622 mq->tick++; in smq_tick()
1623 update_sentinels(mq); in smq_tick()
1624 end_hotspot_period(mq); in smq_tick()
1625 end_cache_period(mq); in smq_tick()
1626 spin_unlock_irqrestore(&mq->lock, flags); in smq_tick()
1631 struct smq_policy *mq = to_smq_policy(p); in smq_allow_migrations() local
1632 mq->migrations_allowed = allow; in smq_allow_migrations()
1676 static void init_policy_functions(struct smq_policy *mq, bool mimic_mq) in init_policy_functions() argument
1678 mq->policy.destroy = smq_destroy; in init_policy_functions()
1679 mq->policy.lookup = smq_lookup; in init_policy_functions()
1680 mq->policy.lookup_with_work = smq_lookup_with_work; in init_policy_functions()
1681 mq->policy.get_background_work = smq_get_background_work; in init_policy_functions()
1682 mq->policy.complete_background_work = smq_complete_background_work; in init_policy_functions()
1683 mq->policy.set_dirty = smq_set_dirty; in init_policy_functions()
1684 mq->policy.clear_dirty = smq_clear_dirty; in init_policy_functions()
1685 mq->policy.load_mapping = smq_load_mapping; in init_policy_functions()
1686 mq->policy.invalidate_mapping = smq_invalidate_mapping; in init_policy_functions()
1687 mq->policy.get_hint = smq_get_hint; in init_policy_functions()
1688 mq->policy.residency = smq_residency; in init_policy_functions()
1689 mq->policy.tick = smq_tick; in init_policy_functions()
1690 mq->policy.allow_migrations = smq_allow_migrations; in init_policy_functions()
1693 mq->policy.set_config_value = mq_set_config_value; in init_policy_functions()
1694 mq->policy.emit_config_values = mq_emit_config_values; in init_policy_functions()
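init_policy_functions() fills in the function-pointer table on the embedded struct dm_cache_policy; dm-cache only ever calls the policy through these hooks, and in the upstream file the two config callbacks at the end are set only when mimic_mq is true (that if is not visible in this listing). A toy version of the same ops-table pattern, with invented names (struct policy_ops, my_lookup, my_tick) standing in for the real interface:

#include <stdio.h>

/* Invented, much smaller stand-in for struct dm_cache_policy's hook table. */
struct policy_ops {
    int  (*lookup)(unsigned long oblock);
    void (*tick)(void);
};

static int my_lookup(unsigned long oblock)
{
    (void)oblock;
    return -1;                /* pretend: not in the cache */
}

static void my_tick(void)
{
    printf("tick\n");
}

/* Analogue of init_policy_functions(): point every hook at this policy's code. */
static void init_policy_functions_model(struct policy_ops *ops)
{
    ops->lookup = my_lookup;
    ops->tick = my_tick;
}

int main(void)
{
    struct policy_ops ops;

    init_policy_functions_model(&ops);
    ops.tick();                               /* the caller uses only the table */
    printf("lookup -> %d\n", ops.lookup(42));
    return 0;
}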
1728 struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); in __smq_create() local
1730 if (!mq) in __smq_create()
1733 init_policy_functions(mq, mimic_mq); in __smq_create()
1734 mq->cache_size = cache_size; in __smq_create()
1735 mq->cache_block_size = cache_block_size; in __smq_create()
1738 &mq->hotspot_block_size, &mq->nr_hotspot_blocks); in __smq_create()
1740 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); in __smq_create()
1741 mq->hotspot_level_jump = 1u; in __smq_create()
1742 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { in __smq_create()
1747 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in __smq_create()
1749 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; in __smq_create()
1751 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); in __smq_create()
1753 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; in __smq_create()
1755 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, in __smq_create()
1756 total_sentinels + mq->nr_hotspot_blocks); in __smq_create()
1758 init_allocator(&mq->cache_alloc, &mq->es, in __smq_create()
1759 total_sentinels + mq->nr_hotspot_blocks, in __smq_create()
1760 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); in __smq_create()
1762 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); in __smq_create()
1763 if (!mq->hotspot_hit_bits) { in __smq_create()
1767 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in __smq_create()
1770 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); in __smq_create()
1771 if (!mq->cache_hit_bits) { in __smq_create()
1775 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in __smq_create()
1777 mq->cache_hit_bits = NULL; in __smq_create()
1779 mq->tick = 0; in __smq_create()
1780 spin_lock_init(&mq->lock); in __smq_create()
1782 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); in __smq_create()
1783 mq->hotspot.nr_top_levels = 8; in __smq_create()
1784 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, in __smq_create()
1785 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); in __smq_create()
1787 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1788 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1790 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); in __smq_create()
1791 stats_init(&mq->cache_stats, NR_CACHE_LEVELS); in __smq_create()
1793 if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) in __smq_create()
1796 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) in __smq_create()
1799 sentinels_init(mq); in __smq_create()
1800 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; in __smq_create()
1802 mq->next_hotspot_period = jiffies; in __smq_create()
1803 mq->next_cache_period = jiffies; in __smq_create()
1805 mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */ in __smq_create()
1806 if (!mq->bg_work) in __smq_create()
1809 mq->migrations_allowed = migrations_allowed; in __smq_create()
1811 return &mq->policy; in __smq_create()
1814 h_exit(&mq->hotspot_table); in __smq_create()
1816 h_exit(&mq->table); in __smq_create()
1818 free_bitset(mq->cache_hit_bits); in __smq_create()
1820 free_bitset(mq->hotspot_hit_bits); in __smq_create()
1822 space_exit(&mq->es); in __smq_create()
1824 kfree(mq); in __smq_create()
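The tail of __smq_create() is the usual error-unwind ladder: everything initialised before the failure point is torn down in reverse order of construction (hotspot_table, table, cache_hit_bits, hotspot_hit_bits, es) before the policy struct itself is freed. A user-space skeleton of that goto-based cleanup shape, with placeholder init steps and invented names:

#include <stdbool.h>
#include <stdlib.h>

struct thing { void *a, *b; };

/* Placeholder setup steps; imagine each one can fail. */
static bool init_a(struct thing *t) { t->a = malloc(16); return t->a != NULL; }
static bool init_b(struct thing *t) { t->b = malloc(16); return t->b != NULL; }

static struct thing *create_thing(void)
{
    struct thing *t = calloc(1, sizeof(*t));

    if (!t)
        return NULL;

    if (!init_a(t))
        goto bad_a;
    if (!init_b(t))
        goto bad_b;

    return t;

    /* Unwind in reverse construction order, like the labels after __smq_create(). */
bad_b:
    free(t->a);
bad_a:
    free(t);
    return NULL;
}

int main(void)
{
    struct thing *t = create_thing();

    if (t) {
        free(t->b);
        free(t->a);
        free(t);
    }
    return 0;
}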