Lines Matching refs:mq

854 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)  in writeback_sentinel()  argument
856 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
859 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level) in demote_sentinel() argument
861 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
864 static void __update_writeback_sentinels(struct smq_policy *mq) in __update_writeback_sentinels() argument
867 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
871 sentinel = writeback_sentinel(mq, level); in __update_writeback_sentinels()
877 static void __update_demote_sentinels(struct smq_policy *mq) in __update_demote_sentinels() argument
880 struct queue *q = &mq->clean; in __update_demote_sentinels()
884 sentinel = demote_sentinel(mq, level); in __update_demote_sentinels()
890 static void update_sentinels(struct smq_policy *mq) in update_sentinels() argument
892 if (time_after(jiffies, mq->next_writeback_period)) { in update_sentinels()
893 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in update_sentinels()
894 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in update_sentinels()
895 __update_writeback_sentinels(mq); in update_sentinels()
898 if (time_after(jiffies, mq->next_demote_period)) { in update_sentinels()
899 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in update_sentinels()
900 mq->current_demote_sentinels = !mq->current_demote_sentinels; in update_sentinels()
901 __update_demote_sentinels(mq); in update_sentinels()
905 static void __sentinels_init(struct smq_policy *mq) in __sentinels_init() argument
911 sentinel = writeback_sentinel(mq, level); in __sentinels_init()
913 q_push(&mq->dirty, sentinel); in __sentinels_init()
915 sentinel = demote_sentinel(mq, level); in __sentinels_init()
917 q_push(&mq->clean, sentinel); in __sentinels_init()
921 static void sentinels_init(struct smq_policy *mq) in sentinels_init() argument
923 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in sentinels_init()
924 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in sentinels_init()
926 mq->current_writeback_sentinels = false; in sentinels_init()
927 mq->current_demote_sentinels = false; in sentinels_init()
928 __sentinels_init(mq); in sentinels_init()
930 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in sentinels_init()
931 mq->current_demote_sentinels = !mq->current_demote_sentinels; in sentinels_init()
932 __sentinels_init(mq); in sentinels_init()
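
The references above cover the SMQ policy's sentinel machinery: each queue level keeps two sentinel entries, and sentinels_init()/update_sentinels() alternate between the two sets once per writeback/demote period, so a sentinel from the previous period marks where the "old" entries begin. A minimal user-space sketch of that rotation idea, with toy types and an illustrative PERIOD standing in for WRITEBACK_PERIOD/DEMOTE_PERIOD (not the kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_LEVELS 4
#define PERIOD 10u

struct toy_sentinels {
	bool current_set;          /* which of the two sentinel sets is live */
	unsigned long next_period; /* tick at which the next rotation is due */
};

/* index into a flat [2 * NR_LEVELS] sentinel array */
static unsigned sentinel_index(bool set, unsigned level)
{
	return (set ? NR_LEVELS : 0) + level;
}

static void rotate_if_due(struct toy_sentinels *s, unsigned long now)
{
	unsigned level;

	if (now < s->next_period)
		return;

	s->next_period = now + PERIOD;
	s->current_set = !s->current_set;

	/* push the newly current sentinels to the back of each level */
	for (level = 0; level < NR_LEVELS; level++)
		printf("push sentinel %u to back of level %u\n",
		       sentinel_index(s->current_set, level), level);
}

int main(void)
{
	struct toy_sentinels s = { .current_set = false, .next_period = PERIOD };
	unsigned long tick;

	for (tick = 0; tick < 25; tick++)
		rotate_if_due(&s, tick);
	return 0;
}
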
937 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
939 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
942 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
945 q_push(&mq->dirty, e); in push_queue()
947 q_push(&mq->clean, e); in push_queue()
951 static void push(struct smq_policy *mq, struct entry *e) in push() argument
953 h_insert(&mq->table, e); in push()
955 push_queue(mq, e); in push()
958 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
961 q_push_front(&mq->dirty, e); in push_queue_front()
963 q_push_front(&mq->clean, e); in push_queue_front()
966 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
968 h_insert(&mq->table, e); in push_front()
970 push_queue_front(mq, e); in push_front()
973 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
975 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
978 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
986 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
988 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
992 q_requeue(&mq->dirty, e, 1u, in requeue()
993 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
994 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
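
requeue() only grants a one-level promotion the first time a cache block is hit within a cache period, using test_and_set_bit() on cache_hit_bits; later hits in the same period are ignored until end_cache_period() clears the bitset. A minimal sketch of that gate, assuming a hand-rolled bitset in place of the kernel helpers:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_BLOCKS 64

static unsigned char hit_bits[NR_BLOCKS / 8];

static bool test_and_set(unsigned block)
{
	unsigned byte = block / 8, mask = 1u << (block % 8);
	bool was_set = hit_bits[byte] & mask;

	hit_bits[byte] |= mask;
	return was_set;
}

static void requeue_hit(unsigned block, unsigned *level)
{
	if (!test_and_set(block))
		(*level)++;   /* first hit this period: bump one level */
}

int main(void)
{
	unsigned level = 3;

	requeue_hit(7, &level);                 /* bumps to 4 */
	requeue_hit(7, &level);                 /* ignored, same period */
	printf("level after two hits: %u\n", level);

	memset(hit_bits, 0, sizeof(hit_bits));  /* end of period clears bits */
	requeue_hit(7, &level);                 /* bumps to 5 */
	printf("level after new period: %u\n", level);
	return 0;
}
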
998 static unsigned default_promote_level(struct smq_policy *mq) in default_promote_level() argument
1018 unsigned hits = mq->cache_stats.hits; in default_promote_level()
1019 unsigned misses = mq->cache_stats.misses; in default_promote_level()
1024 static void update_promote_levels(struct smq_policy *mq) in update_promote_levels() argument
1030 unsigned threshold_level = allocator_empty(&mq->cache_alloc) ? in update_promote_levels()
1031 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u); in update_promote_levels()
1040 switch (stats_assess(&mq->hotspot_stats)) { in update_promote_levels()
1053 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; in update_promote_levels()
1054 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level); in update_promote_levels()
1061 static void update_level_jump(struct smq_policy *mq) in update_level_jump() argument
1063 switch (stats_assess(&mq->hotspot_stats)) { in update_level_jump()
1065 mq->hotspot_level_jump = 4u; in update_level_jump()
1069 mq->hotspot_level_jump = 2u; in update_level_jump()
1073 mq->hotspot_level_jump = 1u; in update_level_jump()
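
update_promote_levels() chooses the hotspot level a block must reach before a miss is allowed to trigger a promotion; when the cache is full the threshold comes from default_promote_level()'s hit-ratio heuristic, otherwise it promotes from halfway up the hotspot queue. A sketch of the shape of that calculation, with a purely hypothetical ratio-to-threshold table:

#include <stdio.h>

#define NR_HOTSPOT_LEVELS 64u

/* pick a threshold from the hit ratio; the cut-offs here are illustrative */
static unsigned default_promote_level(unsigned hits, unsigned misses)
{
	unsigned total = hits + misses;
	unsigned ratio = total ? (hits * 100u) / total : 0u;

	if (ratio > 80u)
		return 3u;   /* cache working well: promote reluctantly */
	if (ratio > 50u)
		return 2u;
	return 1u;           /* cache struggling: promote eagerly */
}

static void update_promote_levels(unsigned hits, unsigned misses,
				  unsigned *read_level, unsigned *write_level)
{
	unsigned threshold = default_promote_level(hits, misses);

	*read_level = NR_HOTSPOT_LEVELS - threshold;
	*write_level = NR_HOTSPOT_LEVELS - threshold;
}

int main(void)
{
	unsigned r, w;

	update_promote_levels(900, 100, &r, &w);
	printf("hit ratio 90%%: promote at hotspot level >= %u\n", r);
	update_promote_levels(100, 900, &r, &w);
	printf("hit ratio 10%%: promote at hotspot level >= %u\n", w);
	return 0;
}
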
1078 static void end_hotspot_period(struct smq_policy *mq) in end_hotspot_period() argument
1080 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in end_hotspot_period()
1081 update_promote_levels(mq); in end_hotspot_period()
1083 if (time_after(jiffies, mq->next_hotspot_period)) { in end_hotspot_period()
1084 update_level_jump(mq); in end_hotspot_period()
1085 q_redistribute(&mq->hotspot); in end_hotspot_period()
1086 stats_reset(&mq->hotspot_stats); in end_hotspot_period()
1087 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; in end_hotspot_period()
1091 static void end_cache_period(struct smq_policy *mq) in end_cache_period() argument
1093 if (time_after(jiffies, mq->next_cache_period)) { in end_cache_period()
1094 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1096 q_redistribute(&mq->dirty); in end_cache_period()
1097 q_redistribute(&mq->clean); in end_cache_period()
1098 stats_reset(&mq->cache_stats); in end_cache_period()
1100 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; in end_cache_period()
1112 static unsigned percent_to_target(struct smq_policy *mq, unsigned p) in percent_to_target() argument
1114 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1117 static bool clean_target_met(struct smq_policy *mq, bool idle) in clean_target_met() argument
1127 return q_size(&mq->dirty) == 0u; in clean_target_met()
1136 static bool free_target_met(struct smq_policy *mq) in free_target_met() argument
1140 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1141 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= in free_target_met()
1142 percent_to_target(mq, FREE_TARGET); in free_target_met()
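
free_target_met() asks whether the free cache blocks, plus blocks that already have a demotion queued, cover the reserve computed by percent_to_target(). A minimal sketch of that arithmetic, with an illustrative FREE_TARGET value:

#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET 25u   /* percent; value illustrative only */

static unsigned percent_to_target(unsigned cache_size, unsigned p)
{
	return cache_size * p / 100u;
}

static bool free_target_met(unsigned cache_size, unsigned nr_allocated,
			    unsigned nr_demotions_queued)
{
	unsigned nr_free = cache_size - nr_allocated;

	return (nr_free + nr_demotions_queued) >=
	       percent_to_target(cache_size, FREE_TARGET);
}

int main(void)
{
	/* 1000-block cache, 900 in use, 40 demotions already queued */
	printf("free target met: %s\n",
	       free_target_met(1000, 900, 40) ? "yes" : "no");
	return 0;
}
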
1147 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1155 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1161 static void queue_writeback(struct smq_policy *mq) in queue_writeback() argument
1167 e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed); in queue_writeback()
1169 mark_pending(mq, e); in queue_writeback()
1170 q_del(&mq->dirty, e); in queue_writeback()
1174 work.cblock = infer_cblock(mq, e); in queue_writeback()
1176 r = btracker_queue(mq->bg_work, &work, NULL); in queue_writeback()
1181 static void queue_demotion(struct smq_policy *mq) in queue_demotion() argument
1186 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) in queue_demotion()
1189 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1191 if (!clean_target_met(mq, true)) in queue_demotion()
1192 queue_writeback(mq); in queue_demotion()
1196 mark_pending(mq, e); in queue_demotion()
1197 q_del(&mq->clean, e); in queue_demotion()
1201 work.cblock = infer_cblock(mq, e); in queue_demotion()
1202 btracker_queue(mq->bg_work, &work, NULL); in queue_demotion()
1205 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock, in queue_promotion() argument
1211 if (!mq->migrations_allowed) in queue_promotion()
1214 if (allocator_empty(&mq->cache_alloc)) { in queue_promotion()
1219 if (!free_target_met(mq)) in queue_promotion()
1220 queue_demotion(mq); in queue_promotion()
1224 if (btracker_promotion_already_present(mq->bg_work, oblock)) in queue_promotion()
1231 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1236 work.cblock = infer_cblock(mq, e); in queue_promotion()
1237 btracker_queue(mq->bg_work, &work, workp); in queue_promotion()
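
queue_promotion() runs a cascade of checks before handing work to the background tracker: migrations must be enabled, there must be a free cache entry (otherwise it queues a demotion to make space and bails), the tracker must not already hold a promotion for this origin block, and only then is an entry reserved and the work queued. A simplified sketch of that ordering, with hypothetical fields standing in for the allocator and tracker state:

#include <stdbool.h>
#include <stdio.h>

struct toy_policy {
	bool migrations_allowed;
	unsigned free_blocks;
	unsigned demotions_queued;
	unsigned free_target;
	bool promotion_already_queued;
};

static bool queue_promotion(struct toy_policy *p, unsigned long oblock)
{
	if (!p->migrations_allowed)
		return false;

	if (p->free_blocks == 0) {
		/* cache full: make space for next time rather than promote now */
		if (p->free_blocks + p->demotions_queued < p->free_target) {
			printf("queueing a demotion to free space\n");
			p->demotions_queued++;
		}
		return false;
	}

	if (p->promotion_already_queued)
		return false;      /* background tracker already has this block */

	p->free_blocks--;           /* reserve a cache entry for the copy */
	printf("queued promotion of origin block %lu\n", oblock);
	return true;
}

int main(void)
{
	struct toy_policy full = { true, 0, 0, 8, false };
	struct toy_policy roomy = { true, 4, 0, 8, false };

	queue_promotion(&full, 1234);   /* triggers a demotion instead */
	queue_promotion(&roomy, 1234);  /* promotion actually queued */
	return 0;
}
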
1256 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, in should_promote() argument
1260 if (!allocator_empty(&mq->cache_alloc) && fast_promote) in should_promote()
1263 return maybe_promote(hs_e->level >= mq->write_promote_level); in should_promote()
1265 return maybe_promote(hs_e->level >= mq->read_promote_level); in should_promote()
1268 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b) in to_hblock() argument
1271 (void) sector_div(r, mq->cache_blocks_per_hotspot_block); in to_hblock()
1275 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b) in update_hotspot_queue() argument
1278 dm_oblock_t hb = to_hblock(mq, b); in update_hotspot_queue()
1279 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue()
1282 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1284 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1285 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1286 test_and_set_bit(hi, mq->hotspot_hit_bits) ? in update_hotspot_queue()
1287 0u : mq->hotspot_level_jump, in update_hotspot_queue()
1291 stats_miss(&mq->hotspot_stats); in update_hotspot_queue()
1293 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1295 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1297 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1298 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1299 clear_bit(hi, mq->hotspot_hit_bits); in update_hotspot_queue()
1306 q_push(&mq->hotspot, e); in update_hotspot_queue()
1307 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
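
to_hblock() maps an origin block to its hotspot block by dividing by cache_blocks_per_hotspot_block, so neighbouring origin blocks accumulate their hits on a single hotspot entry; update_hotspot_queue() then requeues that entry upwards (gated by hotspot_hit_bits) or evicts the coldest hotspot entry on a miss. A tiny sketch of just the mapping, with an illustrative ratio:

#include <stdio.h>

int main(void)
{
	unsigned long long cache_blocks_per_hotspot_block = 16;
	unsigned long long oblock;

	for (oblock = 0; oblock < 64; oblock += 15)
		printf("origin block %llu -> hotspot block %llu\n",
		       oblock, oblock / cache_blocks_per_hotspot_block);
	return 0;
}
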
1328 struct smq_policy *mq = to_smq_policy(p); in smq_destroy() local
1330 btracker_destroy(mq->bg_work); in smq_destroy()
1331 h_exit(&mq->hotspot_table); in smq_destroy()
1332 h_exit(&mq->table); in smq_destroy()
1333 free_bitset(mq->hotspot_hit_bits); in smq_destroy()
1334 free_bitset(mq->cache_hit_bits); in smq_destroy()
1335 space_exit(&mq->es); in smq_destroy()
1336 kfree(mq); in smq_destroy()
1341 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock, in __lookup() argument
1350 e = h_lookup(&mq->table, oblock); in __lookup()
1352 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1354 requeue(mq, e); in __lookup()
1355 *cblock = infer_cblock(mq, e); in __lookup()
1359 stats_miss(&mq->cache_stats); in __lookup()
1364 hs_e = update_hotspot_queue(mq, oblock); in __lookup()
1366 pr = should_promote(mq, hs_e, data_dir, fast_copy); in __lookup()
1368 queue_promotion(mq, oblock, work); in __lookup()
1382 struct smq_policy *mq = to_smq_policy(p); in smq_lookup() local
1384 spin_lock_irqsave(&mq->lock, flags); in smq_lookup()
1385 r = __lookup(mq, oblock, cblock, in smq_lookup()
1388 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup()
1401 struct smq_policy *mq = to_smq_policy(p); in smq_lookup_with_work() local
1403 spin_lock_irqsave(&mq->lock, flags); in smq_lookup_with_work()
1404 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued); in smq_lookup_with_work()
1405 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup_with_work()
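
smq_lookup() and smq_lookup_with_work() follow the same locking convention as the other entry points: take mq->lock with spin_lock_irqsave(), call the double-underscore helper, then release. A user-space sketch of the convention, with a pthread mutex standing in for the irq-saving spinlock and a toy lookup:

#include <pthread.h>
#include <stdio.h>

struct toy_policy {
	pthread_mutex_t lock;
	unsigned hits;
};

/* caller must hold p->lock */
static int __lookup(struct toy_policy *p, unsigned long oblock)
{
	p->hits++;
	return (int)(oblock & 1);   /* pretend odd blocks are cached */
}

static int lookup(struct toy_policy *p, unsigned long oblock)
{
	int r;

	pthread_mutex_lock(&p->lock);
	r = __lookup(p, oblock);
	pthread_mutex_unlock(&p->lock);
	return r;
}

int main(void)
{
	struct toy_policy p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("block 3 cached: %d\n", lookup(&p, 3));
	return 0;
}
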
1415 struct smq_policy *mq = to_smq_policy(p); in smq_get_background_work() local
1417 spin_lock_irqsave(&mq->lock, flags); in smq_get_background_work()
1418 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1420 if (!clean_target_met(mq, idle)) { in smq_get_background_work()
1421 queue_writeback(mq); in smq_get_background_work()
1422 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1425 spin_unlock_irqrestore(&mq->lock, flags); in smq_get_background_work()
1434 static void __complete_background_work(struct smq_policy *mq, in __complete_background_work() argument
1438 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work()
1444 clear_pending(mq, e); in __complete_background_work()
1448 push(mq, e); in __complete_background_work()
1451 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1459 h_remove(&mq->table, e); in __complete_background_work()
1460 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1463 clear_pending(mq, e); in __complete_background_work()
1464 push_queue(mq, e); in __complete_background_work()
1471 clear_pending(mq, e); in __complete_background_work()
1472 push_queue(mq, e); in __complete_background_work()
1477 btracker_complete(mq->bg_work, work); in __complete_background_work()
1485 struct smq_policy *mq = to_smq_policy(p); in smq_complete_background_work() local
1487 spin_lock_irqsave(&mq->lock, flags); in smq_complete_background_work()
1488 __complete_background_work(mq, work, success); in smq_complete_background_work()
1489 spin_unlock_irqrestore(&mq->lock, flags); in smq_complete_background_work()
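
__complete_background_work() branches on the work type and on success: a finished promotion publishes the entry (push into the hash table and queue) or frees the reserved entry on failure, a demotion either drops the mapping or puts the entry back, a writeback requeues the now-clean entry, and the tracker is always told the work item is complete. A heavily simplified sketch of those paths, using a hypothetical enum rather than the real policy_work structure:

#include <stdbool.h>
#include <stdio.h>

enum work_op { OP_PROMOTE, OP_DEMOTE, OP_WRITEBACK };

static void complete_work(enum work_op op, bool success)
{
	switch (op) {
	case OP_PROMOTE:
		if (success)
			printf("promote ok: insert in hash table, push to queue\n");
		else
			printf("promote failed: free the reserved cache entry\n");
		break;
	case OP_DEMOTE:
		if (success)
			printf("demote ok: remove from hash table, free entry\n");
		else
			printf("demote failed: clear pending, push back on queue\n");
		break;
	case OP_WRITEBACK:
		printf("writeback done: clear pending, push back on its queue\n");
		break;
	}
	printf("tracker notified that this work item is complete\n");
}

int main(void)
{
	complete_work(OP_PROMOTE, true);
	complete_work(OP_DEMOTE, false);
	return 0;
}
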
1493 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set) in __smq_set_clear_dirty() argument
1495 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty()
1500 del_queue(mq, e); in __smq_set_clear_dirty()
1502 push_queue(mq, e); in __smq_set_clear_dirty()
1509 struct smq_policy *mq = to_smq_policy(p); in smq_set_dirty() local
1511 spin_lock_irqsave(&mq->lock, flags); in smq_set_dirty()
1512 __smq_set_clear_dirty(mq, cblock, true); in smq_set_dirty()
1513 spin_unlock_irqrestore(&mq->lock, flags); in smq_set_dirty()
1518 struct smq_policy *mq = to_smq_policy(p); in smq_clear_dirty() local
1521 spin_lock_irqsave(&mq->lock, flags); in smq_clear_dirty()
1522 __smq_set_clear_dirty(mq, cblock, false); in smq_clear_dirty()
1523 spin_unlock_irqrestore(&mq->lock, flags); in smq_clear_dirty()
1535 struct smq_policy *mq = to_smq_policy(p); in smq_load_mapping() local
1538 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1548 push_front(mq, e); in smq_load_mapping()
1555 struct smq_policy *mq = to_smq_policy(p); in smq_invalidate_mapping() local
1556 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping()
1562 del_queue(mq, e); in smq_invalidate_mapping()
1563 h_remove(&mq->table, e); in smq_invalidate_mapping()
1564 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1570 struct smq_policy *mq = to_smq_policy(p); in smq_get_hint() local
1571 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint()
1583 struct smq_policy *mq = to_smq_policy(p); in smq_residency() local
1585 spin_lock_irqsave(&mq->lock, flags); in smq_residency()
1586 r = to_cblock(mq->cache_alloc.nr_allocated); in smq_residency()
1587 spin_unlock_irqrestore(&mq->lock, flags); in smq_residency()
1594 struct smq_policy *mq = to_smq_policy(p); in smq_tick() local
1597 spin_lock_irqsave(&mq->lock, flags); in smq_tick()
1598 mq->tick++; in smq_tick()
1599 update_sentinels(mq); in smq_tick()
1600 end_hotspot_period(mq); in smq_tick()
1601 end_cache_period(mq); in smq_tick()
1602 spin_unlock_irqrestore(&mq->lock, flags); in smq_tick()
1607 struct smq_policy *mq = to_smq_policy(p); in smq_allow_migrations() local
1608 mq->migrations_allowed = allow; in smq_allow_migrations()
1652 static void init_policy_functions(struct smq_policy *mq, bool mimic_mq) in init_policy_functions() argument
1654 mq->policy.destroy = smq_destroy; in init_policy_functions()
1655 mq->policy.lookup = smq_lookup; in init_policy_functions()
1656 mq->policy.lookup_with_work = smq_lookup_with_work; in init_policy_functions()
1657 mq->policy.get_background_work = smq_get_background_work; in init_policy_functions()
1658 mq->policy.complete_background_work = smq_complete_background_work; in init_policy_functions()
1659 mq->policy.set_dirty = smq_set_dirty; in init_policy_functions()
1660 mq->policy.clear_dirty = smq_clear_dirty; in init_policy_functions()
1661 mq->policy.load_mapping = smq_load_mapping; in init_policy_functions()
1662 mq->policy.invalidate_mapping = smq_invalidate_mapping; in init_policy_functions()
1663 mq->policy.get_hint = smq_get_hint; in init_policy_functions()
1664 mq->policy.residency = smq_residency; in init_policy_functions()
1665 mq->policy.tick = smq_tick; in init_policy_functions()
1666 mq->policy.allow_migrations = smq_allow_migrations; in init_policy_functions()
1669 mq->policy.set_config_value = mq_set_config_value; in init_policy_functions()
1670 mq->policy.emit_config_values = mq_emit_config_values; in init_policy_functions()
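
init_policy_functions() fills in a table of function pointers embedded in struct smq_policy, so callers drive the policy purely through the generic interface and the smq_* implementations stay private. A minimal sketch of that wiring pattern, with toy types in place of the dm-cache policy structures:

#include <stdio.h>

struct toy_policy_ops {
	void (*destroy)(struct toy_policy_ops *p);
	void (*tick)(struct toy_policy_ops *p);
};

struct toy_smq {
	struct toy_policy_ops policy;   /* first member: cast back from ops */
	unsigned tick_count;
};

static void toy_tick(struct toy_policy_ops *p)
{
	struct toy_smq *mq = (struct toy_smq *)p;

	mq->tick_count++;
}

static void toy_destroy(struct toy_policy_ops *p)
{
	(void)p;
	printf("destroying policy\n");
}

static void init_ops(struct toy_smq *mq)
{
	mq->policy.destroy = toy_destroy;
	mq->policy.tick = toy_tick;
}

int main(void)
{
	struct toy_smq mq = { 0 };

	init_ops(&mq);
	mq.policy.tick(&mq.policy);
	printf("ticks: %u\n", mq.tick_count);
	mq.policy.destroy(&mq.policy);
	return 0;
}
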
1704 struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); in __smq_create() local
1706 if (!mq) in __smq_create()
1709 init_policy_functions(mq, mimic_mq); in __smq_create()
1710 mq->cache_size = cache_size; in __smq_create()
1711 mq->cache_block_size = cache_block_size; in __smq_create()
1714 &mq->hotspot_block_size, &mq->nr_hotspot_blocks); in __smq_create()
1716 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); in __smq_create()
1717 mq->hotspot_level_jump = 1u; in __smq_create()
1718 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { in __smq_create()
1723 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in __smq_create()
1725 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; in __smq_create()
1727 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); in __smq_create()
1729 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; in __smq_create()
1731 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, in __smq_create()
1732 total_sentinels + mq->nr_hotspot_blocks); in __smq_create()
1734 init_allocator(&mq->cache_alloc, &mq->es, in __smq_create()
1735 total_sentinels + mq->nr_hotspot_blocks, in __smq_create()
1736 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); in __smq_create()
1738 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); in __smq_create()
1739 if (!mq->hotspot_hit_bits) { in __smq_create()
1743 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in __smq_create()
1746 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); in __smq_create()
1747 if (!mq->cache_hit_bits) { in __smq_create()
1751 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in __smq_create()
1753 mq->cache_hit_bits = NULL; in __smq_create()
1755 mq->tick = 0; in __smq_create()
1756 spin_lock_init(&mq->lock); in __smq_create()
1758 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); in __smq_create()
1759 mq->hotspot.nr_top_levels = 8; in __smq_create()
1760 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, in __smq_create()
1761 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); in __smq_create()
1763 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1764 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1766 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); in __smq_create()
1767 stats_init(&mq->cache_stats, NR_CACHE_LEVELS); in __smq_create()
1769 if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) in __smq_create()
1772 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) in __smq_create()
1775 sentinels_init(mq); in __smq_create()
1776 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; in __smq_create()
1778 mq->next_hotspot_period = jiffies; in __smq_create()
1779 mq->next_cache_period = jiffies; in __smq_create()
1781 mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */ in __smq_create()
1782 if (!mq->bg_work) in __smq_create()
1785 mq->migrations_allowed = migrations_allowed; in __smq_create()
1787 return &mq->policy; in __smq_create()
1790 h_exit(&mq->hotspot_table); in __smq_create()
1792 h_exit(&mq->table); in __smq_create()
1794 free_bitset(mq->cache_hit_bits); in __smq_create()
1796 free_bitset(mq->hotspot_hit_bits); in __smq_create()
1798 space_exit(&mq->es); in __smq_create()
1800 kfree(mq); in __smq_create()
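
__smq_create() lays every entry out in one flat entry space and gives each allocator a contiguous index range: writeback sentinels, demote sentinels, hotspot entries, then cache entries. That layout is why infer_cblock() can derive a cblock directly from an entry's index. A sketch of the partitioning with illustrative sizes (the real counts come from NR_CACHE_LEVELS, the hotspot sizing, and from_cblock(cache_size)):

#include <stdio.h>

struct range { unsigned begin, end; };   /* [begin, end) */

int main(void)
{
	unsigned nr_levels = 64, nr_queues = 2, sets_per_queue = 2;
	unsigned total_sentinels = nr_queues * sets_per_queue * nr_levels;
	unsigned nr_hotspot_blocks = 1024, cache_size = 8192;

	struct range writeback = { 0, total_sentinels / 2 };
	struct range demote    = { total_sentinels / 2, total_sentinels };
	struct range hotspot   = { total_sentinels,
				   total_sentinels + nr_hotspot_blocks };
	struct range cache     = { hotspot.end, hotspot.end + cache_size };

	printf("writeback sentinels: [%u, %u)\n", writeback.begin, writeback.end);
	printf("demote sentinels:    [%u, %u)\n", demote.begin, demote.end);
	printf("hotspot entries:     [%u, %u)\n", hotspot.begin, hotspot.end);
	printf("cache entries:       [%u, %u)\n", cache.begin, cache.end);
	printf("cblock of entry %u = %u\n", cache.begin + 7,
	       (cache.begin + 7) - cache.begin);
	return 0;
}
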