Lines matching references to the identifier 'e' (cross-reference listing for the dm-cache "smq" policy, drivers/md/dm-cache-policy-smq.c; each row gives the source line number, the matching source line, and the enclosing function).
87 struct entry *e; in __get_entry() local
89 e = es->begin + block; in __get_entry()
90 BUG_ON(e >= es->end); in __get_entry()
92 return e; in __get_entry()
95 static unsigned to_index(struct entry_space *es, struct entry *e) in to_index() argument
97 BUG_ON(e < es->begin || e >= es->end); in to_index()
98 return e - es->begin; in to_index()
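
The two helpers above are the foundation of the whole file: every entry lives in one flat array (the entry_space), so list links and hash chains can store small array indices instead of full pointers. A minimal standalone sketch of the conversion pair, with the kernel's BUG_ON() replaced by assert() (the demo main() is invented for illustration):

#include <assert.h>
#include <stdio.h>

struct entry {
    unsigned prev, next;   /* link fields, used by the list sketches below */
};

struct entry_space {
    struct entry *begin;   /* first entry of the flat array */
    struct entry *end;     /* one past the last entry */
};

/* Index -> pointer; walking past the end is a bug (BUG_ON in the kernel). */
static struct entry *__get_entry(struct entry_space *es, unsigned block)
{
    struct entry *e = es->begin + block;

    assert(e < es->end);
    return e;
}

/* Pointer -> index; valid only for entries inside this entry_space. */
static unsigned to_index(struct entry_space *es, struct entry *e)
{
    assert(e >= es->begin && e < es->end);
    return (unsigned)(e - es->begin);
}

int main(void)
{
    struct entry pool[8];
    struct entry_space es = { pool, pool + 8 };

    printf("%u\n", to_index(&es, __get_entry(&es, 3)));   /* prints 3 */
    return 0;
}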
132 static struct entry *l_next(struct entry_space *es, struct entry *e) in l_next() argument
134 return to_entry(es, e->next); in l_next()
137 static struct entry *l_prev(struct entry_space *es, struct entry *e) in l_prev() argument
139 return to_entry(es, e->prev); in l_prev()
147 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_head() argument
151 e->next = l->head; in l_add_head()
152 e->prev = INDEXER_NULL; in l_add_head()
155 head->prev = l->head = to_index(es, e); in l_add_head()
157 l->head = l->tail = to_index(es, e); in l_add_head()
159 if (!e->sentinel) in l_add_head()
163 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_tail() argument
167 e->next = INDEXER_NULL; in l_add_tail()
168 e->prev = l->tail; in l_add_tail()
171 tail->next = l->tail = to_index(es, e); in l_add_tail()
173 l->head = l->tail = to_index(es, e); in l_add_tail()
175 if (!e->sentinel) in l_add_tail()
180 struct entry *old, struct entry *e) in l_add_before() argument
185 l_add_head(es, l, e); in l_add_before()
188 e->prev = old->prev; in l_add_before()
189 e->next = to_index(es, old); in l_add_before()
190 prev->next = old->prev = to_index(es, e); in l_add_before()
192 if (!e->sentinel) in l_add_before()
197 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) in l_del() argument
199 struct entry *prev = l_prev(es, e); in l_del()
200 struct entry *next = l_next(es, e); in l_del()
203 prev->next = e->next; in l_del()
205 l->head = e->next; in l_del()
208 next->prev = e->prev; in l_del()
210 l->tail = e->prev; in l_del()
212 if (!e->sentinel) in l_del()
218 struct entry *e; in l_pop_tail() local
220 for (e = l_tail(es, l); e; e = l_prev(es, e)) in l_pop_tail()
221 if (!e->sentinel) { in l_pop_tail()
222 l_del(es, l, e); in l_pop_tail()
223 return e; in l_pop_tail()
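
The l_* lines above belong to an intrusive doubly linked list whose links are entry_space indices, with INDEXER_NULL playing the role of NULL; sentinel entries may sit on a list but are never handed back to callers. A condensed, compilable sketch of the tail-side operations, assuming a 16-bit index encoding and omitting the per-list element count the kernel also maintains:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define INDEXER_NULL ((1u << 16) - 1u)   /* assumed nil encoding for an index */

struct entry {
    unsigned prev, next;   /* indices, not pointers */
    bool sentinel;         /* sentinels mark positions, never real data */
};

struct entry_space { struct entry *begin, *end; };
struct ilist { unsigned head, tail; };

static unsigned to_index(struct entry_space *es, struct entry *e)
{
    return (unsigned)(e - es->begin);
}

/* INDEXER_NULL maps to NULL so callers can test returned pointers directly. */
static struct entry *to_entry(struct entry_space *es, unsigned block)
{
    return block == INDEXER_NULL ? NULL : es->begin + block;
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
    return to_entry(es, l->tail);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
    return to_entry(es, e->prev);
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
    struct entry *tail = l_tail(es, l);

    e->next = INDEXER_NULL;
    e->prev = l->tail;

    if (tail)
        tail->next = l->tail = to_index(es, e);
    else
        l->head = l->tail = to_index(es, e);
}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
    struct entry *prev = l_prev(es, e);
    struct entry *next = to_entry(es, e->next);

    if (prev)
        prev->next = e->next;
    else
        l->head = e->next;

    if (next)
        next->prev = e->prev;
    else
        l->tail = e->prev;
}

/* Pop the last real (non-sentinel) entry, or NULL if only sentinels remain. */
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
    struct entry *e;

    for (e = l_tail(es, l); e; e = l_prev(es, e))
        if (!e->sentinel) {
            l_del(es, l, e);
            return e;
        }

    return NULL;
}

int main(void)
{
    struct entry pool[2] = { 0 };
    struct entry_space es = { pool, pool + 2 };
    struct ilist l = { INDEXER_NULL, INDEXER_NULL };

    l_add_tail(&es, &l, &pool[0]);
    l_add_tail(&es, &l, &pool[1]);
    assert(l_pop_tail(&es, &l) == &pool[1]);
    assert(l_pop_tail(&es, &l) == &pool[0]);
    assert(l_pop_tail(&es, &l) == NULL);
    return 0;
}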
282 static void q_push(struct queue *q, struct entry *e) in q_push() argument
284 BUG_ON(e->pending_work); in q_push()
286 if (!e->sentinel) in q_push()
289 l_add_tail(q->es, q->qs + e->level, e); in q_push()
292 static void q_push_front(struct queue *q, struct entry *e) in q_push_front() argument
294 BUG_ON(e->pending_work); in q_push_front()
296 if (!e->sentinel) in q_push_front()
299 l_add_head(q->es, q->qs + e->level, e); in q_push_front()
302 static void q_push_before(struct queue *q, struct entry *old, struct entry *e) in q_push_before() argument
304 BUG_ON(e->pending_work); in q_push_before()
306 if (!e->sentinel) in q_push_before()
309 l_add_before(q->es, q->qs + e->level, old, e); in q_push_before()
312 static void q_del(struct queue *q, struct entry *e) in q_del() argument
314 l_del(q->es, q->qs + e->level, e); in q_del()
315 if (!e->sentinel) in q_del()
325 struct entry *e; in q_peek() local
330 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { in q_peek()
331 if (e->sentinel) { in q_peek()
338 return e; in q_peek()
346 struct entry *e = q_peek(q, q->nr_levels, true); in q_pop() local
348 if (e) in q_pop()
349 q_del(q, e); in q_pop()
351 return e; in q_pop()
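
q_push(), q_del(), q_peek() and q_pop() above operate on a multilevel queue: one indexed list per level, colder entries in lower levels, and sentinels marking age boundaries within a level. A sketch under those assumptions (MAX_LEVELS is assumed; the real functions also maintain element counts and assert that entries with pending work never enter a queue):

#include <stdbool.h>
#include <stddef.h>

#define MAX_LEVELS 64u   /* assumed per-queue level count */

struct entry { unsigned prev, next; unsigned level; bool sentinel; };
struct entry_space { struct entry *begin, *end; };
struct ilist { unsigned head, tail; };

/* ilist helpers exactly as in the previous sketch (prototypes only). */
struct entry *l_head(struct entry_space *es, struct ilist *l);
struct entry *l_next(struct entry_space *es, struct entry *e);
void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e);
void l_del(struct entry_space *es, struct ilist *l, struct entry *e);

struct queue {
    struct entry_space *es;
    unsigned nr_levels;
    struct ilist qs[MAX_LEVELS];   /* level 0 = coldest, top = hottest */
};

/* Append at the tail of the entry's current level. */
static void q_push(struct queue *q, struct entry *e)
{
    l_add_tail(q->es, q->qs + e->level, e);
}

static void q_del(struct queue *q, struct entry *e)
{
    l_del(q->es, q->qs + e->level, e);
}

/*
 * Find the coldest real entry below max_level.  Sentinels divide each
 * level by age; when can_cross_sentinel is false the scan refuses to
 * look past one, so only entries older than the sentinel qualify.
 */
static struct entry *q_peek(struct queue *q, unsigned max_level,
                            bool can_cross_sentinel)
{
    unsigned level;
    struct entry *e;

    for (level = 0; level < max_level; level++) {
        for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
            if (e->sentinel) {
                if (can_cross_sentinel)
                    continue;
                break;   /* give up on this level, try the next */
            }
            return e;
        }
    }

    return NULL;
}

/* Pop = peek across every level, then unlink. */
static struct entry *q_pop(struct queue *q)
{
    struct entry *e = q_peek(q, q->nr_levels, true);

    if (e)
        q_del(q, e);

    return e;
}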
361 struct entry *e; in __redist_pop_from() local
364 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) in __redist_pop_from()
365 if (!e->sentinel) { in __redist_pop_from()
366 l_del(q->es, q->qs + e->level, e); in __redist_pop_from()
367 return e; in __redist_pop_from()
418 struct entry *e; in q_redistribute() local
430 e = __redist_pop_from(q, level + 1u); in q_redistribute()
431 if (!e) { in q_redistribute()
436 e->level = level; in q_redistribute()
437 l_add_tail(q->es, l, e); in q_redistribute()
445 e = l_pop_tail(q->es, l); in q_redistribute()
447 if (!e) in q_redistribute()
451 e->level = level + 1u; in q_redistribute()
452 l_add_tail(q->es, l_above, e); in q_redistribute()
457 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels, in q_requeue() argument
462 unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels); in q_requeue()
465 if (extra_levels && (e->level < q->nr_levels - 1u)) { in q_requeue()
471 de->level = e->level; in q_requeue()
490 q_del(q, e); in q_requeue()
491 e->level = new_level; in q_requeue()
492 q_push(q, e); in q_requeue()
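
q_requeue() promotes an entry by extra_levels and, to keep the per-level populations stable, demotes one entry from the destination level in exchange; the two sentinel arguments it also takes (passed in from requeue() further down the listing) control where the demoted entry lands relative to the writeback sentinels. A simplified sketch that drops that sentinel placement, with types and helpers as in the queue sketch above given as prototypes:

#include <stdbool.h>
#include <stddef.h>

struct entry { unsigned prev, next; unsigned level; bool sentinel; };
struct entry_space { struct entry *begin, *end; };
struct ilist { unsigned head, tail; };
struct queue { struct entry_space *es; unsigned nr_levels; struct ilist qs[64]; };

struct entry *l_head(struct entry_space *es, struct ilist *l);
struct entry *l_next(struct entry_space *es, struct entry *e);
void q_del(struct queue *q, struct entry *e);
void q_push(struct queue *q, struct entry *e);

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/*
 * Promote e by extra_levels, clamped to the top level.  To keep the
 * levels balanced, one non-sentinel entry already sitting at the
 * destination level is demoted to e's old level in exchange.
 */
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels)
{
    struct entry *de;
    unsigned new_level = min_u(q->nr_levels - 1u, e->level + extra_levels);

    if (extra_levels && e->level < q->nr_levels - 1u) {
        for (de = l_head(q->es, q->qs + new_level);
             de && de->sentinel;
             de = l_next(q->es, de))
            ;   /* skip sentinels at the destination level */

        if (de) {
            q_del(q, de);
            de->level = e->level;   /* demote the displaced entry */
            q_push(q, de);
        }
    }

    q_del(q, e);
    e->level = new_level;
    q_push(q, e);
}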
598 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e) in h_next() argument
600 return to_entry(ht->es, e->hash_next); in h_next()
603 static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e) in __h_insert() argument
605 e->hash_next = ht->buckets[bucket]; in __h_insert()
606 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
609 static void h_insert(struct smq_hash_table *ht, struct entry *e) in h_insert() argument
611 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_insert()
612 __h_insert(ht, h, e); in h_insert()
618 struct entry *e; in __h_lookup() local
621 for (e = h_head(ht, h); e; e = h_next(ht, e)) { in __h_lookup()
622 if (e->oblock == oblock) in __h_lookup()
623 return e; in __h_lookup()
625 *prev = e; in __h_lookup()
632 struct entry *e, struct entry *prev) in __h_unlink() argument
635 prev->hash_next = e->hash_next; in __h_unlink()
637 ht->buckets[h] = e->hash_next; in __h_unlink()
645 struct entry *e, *prev; in h_lookup() local
648 e = __h_lookup(ht, h, oblock, &prev); in h_lookup()
649 if (e && prev) { in h_lookup()
654 __h_unlink(ht, h, e, prev); in h_lookup()
655 __h_insert(ht, h, e); in h_lookup()
658 return e; in h_lookup()
661 static void h_remove(struct smq_hash_table *ht, struct entry *e) in h_remove() argument
663 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_remove()
670 e = __h_lookup(ht, h, e->oblock, &prev); in h_remove()
671 if (e) in h_remove()
672 __h_unlink(ht, h, e, prev); in h_remove()
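
The h_* lines above implement a separate-chaining hash table whose buckets and chain links are again entry indices; because chains are singly linked, lookups track the predecessor, which both h_lookup()'s move-to-front step and h_remove() reuse for unlinking. A self-contained sketch (hash_64() here is a multiplicative stand-in for the kernel's, and the oblock type is simplified to uint64_t):

#include <stddef.h>
#include <stdint.h>

#define INDEXER_NULL ((1u << 16) - 1u)

struct entry {
    unsigned hash_next;   /* index of the next entry in the bucket chain */
    uint64_t oblock;      /* origin block this entry maps */
};

struct entry_space { struct entry *begin, *end; };

struct smq_hash_table {
    struct entry_space *es;
    unsigned hash_bits;
    unsigned *buckets;    /* each bucket holds an entry index */
};

static struct entry *to_entry(struct entry_space *es, unsigned block)
{
    return block == INDEXER_NULL ? NULL : es->begin + block;
}

static unsigned to_index(struct entry_space *es, struct entry *e)
{
    return (unsigned)(e - es->begin);
}

/* Multiplicative stand-in for the kernel's hash_64(); assumes bits >= 1. */
static unsigned hash_64(uint64_t val, unsigned bits)
{
    return (unsigned)((val * 0x61C8864680B583EBull) >> (64 - bits));
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
{
    return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
    return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
{
    e->hash_next = ht->buckets[bucket];
    ht->buckets[bucket] = to_index(ht->es, e);
}

/* Walk one chain; remember the predecessor so it can be unlinked later. */
static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h,
                                uint64_t oblock, struct entry **prev)
{
    struct entry *e;

    *prev = NULL;
    for (e = h_head(ht, h); e; e = h_next(ht, e)) {
        if (e->oblock == oblock)
            return e;
        *prev = e;
    }

    return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned h,
                       struct entry *e, struct entry *prev)
{
    if (prev)
        prev->hash_next = e->hash_next;
    else
        ht->buckets[h] = e->hash_next;
}

/* Lookup with move-to-front: a hit migrates to the head of its chain. */
static struct entry *h_lookup(struct smq_hash_table *ht, uint64_t oblock)
{
    unsigned h = hash_64(oblock, ht->hash_bits);
    struct entry *e, *prev;

    e = __h_lookup(ht, h, oblock, &prev);
    if (e && prev) {
        __h_unlink(ht, h, e, prev);
        __h_insert(ht, h, e);
    }

    return e;
}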
699 static void init_entry(struct entry *e) in init_entry() argument
705 e->hash_next = INDEXER_NULL; in init_entry()
706 e->next = INDEXER_NULL; in init_entry()
707 e->prev = INDEXER_NULL; in init_entry()
708 e->level = 0u; in init_entry()
709 e->dirty = true; /* FIXME: audit */ in init_entry()
710 e->allocated = true; in init_entry()
711 e->sentinel = false; in init_entry()
712 e->pending_work = false; in init_entry()
717 struct entry *e; in alloc_entry() local
722 e = l_pop_tail(ea->es, &ea->free); in alloc_entry()
723 init_entry(e); in alloc_entry()
726 return e; in alloc_entry()
734 struct entry *e = __get_entry(ea->es, ea->begin + i); in alloc_particular_entry() local
736 BUG_ON(e->allocated); in alloc_particular_entry()
738 l_del(ea->es, &ea->free, e); in alloc_particular_entry()
739 init_entry(e); in alloc_particular_entry()
742 return e; in alloc_particular_entry()
745 static void free_entry(struct entry_alloc *ea, struct entry *e) in free_entry() argument
748 BUG_ON(!e->allocated); in free_entry()
751 e->allocated = false; in free_entry()
752 l_add_tail(ea->es, &ea->free, e); in free_entry()
760 static unsigned get_index(struct entry_alloc *ea, struct entry *e) in get_index() argument
762 return to_index(ea->es, e) - ea->begin; in get_index()
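
The allocator lines above thread free entries onto an ilist, making both alloc and free O(1); because each allocator owns a contiguous slice of the entry_space, get_index() recovers an entry's slot number by pointer arithmetic, which is exactly what infer_cblock() later relies on. A sketch reusing the list helpers (prototypes only):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define INDEXER_NULL ((1u << 16) - 1u)

struct entry {
    unsigned prev, next;
    bool allocated;
};
struct entry_space { struct entry *begin, *end; };
struct ilist { unsigned head, tail; };

/* ilist helpers as sketched earlier. */
struct entry *l_pop_tail(struct entry_space *es, struct ilist *l);
void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e);
void l_del(struct entry_space *es, struct ilist *l, struct entry *e);

struct entry_alloc {
    struct entry_space *es;
    unsigned begin;          /* first slot of this allocator's slice */
    unsigned nr_allocated;
    struct ilist free;       /* free entries threaded on their own list */
};

static bool l_empty(struct ilist *l)
{
    return l->head == INDEXER_NULL;
}

static void init_entry(struct entry *e)
{
    e->prev = e->next = INDEXER_NULL;
    e->allocated = true;
}

/* O(1) allocation: pop a free entry and reinitialise it. */
static struct entry *alloc_entry(struct entry_alloc *ea)
{
    struct entry *e;

    if (l_empty(&ea->free))
        return NULL;

    e = l_pop_tail(ea->es, &ea->free);
    init_entry(e);
    ea->nr_allocated++;

    return e;
}

/* Allocate a specific slot, e.g. when reloading a mapping by cblock. */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
{
    struct entry *e = ea->es->begin + (ea->begin + i);

    assert(!e->allocated);
    l_del(ea->es, &ea->free, e);
    init_entry(e);
    ea->nr_allocated++;

    return e;
}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{
    assert(ea->nr_allocated > 0);
    assert(e->allocated);

    ea->nr_allocated--;
    e->allocated = false;
    l_add_tail(ea->es, &ea->free, e);
}

/* Slot number within the slice; the policy reads this back as a cblock. */
static unsigned get_index(struct entry_alloc *ea, struct entry *e)
{
    return (unsigned)(e - ea->es->begin) - ea->begin;
}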
937 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
939 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
942 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
944 if (e->dirty) in push_queue()
945 q_push(&mq->dirty, e); in push_queue()
947 q_push(&mq->clean, e); in push_queue()
951 static void push(struct smq_policy *mq, struct entry *e) in push() argument
953 h_insert(&mq->table, e); in push()
954 if (!e->pending_work) in push()
955 push_queue(mq, e); in push()
958 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
960 if (e->dirty) in push_queue_front()
961 q_push_front(&mq->dirty, e); in push_queue_front()
963 q_push_front(&mq->clean, e); in push_queue_front()
966 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
968 h_insert(&mq->table, e); in push_front()
969 if (!e->pending_work) in push_front()
970 push_queue_front(mq, e); in push_front()
973 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
975 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
978 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
983 if (e->pending_work) in requeue()
986 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
987 if (!e->dirty) { in requeue()
988 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
992 q_requeue(&mq->dirty, e, 1u, in requeue()
993 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
994 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
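
requeue() above damps promotion with a hit bitmap: only the first hit on a cache block since the bits were last cleared actually moves the entry up a level; dirty entries additionally pass the current and next writeback sentinels to q_requeue() (the get_sentinel() calls above) so the demoted entry keeps its position relative to them. A simplified sketch without the sentinels, modelling test_and_set_bit() non-atomically:

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct entry { unsigned level; bool dirty, pending_work; };
struct queue;                       /* as sketched earlier */
struct smq_policy {
    struct queue *clean, *dirty;    /* embedded structs in the real code */
    unsigned long *cache_hit_bits;  /* one bit per cache block */
};

unsigned infer_cblock(struct smq_policy *mq, struct entry *e);  /* allocator index */
void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels);

/* Non-atomic stand-in for the kernel's test_and_set_bit(). */
static bool test_and_set_bit(unsigned nr, unsigned long *bits)
{
    unsigned long mask = 1ul << (nr % BITS_PER_LONG);
    unsigned long *word = bits + nr / BITS_PER_LONG;
    bool was_set = (*word & mask) != 0;

    *word |= mask;
    return was_set;
}

/*
 * Damped promotion on a cache hit: however many times a block is hit,
 * it climbs at most one level until the hit bits are next cleared.
 * Entries with pending background work are left untouched.
 */
static void requeue(struct smq_policy *mq, struct entry *e)
{
    if (e->pending_work)
        return;

    if (!test_and_set_bit(infer_cblock(mq, e), mq->cache_hit_bits))
        q_requeue(e->dirty ? mq->dirty : mq->clean, e, 1u);
}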
1147 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1149 BUG_ON(e->sentinel); in mark_pending()
1150 BUG_ON(!e->allocated); in mark_pending()
1151 BUG_ON(e->pending_work); in mark_pending()
1152 e->pending_work = true; in mark_pending()
1155 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1157 BUG_ON(!e->pending_work); in clear_pending()
1158 e->pending_work = false; in clear_pending()
1165 struct entry *e; in queue_writeback() local
1167 e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed); in queue_writeback()
1168 if (e) { in queue_writeback()
1169 mark_pending(mq, e); in queue_writeback()
1170 q_del(&mq->dirty, e); in queue_writeback()
1173 work.oblock = e->oblock; in queue_writeback()
1174 work.cblock = infer_cblock(mq, e); in queue_writeback()
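
queue_writeback() above turns the coldest dirty entry into a background work item: mark_pending() takes the entry out of circulation before it is handed over, and !mq->migrations_allowed decides whether the scan may cross the writeback sentinels. A sketch with simplified types; submit_background_work() is a hypothetical stand-in for queueing the item onto the kernel's background tracker:

#include <stdbool.h>
#include <stdint.h>

enum policy_work_op { POLICY_PROMOTE, POLICY_DEMOTE, POLICY_WRITEBACK };

struct policy_work {
    enum policy_work_op op;
    uint64_t oblock;        /* dm_oblock_t in the kernel */
    unsigned cblock;        /* dm_cblock_t in the kernel */
};

struct entry { unsigned level; uint64_t oblock; bool dirty, sentinel, pending_work; };
struct queue { unsigned nr_levels; };   /* details as sketched earlier */
struct smq_policy { struct queue dirty; bool migrations_allowed; };

struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel);
void q_del(struct queue *q, struct entry *e);
unsigned infer_cblock(struct smq_policy *mq, struct entry *e);
/* Hypothetical stand-in for handing work to the background tracker. */
int submit_background_work(struct smq_policy *mq, struct policy_work *work);

/*
 * Pick the coldest dirty entry and turn it into a WRITEBACK work item.
 * Flagging it pending first means the queues and requeue() will ignore
 * it until the background work completes.
 */
static void queue_writeback(struct smq_policy *mq)
{
    struct policy_work work;
    struct entry *e;

    e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
    if (!e)
        return;

    e->pending_work = true;   /* mark_pending() in the listing */
    q_del(&mq->dirty, e);

    work.op = POLICY_WRITEBACK;
    work.oblock = e->oblock;
    work.cblock = infer_cblock(mq, e);
    submit_background_work(mq, &work);
}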
1184 struct entry *e; in queue_demotion() local
1189 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1190 if (!e) { in queue_demotion()
1196 mark_pending(mq, e); in queue_demotion()
1197 q_del(&mq->clean, e); in queue_demotion()
1200 work.oblock = e->oblock; in queue_demotion()
1201 work.cblock = infer_cblock(mq, e); in queue_demotion()
1208 struct entry *e; in queue_promotion() local
1231 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1232 BUG_ON(!e); in queue_promotion()
1233 e->pending_work = true; in queue_promotion()
1236 work.cblock = infer_cblock(mq, e); in queue_promotion()
1279 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue() local
1281 if (e) { in update_hotspot_queue()
1282 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1284 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1285 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1293 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1294 if (!e) { in update_hotspot_queue()
1295 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1296 if (e) { in update_hotspot_queue()
1297 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1298 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1304 if (e) { in update_hotspot_queue()
1305 e->oblock = hb; in update_hotspot_queue()
1306 q_push(&mq->hotspot, e); in update_hotspot_queue()
1307 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
1311 return e; in update_hotspot_queue()
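
update_hotspot_queue() above maintains a fixed-size set of "hotspot" trackers: a hit promotes the tracker one level, while a miss allocates a new tracker, recycling the coldest one when the allocator is exhausted. A sketch under simplified signatures (stats and hotspot hit-bit updates are omitted):

#include <stdbool.h>
#include <stdint.h>

struct entry { unsigned level; uint64_t oblock; bool dirty, sentinel, pending_work; };
struct queue;
struct smq_hash_table;
struct entry_alloc;
struct smq_policy {
    struct queue *hotspot;
    struct smq_hash_table *hotspot_table;
    struct entry_alloc *hotspot_alloc;
};

/* Helpers as sketched earlier (simplified signatures). */
struct entry *h_lookup(struct smq_hash_table *ht, uint64_t oblock);
void h_insert(struct smq_hash_table *ht, struct entry *e);
void h_remove(struct smq_hash_table *ht, struct entry *e);
struct entry *alloc_entry(struct entry_alloc *ea);
struct entry *q_pop(struct queue *q);
void q_push(struct queue *q, struct entry *e);
void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels);

/*
 * Track how hot a region ("hotspot block" hb) is.  On a miss, try to
 * allocate a tracking entry; if none are free, steal the coldest
 * tracker, unhash it, and rehash it for the new block.
 */
static struct entry *update_hotspot_queue(struct smq_policy *mq, uint64_t hb)
{
    struct entry *e = h_lookup(mq->hotspot_table, hb);

    if (e) {
        q_requeue(mq->hotspot, e, 1u);    /* hit: climb one level */
        return e;
    }

    e = alloc_entry(mq->hotspot_alloc);
    if (!e) {
        e = q_pop(mq->hotspot);           /* recycle the coldest tracker */
        if (e)
            h_remove(mq->hotspot_table, e);
    }

    if (e) {
        e->oblock = hb;
        q_push(mq->hotspot, e);
        h_insert(mq->hotspot_table, e);
    }

    return e;
}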
1345 struct entry *e, *hs_e; in __lookup() local
1350 e = h_lookup(&mq->table, oblock); in __lookup()
1351 if (e) { in __lookup()
1352 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1354 requeue(mq, e); in __lookup()
1355 *cblock = infer_cblock(mq, e); in __lookup()
1438 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work() local
1444 clear_pending(mq, e); in __complete_background_work()
1446 e->oblock = work->oblock; in __complete_background_work()
1447 e->level = NR_CACHE_LEVELS - 1; in __complete_background_work()
1448 push(mq, e); in __complete_background_work()
1451 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1459 h_remove(&mq->table, e); in __complete_background_work()
1460 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1463 clear_pending(mq, e); in __complete_background_work()
1464 push_queue(mq, e); in __complete_background_work()
1471 clear_pending(mq, e); in __complete_background_work()
1472 push_queue(mq, e); in __complete_background_work()
1495 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty() local
1497 if (e->pending_work) in __smq_set_clear_dirty()
1498 e->dirty = set; in __smq_set_clear_dirty()
1500 del_queue(mq, e); in __smq_set_clear_dirty()
1501 e->dirty = set; in __smq_set_clear_dirty()
1502 push_queue(mq, e); in __smq_set_clear_dirty()
1536 struct entry *e; in smq_load_mapping() local
1538 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1539 e->oblock = oblock; in smq_load_mapping()
1540 e->dirty = dirty; in smq_load_mapping()
1541 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock); in smq_load_mapping()
1542 e->pending_work = false; in smq_load_mapping()
1548 push_front(mq, e); in smq_load_mapping()
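
smq_load_mapping() above rebuilds in-core entries from on-disk metadata: a valid hint restores the entry's old queue level (clamped to NR_CACHE_LEVELS - 1), otherwise random_level() spreads reloads across levels; note the use of push_front() rather than push(). A sketch with simplified types (NR_CACHE_LEVELS assumed to be 64, matching the queue sketches; random_level() is hash-based in the real code):

#include <stdbool.h>
#include <stdint.h>

#define NR_CACHE_LEVELS 64u    /* assumed; matches the clean/dirty queues */

struct entry { unsigned level; uint64_t oblock; bool dirty, pending_work; };
struct entry_alloc;
struct smq_policy { struct entry_alloc *cache_alloc; };

/* Helpers as sketched earlier (simplified signatures). */
struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i);
void push_front(struct smq_policy *mq, struct entry *e);
unsigned random_level(unsigned cblock);

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/*
 * Rebuild an in-core entry for a mapping read back from the metadata
 * device.  The saved hint, when valid, restores the entry's old queue
 * level; otherwise a pseudo-random level avoids dumping every reloaded
 * entry into the same level.
 */
static int smq_load_mapping(struct smq_policy *mq,
                            uint64_t oblock, unsigned cblock,
                            bool dirty, uint32_t hint, bool hint_valid)
{
    struct entry *e;

    e = alloc_particular_entry(mq->cache_alloc, cblock);
    e->oblock = oblock;
    e->dirty = dirty;
    e->level = hint_valid ? min_u(hint, NR_CACHE_LEVELS - 1u)
                          : random_level(cblock);
    e->pending_work = false;

    /*
     * Head of its level rather than tail: freshly loaded mappings
     * shouldn't look more recently used than genuinely hot entries.
     */
    push_front(mq, e);

    return 0;
}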
1556 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping() local
1558 if (!e->allocated) in smq_invalidate_mapping()
1562 del_queue(mq, e); in smq_invalidate_mapping()
1563 h_remove(&mq->table, e); in smq_invalidate_mapping()
1564 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1571 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint() local
1573 if (!e->allocated) in smq_get_hint()
1576 return e->level; in smq_get_hint()