Searched refs:backoffs (Results 1 – 4 of 4) sorted by relevance
88    int backoffs;  in jffs2_rtime_decompress() local
94    backoffs = positions[value];  in jffs2_rtime_decompress()
98    if (backoffs + repeat >= outpos) {  in jffs2_rtime_decompress()
100   cpage_out[outpos++] = cpage_out[backoffs++];  in jffs2_rtime_decompress()
104   memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);  in jffs2_rtime_decompress()
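The jffs2 hits above all sit inside the rtime run-length decompressor: backoffs is the output offset where byte value was last emitted, and a following repeat count replays bytes starting there. Below is a condensed, hedged sketch of that loop written as standalone C. It mirrors the quoted lines, but the prototype, types, and bounds handling of the real jffs2_rtime_decompress() may differ; the added repeat clamp and the uint32_t positions[] array are my own simplifications. The key detail is the backoffs + repeat >= outpos test: an overlapping run must be copied byte by byte so it can reuse bytes written moments earlier, while a disjoint run can take one memcpy().

#include <stdint.h>
#include <string.h>

/* Sketch of an rtime-style decompressor, not a drop-in replacement for the
 * kernel function.  data_in holds (value, repeat) byte pairs; positions[]
 * remembers where each byte value was last written into the output. */
static int rtime_decompress(const unsigned char *data_in,
			    unsigned char *cpage_out,
			    uint32_t srclen, uint32_t destlen)
{
	uint32_t positions[256] = { 0 };	/* last output offset per byte value */
	int outpos = 0, pos = 0;

	while (outpos < destlen && pos + 1 < srclen) {
		unsigned char value = data_in[pos++];
		int repeat = data_in[pos++];
		int backoffs;

		cpage_out[outpos++] = value;	/* the verbatim byte itself */
		backoffs = positions[value];	/* where we last emitted it */
		positions[value] = outpos;

		if (repeat > (int)(destlen - outpos))
			repeat = destlen - outpos;	/* bound check not in the quoted code */
		if (!repeat)
			continue;

		if (backoffs + repeat >= outpos) {
			/* Overlapping run: copy byte by byte so later source
			 * bytes pick up output this loop just produced. */
			while (repeat--)
				cpage_out[outpos++] = cpage_out[backoffs++];
		} else {
			/* Disjoint run: a single memcpy() is safe. */
			memcpy(&cpage_out[outpos], &cpage_out[backoffs], repeat);
			outpos += repeat;
		}
	}
	return 0;
}

The overlap case doubles as the format's run-length encoding: when backoffs points at the byte just written, the byte-by-byte loop keeps repeating it, which memcpy() (undefined for overlapping buffers) could not do.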
307   struct rb_root backoffs;  member

1662  spg->backoffs = RB_ROOT;  in alloc_spg_mapping()
1669  WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));  in free_spg_mapping()
1915  while (!RB_EMPTY_ROOT(&spg->backoffs)) {  in DEFINE_RB_FUNCS()
1917  rb_entry(rb_first(&spg->backoffs),  in DEFINE_RB_FUNCS()
1920  erase_backoff(&spg->backoffs, backoff);  in DEFINE_RB_FUNCS()
1965  backoff = lookup_containing_backoff(&spg->backoffs, &hoid);  in should_plug_request()
4347  insert_backoff(&spg->backoffs, backoff);  in handle_backoff_block()
4403  erase_backoff(&spg->backoffs, backoff);  in handle_backoff_unblock()
4407  if (RB_EMPTY_ROOT(&spg->backoffs)) {  in handle_backoff_unblock()
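The ceph hits show the other common shape: each spg mapping keeps its backoff entries in a red-black tree rooted at spg->backoffs, filled by handle_backoff_block(), consulted by should_plug_request(), pruned by handle_backoff_unblock(), and drained before the mapping is freed. The real code generates its insert_backoff()/erase_backoff() helpers from ceph's DEFINE_RB_FUNCS macros and keys the tree on hobject ranges; the open-coded sketch below only illustrates the same pattern with an assumed plain u64 key, so spg_backoff, spg_mapping, and clear_backoffs are illustrative names, not the driver's.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative entry; the real backoff carries hobject_t range bounds. */
struct spg_backoff {
	struct rb_node node;
	u64 id;
};

struct spg_mapping {
	struct rb_root backoffs;	/* mirrors "struct rb_root backoffs;" above */
};

static void insert_backoff(struct rb_root *root, struct spg_backoff *bo)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct spg_backoff *cur =
			rb_entry(*link, struct spg_backoff, node);

		parent = *link;
		if (bo->id < cur->id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&bo->node, parent, link);
	rb_insert_color(&bo->node, root);
}

static struct spg_backoff *lookup_backoff(struct rb_root *root, u64 id)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct spg_backoff *cur = rb_entry(n, struct spg_backoff, node);

		if (id < cur->id)
			n = n->rb_left;
		else if (id > cur->id)
			n = n->rb_right;
		else
			return cur;	/* real code matches a containing range */
	}
	return NULL;
}

static void erase_backoff(struct rb_root *root, struct spg_backoff *bo)
{
	rb_erase(&bo->node, root);
	RB_CLEAR_NODE(&bo->node);
}

/* Drain pattern from the 1915-1920 hits: pop the leftmost entry until the
 * tree is empty, so the WARN_ON(!RB_EMPTY_ROOT()) in the free path holds. */
static void clear_backoffs(struct spg_mapping *spg)
{
	while (!RB_EMPTY_ROOT(&spg->backoffs)) {
		struct spg_backoff *bo =
			rb_entry(rb_first(&spg->backoffs),
				 struct spg_backoff, node);

		erase_backoff(&spg->backoffs, bo);
		kfree(bo);
	}
}

Keeping the per-spg backoffs in an rb_root gives O(log n) insert, lookup, and erase per placement group, and draining the tree before the mapping is freed is what lets the WARN_ON at line 1669 stay a pure sanity check.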
48 However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
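The last hit is from the ww_mutex design documentation, where a "backoff" means a losing acquirer dropping the locks it holds and retrying; Wound-Wait is typically stated to generate fewer of those than Wait-Die because a younger context only backs off when an older one actually contends for a lock it holds, rather than dying on every request for a lock an older context owns. As a hedged illustration of what gets counted, and not the documentation's own example, the sketch below takes two ww_mutexes of one class and backs off on -EDEADLK; grab_both, my_class, and the lock parameters are made-up names.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_class);	/* assumed class name */

/* Acquire two ww_mutexes of the same class, backing off on -EDEADLK. */
static int grab_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_class);

	ret = ww_mutex_lock(a, &ctx);	/* ctx holds nothing yet, so no -EDEADLK here */
	if (ret)
		goto out_fini;

	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		struct ww_mutex *tmp;

		/* Backoff: drop what we hold, sleep until the contended
		 * lock is free, take it, then retry the other one. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	if (ret) {
		ww_mutex_unlock(a);
		goto out_fini;
	}

	ww_acquire_done(&ctx);
	/* ... critical section over the state both locks protect ... */
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}

Each lock would be set up with ww_mutex_init(&lock, &my_class) before grab_both() is used; the -EDEADLK path above is exactly the backoff the quoted sentence is counting.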