• Home
  • Raw
  • Download

Lines Matching full:ui

289 		struct ubifs_inode *ui = ubifs_inode(inode);  in write_begin_slow()  local
296 mutex_lock(&ui->ui_mutex); in write_begin_slow()
297 if (ui->dirty) in write_begin_slow()
302 ubifs_release_dirty_inode_budget(c, ui); in write_begin_slow()
313 * @ui: UBIFS inode object the page belongs to
319 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
323 struct ubifs_inode *ui, int appending) in allocate_budget() argument
335 mutex_lock(&ui->ui_mutex); in allocate_budget()
336 if (ui->dirty) in allocate_budget()
341 * but @ui->ui_mutex has to be left locked because we in allocate_budget()
372 mutex_lock(&ui->ui_mutex); in allocate_budget()
373 if (!ui->dirty) in allocate_budget()
424 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_write_begin() local
465 err = allocate_budget(c, page, ui, appending); in ubifs_write_begin()
482 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); in ubifs_write_begin()
483 mutex_unlock(&ui->ui_mutex); in ubifs_write_begin()
494 * with @ui->ui_mutex locked if we are appending pages, and unlocked in ubifs_write_begin()
506 * @ui: UBIFS inode object the page belongs to
510 * @ui->ui_mutex in case of appending.
513 struct ubifs_inode *ui, int appending) in cancel_budget() argument
516 if (!ui->dirty) in cancel_budget()
517 ubifs_release_dirty_inode_budget(c, ui); in cancel_budget()
518 mutex_unlock(&ui->ui_mutex); in cancel_budget()
533 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_write_end() local
553 cancel_budget(c, page, ui, appending); in ubifs_write_end()
575 ui->ui_size = end_pos; in ubifs_write_end()
582 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); in ubifs_write_end()
583 mutex_unlock(&ui->ui_mutex); in ubifs_write_end()
718 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_do_bulk_read() local
730 ui->read_in_a_row = 1; in ubifs_do_bulk_read()
731 ui->bulk_read = 0; in ubifs_do_bulk_read()
797 ui->last_page_read = offset + page_idx - 1; in ubifs_do_bulk_read()
809 ui->read_in_a_row = ui->bulk_read = 0; in ubifs_do_bulk_read()
826 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_bulk_read() local
827 pgoff_t index = page->index, last_page_read = ui->last_page_read; in ubifs_bulk_read()
831 ui->last_page_read = index; in ubifs_bulk_read()
836 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization, in ubifs_bulk_read()
839 if (!mutex_trylock(&ui->ui_mutex)) in ubifs_bulk_read()
844 ui->read_in_a_row = 1; in ubifs_bulk_read()
845 if (ui->bulk_read) in ubifs_bulk_read()
846 ui->bulk_read = 0; in ubifs_bulk_read()
850 if (!ui->bulk_read) { in ubifs_bulk_read()
851 ui->read_in_a_row += 1; in ubifs_bulk_read()
852 if (ui->read_in_a_row < 3) in ubifs_bulk_read()
855 ui->bulk_read = 1; in ubifs_bulk_read()
884 mutex_unlock(&ui->ui_mutex); in ubifs_bulk_read()
907 struct ubifs_inode *ui = ubifs_inode(inode); in do_writepage() local
908 spin_lock(&ui->ui_lock); in do_writepage()
909 ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT); in do_writepage()
910 spin_unlock(&ui->ui_lock); in do_writepage()
990 * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size
1004 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_writepage() local
1020 spin_lock(&ui->ui_lock); in ubifs_writepage()
1021 synced_i_size = ui->synced_i_size; in ubifs_writepage()
1022 spin_unlock(&ui->ui_lock); in ubifs_writepage()
1116 struct ubifs_inode *ui = ubifs_inode(inode); in do_truncation() local
1186 mutex_lock(&ui->ui_mutex); in do_truncation()
1187 ui->ui_size = inode->i_size; in do_truncation()
1193 mutex_unlock(&ui->ui_mutex); in do_truncation()
1220 struct ubifs_inode *ui = ubifs_inode(inode); in do_setattr() local
1222 .dirtied_ino_d = ALIGN(ui->data_len, 8) }; in do_setattr()
1233 mutex_lock(&ui->ui_mutex); in do_setattr()
1238 ui->ui_size = inode->i_size; in do_setattr()
1243 release = ui->dirty; in do_setattr()
1252 mutex_unlock(&ui->ui_mutex); in do_setattr()
1375 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_update_time() local
1378 .dirtied_ino_d = ALIGN(ui->data_len, 8) }; in ubifs_update_time()
1388 mutex_lock(&ui->ui_mutex); in ubifs_update_time()
1396 release = ui->dirty; in ubifs_update_time()
1398 mutex_unlock(&ui->ui_mutex); in ubifs_update_time()
1415 struct ubifs_inode *ui = ubifs_inode(inode); in update_mctime() local
1421 .dirtied_ino_d = ALIGN(ui->data_len, 8) }; in update_mctime()
1427 mutex_lock(&ui->ui_mutex); in update_mctime()
1429 release = ui->dirty; in update_mctime()
1431 mutex_unlock(&ui->ui_mutex); in update_mctime()
1577 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_vm_page_mkwrite() local
1579 mutex_lock(&ui->ui_mutex); in ubifs_vm_page_mkwrite()
1581 release = ui->dirty; in ubifs_vm_page_mkwrite()
1583 mutex_unlock(&ui->ui_mutex); in ubifs_vm_page_mkwrite()
1585 ubifs_release_dirty_inode_budget(c, ui); in ubifs_vm_page_mkwrite()
1622 struct ubifs_inode *ui = ubifs_inode(inode); in ubifs_get_link() local
1625 return ui->data; in ubifs_get_link()
1630 return fscrypt_get_symlink(inode, ui->data, ui->data_len, done); in ubifs_get_link()