diff -urN v2.4.19p4/fs/reiserfs/file.c linux/fs/reiserfs/file.c
--- v2.4.19p4/fs/reiserfs/file.c	Tue Mar 26 17:03:22 2002
+++ linux/fs/reiserfs/file.c	Fri Mar 29 14:10:42 2002
@@ -47,6 +47,7 @@
 #ifdef REISERFS_PREALLOCATE
     reiserfs_discard_prealloc (&th, inode);
 #endif
+    journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
 
     if (atomic_read(&inode->i_count) <= 1 &&
@@ -60,6 +61,13 @@
 	reiserfs_truncate_file(inode, 0) ;
 	pop_journal_writer(windex) ;
     }
+
+    if (reiserfs_iicache(inode->i_sb)) {
+	if (inode->u.reiserfs_i.iic) {
+	    kfree(inode->u.reiserfs_i.iic);
+	}
+    }
+
     up (&inode->i_sem);
     unlock_kernel() ;
     return 0;
diff -urN v2.4.19p4/fs/reiserfs/inode.c linux/fs/reiserfs/inode.c
--- v2.4.19p4/fs/reiserfs/inode.c	Tue Mar 26 17:04:31 2002
+++ linux/fs/reiserfs/inode.c	Fri Mar 29 14:10:42 2002
@@ -17,6 +17,8 @@
 #define GET_BLOCK_READ_DIRECT 4   /* read the tail if indirect item not found */
 #define GET_BLOCK_NO_ISEM     8   /* i_sem is not held, don't preallocate */
 
+#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+
 static int reiserfs_get_block (struct inode * inode, long block,
			       struct buffer_head * bh_result, int create);
 //
@@ -53,6 +55,7 @@
 	/* no object items are in the tree */
 	;
     }
+
     clear_inode (inode); /* note this must go after the journal_end to prevent deadlock */
     inode->i_blocks = 0;
     unlock_kernel() ;
@@ -240,83 +243,286 @@
     reiserfs_update_inode_transaction(inode) ;
 }
 
-// it is called by get_block when create == 0. Returns block number
-// for 'block'-th logical block of file. When it hits direct item it
-// returns 0 (being called from bmap) or read direct item into piece
-// of page (bh_result)
+/*
+** Get block number from the indirect item by position.
+*/
+static inline long iitem_get_blocknr (struct path *path, int pos)
+{
+    struct buffer_head * bh = get_last_bh (path);
+    struct item_head * ih = get_ih (path);
+    __u32 * ind_item;
+
+    if (is_indirect_le_ih (ih)) {
+	ind_item = (__u32 *)B_I_PITEM (bh, ih);
+	return le32_to_cpu(ind_item [path->pos_in_item + pos]);
+    }
 
-// Please improve the english/clarity in the comment above, as it is
-// hard to understand.
+    return 0;
+}
 
-static int _get_block_create_0 (struct inode * inode, long block,
-				struct buffer_head * bh_result,
-				int args)
+/*
+** Get the indirect item size.
+*/
+static inline int iitem_size (struct path *path)
 {
-    INITIALIZE_PATH (path);
-    struct cpu_key key;
-    struct buffer_head * bh;
-    struct item_head * ih, tmp_ih;
-    int fs_gen ;
-    int blocknr;
-    char * p = NULL;
-    int chars;
-    int ret ;
-    int done = 0 ;
-    unsigned long offset ;
+    struct item_head * ih = get_ih (path);
+    return (I_UNFM_NUM(ih) - (path->pos_in_item + 1));
+}
 
-    // prepare the key to look for the 'block'-th block of file
-    make_cpu_key (&key, inode,
-		  (loff_t)block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 3);
+/*
+** Return "1" if the last position of the indirect item has been reached,
+** "0" otherwise.
+*/
+static inline int last_pos_of_iitem (struct path *path, int pos)
+{
+    struct item_head * ih = get_ih (path);
+    return ((path->pos_in_item + 1 + pos) >= (I_UNFM_NUM(ih)) ? 1 : 0);
+}
 
-research:
-    if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND) {
-	pathrelse (&path);
-	if (p)
-	    kunmap(bh_result->b_page) ;
-	// We do not return -ENOENT if there is a hole but page is uptodate, because it means
-	// That there is some MMAPED data associated with it that is yet to be written to disk.
-	if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
-	    return -ENOENT ;
-	}
-	return 0 ;
+/*
+** Get the number of contiguous blocks in the indirect item
+** from the given pos to the end of the item.
+*/
+static inline int iitem_amount_contiguous (struct path *path, int pos)
+{
+    long curr = 0;
+    long next = 0;
+    int item_size = iitem_size(path);
+    int amount = 1;
+
+    if (pos >= item_size) {
+	return 0;
+    }
+    curr = iitem_get_blocknr(path, pos++);
+    if (curr==0) {
+	while (pos <= item_size) {
+	    next = iitem_get_blocknr(path, pos++);
+	    if (next != 0) break;
+	    amount++;
 	}
-
-    //
-    bh = get_last_bh (&path);
-    ih = get_ih (&path);
-    if (is_indirect_le_ih (ih)) {
-	__u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+	return amount;
+    }
+
+    while (pos <= item_size) {
+	next = iitem_get_blocknr(path, pos++);
+	if ((next - curr) != 1) break;
+	curr = next;
+	amount++;
+    }
+
+    return amount;
+}
+
+/*
+** Return "1" if fs changed and item moved.
+*/
+static inline int need_research (int fs_gen, struct super_block * sb,
+				 struct item_head * ih, struct path * path )
+{
+    return (fs_changed(fs_gen, sb) && item_moved(ih, path));
+}
+
+/* Fill the indirect item cache.
+** Put N block numbers from the current indirect item into it.
+*/
+static inline void iicache_fill (struct inode * inode, long block,
+				 struct path * path, struct cpu_key * key)
+{
+    long blocknr=0, blk=block;
+    int pos=0;
+    int amount=0,i=0;
+    struct super_block * sb = inode->i_sb;
+    struct item_head * ih = get_ih (path);
+
+    if (inode->i_size < sb->s_blocksize * 4) return;
+
+    if (inode->u.reiserfs_i.iic==NULL) {
+	inode->u.reiserfs_i.iic = (struct iicache *)kmalloc(sizeof(struct iicache), GFP_NOFS);
+	if (inode->u.reiserfs_i.iic) {
+	    iicache_clear(inode);
+	} else {
+	    return;
+	}
+    }
+
+
+    for (i=0; is_blocksize) < inode->i_size) {
+	if ((i+1) < IICACHE_N) {
+	    set_cpu_key_k_offset (key, cpu_key_k_offset(key) + pos * sb->s_blocksize);
+
+	    if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {
+		break;
+	    }
+
+	    ih = get_ih (path);
+	    if (!is_indirect_le_ih(ih) ||
+		(le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size) {
+		break ;
+	    }
+	    pos=0; amount=0;
-	/* FIXME: here we could cache indirect item or part of it in
-	   the inode to avoid search_by_key in case of subsequent
-	   access to file */
-	blocknr = get_block_num(ind_item, path.pos_in_item) ;
-	ret = 0 ;
-	if (blocknr) {
-	    bh_result->b_dev = inode->i_dev;
-	    bh_result->b_blocknr = blocknr;
-	    bh_result->b_state |= (1UL << BH_Mapped);
-	} else
-	    // We do not return -ENOENT if there is a hole but page is uptodate, because it means
-	    // That there is some MMAPED data associated with it that is yet to be written to disk.
-	    if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
-		ret = -ENOENT ;
-	    }
+	}
+	}
+    }
+
+    if (i < IICACHE_N) {
+	iicache_clear_from_pos(inode, i);
+    }
+
+}
+
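For intuition, here is a minimal user-space sketch of the run-length logic that iitem_amount_contiguous() applies to an indirect item: starting at pos it counts either a run of consecutive block numbers or, when the first entry is 0, a run of holes. The array and function names below are illustrative only, not part of the patch, and the bounds are simplified.

#include <stdio.h>

/* Count the contiguous run (blocks or holes) starting at pos,
** mirroring the two loops in iitem_amount_contiguous(). */
static int run_length(const unsigned int *item, int nr, int pos)
{
    int amount = 1;
    unsigned int curr;

    if (pos >= nr)
        return 0;
    curr = item[pos++];
    if (curr == 0) {                          /* a hole: count following holes */
        while (pos < nr && item[pos] == 0) {
            pos++;
            amount++;
        }
        return amount;
    }
    while (pos < nr && item[pos] == curr + 1) {   /* count consecutive blocknrs */
        curr = item[pos++];
        amount++;
    }
    return amount;
}

int main(void)
{
    unsigned int item[] = { 100, 101, 102, 200, 0, 0, 7 };

    /* the run starting at 0 is {100,101,102} -> 3; at 4 it is {0,0} -> 2 */
    printf("%d %d\n", run_length(item, 7, 0), run_length(item, 7, 4));
    return 0;
}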
+*/ +static inline void iicache_truncate (struct inode * inode) +{ + long new_file_end = inode->i_size >> inode->i_blkbits; + long last_cached, truncate_size, n=0; + int i=0; + + if (inode->u.reiserfs_i.iic==NULL) return; + //iicache_clear(inode); + //return; + + if (iicache_size(inode,0)) { + if (new_file_end <= iicache_first_cached(inode,0)) { + iicache_clear(inode); + return; + } + if ((n=block_is_iicached(inode, new_file_end))) { + last_cached = iicache_last_cached(inode, n-1); + + if (iicache_size(inode,n) && (new_file_end <= last_cached)) { + truncate_size = last_cached - new_file_end + 1; + inode->u.reiserfs_i.iic->i_cache_size[n-1] -= truncate_size; + + i=n; + while(ib_page) ; - return ret ; } + } +} + + +/* +** Helper function for _get_block_create_0 +*/ +static inline int iitem_map_indirect_block (struct path * path, struct inode * inode, + long block, struct buffer_head * bh_result, + int args, struct cpu_key * key) +{ + struct buffer_head * bh = get_last_bh (path); + struct item_head * ih = get_ih (path); + __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih); + int blocknr= get_block_num(ind_item, path->pos_in_item) ; + + // We do not return -ENOENT if there is a hole but page is uptodate, because it means + // That there is some MMAPED data associated with it that is yet to be written to disk. + if (!blocknr && (args & GET_BLOCK_NO_HOLE)&& !Page_Uptodate(bh_result->b_page)) { + return -ENOENT ; + } + + // map the found block + set_block_dev_mapped (bh_result, blocknr, inode); + + //printk("...block=%li, block_nr=%i\n", block, blocknr); + + return 0; +} + + + +/* +** Helper function for _get_block_create_0 +*/ +static inline void path_relse_page_unmap (struct path * path, char * p, + struct page * page) { + pathrelse(path); + if (p) + kunmap(page); +} + +/* +** Handle Indirect Item case and simple direct case. +** "gbc0" stands for "get_block_create_0" +*/ +static inline int gbc0_indirect_case (char * p, struct path * path, + struct inode *inode, long block, + struct buffer_head * bh_result, + int args, struct cpu_key * key) +{ + struct super_block * sb = inode->i_sb; + struct page * page = bh_result->b_page; + struct item_head * ih = get_ih (path); + int ret=0; + + // requested data are in indirect item(s) + if (is_indirect_le_ih (ih)) { + + ret = iitem_map_indirect_block (path, inode, block, bh_result, args, key); + if (ret<0) { + path_relse_page_unmap (path, p, page); + return ret; + } + + if (p) + kunmap(page); + + /* + ** Here we fill indirect item cache or part of it + ** in the inode to avoid search_by_key in case of + ** subsequent access to file. + */ + // if "iicache" mount option is used + if (reiserfs_iicache(sb)) { + iicache_fill (inode, block, path, key); + } + pathrelse(path); + //path_relse_page_unmap (path, p, page); + return 0 ; + } + + return 1; +} + +/* +** Direct Item case start. +** "gbc0" stands for "get_block_create_0" +*/ +static inline int gbc0_direct_case_start (char * p, struct path * path, + struct inode *inode, + struct buffer_head * bh_result, + int args) +{ + struct page * page = bh_result->b_page; // requested data are in direct item(s) if (!(args & GET_BLOCK_READ_DIRECT)) { - // we are called by bmap. FIXME: we can not map block of file - // when it is stored in direct item(s) - pathrelse (&path); - if (p) - kunmap(bh_result->b_page) ; - return -ENOENT; + // we are called by bmap. 
+	// when it is stored in direct item(s)
+	path_relse_page_unmap (path, p, page);
+	return -ENOENT;
     }
 
     /* if we've got a direct item, and the buffer was uptodate,
@@ -324,90 +530,203 @@
     ** end, where we map the buffer and return
     */
     if (buffer_uptodate(bh_result)) {
-	goto finished ;
-    } else
-	/*
-	** grab_tail_page can trigger calls to reiserfs_get_block on up to date
-	** pages without any buffers.  If the page is up to date, we don't want
-	** read old data off disk.  Set the up to date bit on the buffer instead
-	** and jump to the end
-	*/
-	if (Page_Uptodate(bh_result->b_page)) {
-	    mark_buffer_uptodate(bh_result, 1);
-	    goto finished ;
+	set_block_dev_mapped (bh_result, 0, inode);
+	path_relse_page_unmap (path, p, page);
+	return 0;
+    } else {
+	/*
+	** grab_tail_page can trigger calls to reiserfs_get_block on up to date
+	** pages without any buffers.  If the page is up to date, we don't want
+	** read old data off disk.  Set the up to date bit on the buffer instead
+	** and jump to the end
+	*/
+	if (Page_Uptodate(bh_result->b_page)) {
+	    mark_buffer_uptodate(bh_result, 1);
+	    set_block_dev_mapped (bh_result, 0, inode);
+	    path_relse_page_unmap (path, p, page);
+	    return 0;
+	}
     }
+    return 1;
+}
 
-    // read file tail into part of page
-    offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
-    fs_gen = get_generation(inode->i_sb) ;
-    copy_item_head (&tmp_ih, ih);
-
-    /* we only want to kmap if we are reading the tail into the page.
-    ** this is not the common case, so we don't kmap until we are
-    ** sure we need to.  But, this means the item might move if
-    ** kmap schedules
+/*
+** Handle Direct Item case.
+** "gbc0" stands for "get_block_create_0"
+*/
+static inline void gbc0_direct_case (char * p, struct path * path,
+				     struct inode *inode,
+				     struct cpu_key * key)
+{
+    struct buffer_head * bh;
+    struct super_block * sb = inode->i_sb;
+    struct item_head * ih = get_ih (path);
+    int chars=0, done=0;
+
+    do {
+	if (!is_direct_le_ih (ih)) {
+	    BUG ();
+	}
+	/* make sure we don't read more bytes than actually exist in
+	** the file.  This can happen in odd cases where i_size isn't
+	** correct, and when direct item padding results in a few
+	** extra bytes at the end of the direct item
 	*/
-    if (!p) {
-	p = (char *)kmap(bh_result->b_page) ;
-	if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
-	    goto research;
-	}
+	if ((le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size)
+	    break ;
+
+	if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
+	    chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path->pos_in_item;
+	    done = 1 ;
+	} else {
+	    chars = ih_item_len(ih) - path->pos_in_item;
 	}
-    p += offset ;
-    memset (p, 0, inode->i_sb->s_blocksize);
-    do {
-	if (!is_direct_le_ih (ih)) {
-	    BUG ();
-	}
-	/* make sure we don't read more bytes than actually exist in
-	** the file.  This can happen in odd cases where i_size isn't
-	** correct, and when direct item padding results in a few
-	** extra bytes at the end of the direct item
-	*/
-	if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
-	    break ;
-	if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
-	    chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path.pos_in_item;
-	    done = 1 ;
-	} else {
-	    chars = ih_item_len(ih) - path.pos_in_item;
-	}
-	memcpy (p, B_I_PITEM (bh, ih) + path.pos_in_item, chars);
-	if (done)
-	    break ;
+	bh = get_last_bh (path);
+	memcpy (p, B_I_PITEM (bh, ih) + path->pos_in_item, chars);
 
-	p += chars;
+	if (done)
+	    break ;
 
-	if (PATH_LAST_POSITION (&path) != (B_NR_ITEMS (bh) - 1))
-	    // we done, if read direct item is not the last item of
-	    // node FIXME: we could try to check right delimiting key
-	    // to see whether direct item continues in the right
-	    // neighbor or rely on i_size
 	    break;
+	p += chars;
 
-	// update key to look for the next piece
-	set_cpu_key_k_offset (&key, cpu_key_k_offset (&key) + chars);
-	if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND)
-	    // we read something from tail, even if now we got IO_ERROR
-	    break;
-	bh = get_last_bh (&path);
-	ih = get_ih (&path);
-    } while (1);
+	if (PATH_LAST_POSITION (path) != (B_NR_ITEMS (bh) - 1))
+	    // we done, if read direct item is not the last item of
+	    // node FIXME: we could try to check right delimiting key
+	    // to see whether direct item continues in the right
+	    // neighbor or rely on i_size
+	    break;
 
-    flush_dcache_page(bh_result->b_page) ;
-    kunmap(bh_result->b_page) ;
+	// update key to look for the next piece
+	set_cpu_key_k_offset (key, cpu_key_k_offset(key) + chars);
 
-finished:
-    pathrelse (&path);
-    bh_result->b_blocknr = 0 ;
-    bh_result->b_dev = inode->i_dev;
+	if (search_for_position_by_key (sb, key, path) != POSITION_FOUND)
+	    // we read something from tail, even if now we got IO_ERROR
+	    break;
+
+	bh = get_last_bh (path);
+	ih = get_ih (path);
+
+    } while (1);
+
+}
+
+
+/*
+** Helper function for _get_block_create_0
+** Check the iicache.
+** If the needed block is in the iicache we map it and return "1".
+*/
+static int check_iicache (struct inode * inode, long block,
+			  struct buffer_head * bh_result)
+{
+    struct super_block * sb = inode->i_sb;
+    int n=0, block_nr=0;
+
+    /*
+    ** Here we use the cache of the indirect item.
+    ** By getting the unfm block number from the cache
+    ** we try to avoid some of the search_by_key() calls.
+    */
+
+    // if the "iicache" mount option is used
+    if (reiserfs_iicache(sb)) {
+
+	// Check the iicache and get the cache slot number + 1
+	// where the block number corresponding to the given
+	// logical block of the file can be found.
+	n = block_is_iicached(inode, block);
+
+	// if the iicache is not empty for this file and
+	// the requested logical block of the file is cached,
+	// then we return the corresponding block number.
+	if (n) {
+	    block_nr = iicache_blocknr(inode, block, n-1);
+	    set_block_dev_mapped (bh_result, block_nr, inode);
+	    //printk("n=%i, block=%li, block_nr=%i\n", n, block, block_nr);
+	    //iicache_print(inode);
+
+	    //if (n>2) return 0;
+	    return 1;
+	}
+
+    }
+    return 0;
+}
+
+//
+// It is called by reiserfs_get_block when create == 0.
+// Returns the disk block number for the given logical block number of the file.
+// +// When it hits direct item it returns 0 (being called from bmap) +// or read direct item into piece of page (bh_result) +// +static int _get_block_create_0 (struct inode * inode, long block, + struct buffer_head * bh_result, + int args) +{ + INITIALIZE_PATH (path); + struct cpu_key key; + struct item_head * ih, tmp_ih; + struct super_block * sb = inode->i_sb; + struct page * page = bh_result->b_page; + char * p = NULL; + unsigned long offset ; + int fs_gen=0, ret=0, block_iicached=0; + + + block_iicached = check_iicache (inode, block, bh_result); + + if (block_iicached) { + return 0; + } + + // prepare the key to look for the 'block'-th block of file + offset = block * sb->s_blocksize + 1; + make_cpu_key (&key, inode, (loff_t)offset, TYPE_ANY, 3); + + do { + + if (search_for_position_by_key (sb, &key, &path) != POSITION_FOUND) { + path_relse_page_unmap (&path, p, page); + // We do not return -ENOENT if there is a hole but page is uptodate, because it means + // That there is some MMAPED data associated with it that is yet to be written to disk. + return (((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page)) ? (-ENOENT) : 0 ) ; + } + + // check and handle indirect case + ret = gbc0_indirect_case (p, &path, inode, block, bh_result, args, &key); + if (ret <= 0) + return ret; + + // start the direct case + ret = gbc0_direct_case_start (p, &path, inode, bh_result, args); + if (ret <= 0) + return ret; + + // we should read the file tail into part of page. + offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ; + fs_gen = get_generation(sb) ; + ih = get_ih (&path); + copy_item_head (&tmp_ih, ih); + if (!p) + p=(char *)kmap(page); + + } while (need_research(fs_gen, sb, &tmp_ih, &path)); + + // ok, we have direct item and kmapped page, + // do copy from direct item to page now. + p += offset; + memset (p, 0, sb->s_blocksize); + gbc0_direct_case (p, &path, inode, &key); + + flush_dcache_page(page) ; + path_relse_page_unmap (&path, p, page); + set_block_dev_mapped (bh_result, 0, inode); mark_buffer_uptodate (bh_result, 1); - bh_result->b_state |= (1UL << BH_Mapped); return 0; } - // this is called to create file map. So, _get_block_create_0 will not // read direct item int reiserfs_bmap (struct inode * inode, long block, @@ -560,10 +879,13 @@ struct cpu_key key; struct buffer_head * bh, * unbh = 0; struct item_head * ih, tmp_ih; + struct super_block * sb = inode->i_sb; __u32 * item; int done; int fs_gen; int windex ; + int block_iicached=0; + struct reiserfs_transaction_handle th ; /* space reserved in transaction batch: . 
@@ -560,10 +879,13 @@
     struct cpu_key key;
     struct buffer_head * bh, * unbh = 0;
     struct item_head * ih, tmp_ih;
+    struct super_block * sb = inode->i_sb;
     __u32 * item;
     int done;
     int fs_gen;
     int windex ;
+    int block_iicached=0;
+
     struct reiserfs_transaction_handle th ;
     /* space reserved in transaction batch:
        . 3 balancings in direct->indirect conversion
@@ -590,6 +912,7 @@
 	return -EFBIG;
     }
 
+
     /* if !create, we aren't changing the FS, so we don't need to
     ** log anything, so we don't need to start a transaction
     */
@@ -601,7 +924,15 @@
 	unlock_kernel() ;
 	return ret;
     }
-
+    /****
+    if (reiserfs_iicache(sb)) {
+	if (inode->u.reiserfs_i.iic) {
+	    iicache_spin_lock(inode);
+	    iicache_clear(inode);
+	    iicache_spin_unlock(inode);
+	}
+    }
+    ***/
     inode->u.reiserfs_i.i_flags |= i_pack_on_close_mask;
     windex = push_journal_writer("reiserfs_get_block") ;
@@ -921,6 +1252,26 @@
 
     INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
 
+    if (reiserfs_iicache(inode->i_sb)) {
+	iicache_spin_lock_init (inode);
+    }
+
+/*
+    if ( S_ISREG(inode->i_mode) ) {
+	if (reiserfs_iicache(inode->i_sb)) {
+	    iicache_spin_lock_init (inode);
+	    if (inode->u.reiserfs_i.iic==NULL) {
+		inode->u.reiserfs_i.iic = (struct iicache *)kmalloc(sizeof(struct iicache), GFP_NOFS);
+		if (inode->u.reiserfs_i.iic) {
+		    //printk("kmalloc\n");
+		    iicache_clear(inode);
+		} else {
+		    printk("kmalloc : no memory anymore\n");
+		}
+	    }
+	}
+    }
+*/
     if (stat_data_v1 (ih)) {
 	struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
 	unsigned long blocks;
@@ -1531,6 +1882,8 @@
     /* item head of new item */
     ih.ih_key.k_dir_id = INODE_PKEY (dir)->k_objectid;
     ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th));
+
+
     if (!ih.ih_key.k_objectid) {
 	iput(inode) ;
 	*err = -ENOMEM;
@@ -1597,6 +1950,11 @@
 
     INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
 
+    if (reiserfs_iicache(inode->i_sb)) {
+	iicache_spin_lock_init (inode);
+	iicache_clear(inode);
+    }
+
     if (old_format_only (sb)) {
 	if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
 	    pathrelse (&path_to_key);
@@ -1757,6 +2115,7 @@
 */
 void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     struct reiserfs_transaction_handle th ;
+    struct super_block * sb = p_s_inode->i_sb;
     int windex ;
 
     /* we want the offset for the first byte after the end of the file */
@@ -1792,6 +2151,15 @@
     journal_begin(&th, p_s_inode->i_sb,  JOURNAL_PER_BALANCE_CNT * 2 + 1 ) ;
     reiserfs_update_inode_transaction(p_s_inode) ;
     windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
+
+/*********
+    if (reiserfs_iicache(sb)) {
+	iicache_spin_lock(p_s_inode);
+	iicache_truncate (p_s_inode);
+	iicache_spin_unlock(p_s_inode);
+    }
+***********/
+
     if (update_timestamps)
 	/* we are doing real truncate: if the system crashes before the last
 	   transaction of truncating gets committed - on reboot the file
diff -urN v2.4.19p4/fs/reiserfs/journal.c linux/fs/reiserfs/journal.c
--- v2.4.19p4/fs/reiserfs/journal.c	Tue Mar 26 17:04:32 2002
+++ linux/fs/reiserfs/journal.c	Fri Mar 29 14:10:42 2002
@@ -1886,7 +1886,7 @@
       break ;
     }
     wake_up(&reiserfs_commit_thread_done) ;
-    interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
+    interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5) ;
   }
   unlock_kernel() ;
   wake_up(&reiserfs_commit_thread_done) ;
diff -urN v2.4.19p4/fs/reiserfs/namei.c linux/fs/reiserfs/namei.c
--- v2.4.19p4/fs/reiserfs/namei.c	Tue Mar 26 17:04:32 2002
+++ linux/fs/reiserfs/namei.c	Fri Mar 29 14:10:42 2002
@@ -309,10 +309,9 @@
     while (1) {
 	retval = search_by_entry_key (dir->i_sb, &key_to_search, path_to_entry, de);
-	if (retval == IO_ERROR) {
-	    reiserfs_warning ("zam-7001: io error in " __FUNCTION__ "\n");
-	    return IO_ERROR;
-	}
+	if (retval == IO_ERROR)
+	    // FIXME: still has to be dealt with
+	    reiserfs_panic (dir->i_sb, "zam-7001: io error in " __FUNCTION__ "\n");
 
 	/* compare names for all entries having given hash value */
 	retval = linear_search_in_dir_item (&key_to_search, de, name, namelen);
diff -urN v2.4.19p4/fs/reiserfs/stree.c linux/fs/reiserfs/stree.c
--- v2.4.19p4/fs/reiserfs/stree.c	Tue Mar 26 17:04:35 2002
+++ linux/fs/reiserfs/stree.c	Fri Mar 29 14:10:42 2002
@@ -1031,7 +1031,6 @@
     char     c_mode;            /* Returned mode of the balance. */
     int      need_research;
 
-    n_blk_size = p_s_sb->s_blocksize;
 
     /* Search for the needed object indirect item until there are no unformatted nodes to be removed. */
diff -urN v2.4.19p4/fs/reiserfs/super.c linux/fs/reiserfs/super.c
--- v2.4.19p4/fs/reiserfs/super.c	Tue Mar 26 17:04:35 2002
+++ linux/fs/reiserfs/super.c	Fri Mar 29 14:10:42 2002
@@ -455,6 +455,8 @@
 	    set_bit (REISERFS_HASHED_RELOCATION, mount_options);
 	} else if (!strcmp (this_char, "test4")) {
 	    set_bit (REISERFS_TEST4, mount_options);
+	} else if (!strcmp (this_char, "iicache")) {
+	    set_bit (REISERFS_IICACHE, mount_options);
 	} else if (!strcmp (this_char, "nolog")) {
 	    reiserfs_warning("reiserfs: nolog mount option not supported yet\n");
 	} else if (!strcmp (this_char, "replayonly")) {
@@ -558,6 +560,19 @@
 
   handle_attrs( s );
 
+#define SET_OPT( opt, bits, super )					\
+    if( ( bits ) & ( 1 << ( opt ) ) )					\
+	    ( super ) -> u.reiserfs_sb.s_mount_opt |= ( 1 << ( opt ) )
+
+  /* set options in the super-block bitmask */
+  SET_OPT( NOTAIL, mount_options, s );
+  SET_OPT( REISERFS_IICACHE, mount_options, s );
+  SET_OPT( REISERFS_NO_BORDER, mount_options, s );
+  SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
+  SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
+  SET_OPT( REISERFS_TEST4, mount_options, s );
+#undef SET_OPT
+
   if(blocks) {
       int rc = reiserfs_resize(s, blocks);
       if (rc != 0)
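To make the effect of the new option concrete, here is a small stand-alone sketch of what the SET_OPT block above does for "iicache": the option parser sets bit REISERFS_IICACHE in the temporary mount_options word, SET_OPT copies any set bits into s_mount_opt, and reiserfs_iicache() later tests that bit. The struct below is a simplified stand-in for the superblock, for illustration only.

#include <stdio.h>

#define REISERFS_IICACHE 17            /* same bit number as in reiserfs_fs_sb.h */

/* simplified stand-in for the reiserfs-specific part of the super block */
struct fake_super { unsigned long s_mount_opt; };

#define SET_OPT( opt, bits, super )					\
    if( ( bits ) & ( 1 << ( opt ) ) )					\
	    ( super )->s_mount_opt |= ( 1 << ( opt ) )

#define reiserfs_iicache(s) ((s)->s_mount_opt & (1 << REISERFS_IICACHE))

int main(void)
{
    unsigned long mount_options = 0;
    struct fake_super s = { 0 };

    mount_options |= (1 << REISERFS_IICACHE);   /* "-o iicache" was parsed */
    SET_OPT( REISERFS_IICACHE, mount_options, &s );

    printf("iicache enabled: %s\n", reiserfs_iicache(&s) ? "yes" : "no");
    return 0;
}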
+*/ + +#define IICACHE_BLOCKNR 1 +#define IICACHE_SIZE 2 +#define IICACHE_BLOCK 3 + +/* +** Set parameter of given type to iicache +*/ +static inline void iicache_set (struct inode * inode, + long param, int type, int i) +{ + if (inode->u.reiserfs_i.iic==NULL) return; + + switch (type) { + case IICACHE_BLOCKNR : inode->u.reiserfs_i.iic->i_cache_blocknr[i] = param; + break; + case IICACHE_SIZE : inode->u.reiserfs_i.iic->i_cache_size[i] = param; + break; + case IICACHE_BLOCK : inode->u.reiserfs_i.iic->i_cache_block[i] = param; + break; + } +} + +/* +** Get parameter of given type from iicache +*/ +static inline long iicache_get (struct inode * inode, int type, int i) +{ + long val; + if (inode->u.reiserfs_i.iic==NULL) return 0; + + switch (type) { + case IICACHE_BLOCKNR : val=inode->u.reiserfs_i.iic->i_cache_blocknr[i]; + break; + case IICACHE_SIZE : val=inode->u.reiserfs_i.iic->i_cache_size[i]; + break; + case IICACHE_BLOCK : val=inode->u.reiserfs_i.iic->i_cache_block[i]; + break; + } + return val; +} + +/* +** Clear the indirect item cache +*/ +static inline void iicache_clear(struct inode * inode) +{ + int i; + + if (inode->u.reiserfs_i.iic==NULL) return; + + for (i=0; iu.reiserfs_i.iic==NULL) return; + + for (i=0; iu.reiserfs_i.iic==NULL) return; + + for (i=pos; iu.reiserfs_i.iic==NULL) return 0; + return (iicache_get(inode, IICACHE_BLOCKNR, i)); +} + +/* +** Get the size of indirect item cache +*/ +static inline long iicache_size(struct inode * inode, int i) +{ + if (inode->u.reiserfs_i.iic==NULL) return 0; + return (iicache_get(inode, IICACHE_SIZE, i)); +} + +/* +** Get the first cached logical block of file +*/ +static inline long iicache_first_cached(struct inode * inode, int i) +{ + if (inode->u.reiserfs_i.iic==NULL) return 0; + return (iicache_get(inode, IICACHE_BLOCK, i)); +} + +/* +** Get the last cached logical block of file +*/ +static inline long iicache_last_cached(struct inode * inode, int i) +{ + if (inode->u.reiserfs_i.iic==NULL) return 0; + return (iicache_first_cached(inode,i) + iicache_size(inode,i) - 1); +} + +/* +** Check the logical block of file: is it in iicache +*/ +static inline int block_is_iicached(struct inode * inode, long block) +{ + int i; + if (inode->u.reiserfs_i.iic==NULL) return 0; + + for (i=0; i= iicache_first_cached(inode, i)) && + (block <= iicache_last_cached(inode, i)) ) + return i+1; + } + return 0; +} + +/* +** Get the disk block number by the logical block number of file +*/ +static inline long iicache_blocknr(struct inode * inode, long block, int i) +{ + long offset=0, block_nr=0; + if (inode->u.reiserfs_i.iic==NULL) return 0; + + offset = block - iicache_first_cached(inode,i); + block_nr = iicache_get_blocknr(inode,i); + if (block_nr == 0) + return 0; + else + return (block_nr + offset); +} + +static inline void iicache_spin_lock_init(struct inode * inode) +{ + inode->u.reiserfs_i.i_cache_lock = SPIN_LOCK_UNLOCKED; +} + +static inline void iicache_spin_lock(struct inode * inode) +{ + spin_lock ( &(inode->u.reiserfs_i.i_cache_lock) ); +} + +static inline void iicache_spin_unlock(struct inode * inode) +{ + spin_unlock ( &(inode->u.reiserfs_i.i_cache_lock) ); +} void sd_attrs_to_i_attrs( __u16 sd_attrs, struct inode *inode ); void i_attrs_to_sd_attrs( struct inode *inode, __u16 *sd_attrs ); diff -urN v2.4.19p4/include/linux/reiserfs_fs_i.h linux/include/linux/reiserfs_fs_i.h --- v2.4.19p4/include/linux/reiserfs_fs_i.h Tue Mar 26 17:04:51 2002 +++ linux/include/linux/reiserfs_fs_i.h Fri Mar 29 14:11:20 2002 @@ -3,6 +3,15 @@ #include +#define 
diff -urN v2.4.19p4/include/linux/reiserfs_fs_i.h linux/include/linux/reiserfs_fs_i.h
--- v2.4.19p4/include/linux/reiserfs_fs_i.h	Tue Mar 26 17:04:51 2002
+++ linux/include/linux/reiserfs_fs_i.h	Fri Mar 29 14:11:20 2002
@@ -3,6 +3,15 @@
 
 #include <linux/list.h>
 
+#define IICACHE_N	8	/* iicache array size */
+
+// The cache for indirect item (iicache).
+struct iicache {
+    long i_cache_blocknr[IICACHE_N];	/* the first of a set of contiguous blocknrs    */
+    long i_cache_size   [IICACHE_N];	/* the number of blocknrs in the contiguous set */
+    long i_cache_block  [IICACHE_N];	/* the first cached logical block of the file   */
+};
+
 /** bitmasks for i_flags field in reiserfs-specific part of inode */
 typedef enum {
     /** this says what format of key do all items (but stat data) of
@@ -46,6 +55,10 @@
     ** flushed */
     unsigned long i_trans_id ;
     unsigned long i_trans_index ;
+
+    // The cache for indirect item (iicache).
+    struct iicache * iic;
+    spinlock_t i_cache_lock;	/* spinlock to protect iicache changes */
 };
 
 #endif
diff -urN v2.4.19p4/include/linux/reiserfs_fs_sb.h linux/include/linux/reiserfs_fs_sb.h
--- v2.4.19p4/include/linux/reiserfs_fs_sb.h	Tue Mar 26 17:04:51 2002
+++ linux/include/linux/reiserfs_fs_sb.h	Fri Mar 29 14:11:31 2002
@@ -482,6 +482,7 @@
 #define REISERFS_NO_UNHASHED_RELOCATION	12
 #define REISERFS_HASHED_RELOCATION	13
 #define REISERFS_TEST4			14
+#define REISERFS_IICACHE		17
 
 #define REISERFS_TEST1 11
 #define REISERFS_TEST2 12
@@ -498,6 +499,8 @@
 #define reiserfs_no_unhashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION))
 #define reiserfs_hashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_HASHED_RELOCATION))
 #define reiserfs_test4(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_TEST4))
+
+#define reiserfs_iicache(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_IICACHE))
 
 #define dont_have_tails(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << NOTAIL))
 #define replay_only(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REPLAYONLY))
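For a sense of the per-inode cost of the structure just added: struct iicache is three arrays of IICACHE_N longs, so the kmalloc in iicache_fill() pays 3 * 8 * sizeof(long) bytes per regular file that gets cached, i.e. 96 bytes with 32-bit longs and 192 bytes with 64-bit longs. A trivial check, using a local copy of the definition for sizing only:

#include <stdio.h>

#define IICACHE_N 8

/* local copy of the struct from reiserfs_fs_i.h, for sizing only */
struct iicache {
    long i_cache_blocknr[IICACHE_N];
    long i_cache_size   [IICACHE_N];
    long i_cache_block  [IICACHE_N];
};

int main(void)
{
    /* 3 * IICACHE_N * sizeof(long): 96 bytes on ILP32, 192 on LP64 */
    printf("sizeof(struct iicache) = %zu bytes\n", sizeof(struct iicache));
    return 0;
}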