Merge branch 'for-chris-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.7
masoncl committed May 26, 2016
2 parents c315ef8 + 4c6143d commit 9257b4c
Showing 36 changed files with 202 additions and 200 deletions.
2 changes: 1 addition & 1 deletion fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
* from ipath->fspath->val[i].
* when it returns, there are ipath->fspath->elem_cnt number of paths available
* in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
* it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
* have been needed to return all paths.
*/
2 changes: 1 addition & 1 deletion fs/btrfs/btrfs_inode.h
@@ -313,7 +313,7 @@ struct btrfs_dio_private {
struct bio *dio_bio;

/*
- * The original bio may be splited to several sub-bios, this is
+ * The original bio may be split to several sub-bios, this is
* done during endio of sub-bios
*/
int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
2 changes: 1 addition & 1 deletion fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
/*
* Clear all references of this block. Do not free
* the block itself even if is not referenced anymore
- * because it still carries valueable information
+ * because it still carries valuable information
* like whether it was ever written and IO completed.
*/
list_for_each_entry_safe(l, tmp, &block->ref_to_list,
14 changes: 7 additions & 7 deletions fs/btrfs/ctree.c
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)

/*
* RCU really hurts here, we could free up the root node because
- * it was cow'ed but we may not get the new root node yet so do
+ * it was COWed but we may not get the new root node yet so do
* the inc_not_zero dance and if it doesn't work then
* synchronize_rcu and try again.
*/
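The comment above refers to the lookup loop in btrfs_root_node(): dereference root->node under RCU, try to take a reference with atomic_inc_not_zero(), and if the buffer is already on its way to being freed, wait out the grace period and retry. A rough sketch of that pattern, reconstructed for illustration rather than copied from this diff:

struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);
		/* Keep the node only if it still has a live refcount. */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		/* Root was COWed and freed under us; wait and retry. */
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}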
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
- * Tree blocks not in refernece counted trees and tree roots
+ * Tree blocks not in reference counted trees and tree roots
* are never shared. If a block was allocated after the last
* snapshot and the block was not allocated by tree relocation,
* we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,

/*
* tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
* time_seq).
*/
static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
}

/*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
* is returned. If rewind operations happen, a fresh buffer is returned. The
* returned buffer is always read-locked. If the returned buffer is not the
* input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
* 3) the root is not forced COW.
*
* What is forced COW:
- * when we create snapshot during commiting the transaction,
+ * when we create snapshot during committing the transaction,
* after we've finished coping src root, we must COW the shared
* block to ensure the metadata consistency.
*/
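For context, the checks this comment enumerates boil down to a simplified test like the following (a reconstruction for illustration only; the real should_cow_block() also special-cases blocks flagged for relocation):

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&	  /* 1) allocated in this transaction */
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && /* 2) not yet written out */
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))	  /* 3) root is not forced COW */
		return 0;	/* safe to modify the block in place */
	return 1;
}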
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,

/*
* cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
* once per transaction, as long as it hasn't been written yet
*/
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
btrfs_unlock_up_safe(p, level + 1);

/*
- * Since we can unwind eb's we want to do a real search every
+ * Since we can unwind ebs we want to do a real search every
* time.
*/
prev_cmp = -1;
6 changes: 3 additions & 3 deletions fs/btrfs/ctree.h
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0

-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS (1 << 30)

/* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@ struct btrfs_space_info {
* bytes_pinned does not reflect the bytes that will be pinned once the
* delayed refs are flushed, so this counter is inc'ed every time we
* call btrfs_free_extent so it is a realtime count of what will be
- * freed once the transaction is committed. It will be zero'ed every
+ * freed once the transaction is committed. It will be zeroed every
* time the transaction commits.
*/
struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
token->kaddr = NULL;
}

-/* some macros to generate set/get funcs for the struct fields. This
+/* some macros to generate set/get functions for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so lets make a simple
* one for u8:
*/
2 changes: 1 addition & 1 deletion fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {

/*
* To make qgroup to skip given root.
- * This is for snapshot, as btrfs_qgroup_inherit() will manully
+ * This is for snapshot, as btrfs_qgroup_inherit() will manually
* modify counters for snapshot and its source, so we should skip
* the snapshot in new_root/old_roots or it will get calculated twice
*/
2 changes: 1 addition & 1 deletion fs/btrfs/dev-replace.c
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
}

/*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bios operations are finished.
*/
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
14 changes: 7 additions & 7 deletions fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
/*
* Things reading via commit roots that don't have normal protection,
* like send, can have a really old block in cache that may point at a
- * block that has been free'd and re-allocated. So don't clear uptodate
+ * block that has been freed and re-allocated. So don't clear uptodate
* if we find an eb that is under IO (dirty/writeback) because we could
* end up reading in the stale data and then writing it back out and
* making everybody very sad.
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
/*
* The super_block structure does not span the whole
* BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
- * is filled with zeros and is included in the checkum.
+ * is filled with zeros and is included in the checksum.
*/
crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,

/*
* Check to make sure that we don't point outside of the leaf,
- * just incase all the items are consistent to eachother, but
+ * just in case all the items are consistent to each other, but
* all point outside of the leaf.
*/
if (btrfs_item_end_nr(leaf, slot) >
@@ -3022,7 +3022,7 @@ int open_ctree(struct super_block *sb,
}

/*
- * Mount does not set all options immediatelly, we can do it now and do
+ * Mount does not set all options immediately, we can do it now and do
* not have to wait for transaction commit
*/
btrfs_apply_pending_changes(fs_info);
@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
"lost page write due to IO error on %s",
rcu_str_deref(device->name));
- /* note, we dont' set_buffer_write_io_error because we have
+ /* note, we don't set_buffer_write_io_error because we have
* our own ways of dealing with the IO errors
*/
clear_buffer_uptodate(bh);
@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
if (ret)
break;

- clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+ clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) {
eb = btrfs_find_tree_block(root->fs_info, start);
start += root->nodesize;
@@ -4402,7 +4402,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
if (ret)
break;

- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(root, start, end);
cond_resched();
}
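The clear_extent_bits()/clear_extent_dirty() changes in the two hunks above (and the matching set_extent_bits()/set_extent_new() changes in extent-tree.c below) come from a cleanup pulled in by this merge: the extent-io helpers no longer take a gfp_t from the caller and supply the allocation mask themselves. A hedged before/after sketch of the calling convention, with illustrative arguments rather than lines taken from this diff:

/* before: every call site passed an allocation mask explicitly */
clear_extent_bits(tree, start, end, EXTENT_DIRTY, GFP_NOFS);

/* after: the helper picks the mask internally (GFP_NOFS in this series) */
clear_extent_bits(tree, start, end, EXTENT_DIRTY);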
48 changes: 24 additions & 24 deletions fs/btrfs/extent-tree.c
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root,
{
u64 end = start + num_bytes - 1;
set_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
set_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
return 0;
}

@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root,
end = start + cache->key.offset - 1;

clear_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
clear_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
}

static int exclude_super_stripes(struct btrfs_root *root,
@@ -980,7 +980,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
* event that tree block loses its owner tree's reference and do the
* back refs conversion.
*
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
*
* The reference count of the block is one and the tree is the block's
* owner tree. Nothing to do in this case.
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}

/*
- * Need to drop our head ref lock and re-aqcuire the
+ * Need to drop our head ref lock and re-acquire the
* delayed ref lock and then re-check to make sure
* nobody got added.
*/
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)

/*
* We don't ever fill up leaves all the way so multiply by 2 just to be
- * closer to what we're really going to want to ouse.
+ * closer to what we're really going to want to use.
*/
return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
}

/*
- * trans->sync means that when we call end_transaciton, we won't
+ * trans->sync means that when we call end_transaction, we won't
* wait on delayed refs
*/
trans->sync = true;
@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
* Called if we need to clear a data reservation for this inode
* Normally in a error case.
*
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
* space framework.
*/
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
* @orig_bytes - the number of bytes we want
* @flush - whether or not we can flush to make our reservation
*
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
* flush out space to make room. It will do this by flushing delalloc if
* possible or committing the transaction. If flush is 0 then no attempts to
@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
* common file/directory operations, they change two fs/file trees
* and root tree, the number of items that the qgroup reserves is
* different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
*/
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
/**
* drop_outstanding_extent - drop an outstanding extent
* @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
*
* This is called when we are freeing up an outstanding extent, either called
* after an error or after an extent is written. This will return the number of
@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
drop_inode_space = 1;

/*
- * If we have more or the same amount of outsanding extents than we have
+ * If we have more or the same amount of outstanding extents than we have
* reserved then we need to leave the reserved extents count alone.
*/
if (BTRFS_I(inode)->outstanding_extents >=
@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
}

/**
- * calc_csum_metadata_size - return the amount of metada space that must be
- * reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ * reserved/freed for the given bytes.
* @inode: the inode we're manipulating
* @num_bytes: the number of bytes in question
* @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5814,7 +5814,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)

/*
* This is tricky, but first we need to figure out how much we
- * free'd from any free-ers that occurred during this
+ * freed from any free-ers that occurred during this
* reservation, so we reset ->csum_bytes to the csum_bytes
* before we dropped our lock, and then call the free for the
* number of bytes that were freed while we were trying our
@@ -5836,7 +5836,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)

/*
* Now reset ->csum_bytes to what it should be. If bytes is
- * more than to_free then we would have free'd more space had we
+ * more than to_free then we would have freed more space had we
* not had an artificially high ->csum_bytes, so we need to free
* the remainder. If bytes is the same or less then we don't
* need to do anything, the other free-ers did the correct
@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
ret = btrfs_discard_extent(root, start,
end + 1 - start, NULL);

- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ clear_extent_dirty(unpin, start, end);
unpin_extent_range(root, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
@@ -7578,7 +7578,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
if (loop == LOOP_CACHING_NOWAIT) {
/*
* We want to skip the LOOP_CACHING_WAIT step if we
- * don't have any unached bgs and we've alrelady done a
+ * don't have any uncached bgs and we've already done a
* full search through.
*/
if (orig_have_caching_bg || !full_search)
@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,

/*
* Mixed block groups will exclude before processing the log so we only
- * need to do the exlude dance if this fs isn't mixed.
+ * need to do the exclude dance if this fs isn't mixed.
*/
if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
buf->start + buf->len - 1, GFP_NOFS);
else
set_extent_new(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ buf->start + buf->len - 1);
} else {
buf->log_index = -1;
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
u64 free_bytes = 0;
int factor;

- /* It's df, we don't care if it's racey */
+ /* It's df, we don't care if it's racy */
if (list_empty(&sinfo->ro_bgs))
return 0;

@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
- EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
btrfs_dec_block_group_ro(root, block_group);
goto end_trans;
}
ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
- EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
btrfs_dec_block_group_ro(root, block_group);
(diffs for the remaining changed files not loaded)
