From 3f4d190c621a90018e3dd202567e6c59cf254dda Mon Sep 17 00:00:00 2001 From: Thomas Bertschinger Date: Fri, 13 Sep 2024 18:11:22 -0600 Subject: [PATCH 1/4] bcachefs: move bch2_xattr_handlers to .rodata A series posted previously moved all of the `struct xattr_handler` tables to .rodata for each filesystem [1]. However, this appears to have been done shortly before bcachefs was merged, so bcachefs was missed at that time. Link: https://lkml.kernel.org/r/20230930050033.41174-1-wedsonaf@gmail.com [1] Cc: Wedson Almeida Filho Signed-off-by: Thomas Bertschinger Signed-off-by: Kent Overstreet --- fs/bcachefs/xattr.c | 2 +- fs/bcachefs/xattr.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c index 56c8d3fe55a4e..9986bc7ffe73e 100644 --- a/fs/bcachefs/xattr.c +++ b/fs/bcachefs/xattr.c @@ -609,7 +609,7 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = { #endif /* NO_BCACHEFS_FS */ -const struct xattr_handler *bch2_xattr_handlers[] = { +const struct xattr_handler * const bch2_xattr_handlers[] = { &bch_xattr_user_handler, &bch_xattr_trusted_handler, &bch_xattr_security_handler, diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h index c188a5ad64cef..2c96de051f3e2 100644 --- a/fs/bcachefs/xattr.h +++ b/fs/bcachefs/xattr.h @@ -44,6 +44,6 @@ int bch2_xattr_set(struct btree_trans *, subvol_inum, ssize_t bch2_xattr_list(struct dentry *, char *, size_t); -extern const struct xattr_handler *bch2_xattr_handlers[]; +extern const struct xattr_handler * const bch2_xattr_handlers[]; #endif /* _BCACHEFS_XATTR_H */ From 7cc822650f9fa6f2f1eccc7acdb43e90c1b2ecdf Mon Sep 17 00:00:00 2001 From: Dennis Lam Date: Wed, 11 Sep 2024 21:16:28 -0400 Subject: [PATCH 2/4] docs: filesystems: bcachefs: fixed some spelling mistakes in the bcachefs coding style page Specifically, fixed spelling of "commit" and pluralization of last sentence. Signed-off-by: Dennis Lam Signed-off-by: Kent Overstreet --- Documentation/filesystems/bcachefs/CodingStyle.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/filesystems/bcachefs/CodingStyle.rst b/Documentation/filesystems/bcachefs/CodingStyle.rst index 01de555e21d85..b29562a6bf555 100644 --- a/Documentation/filesystems/bcachefs/CodingStyle.rst +++ b/Documentation/filesystems/bcachefs/CodingStyle.rst @@ -183,4 +183,4 @@ even better as a code comment. A good code comment is wonderful, but even better is the comment that didn't need to exist because the code was so straightforward as to be obvious; organized into small clean and tidy modules, with clear and descriptive names -for functions and variable, where every line of code has a clear purpose. +for functions and variables, where every line of code has a clear purpose. From 87a3e08121cbb59a314fc727b4f1202782ed56b1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 28 Aug 2024 10:00:56 -0400 Subject: [PATCH 3/4] bcachefs: Switch to memalloc_flags_do() for vmalloc allocations vmalloc doesn't correctly respect gfp flags - gfp flags aren't used for pte allocation, so doing vmalloc/kvmalloc allocations with reclaim unsafe locks is a potential deadlock. Note that we also want to use PF_MEMALLOC_NORECLAIM, not PF_MEMALLOC_NOFS, because when we're doing allocations with btree locks held we have a fallback available - drop locks and do a normal GFP_KERNEL allocation. 
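A sketch of the pattern this series converts callers to (illustrative only: p, size, ret and trans are hypothetical locals; memalloc_flags_do(), drop_locks_do(), PF_MEMALLOC_NORECLAIM and PF_MEMALLOC_NOWARN are the names the patch itself uses):

	/* Attempt the allocation with reclaim forbidden via task flags: */
	p = memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN,
			      kmalloc(size, GFP_KERNEL));
	if (unlikely(!p))
		/* Fall back: drop btree locks, then retry with reclaim allowed: */
		ret = drop_locks_do(trans, ((p = kmalloc(size, GFP_KERNEL)), 0));

This is essentially what the reworked allocate_dropping_locks() below expands to.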
We don't want to be invoking reclaim with btree locks held at all, since
these are big shared locks and overall system performance is sensitive
to hold times.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/acl.c             |  5 ++--
 fs/bcachefs/btree_cache.c     |  3 ++-
 fs/bcachefs/btree_iter.h      | 48 ++++++++++++++++++++---------------
 fs/bcachefs/btree_key_cache.c | 10 ++++----
 fs/bcachefs/ec.c              | 12 ++++-----
 fs/bcachefs/fs.c              |  8 ------
 6 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 87f1be9d4db46..1def61875a6fd 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -137,7 +137,7 @@ static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
 		return NULL;
 
 	acl = allocate_dropping_locks(trans, ret,
-			posix_acl_alloc(count, _gfp));
+			posix_acl_alloc(count, GFP_KERNEL));
 	if (!acl)
 		return ERR_PTR(-ENOMEM);
 	if (ret) {
@@ -427,7 +427,8 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
 	if (ret)
 		goto err;
 
-	ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
+	ret = allocate_dropping_locks_errcode(trans,
+				__posix_acl_chmod(&acl, GFP_KERNEL, mode));
 	if (ret)
 		goto err;
 
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 6e4afb2b54413..7b951b2733068 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -804,7 +804,8 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 
 	mutex_unlock(&bc->lock);
 
-	if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+	if (memalloc_flags_do(PF_MEMALLOC_NORECLAIM,
+			      btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))) {
 		bch2_trans_unlock(trans);
 		if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
 			goto err;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 78e63ad7d380e..aec89e001ddc7 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -6,6 +6,8 @@
 #include "btree_types.h"
 #include "trace.h"
 
+#include <linux/sched/mm.h>
+
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
 void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
 void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
@@ -871,29 +873,33 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
 	(_do) ?: bch2_trans_relock(_trans);			\
 })
 
-#define allocate_dropping_locks_errcode(_trans, _do)		\
-({								\
-	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;			\
-	int _ret = _do;						\
-								\
-	if (bch2_err_matches(_ret, ENOMEM)) {			\
-		_gfp = GFP_KERNEL;				\
-		_ret = drop_locks_do(_trans, _do);		\
-	}							\
-	_ret;							\
+#define memalloc_flags_do(_flags, _do)				\
+({								\
+	unsigned _saved_flags = memalloc_flags_save(_flags);	\
+	typeof(_do) _ret = _do;					\
+	memalloc_noreclaim_restore(_saved_flags);		\
+	_ret;							\
 })
 
-#define allocate_dropping_locks(_trans, _ret, _do)		\
-({								\
-	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;			\
-	typeof(_do) _p = _do;					\
-								\
-	_ret = 0;						\
-	if (unlikely(!_p)) {					\
-		_gfp = GFP_KERNEL;				\
-		_ret = drop_locks_do(_trans, ((_p = _do), 0));	\
-	}							\
-	_p;							\
+#define allocate_dropping_locks_errcode(_trans, _do)		\
+({								\
+	int _ret = memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN, _do);\
+								\
+	if (bch2_err_matches(_ret, ENOMEM)) {			\
+		_ret = drop_locks_do(_trans, _do);		\
+	}							\
+	_ret;							\
+})
+
+#define allocate_dropping_locks(_trans, _ret, _do)		\
+({								\
+	typeof(_do) _p = memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN, _do);\
+								\
+	_ret = 0;						\
+	if (unlikely(!_p)) {					\
+		_ret = drop_locks_do(_trans, ((_p = _do), 0));	\
+	}
\ + _p; \ }) #define bch2_trans_run(_c, _do) \ diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 244610b1d0b59..d91f249735585 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -116,14 +116,14 @@ static void bkey_cached_free(struct btree_key_cache *bc, this_cpu_inc(*bc->nr_pending); } -static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp) +static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s) { - gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE; + gfp_t gfp = GFP_KERNEL|__GFP_ACCOUNT|__GFP_RECLAIMABLE; struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp); if (unlikely(!ck)) return NULL; - ck->k = kmalloc(key_u64s * sizeof(u64), gfp); + ck->k = kmalloc(key_u64s * sizeof(u64), GFP_KERNEL); if (unlikely(!ck->k)) { kmem_cache_free(bch2_key_cache, ck); return NULL; @@ -147,7 +147,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k goto lock; ck = allocate_dropping_locks(trans, ret, - __bkey_cached_alloc(key_u64s, _gfp)); + __bkey_cached_alloc(key_u64s)); if (ret) { if (ck) kfree(ck->k); @@ -241,7 +241,7 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path * mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED); struct bkey_i *new_k = allocate_dropping_locks(trans, ret, - kmalloc(key_u64s * sizeof(u64), _gfp)); + kmalloc(key_u64s * sizeof(u64), GFP_KERNEL)); if (unlikely(!new_k)) { bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u", bch2_btree_id_str(ck->key.btree_id), key_u64s); diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index 1587c6e1866ae..c26319475876a 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -907,12 +907,12 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio, /* stripe bucket accounting: */ -static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp) +static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx) { ec_stripes_heap n, *h = &c->ec_stripes_heap; if (idx >= h->size) { - if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp)) + if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; mutex_lock(&c->ec_stripes_heap_lock); @@ -926,11 +926,11 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp) free_heap(&n); } - if (!genradix_ptr_alloc(&c->stripes, idx, gfp)) + if (!genradix_ptr_alloc(&c->stripes, idx, GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; if (c->gc_pos.phase != GC_PHASE_not_running && - !genradix_ptr_alloc(&c->gc_stripes, idx, gfp)) + !genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; return 0; @@ -940,7 +940,7 @@ static int ec_stripe_mem_alloc(struct btree_trans *trans, struct btree_iter *iter) { return allocate_dropping_locks_errcode(trans, - __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp)); + __ec_stripe_mem_alloc(trans->c, iter->pos.offset)); } /* @@ -2333,7 +2333,7 @@ int bch2_stripes_read(struct bch_fs *c) if (k.k->type != KEY_TYPE_stripe) continue; - ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL); + ret = __ec_stripe_mem_alloc(c, k.k->p.offset); if (ret) break; diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 1aee5bafaae54..e774cb3e6b1ae 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -278,14 +278,6 @@ static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c, } } -#define memalloc_flags_do(_flags, _do) \ -({ \ - 
unsigned _saved_flags = memalloc_flags_save(_flags); \ - typeof(_do) _ret = _do; \ - memalloc_noreclaim_restore(_saved_flags); \ - _ret; \ -}) - static struct inode *bch2_alloc_inode(struct super_block *sb) { BUG(); From e360f376b33432bb267c4baf8997c49bdfab15f7 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Thu, 26 Sep 2024 16:06:25 +0200 Subject: [PATCH 4/4] Fix typos Signed-off-by: Andrea Gelmini --- fs/bcachefs/Kconfig | 2 +- fs/bcachefs/alloc_background.c | 2 +- fs/bcachefs/alloc_foreground.c | 4 ++-- fs/bcachefs/bcachefs.h | 2 +- fs/bcachefs/bcachefs_format.h | 10 ++++------ fs/bcachefs/bcachefs_ioctl.h | 4 ++-- fs/bcachefs/bset.h | 4 ++-- fs/bcachefs/btree_cache.c | 2 +- fs/bcachefs/btree_iter.c | 4 ++-- fs/bcachefs/btree_types.h | 2 +- fs/bcachefs/btree_update.h | 2 +- fs/bcachefs/btree_update_interior.h | 2 +- fs/bcachefs/btree_write_buffer.c | 2 +- fs/bcachefs/checksum.c | 2 +- fs/bcachefs/data_update.c | 4 ++-- fs/bcachefs/disk_accounting.c | 4 ++-- fs/bcachefs/disk_accounting_format.h | 2 +- fs/bcachefs/errcode.h | 2 +- fs/bcachefs/error.c | 2 +- fs/bcachefs/eytzinger.h | 2 +- fs/bcachefs/fs-io.c | 2 +- fs/bcachefs/fs.h | 2 +- fs/bcachefs/fsck.c | 4 ++-- fs/bcachefs/inode.c | 2 +- fs/bcachefs/io_misc.c | 2 +- fs/bcachefs/io_write.c | 2 +- fs/bcachefs/journal.c | 4 ++-- fs/bcachefs/journal_io.c | 2 +- fs/bcachefs/journal_types.h | 4 ++-- fs/bcachefs/mean_and_variance.h | 4 ++-- fs/bcachefs/mean_and_variance_test.c | 2 +- fs/bcachefs/opts.h | 2 +- fs/bcachefs/printbuf.c | 8 ++++---- fs/bcachefs/printbuf.h | 4 ++-- fs/bcachefs/rcu_pending.c | 4 ++-- fs/bcachefs/rebalance.c | 2 +- fs/bcachefs/sb-members_format.h | 2 +- fs/bcachefs/siphash.h | 2 +- fs/bcachefs/six.c | 14 +++++++------- fs/bcachefs/six.h | 4 ++-- fs/bcachefs/snapshot.c | 2 +- fs/bcachefs/snapshot_format.h | 2 +- fs/bcachefs/subvolume.c | 2 +- fs/bcachefs/super-io.c | 2 +- fs/bcachefs/time_stats.h | 2 +- 45 files changed, 70 insertions(+), 72 deletions(-) diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig index 5bac803ea367c..30a54983ba0f3 100644 --- a/fs/bcachefs/Kconfig +++ b/fs/bcachefs/Kconfig @@ -38,7 +38,7 @@ config BCACHEFS_ERASURE_CODING depends on BCACHEFS_FS select QUOTACTL help - This enables the "erasure_code" filesysystem and inode option, which + This enables the "erasure_code" filesystem and inode option, which organizes data into reed-solomon stripes instead of ordinary replication. 
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 645b5ed4babb1..f00e744458448 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -1380,7 +1380,7 @@ static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_tran if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), trans, need_discard_freespace_key_to_invalid_dev_bucket, - "entry in %s btree for nonexistant dev:bucket %llu:%llu", + "entry in %s btree for nonexistent dev:bucket %llu:%llu", bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset)) goto delete; diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index d0e0b56892e39..3c0ded0ce514e 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -353,7 +353,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc if (!bkey_eq(bp_pos, POS_MAX)) { /* * Bucket may have data in it - we don't call - * bc2h_trans_inconnsistent() because fsck hasn't + * bc2h_trans_inconsistent() because fsck hasn't * finished yet */ ob = NULL; @@ -1552,7 +1552,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) mutex_init(&c->write_points_hash_lock); c->write_points_nr = ARRAY_SIZE(c->write_points); - /* open bucket 0 is a sentinal NULL: */ + /* open bucket 0 is a sentinel NULL: */ spin_lock_init(&c->open_buckets[0].lock); for (ob = c->open_buckets + 1; diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index c711d4c27a03f..863429119ba4b 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -733,7 +733,7 @@ struct bch_fs { struct percpu_ref writes; #endif /* - * Analagous to c->writes, for asynchronous ops that don't necessarily + * Analogous to c->writes, for asynchronous ops that don't necessarily * need fs to be read-write */ refcount_t ro_ref; diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index 8c4addddd07e0..bf22c1305f5d1 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -239,7 +239,7 @@ struct bkey { * * Specifically, when i was designing bkey, I wanted the header to be no * bigger than necessary so that bkey_packed could use the rest. That means that - * decently offten extent keys will fit into only 8 bytes, instead of spilling over + * decently often extent keys will fit into only 8 bytes, instead of spilling over * to 16. * * But packed_bkey treats the part after the header - the packed section - @@ -251,7 +251,7 @@ struct bkey { * So that constrains the key part of a bkig endian bkey to start right * after the header. * - * If we ever do a bkey_v2 and need to expand the hedaer by another byte for + * If we ever do a bkey_v2 and need to expand the header by another byte for * some reason - that will clean up this wart. 
*/ __aligned(8) @@ -499,8 +499,6 @@ struct bch_sb_field { #include "disk_groups_format.h" #include "extents_format.h" #include "ec_format.h" -#include "dirent_format.h" -#include "disk_groups_format.h" #include "inode_format.h" #include "journal_seq_blacklist_format.h" #include "logged_ops_format.h" @@ -643,7 +641,7 @@ struct bch_sb_field_ext { /* * field 1: version name * field 2: BCH_VERSION(major, minor) - * field 3: recovery passess required on upgrade + * field 3: recovery passes required on upgrade */ #define BCH_METADATA_VERSIONS() \ x(bkey_renumber, BCH_VERSION(0, 10)) \ @@ -758,7 +756,7 @@ struct bch_sb { /* * Flags: - * BCH_SB_INITALIZED - set on first mount + * BCH_SB_INITIALIZED - set on first mount * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect * behaviour of mount/recovery path: * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h index 3c23bdf788cea..926a1af506abe 100644 --- a/fs/bcachefs/bcachefs_ioctl.h +++ b/fs/bcachefs/bcachefs_ioctl.h @@ -131,7 +131,7 @@ struct bch_ioctl_start { * may be either offline or offline. * * Will fail removing @dev would leave us with insufficient read write devices - * or degraded/unavailable data, unless the approprate BCH_FORCE_IF_* flags are + * or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are * set. */ @@ -154,7 +154,7 @@ struct bch_ioctl_start { * * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would * leave us with insufficient read write devices or degraded/unavailable data, - * unless the approprate BCH_FORCE_IF_* flags are set. + * unless the appropriate BCH_FORCE_IF_* flags are set. */ struct bch_ioctl_disk { diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index 6953d55b72cca..bdd250e1be16c 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -45,7 +45,7 @@ * 4 in memory - we lazily resort as needed. * * We implement code here for creating and maintaining auxiliary search trees - * (described below) for searching an individial bset, and on top of that we + * (described below) for searching an individual bset, and on top of that we * implement a btree iterator. * * BTREE ITERATOR: @@ -178,7 +178,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree * it used to be 64, but I realized the lookup code would touch slightly less * memory if it was 128. * - * It definites the number of bytes (in struct bset) per struct bkey_float in + * It defines the number of bytes (in struct bset) per struct bkey_float in * the auxiliar search tree - when we're done searching the bset_float tree we * have this many bytes left that we do a linear search over. 
* diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 7b951b2733068..3940597078811 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -1154,7 +1154,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * /* * Check b->hash_val _before_ calling btree_node_lock() - this might not * be the node we want anymore, and trying to lock the wrong node could - * cause an unneccessary transaction restart: + * cause an unnecessary transaction restart: */ if (unlikely(!c->opts.btree_node_mem_ptr_optimization || !b || diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index bfe9f0c1e1be8..d2542f8492afc 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -2372,7 +2372,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e } /* - * iter->pos should be mononotically increasing, and always be + * iter->pos should be monotonically increasing, and always be * equal to the key we just returned - except extents can * straddle iter->pos: */ @@ -3070,7 +3070,7 @@ u32 bch2_trans_begin(struct btree_trans *trans) /* * If the transaction wasn't restarted, we're presuming to be - * doing something new: dont keep iterators excpt the ones that + * doing something new: don't keep iterators except the ones that * are in use - except for the subvolumes btree: */ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes) diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 4568a41fefaf6..64a9c66fa1331 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -446,7 +446,7 @@ struct btree_insert_entry { /* Number of btree paths we preallocate, usually enough */ #define BTREE_ITER_INITIAL 64 /* - * Lmiit for btree_trans_too_many_iters(); this is enough that almost all code + * Limit for btree_trans_too_many_iters(); this is enough that almost all code * paths should run inside this limit, and if they don't it usually indicates a * bug (leaking/duplicated btree paths). * diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 60393e98084d7..c1db85b78e84b 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -82,7 +82,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id, * For use when splitting extents in existing snapshots: * * If @old_pos is an interior snapshot node, iterate over descendent snapshot - * nodes: for every descendent snapshot in whiche @old_pos is overwritten and + * nodes: for every descendent snapshot in which @old_pos is overwritten and * not visible, emit a whiteout at @new_pos. 
*/ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans, diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 10f400957f21a..30788fd3197b9 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -116,7 +116,7 @@ struct btree_update { struct keylist parent_keys; /* * Enough room for btree_split's keys without realloc - btree node - * pointers never have crc/compression info, so we only need to acount + * pointers never have crc/compression info, so we only need to account * for the pointers for three keys */ u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3]; diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 3f56b584f8ec2..1dadfdc3377f7 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -451,7 +451,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) * journal replay has to split/rewrite nodes to make room for * its updates. * - * And for those new acounting updates, updates to the same + * And for those new accounting updates, updates to the same * counters get accumulated as they're flushed from the journal * to the write buffer - see the patch for eytzingcer tree * accumulated. So we could only overflow if the number of diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index ce8fc677bef90..b23e573ce23d8 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -22,7 +22,7 @@ /* * bch2_checksum state is an abstraction of the checksum state calculated over different pages. * it features page merging without having the checksum algorithm lose its state. - * for native checksum aglorithms (like crc), a default seed value will do. + * for native checksum algorithms (like crc), a default seed value will do. * for hash-like algorithms, a state needs to be stored */ diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 757b9884ef558..21e4f4247a9ac 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -228,7 +228,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, * other updates * @new: extent with new pointers that we'll be adding to @insert * - * Fist, drop rewrite_ptrs from @new: + * First, drop rewrite_ptrs from @new: */ i = 0; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) { @@ -701,7 +701,7 @@ int bch2_data_update_init(struct btree_trans *trans, /* * If device(s) were set to durability=0 after data was written to them - * we can end up with a duribilty=0 extent, and the normal algorithm + * we can end up with a durability=0 extent, and the normal algorithm * that tries not to increase durability doesn't work: */ if (!(durability_have + durability_removing)) diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index e972e2bca546a..9bc1b40d35601 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -25,7 +25,7 @@ * expensive, so we also have * * - In memory accounting, where accounting is stored as an array of percpu - * counters, indexed by an eytzinger array of disk acounting keys/bpos (which + * counters, indexed by an eytzinger array of disk accounting keys/bpos (which * are the same thing, excepting byte swabbing on big endian). * * Cheap to read, but non persistent. @@ -371,7 +371,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c) * Read out accounting keys for replicas entries, as an array of * bch_replicas_usage entries. 
* - * Note: this may be deprecated/removed at smoe point in the future and replaced + * Note: this may be deprecated/removed at some point in the future and replaced * with something more general, it exists to support the ioctl used by the * 'bcachefs fs usage' command. */ diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h index 7b6e6c97e6aa6..6588f69e98d7e 100644 --- a/fs/bcachefs/disk_accounting_format.h +++ b/fs/bcachefs/disk_accounting_format.h @@ -10,7 +10,7 @@ * Here, the key has considerably more structure than a typical key (bpos); an * accounting key is 'struct disk_accounting_pos', which is a union of bpos. * - * More specifically: a key is just a muliword integer (where word endianness + * More specifically: a key is just a multiword integer (where word endianness * matches native byte order), so we're treating bpos as an opaque 20 byte * integer and mapping bch_accounting_key to that. * diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index 60b7875adada3..2deaffa22a55e 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -303,4 +303,4 @@ static inline long bch2_err_class(long err) const char *bch2_blk_status_to_str(blk_status_t); -#endif /* _BCACHFES_ERRCODE_H */ +#endif /* _BCACHEFS_ERRCODE_H */ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 95afa7bf20205..5f21eb250f533 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -269,7 +269,7 @@ int __bch2_fsck_err(struct bch_fs *c, if (s) { /* * We may be called multiple times for the same error on - * transaction restart - this memoizes instead of asking the user + * transaction restart - this memorizes instead of asking the user * multiple times for the same error: */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h index 0541192d7bc02..13ce67a1c05af 100644 --- a/fs/bcachefs/eytzinger.h +++ b/fs/bcachefs/eytzinger.h @@ -12,7 +12,7 @@ #endif /* - * Traversal for trees in eytzinger layout - a full binary tree layed out in an + * Traversal for trees in eytzinger layout - a full binary tree laid out in an * array. * * Consider using an eytzinger tree any time you would otherwise be doing binary diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 71d0fa3875094..36efbb6ff40dc 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -877,7 +877,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, goto err; /* - * due to alignment, we might have remapped slightly more than requsted + * due to alignment, we might have remapped slightly more than requested */ ret = min((u64) ret << 9, (u64) len); diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h index da74ecc236e7d..f44467f4fc036 100644 --- a/fs/bcachefs/fs.h +++ b/fs/bcachefs/fs.h @@ -33,7 +33,7 @@ struct bch_inode_info { * * XXX: a device may have had a flush issued by some other codepath. It * would be better to keep for each device a sequence number that's - * incremented when we isusue a cache flush, and track here the sequence + * incremented when we issue a cache flush, and track here the sequence * number that needs flushing. 
*/ struct bch_devs_mask ei_devs_need_flush; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 9b3470a975461..ebd9b84010d8a 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -570,7 +570,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see /* * We know that @id is a descendant of @ancestor, we're checking if * we've seen a key that overwrote @ancestor - i.e. also a descendent of - * @ascestor and with @id as a descendent. + * @ancestor and with @id as a descendent. * * But we already know that we're scanning IDs between @id and @ancestor * numerically, since snapshot ID lists are kept sorted, so if we find @@ -2002,7 +2002,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, trans, subvol_fs_path_parent_wrong, - "subvol with wrong fs_path_parent, should be be %u\n%s", + "subvol with wrong fs_path_parent, should be %u\n%s", parent_subvol, (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { struct bkey_i_subvolume *n = diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 6ac0ff7e074ba..752620f6d4439 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1161,7 +1161,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c) /* * if we ran check_inodes() unlinked inodes will have already been * cleaned up but the write buffer will be out of sync; therefore we - * alway need a write buffer flush + * always need a write buffer flush */ ret = bch2_btree_write_buffer_flush_sync(trans); if (ret) diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index 177ed331c00b1..2806c49936b84 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -133,7 +133,7 @@ int bch2_extent_fallocate(struct btree_trans *trans, } /* - * Returns -BCH_ERR_transacton_restart if we had to drop locks: + * Returns -BCH_ERR_transaction_restart if we had to drop locks: */ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, subvol_inum inum, u64 end, diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index d3b5be7fd9bf6..d04c2449dcda5 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -1494,7 +1494,7 @@ static void __bch2_write(struct bch_write_op *op) /* * Sync or no? * - * If we're running asynchronously, wne may still want to block + * If we're running asynchronously, we may still want to block * synchronously here if we weren't able to submit all of the IO at * once, as that signals backpressure to the caller. */ diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index f5f7db50ca310..12249cf95087f 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -167,7 +167,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) j->err_seq = journal_cur_seq(j); spin_unlock(&j->lock); - bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)", + bch_err(c, "Journal stuck! 
Have a pre-reservation but journal full (error %s)", bch2_journal_errors[error]); bch2_journal_debug_to_text(&buf, j); bch_err(c, "%s", buf.buf); @@ -803,7 +803,7 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 seq) unwritten_seq++) { struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq); - /* journal flush already in flight, or flush requseted */ + /* journal flush already in flight, or flush requested */ if (buf->must_flush) goto out; diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 30460bce04bec..916f7dfe6bfec 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -157,7 +157,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, /* * genradixes are indexed by a ulong, not a u64, so we can't index them * by sequence number directly: Assume instead that they will all fall - * within the range of +-2billion of the filrst one we find. + * within the range of +-2billion of the first one we find. */ if (!c->journal_entries_base_seq) c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index 19183fcf7ad7f..952211f56ede2 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -107,7 +107,7 @@ union journal_res_state { #define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */ /* - * We stash some journal state as sentinal values in cur_entry_offset: + * We stash some journal state as sentinel values in cur_entry_offset: * note - cur_entry_offset is in units of u64s */ #define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1) @@ -202,7 +202,7 @@ struct journal { darray_u64 early_journal_entries; /* - * Protects journal_buf->data, when accessing without a jorunal + * Protects journal_buf->data, when accessing without a journal * reservation: for synchronization between the btree write buffer code * and the journal write path: */ diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h index 47e4a3c3d26e7..281d6f9d1a74f 100644 --- a/fs/bcachefs/mean_and_variance.h +++ b/fs/bcachefs/mean_and_variance.h @@ -152,7 +152,7 @@ struct mean_and_variance { u128_u sum_squares; }; -/* expontentially weighted variant */ +/* exponentially weighted variant */ struct mean_and_variance_weighted { s64 mean; u64 variance; @@ -200,4 +200,4 @@ u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s, u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s, u8 weight); -#endif // MEAN_AND_VAIRANCE_H_ +#endif // MEAN_AND_VARIANCE_H_ diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c index e9d9c0212e44b..86f38db112d8c 100644 --- a/fs/bcachefs/mean_and_variance_test.c +++ b/fs/bcachefs/mean_and_variance_test.c @@ -25,7 +25,7 @@ static void mean_and_variance_basic_test(struct kunit *test) } /* - * Test values computed using a spreadsheet from the psuedocode at the bottom: + * Test values computed using a spreadsheet from the pseudocode at the bottom: * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf */ diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index cb2e244a24298..3233bfc3716c0 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -503,7 +503,7 @@ enum fsck_err_opts { OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ BCH2_NO_SB_OPT, true, \ - NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\ + NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\ " prefetched sequentially") #define BCH_DEV_OPT_SETTERS() \ diff 
--git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c index 4cf5a2af1e6ff..855c7c07b6185 100644 --- a/fs/bcachefs/printbuf.c +++ b/fs/bcachefs/printbuf.c @@ -236,7 +236,7 @@ void bch2_printbuf_tabstop_pop(struct printbuf *buf) * bch2_printbuf_tabstop_set() - add a tabstop, n spaces from the previous tabstop * * @buf: printbuf to control - * @spaces: number of spaces from previous tabpstop + * @spaces: number of spaces from previous tabstop * * In the future this function may allocate memory if setting more than * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start @@ -388,7 +388,7 @@ void bch2_prt_tab_rjust(struct printbuf *buf) * @str: string to print * @count: number of bytes to print * - * The following contol characters are handled as so: + * The following control characters are handled as so: * \n: prt_newline newline that obeys current indent level * \t: prt_tab advance to next tabstop * \r: prt_tab_rjust advance to next tabstop, with right justification @@ -435,7 +435,7 @@ void bch2_prt_human_readable_s64(struct printbuf *out, s64 v) * @out: output printbuf * @v: integer to print * - * Units are either raw (default), or human reabable units (controlled via + * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_u64(struct printbuf *out, u64 v) @@ -451,7 +451,7 @@ void bch2_prt_units_u64(struct printbuf *out, u64 v) * @out: output printbuf * @v: integer to print * - * Units are either raw (default), or human reabable units (controlled via + * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_s64(struct printbuf *out, s64 v) diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h index 1d570387b77f1..bfb2ca11afe4c 100644 --- a/fs/bcachefs/printbuf.h +++ b/fs/bcachefs/printbuf.h @@ -25,7 +25,7 @@ * everything to the kernel log buffer, and then those pretty-printers can be * used by other code that outputs to kernel log, sysfs, debugfs, etc. * - * Memory allocation: Outputing to a printbuf may allocate memory. This + * Memory allocation: Outputting to a printbuf may allocate memory. This * allocation is done with GFP_KERNEL, by default: use the newer * memalloc_*_(save|restore) functions as needed. * @@ -56,7 +56,7 @@ * next tabstop - right justifying it. * * Make sure you use prt_newline() instead of \n in the format string for indent - * level and tabstops to work corretly. + * level and tabstops to work correctly. * * Output units: printbuf->units exists to tell pretty-printers how to output * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c index 40a20192eee89..62812f742caa8 100644 --- a/fs/bcachefs/rcu_pending.c +++ b/fs/bcachefs/rcu_pending.c @@ -338,7 +338,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, /* * kvfree_rcu_mightsleep(): we weren't passed an * rcu_head, but we need one: use the low bit of the - * ponter to free to flag that the head needs to be + * pointer to free to flag that the head needs to be * freed as well: */ ptr = (void *)(((unsigned long) ptr)|1UL); @@ -385,7 +385,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, /* * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via - * pending->pracess) once grace period elapses. + * pending->process) once grace period elapses. 
* * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall * back to a linked list. diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 2d299a37cf07d..27ed8267a3839 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -451,7 +451,7 @@ void bch2_rebalance_stop(struct bch_fs *c) c->rebalance.thread = NULL; if (p) { - /* for sychronizing with rebalance_wakeup() */ + /* for synchronizing with rebalance_wakeup() */ synchronize_rcu(); kthread_stop(p); diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index d727d2dfda08f..0a9e17ba104af 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -9,7 +9,7 @@ #define BCH_SB_MEMBERS_MAX 64 /* - * Sentinal value - indicates a device that does not exist + * Sentinel value - indicates a device that does not exist */ #define BCH_SB_MEMBER_INVALID 255 diff --git a/fs/bcachefs/siphash.h b/fs/bcachefs/siphash.h index 3dfaf34a43b28..b1374d9e1c1a7 100644 --- a/fs/bcachefs/siphash.h +++ b/fs/bcachefs/siphash.h @@ -36,7 +36,7 @@ * optimized for speed on short messages returning a 64bit hash/digest value. * * The number of rounds is defined during the initialization: - * SipHash24_Init() for the fast and resonable strong version + * SipHash24_Init() for the fast and reasonable strong version * SipHash48_Init() for the strong version (half as fast) * * struct SIPHASH_CTX ctx; diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 3a494c5d12478..dc157e719b6be 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -111,7 +111,7 @@ static inline unsigned pcpu_read_count(struct six_lock *lock) * Returns 1 on success, 0 on failure * * In percpu reader mode, a failed trylock may cause a spurious trylock failure - * for anoter thread taking the competing lock type, and we may havve to do a + * for another thread taking the competing lock type, and we may have to do a * wakeup: when a wakeup is required, we return -1 - wakeup_type. */ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, @@ -228,7 +228,7 @@ static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_typ /* * Similar to percpu_rwsem_wake_function(), we need to guard - * against the wakee noticing w->lock_acquired, returning, and + * against the wake noticing w->lock_acquired, returning, and * then exiting before we do the wakeup: */ task = get_task_struct(w->task); @@ -591,7 +591,7 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type) * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_ * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment()) was used), * this decrements the 'lock held' counter by one. * * For example: @@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(six_unlock_ip); /** * six_lock_downgrade - convert an intent lock to a read lock - * @lock: lock to dowgrade + * @lock: lock to downgrade * * @lock will have read count incremented and intent count decremented */ @@ -744,7 +744,7 @@ EXPORT_SYMBOL_GPL(six_lock_increment); * six_lock_wakeup_all - wake up all waiters on @lock * @lock: lock to wake up waiters for * - * Wakeing up waiters will cause them to re-run should_sleep_fn, which may then + * Waking up waiters will cause them to re-run should_sleep_fn, which may then * abort the lock operation. 
* * This function is never needed in a bug-free program; it's only useful in @@ -792,7 +792,7 @@ EXPORT_SYMBOL_GPL(six_lock_counts); * @lock: lock to add/subtract readers for * @nr: reader count to add/subtract * - * When an upper layer is implementing lock reentrency, we may have both read + * When an upper layer is implementing lock reentrancy, we may have both read * and intent locks on the same lock. * * When we need to take a write lock, the read locks will cause self-deadlock, @@ -823,7 +823,7 @@ EXPORT_SYMBOL_GPL(six_lock_readers_add); * six_lock_exit - release resources held by a lock prior to freeing * @lock: lock to exit * - * When a lock was initialized in percpu mode (SIX_OLCK_INIT_PCPU), this is + * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is * required to free the percpu read counts. */ void six_lock_exit(struct six_lock *lock) diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index 68d46fd7f3912..d110f88e04802 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -79,7 +79,7 @@ * six_unlock_read(&foo->lock); * foo->lock is now fully unlocked. * - * Since the intent state supercedes read, it's legal to increment the read + * Since the intent state supersedes read, it's legal to increment the read * counter when holding an intent lock, but not the reverse. * * A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write) @@ -296,7 +296,7 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long * @lock: lock to unlock * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment()) was used), * this decrements the 'lock held' counter by one. * * For example: diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index dff83ebbd912c..fe3593dd18f1b 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -1419,7 +1419,7 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans, * position, we're only going to keep the one in the newest * snapshot (we delete the others above) - the rest have been * overwritten and are redundant, and for the key we're going to keep we - * need to move it to the equivalance class ID if it's not there + * need to move it to the equivalence class ID if it's not there * already. */ if (equiv != k.k->p.snapshot) { diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h index aabcd3a74cd95..73c2dce0a320e 100644 --- a/fs/bcachefs/snapshot_format.h +++ b/fs/bcachefs/snapshot_format.h @@ -23,7 +23,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2) /* * Snapshot trees: * - * The snapshot_trees btree gives us persistent indentifier for each tree of + * The snapshot_trees btree gives us persistent identifier for each tree of * bch_snapshot nodes, and allow us to record and easily find the root/master * subvolume that other snapshots were created from: */ diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index dbe834cb349f4..ef7235b0e9f47 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -383,7 +383,7 @@ static int bch2_subvolume_reparent(struct btree_trans *trans, * structure of how snapshot subvolumes were created - the parent subvolume of * each snapshot subvolume. 
* - * When a subvolume is deleted, we scan for child subvolumes and reparant them, + * When a subvolume is deleted, we scan for children subvolumes and fix them, * to avoid dangling references: */ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete) diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index d86d5dae54c9d..cd09a796b96de 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -405,7 +405,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, /* * Been seeing a bug where these are getting inexplicably * zeroed, so we're now validating them, but we have to be - * careful not to preven people's filesystems from mounting: + * careful not to prevent people's filesystems from mounting: */ if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb)) SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000); diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h index dc6493f7bbabc..40ade9bf15d7a 100644 --- a/fs/bcachefs/time_stats.h +++ b/fs/bcachefs/time_stats.h @@ -12,7 +12,7 @@ * - sum of all event durations * - average event duration, standard and weighted * - standard deviation of event durations, standard and weighted - * and analagous statistics for the frequency of events + * and analogous statistics for the frequency of events * * We provide both mean and weighted mean (exponentially weighted), and standard * deviation and weighted standard deviation, to give an efficient-to-compute