
bcachefs: Move bch_extent_rebalance code to rebalance.c

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2024-10-28 23:23:18 -04:00
parent a652c56590
commit 161d13835e
8 changed files with 251 additions and 238 deletions

fs/bcachefs/buckets.c

@@ -18,6 +18,7 @@
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "rebalance.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"

fs/bcachefs/extents.c

@@ -1436,105 +1436,6 @@ void bch2_ptr_swab(struct bkey_s k)
}
}
const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
bkey_extent_entry_for_each(ptrs, entry)
if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
return &entry->rebalance;
return NULL;
}
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) |
bch2_bkey_ptrs_need_move(c, opts, ptrs);
}
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k);
if (!opts)
return 0;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
u64 sectors = 0;
if (opts->background_compression) {
unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
p.ptr.unwritten) {
sectors = 0;
goto incompressible;
}
if (!p.ptr.cached && p.crc.compression_type != compression_type)
sectors += p.crc.compressed_size;
}
}
incompressible:
if (opts->background_target &&
bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target)) {
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
sectors += p.crc.compressed_size;
}
return sectors;
}
bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts,
struct bkey_s_c k)
{
if (!bkey_extent_is_direct_data(k.k))
return 0;
const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);
if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
struct bch_extent_rebalance new = io_opts_to_rebalance_opts(opts);
return old == NULL || memcmp(old, &new, sizeof(new));
} else {
return old != NULL;
}
}
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
struct bkey_i *_k)
{
if (!bkey_extent_is_direct_data(&_k->k))
return 0;
struct bkey_s k = bkey_i_to_s(_k);
struct bch_extent_rebalance *old =
(struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) {
if (!old) {
old = bkey_val_end(k);
k.k->u64s += sizeof(*old) / sizeof(u64);
}
*old = io_opts_to_rebalance_opts(opts);
} else {
if (old)
extent_entry_drop(k, (union bch_extent_entry *) old);
}
return 0;
}
/* Generic extent code: */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)

fs/bcachefs/extents.h

@@ -710,13 +710,6 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
void bch2_ptr_swab(struct bkey_s);
const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_s_c);
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);
bool bch2_bkey_rebalance_needs_update(struct bch_fs *, struct bch_io_opts *, struct bkey_s_c);
int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_i *);
/* Generic extent code: */
enum bch_extent_overlap {

fs/bcachefs/extents_format.h

@@ -201,52 +201,8 @@ struct bch_extent_stripe_ptr {
#endif
};
struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:6,
unused:3,
promote_target_from_inode:1,
erasure_code_from_inode:1,
data_checksum_from_inode:1,
background_compression_from_inode:1,
data_replicas_from_inode:1,
background_target_from_inode:1,
promote_target:16,
erasure_code:1,
data_checksum:4,
data_replicas:4,
background_compression:8, /* enum bch_compression_opt */
background_target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 background_target:16,
background_compression:8,
data_replicas:4,
data_checksum:4,
erasure_code:1,
promote_target:16,
background_target_from_inode:1,
data_replicas_from_inode:1,
background_compression_from_inode:1,
data_checksum_from_inode:1,
erasure_code_from_inode:1,
promote_target_from_inode:1,
unused:3,
type:6;
#endif
};
/* subset of BCH_INODE_OPTS */
#define BCH_REBALANCE_OPTS() \
x(data_checksum) \
x(background_compression) \
x(data_replicas) \
x(promote_target) \
x(background_target) \
x(erasure_code)
/* bch_extent_rebalance: */
#include "rebalance_format.h"
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64

fs/bcachefs/move.c

@@ -21,6 +21,7 @@
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "rebalance.h"
#include "replicas.h"
#include "snapshot.h"
#include "super-io.h"
@@ -379,44 +380,6 @@ int bch2_move_extent(struct moving_context *ctxt,
return ret;
}
static int get_update_rebalance_opts(struct btree_trans *trans,
struct bch_io_opts *io_opts,
struct btree_iter *iter,
struct bkey_s_c k)
{
BUG_ON(iter->flags & BTREE_ITER_is_extents);
BUG_ON(iter->flags & BTREE_ITER_filter_snapshots);
const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v
? bch2_bkey_rebalance_opts(k) : NULL;
if (r) {
#define x(_name) \
if (r->_name##_from_inode) { \
io_opts->_name = r->_name; \
io_opts->_name##_from_inode = true; \
}
BCH_REBALANCE_OPTS()
#undef x
}
if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k))
return 0;
struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8);
int ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
bkey_reassemble(n, k);
/* On successful transaction commit, @k was invalidated: */
return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?:
bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, 0) ?:
-BCH_ERR_transaction_restart_nested;
}
static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
struct per_snapshot_io_opts *io_opts,
struct btree_iter *extent_iter,
@@ -463,7 +426,7 @@ static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
break;
}
out:
ret = get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k);
ret = bch2_get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k);
if (ret)
return ERR_PTR(ret);
return opts_ret;
@@ -497,7 +460,7 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
}
bch2_trans_iter_exit(trans, &inode_iter);
out:
return get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k);
return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k);
}
int bch2_move_ratelimit(struct moving_context *ctxt)

fs/bcachefs/rebalance.c

@@ -24,6 +24,192 @@
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
/* bch_extent_rebalance: */
static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
bkey_extent_entry_for_each(ptrs, entry)
if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
return &entry->rebalance;
return NULL;
}
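/*
 * Illustration (not part of this commit): within rebalance.c, a caller can
 * probe a key for stored rebalance options like so --
 *
 *	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
 *	if (r && r->background_compression)
 *		...the extent still carries a background compression option...
 */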
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_s_c k,
struct bkey_ptrs_c ptrs)
{
if (!opts->background_compression)
return 0;
unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned ptr_bit = 1;
unsigned rewrite_ptrs = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
p.ptr.unwritten)
return 0;
if (!p.ptr.cached && p.crc.compression_type != compression_type)
rewrite_ptrs |= ptr_bit;
ptr_bit <<= 1;
}
return rewrite_ptrs;
}
static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_ptrs_c ptrs)
{
if (!opts->background_target ||
!bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target))
return 0;
unsigned ptr_bit = 1;
unsigned rewrite_ptrs = 0;
bkey_for_each_ptr(ptrs, ptr) {
if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
rewrite_ptrs |= ptr_bit;
ptr_bit <<= 1;
}
return rewrite_ptrs;
}
static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) |
bch2_bkey_ptrs_need_move(c, opts, ptrs);
}
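/*
 * Illustration (not part of this commit): the two helpers above return
 * per-pointer bitmasks that are OR'd together here; bit i corresponds to
 * the i'th pointer in the key. Walking the result might look like:
 *
 *	unsigned rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, opts, k);
 *	for (unsigned i = 0; rewrite_ptrs; i++, rewrite_ptrs >>= 1)
 *		if (rewrite_ptrs & 1)
 *			pr_debug("ptr %u needs rewrite\n", i);
 */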
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k);
if (!opts)
return 0;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
u64 sectors = 0;
if (opts->background_compression) {
unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
p.ptr.unwritten) {
sectors = 0;
goto incompressible;
}
if (!p.ptr.cached && p.crc.compression_type != compression_type)
sectors += p.crc.compressed_size;
}
}
incompressible:
if (opts->background_target &&
bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target)) {
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
sectors += p.crc.compressed_size;
}
return sectors;
}
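/*
 * Worked example for the accounting above, with assumed values: an
 * uncompressed extent with two non-cached 128-sector pointers, where
 * background_compression is set and one pointer also lies outside
 * background_target:
 *
 *	compression pass:  128 + 128 = 256 sectors
 *	move pass:                     128 sectors
 *	returned total:                384 sectors
 */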
static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts,
struct bkey_s_c k)
{
if (!bkey_extent_is_direct_data(k.k))
return 0;
const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);
if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
struct bch_extent_rebalance new = io_opts_to_rebalance_opts(opts);
return old == NULL || memcmp(old, &new, sizeof(new));
} else {
return old != NULL;
}
}
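/*
 * The branches above as a decision table (illustrative):
 *
 *	reflink_v or ptrs need work? | rebalance entry        | update?
 *	-----------------------------+------------------------+------------
 *	yes                          | absent or stale        | yes
 *	yes                          | matches current opts   | no
 *	no                           | present                | yes (drop)
 *	no                           | absent                 | no
 */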
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
struct bkey_i *_k)
{
if (!bkey_extent_is_direct_data(&_k->k))
return 0;
struct bkey_s k = bkey_i_to_s(_k);
struct bch_extent_rebalance *old =
(struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) {
if (!old) {
old = bkey_val_end(k);
k.k->u64s += sizeof(*old) / sizeof(u64);
}
*old = io_opts_to_rebalance_opts(opts);
} else {
if (old)
extent_entry_drop(k, (union bch_extent_entry *) old);
}
return 0;
}
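/*
 * Note on the size arithmetic above: struct bch_extent_rebalance is a
 * single __u64 bitfield, so sizeof(*old) / sizeof(u64) extends the key by
 * exactly one u64 -- which is also why bch2_get_update_rebalance_opts()
 * below reserves bkey_bytes(k.k) + 8. A sanity check one could add (a
 * sketch, not in this commit):
 *
 *	static_assert(sizeof(struct bch_extent_rebalance) == sizeof(u64));
 */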
int bch2_get_update_rebalance_opts(struct btree_trans *trans,
struct bch_io_opts *io_opts,
struct btree_iter *iter,
struct bkey_s_c k)
{
BUG_ON(iter->flags & BTREE_ITER_is_extents);
BUG_ON(iter->flags & BTREE_ITER_filter_snapshots);
const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v
? bch2_bkey_rebalance_opts(k) : NULL;
if (r) {
#define x(_name) \
if (r->_name##_from_inode) { \
io_opts->_name = r->_name; \
io_opts->_name##_from_inode = true; \
}
BCH_REBALANCE_OPTS()
#undef x
}
if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k))
return 0;
struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8);
int ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
bkey_reassemble(n, k);
/* On successful transaction commit, @k was invalidated: */
return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?:
bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, 0) ?:
-BCH_ERR_transaction_restart_nested;
}
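/*
 * For reference, the local x() macro above makes BCH_REBALANCE_OPTS()
 * expand to one copy-back block per option; for data_checksum alone the
 * expansion is equivalent to:
 *
 *	if (r->data_checksum_from_inode) {
 *		io_opts->data_checksum = r->data_checksum;
 *		io_opts->data_checksum_from_inode = true;
 *	}
 */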
#define REBALANCE_WORK_SCAN_OFFSET (U64_MAX - 1)
static const char * const bch2_rebalance_state_strs[] = {

fs/bcachefs/rebalance.h

@@ -6,52 +6,12 @@
#include "disk_groups.h"
#include "rebalance_types.h"
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_s_c k,
struct bkey_ptrs_c ptrs)
{
if (!opts->background_compression)
return 0;
unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned ptr_bit = 1;
unsigned rewrite_ptrs = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
p.ptr.unwritten)
return 0;
if (!p.ptr.cached && p.crc.compression_type != compression_type)
rewrite_ptrs |= ptr_bit;
ptr_bit <<= 1;
}
return rewrite_ptrs;
}
static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_ptrs_c ptrs)
{
if (!opts->background_target ||
!bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target))
return 0;
unsigned ptr_bit = 1;
unsigned rewrite_ptrs = 0;
bkey_for_each_ptr(ptrs, ptr) {
if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
rewrite_ptrs |= ptr_bit;
ptr_bit <<= 1;
}
return rewrite_ptrs;
}
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);
int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_i *);
int bch2_get_update_rebalance_opts(struct btree_trans *,
struct bch_io_opts *,
struct btree_iter *,
struct bkey_s_c);
int bch2_set_rebalance_needs_scan_trans(struct btree_trans *, u64);
int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum);

fs/bcachefs/rebalance_format.h (new file)

@@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_REBALANCE_FORMAT_H
#define _BCACHEFS_REBALANCE_FORMAT_H
struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:6,
unused:3,
promote_target_from_inode:1,
erasure_code_from_inode:1,
data_checksum_from_inode:1,
background_compression_from_inode:1,
data_replicas_from_inode:1,
background_target_from_inode:1,
promote_target:16,
erasure_code:1,
data_checksum:4,
data_replicas:4,
background_compression:8, /* enum bch_compression_opt */
background_target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 background_target:16,
background_compression:8,
data_replicas:4,
data_checksum:4,
erasure_code:1,
promote_target:16,
background_target_from_inode:1,
data_replicas_from_inode:1,
background_compression_from_inode:1,
data_checksum_from_inode:1,
erasure_code_from_inode:1,
promote_target_from_inode:1,
unused:3,
type:6;
#endif
};
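/*
 * The two member orderings mirror each other, so this 64-bit word has the
 * same layout on little- and big-endian hosts (bit 0 = LSB):
 *
 *	bits  0..5	type
 *	bits  6..8	unused
 *	bits  9..14	*_from_inode flags
 *	bits 15..30	promote_target
 *	bit  31		erasure_code
 *	bits 32..35	data_checksum
 *	bits 36..39	data_replicas
 *	bits 40..47	background_compression
 *	bits 48..63	background_target
 */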
/* subset of BCH_INODE_OPTS */
#define BCH_REBALANCE_OPTS() \
x(data_checksum) \
x(background_compression) \
x(data_replicas) \
x(promote_target) \
x(background_target) \
x(erasure_code)
#endif /* _BCACHEFS_REBALANCE_FORMAT_H */