mirror of https://github.com/fail0verflow/switch-linux.git (synced 2025-05-04 02:34:21 -04:00)
bcache: Clean up keylist code
More random refactoring.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 4f3d40147b
commit c2f95ae2eb
5 changed files with 57 additions and 52 deletions

drivers/md/bcache/bset.c

@@ -14,22 +14,12 @@
 
 /* Keylists */
 
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
-{
-        *dest = *src;
-
-        if (src->list == src->d) {
-                size_t n = (uint64_t *) src->top - src->d;
-                dest->top = (struct bkey *) &dest->d[n];
-                dest->list = dest->d;
-        }
-}
-
 int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
 {
-        unsigned oldsize = (uint64_t *) l->top - l->list;
-        unsigned newsize = oldsize + 2 + nptrs;
-        uint64_t *new;
+        size_t oldsize = bch_keylist_nkeys(l);
+        size_t newsize = oldsize + 2 + nptrs;
+        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+        uint64_t *new_keys;
 
         /* The journalling code doesn't handle the case where the keys to insert
          * is bigger than an empty write: If we just return -ENOMEM here,
@@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
             roundup_pow_of_two(oldsize) == newsize)
                 return 0;
 
-        new = krealloc(l->list == l->d ? NULL : l->list,
-                       sizeof(uint64_t) * newsize, GFP_NOIO);
+        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
 
-        if (!new)
+        if (!new_keys)
                 return -ENOMEM;
 
-        if (l->list == l->d)
-                memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+        if (!old_keys)
+                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
 
-        l->list = new;
-        l->top = (struct bkey *) (&l->list[oldsize]);
+        l->keys_p = new_keys;
+        l->top_p = new_keys + oldsize;
 
         return 0;
 }
 
 struct bkey *bch_keylist_pop(struct keylist *l)
 {
-        struct bkey *k = l->bottom;
+        struct bkey *k = l->keys;
 
         if (k == l->top)
                 return NULL;
@@ -75,14 +64,11 @@ struct bkey *bch_keylist_pop(struct keylist *l)
 
 void bch_keylist_pop_front(struct keylist *l)
 {
-        struct bkey *next = bkey_next(l->bottom);
-        size_t bytes = ((void *) l->top) - ((void *) next);
+        l->top_p -= bkey_u64s(l->keys);
 
-        memmove(l->bottom,
-                next,
-                bytes);
-
-        l->top = ((void *) l->bottom) + bytes;
+        memmove(l->keys,
+                bkey_next(l->keys),
+                bch_keylist_bytes(l));
 }
 
 /* Pointer validation */
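
The pop_front rewrite is the least mechanical change above, so a standalone illustration may help. The following is a minimal sketch, not bcache code: toy_keylist and the fixed two-word key width (a stand-in for the variable-width bkey_u64s()) are invented for the example. It shows why shrinking top_p *before* the memmove() is correct: after the shrink, the list's size in u64s is exactly the size of the surviving tail.

/*
 * Standalone sketch of the new keylist layout -- not bcache code.
 * Keys live in a flat uint64_t array; keys_p marks the first key and
 * top_p one past the last. A fixed two-word key width stands in for
 * bkey_u64s(), which in bcache varies per key.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_U64S 2                      /* pretend every key is 2 u64s */

struct toy_keylist {
        uint64_t *keys_p;               /* first key */
        uint64_t *top_p;                /* one past the last key */
        uint64_t inline_keys[16];
};

static size_t toy_nkeys(struct toy_keylist *l)
{
        return l->top_p - l->keys_p;    /* mirrors bch_keylist_nkeys() */
}

static void toy_pop_front(struct toy_keylist *l)
{
        /*
         * Same order as the patched bch_keylist_pop_front(): shrink
         * first, so the byte count passed to memmove() is exactly the
         * size of the tail that survives.
         */
        l->top_p -= KEY_U64S;
        memmove(l->keys_p, l->keys_p + KEY_U64S,
                toy_nkeys(l) * sizeof(uint64_t));
}

int main(void)
{
        struct toy_keylist l;
        uint64_t i;

        l.keys_p = l.top_p = l.inline_keys;     /* bch_keylist_init() */

        for (i = 0; i < 6; i++)                 /* push three 2-word keys */
                *l.top_p++ = i;

        toy_pop_front(&l);
        printf("%zu u64s left, front word = %llu\n",
               toy_nkeys(&l), (unsigned long long)l.inline_keys[0]);
        /* prints: 4 u64s left, front word = 2 */
        return 0;
}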
drivers/md/bcache/bset.h

@@ -227,20 +227,23 @@ static inline struct bkey *bkey_next(const struct bkey *k)
 /* Keylists */
 
 struct keylist {
-        struct bkey             *top;
         union {
-                uint64_t                *list;
-                struct bkey             *bottom;
+                struct bkey             *keys;
+                uint64_t                *keys_p;
+        };
+        union {
+                struct bkey             *top;
+                uint64_t                *top_p;
         };
 
         /* Enough room for btree_split's keys without realloc */
 #define KEYLIST_INLINE          16
-        uint64_t                d[KEYLIST_INLINE];
+        uint64_t                inline_keys[KEYLIST_INLINE];
 };
 
 static inline void bch_keylist_init(struct keylist *l)
 {
-        l->top = (void *) (l->list = l->d);
+        l->top_p = l->keys_p = l->inline_keys;
 }
 
 static inline void bch_keylist_push(struct keylist *l)
@@ -256,16 +259,30 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
 
 static inline bool bch_keylist_empty(struct keylist *l)
 {
-        return l->top == (void *) l->list;
+        return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+        l->top = l->keys;
 }
 
 static inline void bch_keylist_free(struct keylist *l)
 {
-        if (l->list != l->d)
-                kfree(l->list);
+        if (l->keys_p != l->inline_keys)
+                kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+        return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+        return bch_keylist_nkeys(l) * sizeof(uint64_t);
 }
 
-void bch_keylist_copy(struct keylist *, struct keylist *);
 struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
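
The struct change replaces one explicitly named union with two anonymous unions, so every pointer now has both a bkey-typed name (for walking keys) and a u64-typed name (for sizing arithmetic), with no casts at the use sites. A toy model of how the two views stay in lockstep -- not bcache code; toy_bkey is a fixed-size stand-in for the real variable-length bkey, and anonymous unions are C11 (and a longstanding GNU extension, which is why kernel code can use them):

/* Sketch of the anonymous-union aliasing -- build with -std=c11. */
#include <stdint.h>
#include <assert.h>

struct toy_bkey {
        uint64_t header;                /* stand-in for the real bkey header */
        uint64_t ptr;
};

struct toy_keylist {
        union {
                struct toy_bkey *keys;          /* typed view */
                uint64_t        *keys_p;        /* word view */
        };
        union {
                struct toy_bkey *top;
                uint64_t        *top_p;
        };
        uint64_t inline_keys[16];
};

int main(void)
{
        struct toy_keylist l;

        l.top_p = l.keys_p = l.inline_keys;

        /* "push" one 2-word key via the typed view */
        l.top++;

        /* the word view sees the same move: sizes come out in u64 units */
        assert(l.top_p - l.keys_p == 2);
        assert((void *)l.keys == (void *)l.inline_keys);
        return 0;
}

(The typed `top++` only works here because the toy keys are fixed-size; real bkeys vary in length, which is why bcache advances with bkey_next() and does its size math on the u64 view.)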
drivers/md/bcache/btree.c

@@ -1866,7 +1866,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 
         while (!bch_keylist_empty(insert_keys)) {
                 struct bset *i = write_block(b);
-                struct bkey *k = insert_keys->bottom;
+                struct bkey *k = insert_keys->keys;
 
                 if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
                     > btree_blocks(b))
@@ -1887,10 +1887,10 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                         }
 #endif
                         BKEY_PADDED(key) temp;
-                        bkey_copy(&temp.key, insert_keys->bottom);
+                        bkey_copy(&temp.key, insert_keys->keys);
 
                         bch_cut_back(&b->key, &temp.key);
-                        bch_cut_front(&b->key, insert_keys->bottom);
+                        bch_cut_front(&b->key, insert_keys->keys);
 
                         ret |= btree_insert_key(b, op, &temp.key);
                         break;
@@ -1984,7 +1984,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
         } else if (!b->parent) {
                 /* Root filled up but didn't need to be split */
 
-                parent_keys->top = parent_keys->bottom;
+                bch_keylist_reset(parent_keys);
                 closure_sync(&op->cl);
                 bch_btree_set_root(n1);
         } else {
@@ -2118,12 +2118,12 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
         if (b->level) {
                 struct bkey *k;
 
-                k = bch_next_recurse_key(b, &START_KEY(keys->bottom));
+                k = bch_next_recurse_key(b, &START_KEY(keys->keys));
                 if (!k) {
                         btree_bug(b, "no key to recurse on at level %i/%i",
                                   b->level, b->c->root->level);
 
-                        keys->top = keys->bottom;
+                        bch_keylist_reset(keys);
                         return -EIO;
                 }
 
drivers/md/bcache/journal.c

@@ -713,7 +713,7 @@ void bch_journal(struct closure *cl)
         struct btree_op *op = container_of(cl, struct btree_op, cl);
         struct cache_set *c = op->c;
         struct journal_write *w;
-        size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
+        size_t sectors, nkeys;
 
         if (op->type != BTREE_INSERT ||
             !CACHE_SYNC(&c->sb))
@@ -741,10 +741,12 @@ void bch_journal(struct closure *cl)
         }
 
         w = c->journal.cur;
-        b = __set_blocks(w->data, w->data->keys + n, c);
+        nkeys = w->data->keys + bch_keylist_nkeys(&op->keys);
+        sectors = __set_blocks(w->data, nkeys, c) * c->sb.block_size;
 
-        if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
-            b > c->journal.blocks_free) {
+        if (sectors > min_t(size_t,
+                            c->journal.blocks_free * c->sb.block_size,
+                            PAGE_SECTORS << JSET_BITS)) {
                 trace_bcache_journal_entry_full(c);
 
                 /*
@@ -760,8 +762,8 @@ void bch_journal(struct closure *cl)
                 continue_at(cl, bch_journal, bcache_wq);
         }
 
-        memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
-        w->data->keys += n;
+        memcpy(end(w->data), op->keys.keys, bch_keylist_bytes(&op->keys));
+        w->data->keys += bch_keylist_nkeys(&op->keys);
 
         op->journal = &fifo_back(&c->journal.pin);
         atomic_inc(op->journal);
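
The journal-full test looks different after the change but is the same predicate: with sectors = blocks * block_size and a positive block size, "blocks * block_size > PAGE_SECTORS << JSET_BITS || blocks > blocks_free" is exactly "sectors > min(blocks_free * block_size, PAGE_SECTORS << JSET_BITS)". A throwaway harness -- not kernel code; MIN_SZ stands in for min_t(size_t, ...) and the constants are arbitrary -- that checks the equivalence over a small range:

/* Equivalence check for the rewritten journal-full condition. */
#include <assert.h>
#include <stddef.h>

#define MIN_SZ(a, b)    ((a) < (b) ? (a) : (b))

int main(void)
{
        const size_t block_size = 8;    /* sectors per block, say */
        const size_t jset_sectors = 64; /* PAGE_SECTORS << JSET_BITS, say */
        size_t blocks_free, b;

        for (blocks_free = 0; blocks_free < 32; blocks_free++)
                for (b = 0; b < 32; b++) {
                        int old_full = b * block_size > jset_sectors ||
                                       b > blocks_free;
                        int new_full = b * block_size >
                                       MIN_SZ(blocks_free * block_size,
                                              jset_sectors);
                        assert(old_full == new_full);
                }
        return 0;
}

The equivalence holds because b > blocks_free is the same comparison as b * block_size > blocks_free * block_size once both sides are scaled by the (positive) block size, and "x > A || x > B" is "x > min(A, B)".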
drivers/md/bcache/request.c

@@ -438,13 +438,13 @@ static void bch_insert_data_error(struct closure *cl)
          * from the keys we'll accomplish just that.
          */
 
-        struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+        struct bkey *src = op->keys.keys, *dst = op->keys.keys;
 
         while (src != op->keys.top) {
                 struct bkey *n = bkey_next(src);
 
                 SET_KEY_PTRS(src, 0);
-                bkey_copy(dst, src);
+                memmove(dst, src, bkey_bytes(src));
 
                 dst = bkey_next(dst);
                 src = n;
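
The bkey_copy() -> memmove() swap in this hunk is more than a rename: bkey_copy() is memcpy()-based, and here dst and src walk the same buffer. On the very first key dst == src (a full overlap), and since SET_KEY_PTRS(src, 0) shrinks each key, dst then lags src by less than a key width, so later copies can overlap too; memcpy() on overlapping regions is undefined behavior, while memmove() is specified for it. A compact sketch of the same in-place compaction -- not bcache code; plain uint64_t words stand in for bkeys, with a 2-word original width shrunk to 1 word:

#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
        /* three "keys": header word + ptr word, shrunk to header only */
        uint64_t buf[6] = { 101, 11, 102, 22, 103, 33 };
        uint64_t *src = buf, *dst = buf, *top = buf + 6;

        while (src < top) {
                /* like bkey_next(src): computed before the copy */
                uint64_t *n = src + 2;

                /*
                 * First iteration has dst == src -- already an overlap
                 * that memcpy() is not specified to handle.
                 */
                memmove(dst, src, sizeof(uint64_t));
                dst += 1;
                src = n;
        }
        assert(buf[0] == 101 && buf[1] == 102 && buf[2] == 103);
        return 0;
}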