We must have some way of letting a storage device driver know what encryption context it should use for en/decrypting a request. However, it's the filesystem/fscrypt that knows about and manages encryption contexts. So when the filesystem layer submits a bio to the block layer, and that bio eventually reaches a device driver with support for inline encryption, the driver needs to have been told the bio's encryption context. In other words, the encryption context must travel with the bio from the moment the filesystem submits it to the block layer.

To do this, we add a struct bio_crypt_ctx to struct bio, which can represent an encryption context. (Note that we can't use the bi_private field in struct bio for this, because that field does not function to pass information across layers in the storage stack.) We also introduce various functions to manipulate the bio_crypt_ctx, and make the bio/request merging logic aware of it.

Bug: 137270441
Test: tested as series; see Ie1b77f7615d6a7a60fdc9105c7ab2200d17636a8
Change-Id: I479de9ec13758f1978b34d897e6956e680caeb92
Signed-off-by: Satya Tangirala <satyat@google.com>
Link: https://lore.kernel.org/linux-fscrypt/20191028072032.6911-3-satyat@google.com/
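For orientation, here is a minimal sketch of the submitter-side flow this enables. The helpers are the ones added by this patch; the wrapper function, the key, and the DUN choice are hypothetical, and assume AES-256-XTS with 4096-byte data units:

/*
 * Hypothetical filesystem-side sketch: attach an encryption context to a
 * bio and submit it.  Assumes the data is to be en/decrypted with
 * AES-256-XTS in 4096-byte data units (data_unit_size_bits == 12).
 */
static int submit_encrypted_bio(struct bio *bio, const u8 *raw_key, u64 dun)
{
        int err;

        /* Allocates a bio_crypt_ctx from the mempool and fills it in */
        err = bio_crypt_set_ctx(bio, raw_key, BLK_ENCRYPTION_MODE_AES_256_XTS,
                                dun, 12, GFP_NOIO);
        if (err)
                return err;

        submit_bio(bio);
        return 0;
}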
commit 138adbbe5e (parent 66b5609826)
@@ -0,0 +1,137 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/keyslot-manager.h>

static int num_prealloc_crypt_ctxs = 128;
static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

int bio_crypt_ctx_init(void)
{
        bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
        if (!bio_crypt_ctx_cache)
                return -ENOMEM;

        bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
                                                      bio_crypt_ctx_cache);
        if (!bio_crypt_ctx_pool)
                return -ENOMEM;

        return 0;
}

struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
        return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL(bio_crypt_alloc_ctx);

void bio_crypt_free_ctx(struct bio *bio)
{
        mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
        bio->bi_crypt_context = NULL;
}
EXPORT_SYMBOL(bio_crypt_free_ctx);

int bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
        if (!bio_has_crypt_ctx(src))
                return 0;

        dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
        if (!dst->bi_crypt_context)
                return -ENOMEM;

        *dst->bi_crypt_context = *src->bi_crypt_context;

        if (bio_crypt_has_keyslot(src))
                keyslot_manager_get_slot(src->bi_crypt_context->processing_ksm,
                                         src->bi_crypt_context->keyslot);

        return 0;
}
EXPORT_SYMBOL(bio_crypt_clone);

bool bio_crypt_should_process(struct bio *bio, struct request_queue *q)
{
        if (!bio_has_crypt_ctx(bio))
                return false;

        WARN_ON(!bio_crypt_has_keyslot(bio));
        return q->ksm == bio->bi_crypt_context->processing_ksm;
}
EXPORT_SYMBOL(bio_crypt_should_process);

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
        struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
        struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

        if (bio_has_crypt_ctx(b_1) != bio_has_crypt_ctx(b_2))
                return false;

        if (!bio_has_crypt_ctx(b_1))
                return true;

        return bc1->keyslot == bc2->keyslot &&
               bc1->data_unit_size_bits == bc2->data_unit_size_bits;
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged).
 */
bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
                                  unsigned int b1_sectors,
                                  struct bio *b_2)
{
        struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
        struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

        if (!bio_crypt_ctx_compatible(b_1, b_2))
                return false;

        return !bio_has_crypt_ctx(b_1) ||
               (bc1->data_unit_num +
                (b1_sectors >> (bc1->data_unit_size_bits - 9)) ==
                bc2->data_unit_num);
}
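A worked example of the continuity check above, with illustrative numbers: when bc1->data_unit_size_bits == 12, each data unit is 4096 bytes, i.e. 8 512-byte sectors, so b1_sectors >> (12 - 9) converts the sector count into a data-unit count:

/*
 * Illustrative: b_1 covers 16 sectors starting at data_unit_num 10, so it
 * spans 16 >> (12 - 9) == 2 data units (DUNs 10 and 11).  b_2 is therefore
 * back-mergeable onto b_1 only if bc2->data_unit_num == 12.
 */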
void bio_crypt_ctx_release_keyslot(struct bio *bio)
{
        struct bio_crypt_ctx *crypt_ctx = bio->bi_crypt_context;

        keyslot_manager_put_slot(crypt_ctx->processing_ksm, crypt_ctx->keyslot);
        bio->bi_crypt_context->processing_ksm = NULL;
        bio->bi_crypt_context->keyslot = -1;
}

int bio_crypt_ctx_acquire_keyslot(struct bio *bio, struct keyslot_manager *ksm)
{
        int slot;
        enum blk_crypto_mode_num crypto_mode = bio_crypto_mode(bio);

        if (!ksm)
                return -ENOMEM;

        slot = keyslot_manager_get_slot_for_key(ksm,
                        bio_crypt_raw_key(bio), crypto_mode,
                        1 << bio->bi_crypt_context->data_unit_size_bits);
        if (slot < 0)
                return slot;

        bio_crypt_set_keyslot(bio, slot, ksm);
        return 0;
}
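On the device side, a hedged sketch of how a driver with inline-encryption support might use these helpers; the function names and surrounding flow are hypothetical, and q->ksm is assumed to have been set up by the driver through the keyslot manager API:

/*
 * Hypothetical driver-side sketch: program the bio's raw key into a
 * keyslot of this queue's keyslot manager before issuing the I/O, and
 * release the slot once the I/O has completed.
 */
static int prepare_inline_crypt(struct request_queue *q, struct bio *bio)
{
        int err;

        if (!bio_has_crypt_ctx(bio))
                return 0;

        /* Programs the key and takes a reference on the chosen keyslot */
        err = bio_crypt_ctx_acquire_keyslot(bio, q->ksm);
        if (err)
                return err;

        /*
         * bio_crypt_should_process(bio, q) is now true, and the hardware
         * can be handed bio_crypt_get_keyslot(bio) for this transfer.
         */
        return 0;
}

static void finish_inline_crypt(struct bio *bio)
{
        if (bio_has_crypt_ctx(bio) && bio_crypt_has_keyslot(bio))
                bio_crypt_ctx_release_keyslot(bio);
}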
@@ -0,0 +1,219 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */
#ifndef __LINUX_BIO_CRYPT_CTX_H
#define __LINUX_BIO_CRYPT_CTX_H

enum blk_crypto_mode_num {
        BLK_ENCRYPTION_MODE_INVALID = 0,
        BLK_ENCRYPTION_MODE_AES_256_XTS = 1,
};

#ifdef CONFIG_BLOCK
#include <linux/blk_types.h>

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx {
        int keyslot;
        const u8 *raw_key;
        enum blk_crypto_mode_num crypto_mode;
        u64 data_unit_num;
        unsigned int data_unit_size_bits;

        /*
         * The keyslot manager where the key has been programmed
         * with keyslot.
         */
        struct keyslot_manager *processing_ksm;

        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio as described by
         * the bvec_iter upon submission, because the bio might be split
         * before being resubmitted.
         */
        struct bvec_iter crypt_iter;
        u64 sw_data_unit_num;
};

extern int bio_crypt_clone(struct bio *dst, struct bio *src,
                           gfp_t gfp_mask);

static inline bool bio_has_crypt_ctx(struct bio *bio)
{
        return bio->bi_crypt_context;
}

static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
        if (bio_has_crypt_ctx(bio)) {
                bio->bi_crypt_context->data_unit_num +=
                        bytes >> bio->bi_crypt_context->data_unit_size_bits;
        }
}
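A quick arithmetic illustration of the advance helper above (values are made up); this is what keeps the tail of a split bio en/decrypting at the right data unit:

/*
 * Illustrative: data_unit_size_bits == 12 and data_unit_num == 100.
 * After the first 8192 bytes are split off, advancing the remainder by
 * those 8192 bytes leaves data_unit_num == 102 (8192 >> 12 == 2), so
 * en/decryption of the tail resumes at the right position.
 */
bio_crypt_advance(bio, 8192);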
static inline bool bio_crypt_has_keyslot(struct bio *bio)
{
        return bio->bi_crypt_context->keyslot >= 0;
}

extern int bio_crypt_ctx_init(void);

extern struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask);

extern void bio_crypt_free_ctx(struct bio *bio);

static inline int bio_crypt_set_ctx(struct bio *bio,
                                    const u8 *raw_key,
                                    enum blk_crypto_mode_num crypto_mode,
                                    u64 dun,
                                    unsigned int dun_bits,
                                    gfp_t gfp_mask)
{
        struct bio_crypt_ctx *crypt_ctx;

        crypt_ctx = bio_crypt_alloc_ctx(gfp_mask);
        if (!crypt_ctx)
                return -ENOMEM;

        crypt_ctx->raw_key = raw_key;
        crypt_ctx->data_unit_num = dun;
        crypt_ctx->data_unit_size_bits = dun_bits;
        crypt_ctx->crypto_mode = crypto_mode;
        crypt_ctx->processing_ksm = NULL;
        crypt_ctx->keyslot = -1;
        bio->bi_crypt_context = crypt_ctx;

        return 0;
}

static inline void bio_set_data_unit_num(struct bio *bio, u64 dun)
{
        bio->bi_crypt_context->data_unit_num = dun;
}

static inline int bio_crypt_get_keyslot(struct bio *bio)
{
        return bio->bi_crypt_context->keyslot;
}

static inline void bio_crypt_set_keyslot(struct bio *bio,
                                         unsigned int keyslot,
                                         struct keyslot_manager *ksm)
{
        bio->bi_crypt_context->keyslot = keyslot;
        bio->bi_crypt_context->processing_ksm = ksm;
}

extern void bio_crypt_ctx_release_keyslot(struct bio *bio);

extern int bio_crypt_ctx_acquire_keyslot(struct bio *bio,
                                         struct keyslot_manager *ksm);

static inline const u8 *bio_crypt_raw_key(struct bio *bio)
{
        return bio->bi_crypt_context->raw_key;
}

static inline enum blk_crypto_mode_num bio_crypto_mode(struct bio *bio)
{
        return bio->bi_crypt_context->crypto_mode;
}

static inline u64 bio_crypt_data_unit_num(struct bio *bio)
{
        return bio->bi_crypt_context->data_unit_num;
}

static inline u64 bio_crypt_sw_data_unit_num(struct bio *bio)
{
        return bio->bi_crypt_context->sw_data_unit_num;
}

extern bool bio_crypt_should_process(struct bio *bio, struct request_queue *q);

extern bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2);

extern bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
                                         unsigned int b1_sectors,
                                         struct bio *b_2);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */
struct keyslot_manager;

static inline int bio_crypt_ctx_init(void)
{
        return 0;
}

static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
                                  gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { }

static inline bool bio_has_crypt_ctx(struct bio *bio)
{
        return false;
}

static inline void bio_crypt_free_ctx(struct bio *bio) { }

static inline int bio_crypt_set_ctx(struct bio *bio,
                                    const u8 *raw_key,
                                    enum blk_crypto_mode_num crypto_mode,
                                    u64 dun,
                                    unsigned int dun_bits,
                                    gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_set_data_unit_num(struct bio *bio, u64 dun) { }

static inline bool bio_crypt_has_keyslot(struct bio *bio)
{
        return false;
}

static inline void bio_crypt_set_keyslot(struct bio *bio,
                                         unsigned int keyslot,
                                         struct keyslot_manager *ksm) { }

static inline int bio_crypt_get_keyslot(struct bio *bio)
{
        return -1;
}

static inline const u8 *bio_crypt_raw_key(struct bio *bio)
{
        return NULL;
}

static inline u64 bio_crypt_data_unit_num(struct bio *bio)
{
        return 0;
}

static inline bool bio_crypt_should_process(struct bio *bio,
                                            struct request_queue *q)
{
        return false;
}

static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
        return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
                                                unsigned int b1_sectors,
                                                struct bio *b_2)
{
        return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_CRYPT_CTX_H */
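The no-op stubs above let generic block-layer code call these helpers without #ifdefs. A minimal hypothetical caller (the real merging-logic changes come later in the series):

/*
 * Hypothetical caller: with CONFIG_BLK_INLINE_ENCRYPTION=n, the stubs
 * collapse this to "always mergeable", so the merge path needs no #ifdefs.
 */
static bool bios_crypt_mergeable(struct bio *prev, unsigned int prev_sectors,
                                 struct bio *next)
{
        return bio_crypt_ctx_back_mergeable(prev, prev_sectors, next);
}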