Integrate the new file encryption framework

These changes integrate the new file encryption framework so that the new v2 encryption policies can be used.

These changes were previously reverted in commit 4211691d29 ("Reverting crypto and incrementalfs changes")
as part of the android-4.14.171 merge from the Android common kernel. This patch brings them back
after validation.

commit a9a5450 ANDROID: dm: prevent default-key from being enabled without needed hooks
commit e1a94e6 ANDROID: dm: add dm-default-key target for metadata encryption
commit 232fd35 ANDROID: dm: enable may_passthrough_inline_crypto on some targets
commit 53bc059 ANDROID: dm: add support for passing through inline crypto support
commit aeed6db ANDROID: block: Introduce passthrough keyslot manager
commit 4f27c8b ANDROID: ext4, f2fs: enable direct I/O with inline encryption
commit c91db46 BACKPORT: FROMLIST: scsi: ufs: add program_key() variant op
commit f9a8e4a ANDROID: block: export symbols needed for modules to use inline crypto
commit 75fea5f ANDROID: block: fix some inline crypto bugs
commit 2871f73 ANDROID: fscrypt: add support for hardware-wrapped keys
commit bb5a657 ANDROID: block: add KSM op to derive software secret from wrapped key
commit d42ba87 ANDROID: block: provide key size as input to inline crypto APIs
commit 86646eb ANDROID: ufshcd-crypto: export cap find API
commit 83bc20e ANDROID: scsi: ufs-qcom: Enable BROKEN_CRYPTO quirk flag
commit c266a13 ANDROID: scsi: ufs: Add quirk bit for controllers that don't play well with inline crypto
commit ea09b99 ANDROID: cuttlefish_defconfig: Enable blk-crypto fallback
commit e12563c BACKPORT: FROMLIST: Update Inline Encryption from v5 to v6 of patch series
commit 8e8f55d ANDROID: scsi: ufs: UFS init should not require inline crypto
commit dae9899 ANDROID: scsi: ufs: UFS crypto variant operations API
commit a69516d ANDROID: cuttlefish_defconfig: enable inline encryption
commit b8f7b23 BACKPORT: FROMLIST: ext4: add inline encryption support
commit e64327f BACKPORT: FROMLIST: f2fs: add inline encryption support
commit a0dc8da BACKPORT: FROMLIST: fscrypt: add inline encryption support
commit 19c3c62 BACKPORT: FROMLIST: scsi: ufs: Add inline encryption support to UFS
commit f858a99 BACKPORT: FROMLIST: scsi: ufs: UFS crypto API
commit 011b834 BACKPORT: FROMLIST: scsi: ufs: UFS driver v2.1 spec crypto additions
commit ec0b569 BACKPORT: FROMLIST: block: blk-crypto for Inline Encryption
commit 760b328 ANDROID: block: Fix bio_crypt_should_process WARN_ON
commit 138adbb BACKPORT: FROMLIST: block: Add encryption context to struct bio
commit 66b5609 BACKPORT: FROMLIST: block: Keyslot Manager for Inline Encryption

Git-repo: https://android.googlesource.com/kernel/common/+/refs/heads/android-4.14-stable
Git-commit: a9a545067a
Git-commit: e1a94e6b17
Git-commit: 232fd353e4
Git-commit: 53bc059bc6
Git-commit: aeed6db424
Git-commit: 4f27c8b90b
Git-commit: c91db466b5
Git-commit: f9a8e4a5c5
Git-commit: 75fea5f605
Git-commit: 2871f73194
Git-commit: bb5a65771a
Git-commit: d42ba87e29
Git-commit: 86646ebb17
Git-commit: 83bc20ed4b
Git-commit: c266a1311e
Git-commit: ea09b9954c
Git-commit: e12563c18d
Git-commit: 8e8f55d1a7
Git-commit: dae9899044
Git-commit: a69516d091
Git-commit: b8f7b23674
Git-commit: e64327f571
Git-commit: a0dc8da519
Git-commit: 19c3c62836
Git-commit: f858a9981a
Git-commit: 011b8344c3
Git-commit: ec0b569b5c
Git-commit: 760b3283e8
Git-commit: 138adbbe5e
Git-commit: 66b5609826

Change-Id: I171d90de41185824e0c7515f3a3b43ab88f4e058
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
 Documentation/block/00-INDEX                 |   2
 Documentation/block/index.rst                |  26
 Documentation/block/inline-encryption.rst    | 183
 arch/arm64/configs/cuttlefish_defconfig      |   3
 arch/x86/configs/x86_64_cuttlefish_defconfig |   3
 block/Kconfig                                |  17
 block/Makefile                               |   3
 block/bio-crypt-ctx.c                        | 142
 block/bio.c                                  |  23
 block/blk-core.c                             |  11
 block/blk-crypto-fallback.c                  | 650
 block/blk-crypto-internal.h                  |  58
 block/blk-crypto.c                           | 251
 block/blk-merge.c                            |  11
 block/keyslot-manager.c                      | 560
 drivers/md/Kconfig                           |  21
 drivers/md/Makefile                          |   1
 drivers/md/dm-bow.c                          |   1
 drivers/md/dm-default-key.c                  | 403
 drivers/md/dm-linear.c                       |   1
 drivers/md/dm-table.c                        |  52
 drivers/md/dm.c                              | 100
 drivers/scsi/ufs/Kconfig                     |   9
 drivers/scsi/ufs/Makefile                    |   4
 drivers/scsi/ufs/ufs-qcom.c                  |   6
 drivers/scsi/ufs/ufshcd-crypto.c             | 499
 drivers/scsi/ufs/ufshcd-crypto.h             | 167
 drivers/scsi/ufs/ufshcd.c                    |  67
 drivers/scsi/ufs/ufshcd.h                    |  59
 drivers/scsi/ufs/ufshci.h                    |  56
 fs/buffer.c                                  |   3
 fs/crypto/Kconfig                            |   6
 fs/crypto/Makefile                           |   1
 fs/crypto/bio.c                              |  28
 fs/crypto/crypto.c                           |   2
 fs/crypto/fname.c                            |   4
 fs/crypto/fscrypt_private.h                  | 155
 fs/crypto/inline_crypt.c                     | 353
 fs/crypto/keyring.c                          |  59
 fs/crypto/keysetup.c                         | 163
 fs/crypto/keysetup_v1.c                      |  17
 fs/direct-io.c                               |   5
 fs/ext4/ext4.h                               |   1
 fs/ext4/inode.c                              |  16
 fs/ext4/page-io.c                            |   6
 fs/ext4/readpage.c                           |  11
 fs/ext4/super.c                              |  13
 fs/f2fs/data.c                               |  71
 fs/f2fs/f2fs.h                               |  11
 fs/f2fs/super.c                              |  41
 fs/iomap.c                                   |   6
 include/linux/bio-crypt-ctx.h                | 228
 include/linux/bio.h                          |   1
 include/linux/blk-crypto.h                   |  66
 include/linux/blk_types.h                    |   9
 include/linux/blkdev.h                       |   6
 include/linux/device-mapper.h                |   6
 include/linux/fscrypt.h                      |  72
 include/linux/keyslot-manager.h              |  84
 include/uapi/linux/fscrypt.h                 |   2
 60 files changed

@ -16,6 +16,8 @@ data-integrity.txt
- Block data integrity
deadline-iosched.txt
- Deadline IO scheduler tunables
inline-encryption.rst
- Blk-crypto internals and inline encryption
ioprio.txt
- Block io priorities (in CFQ scheduler)
pr.txt

@ -0,0 +1,26 @@
.. SPDX-License-Identifier: GPL-2.0
=====
Block
=====
.. toctree::
:maxdepth: 1
bfq-iosched
biodoc
biovecs
capability
cmdline-partition
data-integrity
deadline-iosched
inline-encryption
ioprio
kyber-iosched
null_blk
pr
queue-sysfs
request
stat
switching-sched
writeback_cache_control

@ -0,0 +1,183 @@
.. SPDX-License-Identifier: GPL-2.0
=================
Inline Encryption
=================
Objective
=========
We want to support inline encryption (IE) in the kernel.
To allow for testing, we also want a crypto API fallback when actual
IE hardware is absent. We also want IE to work with layered devices
like dm and loopback (i.e. we want to be able to use the IE hardware
of the underlying devices if present, or else fall back to crypto API
en/decryption).
Constraints and notes
=====================
- IE hardware has a limited number of "keyslots" that can be programmed
with an encryption context (key, algorithm, data unit size, etc.) at any time.
One can specify a keyslot in a data request made to the device, and the
device will en/decrypt the data using the encryption context programmed into
that specified keyslot. When possible, we want to make multiple requests with
the same encryption context share the same keyslot.
- We need a way for filesystems to specify an encryption context to use for
en/decrypting a struct bio, and a device driver (like UFS) needs to be able
to use that encryption context when it processes the bio.
- We need a way for device drivers to expose their capabilities in a unified
way to the upper layers.
Design
======
We add a struct bio_crypt_ctx to struct bio that can represent an
encryption context, because we need to be able to pass this encryption
context from the FS layer to the device driver to act upon.
While IE hardware works on the notion of keyslots, the FS layer has no
knowledge of keyslots - it simply wants to specify an encryption context to
use while en/decrypting a bio.
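For orientation, a simplified sketch of that per-bio context is shown below.
The field names match those used by the code added later in this patch, but
this is an illustration only; the authoritative definition lives in
include/linux/bio-crypt-ctx.h and may carry additional members::

    struct bio_crypt_ctx {
        const struct blk_crypto_key *bc_key;   /* key, mode, data unit size */
        int bc_keyslot;                        /* slot in bc_ksm, or -1 */
        struct keyslot_manager *bc_ksm;        /* KSM the key is programmed into */
        u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; /* data unit number (IV base) */
    };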
We introduce a keyslot manager (KSM) that handles the translation from
encryption contexts specified by the FS to keyslots on the IE hardware.
This KSM also serves as the way IE hardware can expose their capabilities to
upper layers. The generic mode of operation is: each device driver that wants
to support IE will construct a KSM and set it up in its struct request_queue.
Upper layers that want to use IE on this device can then use this KSM in
the device's struct request_queue to translate an encryption context into
a keyslot. The presence of the KSM in the request queue shall be used to mean
that the device supports IE.
On the device driver end of the interface, the device driver needs to tell the
KSM how to actually manipulate the IE hardware in the device to do things like
programming the crypto key into the IE hardware into a particular keyslot. All
this is achieved through the :c:type:`struct keyslot_mgmt_ll_ops` that the
device driver passes to the KSM when creating it.
The KSM uses refcounts to track which keyslots are idle (either they have no
encryption context programmed, or there are no in-flight struct bios
referencing that keyslot). When a new encryption context needs a keyslot, it
tries to find a keyslot that has already been programmed with the same
encryption context, and if there is no such keyslot, it evicts the least
recently used idle keyslot and programs the new encryption context into that
one. If no idle keyslots are available, then the caller will sleep until there
is at least one.
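As a concrete (and purely illustrative) example, a storage driver that exposes
eight keyslots supporting AES-256-XTS with 4096-byte data units might set up
its KSM roughly as follows; ``my_hw_program_key()``, ``my_hw_evict_key()`` and
``my_driver_init_crypto()`` are placeholders, not real driver code::

    #include <linux/blkdev.h>
    #include <linux/keyslot-manager.h>

    static int my_hw_program_key(struct keyslot_manager *ksm,
                                 const struct blk_crypto_key *key,
                                 unsigned int slot)
    {
        /* Write key->raw into hardware keyslot 'slot'. */
        return 0;
    }

    static int my_hw_evict_key(struct keyslot_manager *ksm,
                               const struct blk_crypto_key *key,
                               unsigned int slot)
    {
        /* Clear hardware keyslot 'slot'. */
        return 0;
    }

    static const struct keyslot_mgmt_ll_ops my_ksm_ops = {
        .keyslot_program = my_hw_program_key,
        .keyslot_evict   = my_hw_evict_key,
    };

    static int my_driver_init_crypto(struct request_queue *q)
    {
        /* Bit i of crypto_mode_supported[mode] set <=> data unit size (1 << i). */
        unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX] = {
            [BLK_ENCRYPTION_MODE_AES_256_XTS] = 1U << 12,  /* 4096 bytes */
        };

        q->ksm = keyslot_manager_create(8, &my_ksm_ops,
                                        crypto_mode_supported, NULL);
        return q->ksm ? 0 : -ENOMEM;
    }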
Blk-crypto
==========
The above is sufficient for simple cases, but does not work if there is a
need for a crypto API fallback, or if we want to use IE with layered
devices. To these ends, we introduce blk-crypto. Blk-crypto allows us to
present a unified view of encryption to the FS (so FS only needs to specify
an encryption context and not worry about keyslots at all), and blk-crypto
can decide whether to delegate the en/decryption to IE hardware or to the
crypto API. Blk-crypto maintains an internal KSM that serves as the crypto
API fallback.
Blk-crypto needs to ensure that the encryption context is programmed into the
"correct" keyslot manager for IE. If a bio is submitted to a layered device
that eventually passes the bio down to a device that really does support IE, we
want the encryption context to be programmed into a keyslot for the KSM of the
device with IE support. However, blk-crypto does not know a priori whether a
particular device is the final device in the layering structure for a bio or
not. So in the case that a particular device does not support IE, since it is
possibly the final destination device for the bio, if the bio requires
encryption (i.e. the bio is doing a write operation), blk-crypto must fall back
to the crypto API *before* sending the bio to the device.
Blk-crypto ensures that:
- The bio's encryption context is programmed into a keyslot in the KSM of the
request queue that the bio is being submitted to (or the crypto API fallback
KSM if the request queue doesn't have a KSM), and that the ``bc_ksm``
in the ``bi_crypt_context`` is set to this KSM
- That the bio has its own individual reference to the keyslot in this KSM.
Once the bio passes through blk-crypto, its encryption context is programmed
in some KSM. The "its own individual reference to the keyslot" ensures that
keyslots can be released by each bio independently of other bios while
ensuring that the bio has a valid reference to the keyslot when, e.g., the
crypto API fallback KSM in blk-crypto performs crypto on the device's behalf.
The individual references are ensured by increasing the refcount for the
keyslot in the ``bc_ksm`` when a bio with a programmed encryption
context is cloned.
What blk-crypto does on bio submission
--------------------------------------
**Case 1:** blk-crypto is given a bio with only an encryption context that hasn't
been programmed into any keyslot in any KSM (e.g. a bio from the FS).
In this case, blk-crypto will program the encryption context into the KSM of the
request queue the bio is being submitted to (and if this KSM does not exist,
then it will program it into blk-crypto's internal KSM for crypto API
fallback). The KSM that this encryption context was programmed into is stored
as the ``bc_ksm`` in the bio's ``bi_crypt_context``.
**Case 2:** blk-crypto is given a bio whose encryption context has already been
programmed into a keyslot in the *crypto API fallback* KSM.
In this case, blk-crypto does nothing; it treats the bio as not having
specified an encryption context. Note that we cannot do here what we will do
in Case 3 because we would have already encrypted the bio via the crypto API
by this point.
**Case 3:** blk-crypto is given a bio whose encryption context has already been
programmed into a keyslot in some KSM (that is *not* the crypto API fallback
KSM).
In this case, blk-crypto first releases that keyslot from that KSM and then
treats the bio as in Case 1.
This way, when a device driver is processing a bio, it can be sure that
the bio's encryption context has been programmed into some KSM (either the
device driver's request queue's KSM, or blk-crypto's crypto API fallback KSM).
It then simply needs to check if the bio's ``bc_ksm`` is the device's
request queue's KSM. If so, then it should proceed with IE. If not, it should
simply do nothing with respect to crypto, because some other KSM (perhaps the
blk-crypto crypto API fallback KSM) is handling the en/decryption.
Blk-crypto will release the keyslot that is being held by the bio (and also
decrypt it if the bio is using the crypto API fallback KSM) once
``bio_remaining_done`` returns true for the bio.
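For illustration, the driver-side check described above might look roughly as
follows; ``my_hw_fill_crypto_desc()`` and ``my_driver_prep_request()`` are
placeholders for however the hardware actually consumes the keyslot number and
data unit number::

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_hw_fill_crypto_desc(struct request *rq, int slot,
                                       const u64 *dun);

    static void my_driver_prep_request(struct request *rq)
    {
        if (bio_crypt_should_process(rq)) {
            /* The bio's context is programmed into *this* queue's KSM. */
            const struct bio_crypt_ctx *bc = rq->bio->bi_crypt_context;

            my_hw_fill_crypto_desc(rq, bc->bc_keyslot, bc->bc_dun);
        }
        /*
         * Otherwise either the bio has no encryption context, or another
         * KSM (e.g. the blk-crypto fallback) is handling the crypto.
         */
    }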
Layered Devices
===============
Layered devices that wish to support IE need to create their own keyslot
manager for their request queue, and expose whatever functionality they choose.
When a layered device wants to pass a bio to another layer (either by
resubmitting the same bio, or by submitting a clone), it doesn't need to do
anything special because the bio (or the clone) will once again pass through
blk-crypto, which will work as described in Case 3. If a layered device wants
for some reason to do the IO by itself instead of passing it on to a child
device, but it also chose to expose IE capabilities by setting up a KSM in its
request queue, it is then responsible for en/decrypting the data itself. In
such cases, the device can choose to call the blk-crypto function
``blk_crypto_fallback_to_kernel_crypto_api`` (TODO: Not yet implemented), which will
cause the en/decryption to be done via the crypto API fallback.
Future Optimizations for layered devices
========================================
Creating a keyslot manager for the layered device uses up memory for each
keyslot, and in general, a layered device (like dm-linear) merely passes the
request on to a "child" device, so the keyslots in the layered device itself
might be completely unused. We can instead define a new type of KSM; the
"passthrough KSM", that layered devices can use to let blk-crypto know that
this layered device *will* pass the bio to some child device (and hence
through blk-crypto again, at which point blk-crypto can program the encryption
context, instead of programming it into the layered device's KSM). Again, if
the device "lies" and decides to do the IO itself instead of passing it on to
a child device, it is responsible for doing the en/decryption (and can choose
to call ``blk_crypto_fallback_to_kernel_crypto_api``). Another use case for the
"passthrough KSM" is for IE devices that want to manage their own keyslots/do
not have a limited number of keyslots.

@ -47,6 +47,8 @@ CONFIG_REFCOUNT_FULL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PREEMPT=y
@ -431,6 +433,7 @@ CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
CONFIG_FS_VERITY=y
CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
# CONFIG_DNOTIFY is not set

@ -46,6 +46,8 @@ CONFIG_REFCOUNT_FULL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SMP=y
CONFIG_HYPERVISOR_GUEST=y
@ -449,6 +451,7 @@ CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
CONFIG_FS_VERITY=y
CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
CONFIG_QUOTA=y

@ -184,6 +184,23 @@ config BLK_SED_OPAL
Enabling this option enables users to setup/unlock/lock
Locking ranges for SED devices using the Opal protocol.
config BLK_INLINE_ENCRYPTION
bool "Enable inline encryption support in block layer"
help
Build the blk-crypto subsystem. Enabling this lets the
block layer handle encryption, so users can take
advantage of inline encryption hardware if present.
config BLK_INLINE_ENCRYPTION_FALLBACK
bool "Enable crypto API fallback for blk-crypto"
depends on BLK_INLINE_ENCRYPTION
select CRYPTO
select CRYPTO_BLKCIPHER
help
Enabling this lets the block layer handle inline encryption
by falling back to the kernel crypto API when inline
encryption hardware is not present.
menu "Partition Types"
source "block/partitions/Kconfig"

@ -35,3 +35,6 @@ obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o

@ -0,0 +1,142 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "blk-crypto-internal.h"
static int num_prealloc_crypt_ctxs = 128;
module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
"Number of bio crypto contexts to preallocate");
static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;
int __init bio_crypt_ctx_init(void)
{
size_t i;
bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
if (!bio_crypt_ctx_cache)
return -ENOMEM;
bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
bio_crypt_ctx_cache);
if (!bio_crypt_ctx_pool)
return -ENOMEM;
/* This is assumed in various places. */
BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
/* Sanity check that no algorithm exceeds the defined limits. */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
}
return 0;
}
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL_GPL(bio_crypt_alloc_ctx);
void bio_crypt_free_ctx(struct bio *bio)
{
mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
bio->bi_crypt_context = NULL;
}
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;
bio_clone_skip_dm_default_key(dst, src);
/*
* If a bio is fallback_crypted, then it will be decrypted when
* bio_endio is called. As we only want the data to be decrypted once,
* copies of the bio must not have a crypt context.
*/
if (!src_bc || bio_crypt_fallback_crypted(src_bc))
return;
dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
*dst->bi_crypt_context = *src_bc;
if (src_bc->bc_keyslot >= 0)
keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL_GPL(bio_crypt_clone);
bool bio_crypt_should_process(struct request *rq)
{
struct bio *bio = rq->bio;
if (!bio || !bio->bi_crypt_context)
return false;
return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
}
EXPORT_SYMBOL_GPL(bio_crypt_should_process);
/*
* Checks that two bio crypt contexts are compatible - i.e. that
* they are mergeable except for data_unit_num continuity.
*/
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
if (!bc1)
return !bc2;
return bc2 && bc1->bc_key == bc2->bc_key;
}
/*
* Checks that two bio crypt contexts are compatible, and also
* that their data_unit_nums are continuous (and can hence be merged)
* in the order b_1 followed by b_2.
*/
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
struct bio *b_2)
{
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
if (!bio_crypt_ctx_compatible(b_1, b_2))
return false;
return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
}
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
{
keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
bc->bc_ksm = NULL;
bc->bc_keyslot = -1;
}
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
struct keyslot_manager *ksm)
{
int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);
if (slot < 0)
return slot;
bc->bc_keyslot = slot;
bc->bc_ksm = ksm;
return 0;
}

@ -28,6 +28,7 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-crypto.h>
#include <trace/events/block.h>
#include "blk.h"
@ -243,6 +244,8 @@ fallback:
void bio_uninit(struct bio *bio)
{
bio_disassociate_task(bio);
bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);
@ -628,15 +631,12 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
__bio_clone_fast(b, bio);
if (bio_integrity(bio)) {
int ret;
ret = bio_integrity_clone(b, bio, gfp_mask);
bio_crypt_clone(b, bio, gfp_mask);
if (ret < 0) {
bio_put(b);
return NULL;
}
if (bio_integrity(bio) &&
bio_integrity_clone(b, bio, gfp_mask) < 0) {
bio_put(b);
return NULL;
}
return b;
@ -704,6 +704,8 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
break;
}
bio_crypt_clone(bio, bio_src, gfp_mask);
if (bio_integrity(bio_src)) {
int ret;
@ -1035,6 +1037,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
if (bio_integrity(bio))
bio_integrity_advance(bio, bytes);
bio_crypt_advance(bio, bytes);
bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);
@ -1892,6 +1895,10 @@ void bio_endio(struct bio *bio)
again:
if (!bio_remaining_done(bio))
return;
if (!blk_crypto_endio(bio))
return;
if (!bio_integrity_endio(bio))
return;

@ -35,6 +35,7 @@
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/psi.h>
#include <linux/blk-crypto.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@ -2284,7 +2285,9 @@ blk_qc_t generic_make_request(struct bio *bio)
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
if (!blk_crypto_submit_bio(&bio))
ret = q->make_request_fn(q, bio);
/* sort new bios into those for a lower level
* and those for the same level
@ -3728,6 +3731,12 @@ int __init blk_dev_init(void)
blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif
if (bio_crypt_ctx_init() < 0)
panic("Failed to allocate mem for bio crypt ctxs\n");
if (blk_crypto_fallback_init() < 0)
panic("Failed to init blk-crypto-fallback\n");
return 0;
}

@ -0,0 +1,650 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
/*
* Refer to Documentation/block/inline-encryption.rst for detailed explanation.
*/
#define pr_fmt(fmt) "blk-crypto-fallback: " fmt
#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include "blk-crypto-internal.h"
static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
"Number of preallocated bounce pages for the blk-crypto crypto API fallback");
static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
"Number of keyslots for the blk-crypto crypto API fallback");
static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
"Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");
struct bio_fallback_crypt_ctx {
struct bio_crypt_ctx crypt_ctx;
/*
* Copy of the bvec_iter when this bio was submitted.
* We only want to en/decrypt the part of the bio as described by the
* bvec_iter upon submission because bio might be split before being
* resubmitted
*/
struct bvec_iter crypt_iter;
u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
};
/* The following few vars are only used during the crypto API fallback */
static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;
/*
* Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
* all of a mode's tfms when that mode starts being used. Since each mode may
* need all the keyslots at some point, each mode needs its own tfm for each
* keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
* match the behavior of real inline encryption hardware (which only supports a
* single encryption context per keyslot), we only allow one tfm per keyslot to
* be used at a time - the rest of the unused tfms have their keys cleared.
*/
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
struct blk_crypto_decrypt_work {
struct work_struct work;
struct bio *bio;
};
static struct blk_crypto_keyslot {
struct crypto_skcipher *tfm;
enum blk_crypto_mode_num crypto_mode;
struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;
/* The following few vars are only used during the crypto API fallback */
static struct keyslot_manager *blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct kmem_cache *blk_crypto_decrypt_work_cache;
bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
return bc && bc->bc_ksm == blk_crypto_ksm;
}
/*
* This is the key we set when evicting a keyslot. This *should* be the all 0's
* key, but AES-XTS rejects that key, so we use some random bytes instead.
*/
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
static void blk_crypto_evict_keyslot(unsigned int slot)
{
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
int err;
WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);
/* Clear the key in the skcipher */
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
blk_crypto_modes[crypto_mode].keysize);
WARN_ON(err);
slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}
static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
int err;
if (crypto_mode != slotp->crypto_mode &&
slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
blk_crypto_evict_keyslot(slot);
}
if (!slotp->tfms[crypto_mode])
return -ENOMEM;
slotp->crypto_mode = crypto_mode;
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
key->size);
if (err) {
blk_crypto_evict_keyslot(slot);
return err;
}
return 0;
}
static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
blk_crypto_evict_keyslot(slot);
return 0;
}
/*
* The crypto API fallback KSM ops - only used for a bio when it specifies a
* blk_crypto_mode for which we failed to get a keyslot in the device's inline
* encryption hardware (which probably means the device doesn't have inline
* encryption hardware that supports that crypto mode).
*/
static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
.keyslot_program = blk_crypto_keyslot_program,
.keyslot_evict = blk_crypto_keyslot_evict,
};
static void blk_crypto_encrypt_endio(struct bio *enc_bio)
{
struct bio *src_bio = enc_bio->bi_private;
int i;
for (i = 0; i < enc_bio->bi_vcnt; i++)
mempool_free(enc_bio->bi_io_vec[i].bv_page,
blk_crypto_bounce_page_pool);
src_bio->bi_status = enc_bio->bi_status;
bio_put(enc_bio);
bio_endio(src_bio);
}
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
struct bvec_iter iter;
struct bio_vec bv;
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
if (!bio)
return NULL;
bio->bi_disk = bio_src->bi_disk;
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
if (bio_integrity(bio_src) &&
bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
bio_put(bio);
return NULL;
}
bio_clone_blkcg_association(bio, bio_src);
bio_clone_skip_dm_default_key(bio, bio_src);
return bio;
}
static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
struct skcipher_request **ciph_req_ret,
struct crypto_wait *wait)
{
struct skcipher_request *ciph_req;
const struct blk_crypto_keyslot *slotp;
slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
GFP_NOIO);
if (!ciph_req) {
src_bio->bi_status = BLK_STS_RESOURCE;
return -ENOMEM;
}
skcipher_request_set_callback(ciph_req,
CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, wait);
*ciph_req_ret = ciph_req;
return 0;
}
static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
struct bio *bio = *bio_ptr;
unsigned int i = 0;
unsigned int num_sectors = 0;
struct bio_vec bv;
struct bvec_iter iter;
bio_for_each_segment(bv, bio, iter) {
num_sectors += bv.bv_len >> SECTOR_SHIFT;
if (++i == BIO_MAX_PAGES)
break;
}
if (num_sectors < bio_sectors(bio)) {
struct bio *split_bio;
split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
if (!split_bio) {
bio->bi_status = BLK_STS_RESOURCE;
return -ENOMEM;
}
bio_chain(split_bio, bio);
generic_make_request(bio);
*bio_ptr = split_bio;
}
return 0;
}
union blk_crypto_iv {
__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
union blk_crypto_iv *iv)
{
int i;
for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
iv->dun[i] = cpu_to_le64(dun[i]);
}
/*
* The crypto API fallback's encryption routine.
* Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
* and replace *bio_ptr with the bounce bio. May split input bio if it's too
* large.
*/
static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
{
struct bio *src_bio;
struct skcipher_request *ciph_req = NULL;
DECLARE_CRYPTO_WAIT(wait);
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
union blk_crypto_iv iv;
struct scatterlist src, dst;
struct bio *enc_bio;
unsigned int i, j;
int data_unit_size;
struct bio_crypt_ctx *bc;
int err = 0;
/* Split the bio if it's too big for single page bvec */
err = blk_crypto_split_bio_if_needed(bio_ptr);
if (err)
return err;
src_bio = *bio_ptr;
bc = src_bio->bi_crypt_context;
data_unit_size = bc->bc_key->data_unit_size;
/* Allocate bounce bio for encryption */
enc_bio = blk_crypto_clone_bio(src_bio);
if (!enc_bio) {
src_bio->bi_status = BLK_STS_RESOURCE;
return -ENOMEM;
}
/*
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
* for the algorithm and key specified for this bio.
*/
err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
if (err) {
src_bio->bi_status = BLK_STS_IOERR;
goto out_put_enc_bio;
}
/* and then allocate an skcipher_request for it */
err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
if (err)
goto out_release_keyslot;
memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
sg_init_table(&src, 1);
sg_init_table(&dst, 1);
skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
iv.bytes);
/* Encrypt each page in the bounce bio */
for (i = 0; i < enc_bio->bi_vcnt; i++) {
struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
struct page *plaintext_page = enc_bvec->bv_page;
struct page *ciphertext_page =
mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);
enc_bvec->bv_page = ciphertext_page;
if (!ciphertext_page) {
src_bio->bi_status = BLK_STS_RESOURCE;
err = -ENOMEM;
goto out_free_bounce_pages;
}
sg_set_page(&src, plaintext_page, data_unit_size,
enc_bvec->bv_offset);
sg_set_page(&dst, ciphertext_page, data_unit_size,
enc_bvec->bv_offset);
/* Encrypt each data unit in this page */
for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
&wait);
if (err) {
i++;
src_bio->bi_status = BLK_STS_RESOURCE;
goto out_free_bounce_pages;
}
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
dst.offset += data_unit_size;
}
}
enc_bio->bi_private = src_bio;
enc_bio->bi_end_io = blk_crypto_encrypt_endio;
*bio_ptr = enc_bio;
enc_bio = NULL;
err = 0;
goto out_free_ciph_req;
out_free_bounce_pages:
while (i > 0)
mempool_free(enc_bio->bi_io_vec[--i].bv_page,
blk_crypto_bounce_page_pool);
out_free_ciph_req:
skcipher_request_free(ciph_req);
out_release_keyslot:
bio_crypt_ctx_release_keyslot(bc);
out_put_enc_bio:
if (enc_bio)
bio_put(enc_bio);
return err;
}
static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
{
mempool_free(container_of(bio->bi_crypt_context,
struct bio_fallback_crypt_ctx,
crypt_ctx),
bio_fallback_crypt_ctx_pool);
bio->bi_crypt_context = NULL;
}
/*
* The crypto API fallback's main decryption routine.
* Decrypts input bio in place.
*/
static void blk_crypto_decrypt_bio(struct work_struct *work)
{
struct blk_crypto_decrypt_work *decrypt_work =
container_of(work, struct blk_crypto_decrypt_work, work);
struct bio *bio = decrypt_work->bio;
struct skcipher_request *ciph_req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct bio_vec bv;
struct bvec_iter iter;
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
union blk_crypto_iv iv;
struct scatterlist sg;
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
struct bio_fallback_crypt_ctx *f_ctx =
container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
const int data_unit_size = bc->bc_key->data_unit_size;
unsigned int i;
int err;
/*
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
* for the algorithm and key specified for this bio.
*/
if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
bio->bi_status = BLK_STS_RESOURCE;
goto out_no_keyslot;
}
/* and then allocate an skcipher_request for it */
err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
if (err)
goto out;
memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
sg_init_table(&sg, 1);
skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
iv.bytes);
/* Decrypt each segment in the bio */
__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
struct page *page = bv.bv_page;
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
/* Decrypt each data unit in the segment */
for (i = 0; i < bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
&wait)) {
bio->bi_status = BLK_STS_IOERR;
goto out;
}
bio_crypt_dun_increment(curr_dun, 1);
sg.offset += data_unit_size;
}
}
out:
skcipher_request_free(ciph_req);
bio_crypt_ctx_release_keyslot(bc);
out_no_keyslot:
kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
blk_crypto_free_fallback_crypt_ctx(bio);
bio_endio(bio);
}
/*
* Queue bio for decryption.
* Returns true iff bio was queued for decryption.
*/
bool blk_crypto_queue_decrypt_bio(struct bio *bio)
{
struct blk_crypto_decrypt_work *decrypt_work;
/* If there was an IO error, don't queue for decrypt. */
if (bio->bi_status)
goto out;
decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
GFP_ATOMIC);
if (!decrypt_work) {
bio->bi_status = BLK_STS_RESOURCE;
goto out;
}
INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
decrypt_work->bio = bio;
queue_work(blk_crypto_wq, &decrypt_work->work);
return true;
out:
blk_crypto_free_fallback_crypt_ctx(bio);
return false;
}
/**
* blk_crypto_start_using_mode() - Start using a crypto algorithm on a device
* @mode_num: the blk_crypto_mode we want to allocate ciphers for.
* @data_unit_size: the data unit size that will be used
* @q: the request queue for the device
*
* Upper layers must call this function to ensure that the crypto API fallback
* has transforms for this algorithm, if they become necessary.
*
* Return: 0 on success and -err on error.
*/
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
unsigned int data_unit_size,
struct request_queue *q)
{
struct blk_crypto_keyslot *slotp;
unsigned int i;
int err = 0;
/*
* Fast path
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
* for each i are visible before we try to access them.
*/
if (likely(smp_load_acquire(&tfms_inited[mode_num])))
return 0;
/*
* If the keyslot manager of the request queue supports this
* crypto mode, then we don't need to allocate this mode.
*/
if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num,
data_unit_size))
return 0;
mutex_lock(&tfms_init_lock);
if (likely(tfms_inited[mode_num]))
goto out;
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
slotp->tfms[mode_num] = crypto_alloc_skcipher(
blk_crypto_modes[mode_num].cipher_str,
0, 0);
if (IS_ERR(slotp->tfms[mode_num])) {
err = PTR_ERR(slotp->tfms[mode_num]);
slotp->tfms[mode_num] = NULL;
goto out_free_tfms;
}
crypto_skcipher_set_flags(slotp->tfms[mode_num],
CRYPTO_TFM_REQ_WEAK_KEY);
}
/*
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
* for each i are visible before we set tfms_inited[mode_num].
*/
smp_store_release(&tfms_inited[mode_num], true);
goto out;
out_free_tfms:
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
crypto_free_skcipher(slotp->tfms[mode_num]);
slotp->tfms[mode_num] = NULL;
}
out:
mutex_unlock(&tfms_init_lock);
return err;
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
return keyslot_manager_evict_key(blk_crypto_ksm, key);
}
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
{
struct bio *bio = *bio_ptr;
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
struct bio_fallback_crypt_ctx *f_ctx;
if (!tfms_inited[bc->bc_key->crypto_mode]) {
bio->bi_status = BLK_STS_IOERR;
return -EIO;
}
if (bio_data_dir(bio) == WRITE)
return blk_crypto_encrypt_bio(bio_ptr);
/*
* Mark bio as fallback crypted and replace the bio_crypt_ctx with
* another one contained in a bio_fallback_crypt_ctx, so that the
* fallback has space to store the info it needs for decryption.
*/
bc->bc_ksm = blk_crypto_ksm;
f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
f_ctx->crypt_ctx = *bc;
memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
f_ctx->crypt_iter = bio->bi_iter;
bio_crypt_free_ctx(bio);
bio->bi_crypt_context = &f_ctx->crypt_ctx;
return 0;
}
int __init blk_crypto_fallback_init(void)
{
int i;
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
/* All blk-crypto modes have a crypto API fallback. */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
crypto_mode_supported[i] = 0xFFFFFFFF;
crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots,
&blk_crypto_ksm_ll_ops,
crypto_mode_supported, NULL);
if (!blk_crypto_ksm)
return -ENOMEM;
blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
WQ_UNBOUND | WQ_HIGHPRI |
WQ_MEM_RECLAIM, num_online_cpus());
if (!blk_crypto_wq)
return -ENOMEM;
blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
sizeof(blk_crypto_keyslots[0]),
GFP_KERNEL);
if (!blk_crypto_keyslots)
return -ENOMEM;
blk_crypto_bounce_page_pool =
mempool_create_page_pool(num_prealloc_bounce_pg, 0);
if (!blk_crypto_bounce_page_pool)
return -ENOMEM;
blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
SLAB_RECLAIM_ACCOUNT);
if (!blk_crypto_decrypt_work_cache)
return -ENOMEM;
bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
if (!bio_fallback_crypt_ctx_cache)
return -ENOMEM;
bio_fallback_crypt_ctx_pool =
mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
bio_fallback_crypt_ctx_cache);
if (!bio_fallback_crypt_ctx_pool)
return -ENOMEM;
return 0;
}

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H
#include <linux/bio.h>
/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
const char *cipher_str; /* crypto API name (for fallback case) */
unsigned int keysize; /* key size in bytes */
unsigned int ivsize; /* iv size in bytes */
};
extern const struct blk_crypto_mode blk_crypto_modes[];
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
bool blk_crypto_queue_decrypt_bio(struct bio *bio);
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
return false;
}
static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
{
pr_warn_once("crypto API fallback disabled; failing request\n");
(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
return -EIO;
}
static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio)
{
WARN_ON(1);
return false;
}
static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
return 0;
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */

@ -0,0 +1,251 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
/*
* Refer to Documentation/block/inline-encryption.rst for detailed explanation.
*/
#define pr_fmt(fmt) "blk-crypto: " fmt
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include "blk-crypto-internal.h"
const struct blk_crypto_mode blk_crypto_modes[] = {
[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
.cipher_str = "xts(aes)",
.keysize = 64,
.ivsize = 16,
},
[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
.cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.ivsize = 16,
},
[BLK_ENCRYPTION_MODE_ADIANTUM] = {
.cipher_str = "adiantum(xchacha12,aes)",
.keysize = 32,
.ivsize = 32,
},
};
/* Check that all I/O segments are data unit aligned */
static int bio_crypt_check_alignment(struct bio *bio)
{
const unsigned int data_unit_size =
bio->bi_crypt_context->bc_key->data_unit_size;
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
return -EIO;
}
return 0;
}
/**
* blk_crypto_submit_bio - handle submitting bio for inline encryption
*
* @bio_ptr: pointer to original bio pointer
*
* If the bio doesn't have inline encryption enabled or the submitter already
* specified a keyslot for the target device, do nothing. Else, a raw key must
* have been provided, so acquire a device keyslot for it if supported. Else,
* use the crypto API fallback.
*
* When the crypto API fallback is used for encryption, blk-crypto may choose to
* split the bio into 2 - the first one that will continue to be processed and
* the second one that will be resubmitted via generic_make_request.
* A bounce bio will be allocated to encrypt the contents of the aforementioned
* "first one", and *bio_ptr will be updated to this bounce bio.
*
* Return: 0 if bio submission should continue; nonzero if bio_endio() was
* already called so bio submission should abort.
*/
int blk_crypto_submit_bio(struct bio **bio_ptr)
{
struct bio *bio = *bio_ptr;
struct request_queue *q;
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
int err;
if (!bc || !bio_has_data(bio))
return 0;
/*
* When a read bio is marked for fallback decryption, its bi_iter is
* saved so that when we decrypt the bio later, we know what part of it
* was marked for fallback decryption (when the bio is passed down after
* blk_crypto_submit_bio(), it may be split or advanced so we cannot rely
* on the bi_iter while decrypting in blk_crypto_endio)
*/
if (bio_crypt_fallback_crypted(bc))
return 0;
err = bio_crypt_check_alignment(bio);
if (err) {
bio->bi_status = BLK_STS_IOERR;
goto out;
}
q = bio->bi_disk->queue;
if (bc->bc_ksm) {
/* Key already programmed into device? */
if (q->ksm == bc->bc_ksm)
return 0;
/* Nope, release the existing keyslot. */
bio_crypt_ctx_release_keyslot(bc);
}
/* Get device keyslot if supported */
if (keyslot_manager_crypto_mode_supported(q->ksm,
bc->bc_key->crypto_mode,
bc->bc_key->data_unit_size)) {
err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
if (!err)
return 0;
pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n",
bio->bi_disk->disk_name, err);
}
/* Fallback to crypto API */
err = blk_crypto_fallback_submit_bio(bio_ptr);
if (err)
goto out;
return 0;
out:
bio_endio(*bio_ptr);
return err;
}
/**
* blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio
*
* @bio: the bio to clean up
*
* If blk_crypto_submit_bio decided to fallback to crypto API for this bio,
* we queue the bio for decryption into a workqueue and return false,
* and call bio_endio(bio) at a later time (after the bio has been decrypted).
*
* If the bio is not to be decrypted by the crypto API, this function releases
* the reference to the keyslot that blk_crypto_submit_bio got.
*
* Return: true if bio_endio should continue; false otherwise (bio_endio will
* be called again when bio has been decrypted).
*/
bool blk_crypto_endio(struct bio *bio)
{
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
if (!bc)
return true;
if (bio_crypt_fallback_crypted(bc)) {
/*
* The only bios whose crypto is handled by the blk-crypto
* fallback when they reach here are those with
* bio_data_dir(bio) == READ, since WRITE bios that are
* encrypted by the crypto API fallback are handled by
* blk_crypto_encrypt_endio().
*/
return !blk_crypto_queue_decrypt_bio(bio);
}
if (bc->bc_keyslot >= 0)
bio_crypt_ctx_release_keyslot(bc);
return true;
}
/**
* blk_crypto_init_key() - Prepare a key for use with blk-crypto
* @blk_key: Pointer to the blk_crypto_key to initialize.
* @raw_key: Pointer to the raw key.
* @raw_key_size: Size of raw key. Must be at least the required size for the
* chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed
* to be longer than the mode's actual key size, in order to
* support inline encryption hardware that accepts wrapped keys.)
* @crypto_mode: identifier for the encryption algorithm to use
* @data_unit_size: the data unit size to use for en/decryption
*
* Return: 0 on success, or a negative errno value on error. When done using
* the key, it must be freed with blk_crypto_free_key().
*/
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
const u8 *raw_key, unsigned int raw_key_size,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
const struct blk_crypto_mode *mode;
static siphash_key_t hash_key;
memset(blk_key, 0, sizeof(*blk_key));
if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
return -EINVAL;
BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);
mode = &blk_crypto_modes[crypto_mode];
if (raw_key_size < mode->keysize ||
raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
return -EINVAL;
if (!is_power_of_2(data_unit_size))
return -EINVAL;
blk_key->crypto_mode = crypto_mode;
blk_key->data_unit_size = data_unit_size;
blk_key->data_unit_size_bits = ilog2(data_unit_size);
blk_key->size = raw_key_size;
memcpy(blk_key->raw, raw_key, raw_key_size);
/*
* The keyslot manager uses the SipHash of the key to implement O(1) key
* lookups while avoiding leaking information about the keys. It's
* precomputed here so that it only needs to be computed once per key.
*/
get_random_once(&hash_key, sizeof(hash_key));
blk_key->hash = siphash(raw_key, raw_key_size, &hash_key);
return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
/**
* blk_crypto_evict_key() - Evict a key from any inline encryption hardware
* it may have been programmed into
* @q: The request queue whose keyslot manager this key might have been
* programmed into
* @key: The key to evict
*
* Upper layers (filesystems) should call this function to ensure that a key
* is evicted from hardware that it might have been programmed into. This
* will call keyslot_manager_evict_key on the queue's keyslot manager, if one
* exists, and supports the crypto algorithm with the specified data unit size.
* Otherwise, it will evict the key from the blk-crypto-fallback's ksm.
*
* Return: 0 on success, -err on error.
*/
int blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key)
{
if (q->ksm &&
keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
key->data_unit_size))
return keyslot_manager_evict_key(q->ksm, key);
return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
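/*
 * Illustrative sketch only (hypothetical usage, not from this patch): how an
 * upper layer might use the APIs above for AES-256-XTS with 4096-byte data
 * units. The "example_" function names are placeholders.
 */
static int example_setup_key(struct request_queue *q,
			     const u8 *raw_key, unsigned int raw_key_size,
			     struct blk_crypto_key *blk_key)
{
	int err;

	err = blk_crypto_init_key(blk_key, raw_key, raw_key_size,
				  BLK_ENCRYPTION_MODE_AES_256_XTS, 4096);
	if (err)
		return err;

	/* Make sure the fallback has transforms if the hardware can't help. */
	return blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
					   4096, q);
}

static void example_forget_key(struct request_queue *q,
			       const struct blk_crypto_key *blk_key)
{
	/* Evict the key from hardware keyslots or the fallback KSM. */
	blk_crypto_evict_key(q, blk_key);
}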

@ -514,6 +514,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), bio))
return 0;
if (!bio_flagged(req->biotail, BIO_SEG_VALID))
blk_recount_segments(q, req->biotail);
if (!bio_flagged(bio, BIO_SEG_VALID))
@ -536,6 +538,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
if (!bio_crypt_ctx_mergeable(bio, bio->bi_iter.bi_size, req->bio))
return 0;
if (!bio_flagged(bio, BIO_SEG_VALID))
blk_recount_segments(q, bio);
if (!bio_flagged(req->bio, BIO_SEG_VALID))
@ -612,6 +616,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
if (blk_integrity_merge_rq(q, req, next) == false)
return 0;
if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), next->bio))
return 0;
/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
return 1;
@ -833,6 +840,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->write_hint != bio->bi_write_hint)
return false;
/* Only merge if the crypt contexts are compatible */
if (!bio_crypt_ctx_compatible(bio, rq->bio))
return false;
return true;
}

@ -0,0 +1,560 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
/**
* DOC: The Keyslot Manager
*
* Many devices with inline encryption support have a limited number of "slots"
* into which encryption contexts may be programmed, and requests can be tagged
* with a slot number to specify the key to use for en/decryption.
*
* As the number of slots is limited, and programming keys is expensive on
* many inline encryption devices, we don't want to program the same key into
* multiple slots - if multiple requests are using the same key, we want to
* program just one slot with that key and use that slot for all requests.
*
* The keyslot manager manages these keyslots appropriately, and also acts as
* an abstraction between the inline encryption hardware and the upper layers.
*
* Lower layer devices will set up a keyslot manager in their request queue
* and tell it how to perform device specific operations like programming/
* evicting keys from keyslots.
*
* Upper layers will call keyslot_manager_get_slot_for_key() to program a
* key into some slot in the inline encryption hardware.
*/
#include <crypto/algapi.h>
#include <linux/keyslot-manager.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/overflow.h>
struct keyslot {
atomic_t slot_refs;
struct list_head idle_slot_node;
struct hlist_node hash_node;
struct blk_crypto_key key;
};
struct keyslot_manager {
unsigned int num_slots;
struct keyslot_mgmt_ll_ops ksm_ll_ops;
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
void *ll_priv_data;
/* Protects programming and evicting keys from the device */
struct rw_semaphore lock;
/* List of idle slots, with least recently used slot at front */
wait_queue_head_t idle_slots_wait_queue;
struct list_head idle_slots;
spinlock_t idle_slots_lock;
/*
* Hash table which maps key hashes to keyslots, so that we can find a
* key's keyslot in O(1) time rather than O(num_slots). Protected by
* 'lock'. A cryptographic hash function is used so that timing attacks
* can't leak information about the raw keys.
*/
struct hlist_head *slot_hashtable;
unsigned int slot_hashtable_size;
/* Per-keyslot data */
struct keyslot slots[];
};
static inline bool keyslot_manager_is_passthrough(struct keyslot_manager *ksm)
{
return ksm->num_slots == 0;
}
/**
* keyslot_manager_create() - Create a keyslot manager
* @num_slots: The number of key slots to manage.
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot
* manager will use to perform operations like programming and
* evicting keys.
* @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of
* bitmasks that represents whether a crypto mode
* and data unit size are supported. The i'th bit
* of crypto_mode_supported[crypto_mode] is set iff
* a data unit size of (1 << i) is supported. We
* only support data unit sizes that are powers of
* 2.
* @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops.
*
* Allocate memory for and initialize a keyslot manager. Called by e.g.
* storage drivers to set up a keyslot manager in their request_queue.
*
* Context: May sleep
* Return: Pointer to constructed keyslot manager or NULL on error.
*/
struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
struct keyslot_manager *ksm;
unsigned int slot;
unsigned int i;
if (num_slots == 0)
return NULL;
/* Check that all ops are specified */
if (ksm_ll_ops->keyslot_program == NULL ||
ksm_ll_ops->keyslot_evict == NULL)
return NULL;
ksm = kvzalloc(struct_size(ksm, slots, num_slots), GFP_KERNEL);
if (!ksm)
return NULL;
ksm->num_slots = num_slots;
ksm->ksm_ll_ops = *ksm_ll_ops;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
init_rwsem(&ksm->lock);
init_waitqueue_head(&ksm->idle_slots_wait_queue);
INIT_LIST_HEAD(&ksm->idle_slots);
for (slot = 0; slot < num_slots; slot++) {
list_add_tail(&ksm->slots[slot].idle_slot_node,
&ksm->idle_slots);
}
spin_lock_init(&ksm->idle_slots_lock);
ksm->slot_hashtable_size = roundup_pow_of_two(num_slots);
ksm->slot_hashtable = kvmalloc_array(ksm->slot_hashtable_size,
sizeof(ksm->slot_hashtable[0]),
GFP_KERNEL);
if (!ksm->slot_hashtable)
goto err_free_ksm;
for (i = 0; i < ksm->slot_hashtable_size; i++)
INIT_HLIST_HEAD(&ksm->slot_hashtable[i]);
return ksm;
err_free_ksm:
keyslot_manager_destroy(ksm);
return NULL;
}
EXPORT_SYMBOL_GPL(keyslot_manager_create);
static inline struct hlist_head *
hash_bucket_for_key(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)];
}
static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot)
{
unsigned long flags;
spin_lock_irqsave(&ksm->idle_slots_lock, flags);
list_del(&ksm->slots[slot].idle_slot_node);
spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);
}
static int find_keyslot(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
const struct hlist_head *head = hash_bucket_for_key(ksm, key);
const struct keyslot *slotp;
hlist_for_each_entry(slotp, head, hash_node) {
if (slotp->key.hash == key->hash &&
slotp->key.crypto_mode == key->crypto_mode &&
slotp->key.size == key->size &&
slotp->key.data_unit_size == key->data_unit_size &&
!crypto_memneq(slotp->key.raw, key->raw, key->size))
return slotp - ksm->slots;
}
return -ENOKEY;
}
static int find_and_grab_keyslot(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
int slot;
slot = find_keyslot(ksm, key);
if (slot < 0)
return slot;
if (atomic_inc_return(&ksm->slots[slot].slot_refs) == 1) {
/* Took first reference to this slot; remove it from LRU list */
remove_slot_from_lru_list(ksm, slot);
}
return slot;
}
/**
* keyslot_manager_get_slot_for_key() - Program a key into a keyslot.
* @ksm: The keyslot manager to program the key into.
* @key: Pointer to the key object to program, including the raw key, crypto
* mode, and data unit size.
*
* Get a keyslot that's been programmed with the specified key. If one already
* exists, return it with incremented refcount. Otherwise, wait for a keyslot
* to become idle and program it.
*
* Context: Process context. Takes and releases ksm->lock.
* Return: The keyslot on success, else a -errno value.
*/
int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
int slot;
int err;
struct keyslot *idle_slot;
if (keyslot_manager_is_passthrough(ksm))
return 0;
down_read(&ksm->lock);
slot = find_and_grab_keyslot(ksm, key);
up_read(&ksm->lock);
if (slot != -ENOKEY)
return slot;
for (;;) {
down_write(&ksm->lock);
slot = find_and_grab_keyslot(ksm, key);
if (slot != -ENOKEY) {
up_write(&ksm->lock);
return slot;
}
/*
* If we're here, that means there wasn't a slot that was
* already programmed with the key. So try to program it.
*/
if (!list_empty(&ksm->idle_slots))
break;
up_write(&ksm->lock);
wait_event(ksm->idle_slots_wait_queue,
!list_empty(&ksm->idle_slots));
}
idle_slot = list_first_entry(&ksm->idle_slots, struct keyslot,
idle_slot_node);
slot = idle_slot - ksm->slots;
err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot);
if (err) {
wake_up(&ksm->idle_slots_wait_queue);
up_write(&ksm->lock);
return err;
}
/* Move this slot to the hash list for the new key. */
if (idle_slot->key.crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
hlist_del(&idle_slot->hash_node);
hlist_add_head(&idle_slot->hash_node, hash_bucket_for_key(ksm, key));
atomic_set(&idle_slot->slot_refs, 1);
idle_slot->key = *key;
remove_slot_from_lru_list(ksm, slot);
up_write(&ksm->lock);
return slot;
}
/**
* keyslot_manager_get_slot() - Increment the refcount on the specified slot.
* @ksm: The keyslot manager that we want to modify.
* @slot: The slot to increment the refcount of.
*
* This function assumes that there is already an active reference to that slot
* and simply increments the refcount. This is useful when cloning a bio that
* already has a reference to a keyslot, and we want the cloned bio to also have
* its own reference.
*
* Context: Any context.
*/
void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot)
{
if (keyslot_manager_is_passthrough(ksm))
return;
if (WARN_ON(slot >= ksm->num_slots))
return;
WARN_ON(atomic_inc_return(&ksm->slots[slot].slot_refs) < 2);
}
/**
* keyslot_manager_put_slot() - Release a reference to a slot
* @ksm: The keyslot manager to release the reference from.
* @slot: The slot to release the reference from.
*
* Context: Any context.
*/
void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot)
{
unsigned long flags;
if (keyslot_manager_is_passthrough(ksm))
return;
if (WARN_ON(slot >= ksm->num_slots))
return;
if (atomic_dec_and_lock_irqsave(&ksm->slots[slot].slot_refs,
&ksm->idle_slots_lock, flags)) {
list_add_tail(&ksm->slots[slot].idle_slot_node,
&ksm->idle_slots);
spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);
wake_up(&ksm->idle_slots_wait_queue);
}
}
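/*
 * Usage sketch (illustrative): callers such as blk-crypto are expected to pair
 * these helpers around I/O submission, roughly:
 *
 *	slot = keyslot_manager_get_slot_for_key(ksm, key);
 *	if (slot < 0)
 *		return slot;
 *	... issue the request using @slot ...
 *	keyslot_manager_put_slot(ksm, slot);
 */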
/**
* keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data
* unit size combination is supported
* by a ksm.
* @ksm: The keyslot manager to check
* @crypto_mode: The crypto mode to check for.
* @data_unit_size: The data_unit_size for the mode.
*
 * Checks the keyslot manager's crypto_mode_supported bitmasks to determine
 * whether the given crypto mode and data unit size combination is supported.
*
* Context: Process context.
* Return: Whether or not this ksm supports the specified crypto_mode/
* data_unit_size combo.
*/
bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
if (!ksm)
return false;
if (WARN_ON(crypto_mode >= BLK_ENCRYPTION_MODE_MAX))
return false;
if (WARN_ON(!is_power_of_2(data_unit_size)))
return false;
return ksm->crypto_mode_supported[crypto_mode] & data_unit_size;
}
/**
* keyslot_manager_evict_key() - Evict a key from the lower layer device.
* @ksm: The keyslot manager to evict from
* @key: The key to evict
*
* Find the keyslot that the specified key was programmed into, and evict that
* slot from the lower layer device if that slot is not currently in use.
*
* Context: Process context. Takes and releases ksm->lock.
* Return: 0 on success, -EBUSY if the key is still in use, or another
* -errno value on other error.
*/
int keyslot_manager_evict_key(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
int slot;
int err;
struct keyslot *slotp;
if (keyslot_manager_is_passthrough(ksm)) {
if (ksm->ksm_ll_ops.keyslot_evict) {
down_write(&ksm->lock);
err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1);
up_write(&ksm->lock);
return err;
}
return 0;
}
down_write(&ksm->lock);
slot = find_keyslot(ksm, key);
if (slot < 0) {
err = slot;
goto out_unlock;
}
slotp = &ksm->slots[slot];
if (atomic_read(&slotp->slot_refs) != 0) {
err = -EBUSY;
goto out_unlock;
}
err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, slot);
if (err)
goto out_unlock;
hlist_del(&slotp->hash_node);
memzero_explicit(&slotp->key, sizeof(slotp->key));
err = 0;
out_unlock:
up_write(&ksm->lock);
return err;
}
/**
* keyslot_manager_reprogram_all_keys() - Re-program all keyslots.
* @ksm: The keyslot manager
*
* Re-program all keyslots that are supposed to have a key programmed. This is
* intended only for use by drivers for hardware that loses its keys on reset.
*
* Context: Process context. Takes and releases ksm->lock.
*/
void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm)
{
unsigned int slot;
if (WARN_ON(keyslot_manager_is_passthrough(ksm)))
return;
down_write(&ksm->lock);
for (slot = 0; slot < ksm->num_slots; slot++) {
const struct keyslot *slotp = &ksm->slots[slot];
int err;
if (slotp->key.crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
continue;
err = ksm->ksm_ll_ops.keyslot_program(ksm, &slotp->key, slot);
WARN_ON(err);
}
up_write(&ksm->lock);
}
EXPORT_SYMBOL_GPL(keyslot_manager_reprogram_all_keys);
/**
* keyslot_manager_private() - return the private data stored with ksm
* @ksm: The keyslot manager
*
* Returns the private data passed to the ksm when it was created.
*/
void *keyslot_manager_private(struct keyslot_manager *ksm)
{
return ksm->ll_priv_data;
}
EXPORT_SYMBOL_GPL(keyslot_manager_private);
void keyslot_manager_destroy(struct keyslot_manager *ksm)
{
if (ksm) {
kvfree(ksm->slot_hashtable);
memzero_explicit(ksm, struct_size(ksm, slots, ksm->num_slots));
kvfree(ksm);
}
}
EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
/**
* keyslot_manager_create_passthrough() - Create a passthrough keyslot manager
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops
* @crypto_mode_supported: Bitmasks for supported encryption modes
* @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops.
*
* Allocate memory for and initialize a passthrough keyslot manager.
* Called by e.g. storage drivers to set up a keyslot manager in their
* request_queue, when the storage driver wants to manage its keys by itself.
 * This is useful for inline encryption hardware that doesn't have a small fixed
* number of keyslots, and for layered devices.
*
* See keyslot_manager_create() for more details about the parameters.
*
* Context: This function may sleep
* Return: Pointer to constructed keyslot manager or NULL on error.
*/
struct keyslot_manager *keyslot_manager_create_passthrough(
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
struct keyslot_manager *ksm;
ksm = kzalloc(sizeof(*ksm), GFP_KERNEL);
if (!ksm)
return NULL;
ksm->ksm_ll_ops = *ksm_ll_ops;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
init_rwsem(&ksm->lock);
return ksm;
}
EXPORT_SYMBOL_GPL(keyslot_manager_create_passthrough);
/**
* keyslot_manager_intersect_modes() - restrict supported modes by child device
* @parent: The keyslot manager for parent device
* @child: The keyslot manager for child device, or NULL
*
* Clear any crypto mode support bits in @parent that aren't set in @child.
* If @child is NULL, then all parent bits are cleared.
*
* Only use this when setting up the keyslot manager for a layered device,
 * before it has been exposed.
*/
void keyslot_manager_intersect_modes(struct keyslot_manager *parent,
const struct keyslot_manager *child)
{
if (child) {
unsigned int i;
for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) {
parent->crypto_mode_supported[i] &=
child->crypto_mode_supported[i];
}
} else {
memset(parent->crypto_mode_supported, 0,
sizeof(parent->crypto_mode_supported));
}
}
EXPORT_SYMBOL_GPL(keyslot_manager_intersect_modes);
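/*
 * Worked example (illustrative): if @parent starts with
 * crypto_mode_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 512 | 4096 and the
 * child device only supports 4096-byte data units for that mode, the
 * intersection leaves only the 4096 bit set. A NULL @child clears every bit,
 * which disables inline crypto passthrough for the parent device.
 */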
/**
* keyslot_manager_derive_raw_secret() - Derive software secret from wrapped key
* @ksm: The keyslot manager
* @wrapped_key: The wrapped key
* @wrapped_key_size: Size of the wrapped key in bytes
* @secret: (output) the software secret
 * @secret_size: the number of secret bytes to derive
*
* Given a hardware-wrapped key, ask the hardware to derive a secret which
* software can use for cryptographic tasks other than inline encryption. The
* derived secret is guaranteed to be cryptographically isolated from the key
* with which any inline encryption with this wrapped key would actually be
* done. I.e., both will be derived from the unwrapped key.
*
* Return: 0 on success, -EOPNOTSUPP if hardware-wrapped keys are unsupported,
* or another -errno code.
*/
int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *secret, unsigned int secret_size)
{
int err;
down_write(&ksm->lock);
if (ksm->ksm_ll_ops.derive_raw_secret) {
err = ksm->ksm_ll_ops.derive_raw_secret(ksm, wrapped_key,
wrapped_key_size,
secret, secret_size);
} else {
err = -EOPNOTSUPP;
}
up_write(&ksm->lock);
return err;
}
EXPORT_SYMBOL_GPL(keyslot_manager_derive_raw_secret);

@ -286,6 +286,27 @@ config DM_CRYPT
If unsure, say N.
config DM_DEFAULT_KEY
tristate "Default-key target support"
depends on BLK_DEV_DM
depends on BLK_INLINE_ENCRYPTION
# dm-default-key doesn't require -o inlinecrypt, but it does currently
# rely on the inline encryption hooks being built into the kernel.
depends on FS_ENCRYPTION_INLINE_CRYPT
help
This device-mapper target allows you to create a device that
assigns a default encryption key to bios that aren't for the
contents of an encrypted file.
This ensures that all blocks on-disk will be encrypted with
some key, without the performance hit of file contents being
encrypted twice when fscrypt (File-Based Encryption) is used.
It is only appropriate to use dm-default-key when key
configuration is tightly controlled, like it is in Android,
such that all fscrypt keys are at least as hard to compromise
as the default key.
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM

@ -43,6 +43,7 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o

@ -789,6 +789,7 @@ static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rb_insert_color(&br->node, &bc->ranges);
ti->discards_supported = true;
ti->may_passthrough_inline_crypto = true;
return 0;

@ -0,0 +1,403 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Google, Inc.
*/
#include <linux/blk-crypto.h>
#include <linux/device-mapper.h>
#include <linux/module.h>
#define DM_MSG_PREFIX "default-key"
#define DM_DEFAULT_KEY_MAX_KEY_SIZE 64
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
static const struct dm_default_key_cipher {
const char *name;
enum blk_crypto_mode_num mode_num;
int key_size;
} dm_default_key_ciphers[] = {
{
.name = "aes-xts-plain64",
.mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS,
.key_size = 64,
}, {
.name = "xchacha12,aes-adiantum-plain64",
.mode_num = BLK_ENCRYPTION_MODE_ADIANTUM,
.key_size = 32,
},
};
/**
 * struct default_key_c - private data of a default-key target
* @dev: the underlying device
* @start: starting sector of the range of @dev which this target actually maps.
* For this purpose a "sector" is 512 bytes.
* @cipher_string: the name of the encryption algorithm being used
* @iv_offset: starting offset for IVs. IVs are generated as if the target were
* preceded by @iv_offset 512-byte sectors.
* @sector_size: crypto sector size in bytes (usually 4096)
* @sector_bits: log2(sector_size)
* @key: the encryption key to use
*/
struct default_key_c {
struct dm_dev *dev;
sector_t start;
const char *cipher_string;
u64 iv_offset;
unsigned int sector_size;
unsigned int sector_bits;
struct blk_crypto_key key;
};
static const struct dm_default_key_cipher *
lookup_cipher(const char *cipher_string)
{
int i;
for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) {
if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0)
return &dm_default_key_ciphers[i];
}
return NULL;
}
static void default_key_dtr(struct dm_target *ti)
{
struct default_key_c *dkc = ti->private;
int err;
if (dkc->dev) {
err = blk_crypto_evict_key(dkc->dev->bdev->bd_queue, &dkc->key);
if (err && err != -ENOKEY)
DMWARN("Failed to evict crypto key: %d", err);
dm_put_device(ti, dkc->dev);
}
kzfree(dkc->cipher_string);
kzfree(dkc);
}
static int default_key_ctr_optional(struct dm_target *ti,
unsigned int argc, char **argv)
{
struct default_key_c *dkc = ti->private;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 3, "Invalid number of feature args"},
};
unsigned int opt_params;
const char *opt_string;
bool iv_large_sectors = false;
char dummy;
int err;
as.argc = argc;
as.argv = argv;
err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
if (err)
return err;
while (opt_params--) {
opt_string = dm_shift_arg(&as);
if (!opt_string) {
ti->error = "Not enough feature arguments";
return -EINVAL;
}
if (!strcmp(opt_string, "allow_discards")) {
ti->num_discard_bios = 1;
} else if (sscanf(opt_string, "sector_size:%u%c",
&dkc->sector_size, &dummy) == 1) {
if (dkc->sector_size < SECTOR_SIZE ||
dkc->sector_size > 4096 ||
!is_power_of_2(dkc->sector_size)) {
ti->error = "Invalid sector_size";
return -EINVAL;
}
} else if (!strcmp(opt_string, "iv_large_sectors")) {
iv_large_sectors = true;
} else {
ti->error = "Invalid feature arguments";
return -EINVAL;
}
}
/* dm-default-key doesn't implement iv_large_sectors=false. */
if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {
ti->error = "iv_large_sectors must be specified";
return -EINVAL;
}
return 0;
}
/*
* Construct a default-key mapping:
* <cipher> <key> <iv_offset> <dev_path> <start>
*
* This syntax matches dm-crypt's, but lots of unneeded functionality has been
* removed. Also, dm-default-key requires that the "iv_large_sectors" option be
* given whenever a non-default sector size is used.
*/
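/*
 * Hypothetical example table line (device path, length, and key are
 * placeholders, not values from this patch):
 *
 *	dmsetup create metadata-crypt --table "0 <num_512B_sectors> default-key \
 *		aes-xts-plain64 <128-hex-char-key> 0 <dev_path> 0 \
 *		3 allow_discards sector_size:4096 iv_large_sectors"
 */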
static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct default_key_c *dkc;
const struct dm_default_key_cipher *cipher;
u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE];
unsigned long long tmpll;
char dummy;
int err;
if (argc < 5) {
ti->error = "Not enough arguments";
return -EINVAL;
}
dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
if (!dkc) {
ti->error = "Out of memory";
return -ENOMEM;
}
ti->private = dkc;
/* <cipher> */
dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL);
if (!dkc->cipher_string) {
ti->error = "Out of memory";
err = -ENOMEM;
goto bad;
}
cipher = lookup_cipher(dkc->cipher_string);
if (!cipher) {
ti->error = "Unsupported cipher";
err = -EINVAL;
goto bad;
}
/* <key> */
if (strlen(argv[1]) != 2 * cipher->key_size) {
ti->error = "Incorrect key size for cipher";
err = -EINVAL;
goto bad;
}
if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) {
ti->error = "Malformed key string";
err = -EINVAL;
goto bad;
}
/* <iv_offset> */
if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) {
ti->error = "Invalid iv_offset sector";
err = -EINVAL;
goto bad;
}
/* <dev_path> */
err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
&dkc->dev);
if (err) {
ti->error = "Device lookup failed";
goto bad;
}
/* <start> */
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
tmpll != (sector_t)tmpll) {
ti->error = "Invalid start sector";
err = -EINVAL;
goto bad;
}
dkc->start = tmpll;
/* optional arguments */
dkc->sector_size = SECTOR_SIZE;
if (argc > 5) {
err = default_key_ctr_optional(ti, argc - 5, &argv[5]);
if (err)
goto bad;
}
dkc->sector_bits = ilog2(dkc->sector_size);
if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
ti->error = "Device size is not a multiple of sector_size";
err = -EINVAL;
goto bad;
}
err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size,
cipher->mode_num, dkc->sector_size);
if (err) {
ti->error = "Error initializing blk-crypto key";
goto bad;
}
err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size,
dkc->dev->bdev->bd_queue);
if (err) {
ti->error = "Error starting to use blk-crypto";
goto bad;
}
ti->num_flush_bios = 1;
ti->may_passthrough_inline_crypto = true;
err = 0;
goto out;
bad:
default_key_dtr(ti);
out:
memzero_explicit(raw_key, sizeof(raw_key));
return err;
}
static int default_key_map(struct dm_target *ti, struct bio *bio)
{
const struct default_key_c *dkc = ti->private;
sector_t sector_in_target;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
bio_set_dev(bio, dkc->dev->bdev);
/*
* If the bio is a device-level request which doesn't target a specific
* sector, there's nothing more to do.
*/
if (bio_sectors(bio) == 0)
return DM_MAPIO_REMAPPED;
/* Map the bio's sector to the underlying device. (512-byte sectors) */
sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio->bi_iter.bi_sector = dkc->start + sector_in_target;
/*
* If the bio should skip dm-default-key (i.e. if it's for an encrypted
* file's contents), or if it doesn't have any data (e.g. if it's a
* DISCARD request), there's nothing more to do.
*/
if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio))
return DM_MAPIO_REMAPPED;
/*
* Else, dm-default-key needs to set this bio's encryption context.
* It must not already have one.
*/
if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
return DM_MAPIO_KILL;
/* Calculate the DUN and enforce data-unit (crypto sector) alignment. */
dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */
if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
return DM_MAPIO_KILL;
dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */
bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO);
return DM_MAPIO_REMAPPED;
}
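/*
 * Worked example (illustrative): with sector_size = 4096 (sector_bits = 12),
 * iv_offset = 0, and a bio starting at 512-byte sector 8 of the target,
 * dun[0] is first 8, which passes the 8-sector alignment check, and is then
 * shifted right by (12 - 9) = 3, giving a DUN of 1, i.e. the second 4096-byte
 * crypto sector.
 */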
static void default_key_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result,
unsigned int maxlen)
{
const struct default_key_c *dkc = ti->private;
unsigned int sz = 0;
int num_feature_args = 0;
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
/* Omit the key for now. */
DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset,
dkc->dev->name, (unsigned long long)dkc->start);
num_feature_args += !!ti->num_discard_bios;
if (dkc->sector_size != SECTOR_SIZE)
num_feature_args += 2;
if (num_feature_args != 0) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
DMEMIT(" allow_discards");
if (dkc->sector_size != SECTOR_SIZE) {
DMEMIT(" sector_size:%u", dkc->sector_size);
DMEMIT(" iv_large_sectors");
}
}
break;
}
}
static int default_key_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev,
fmode_t *mode)
{
const struct default_key_c *dkc = ti->private;
const struct dm_dev *dev = dkc->dev;
*bdev = dev->bdev;
/* Only pass ioctls through if the device sizes match exactly. */
if (dkc->start != 0 ||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
return 1;
return 0;
}
static int default_key_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn,
void *data)
{
const struct default_key_c *dkc = ti->private;
return fn(ti, dkc->dev, dkc->start, ti->len, data);
}
static void default_key_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
const struct default_key_c *dkc = ti->private;
const unsigned int sector_size = dkc->sector_size;
limits->logical_block_size =
max_t(unsigned short, limits->logical_block_size, sector_size);
limits->physical_block_size =
max_t(unsigned int, limits->physical_block_size, sector_size);
limits->io_min = max_t(unsigned int, limits->io_min, sector_size);
}
static struct target_type default_key_target = {
.name = "default-key",
.version = {2, 0, 0},
.module = THIS_MODULE,
.ctr = default_key_ctr,
.dtr = default_key_dtr,
.map = default_key_map,
.status = default_key_status,
.prepare_ioctl = default_key_prepare_ioctl,
.iterate_devices = default_key_iterate_devices,
.io_hints = default_key_io_hints,
};
static int __init dm_default_key_init(void)
{
return dm_register_target(&default_key_target);
}
static void __exit dm_default_key_exit(void)
{
dm_unregister_target(&default_key_target);
}
module_init(dm_default_key_init);
module_exit(dm_default_key_exit);
MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
MODULE_LICENSE("GPL");

@ -61,6 +61,7 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->may_passthrough_inline_crypto = true;
ti->private = lc;
return 0;

@ -22,6 +22,8 @@
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
#include <linux/bio.h>
#include <linux/keyslot-manager.h>
#define DM_MSG_PREFIX "table"
@ -1597,6 +1599,54 @@ static void dm_table_verify_integrity(struct dm_table *t)
}
}
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static int device_intersect_crypto_modes(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct keyslot_manager *parent = data;
struct keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
keyslot_manager_intersect_modes(parent, child);
return 0;
}
/*
* Update the inline crypto modes supported by 'q->ksm' to be the intersection
* of the modes supported by all targets in the table.
*
* For any mode to be supported at all, all targets must have explicitly
* declared that they can pass through inline crypto support. For a particular
* mode to be supported, all underlying devices must also support it.
*
* Assume that 'q->ksm' initially declares all modes to be supported.
*/
static void dm_calculate_supported_crypto_modes(struct dm_table *t,
struct request_queue *q)
{
struct dm_target *ti;
unsigned int i;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->may_passthrough_inline_crypto) {
keyslot_manager_intersect_modes(q->ksm, NULL);
return;
}
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, device_intersect_crypto_modes,
q->ksm);
}
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_calculate_supported_crypto_modes(struct dm_table *t,
struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@ -1871,6 +1921,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
dm_calculate_supported_crypto_modes(t, q);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.

@ -24,6 +24,8 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/blk-crypto.h>
#include <linux/keyslot-manager.h>
#define DM_MSG_PREFIX "core"
@ -1249,9 +1251,10 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
__bio_clone_fast(clone, bio);
bio_crypt_clone(clone, bio, GFP_NOIO);
if (unlikely(bio_integrity(bio) != NULL)) {
int r;
if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
!dm_target_passes_integrity(tio->ti->type))) {
DMWARN("%s: the target %s doesn't support integrity data.",
@ -1661,6 +1664,8 @@ void dm_init_normal_md_queue(struct mapped_device *md)
md->queue->backing_dev_info->congested_fn = dm_any_congested;
}
static void dm_destroy_inline_encryption(struct request_queue *q);
static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
@ -1685,8 +1690,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
put_disk(md->disk);
}
if (md->queue)
if (md->queue) {
dm_destroy_inline_encryption(md->queue);
blk_cleanup_queue(md->queue);
}
cleanup_srcu_struct(&md->io_barrier);
@ -2035,6 +2042,89 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_keyslot_evict_args {
const struct blk_crypto_key *key;
int err;
};
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_keyslot_evict_args *args = data;
int err;
err = blk_crypto_evict_key(dev->bdev->bd_queue, args->key);
if (!args->err)
args->err = err;
/* Always try to evict the key from all devices. */
return 0;
}
/*
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
static int dm_keyslot_evict(struct keyslot_manager *ksm,
const struct blk_crypto_key *key, unsigned int slot)
{
struct mapped_device *md = keyslot_manager_private(ksm);
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = {
.keyslot_evict = dm_keyslot_evict,
};
static int dm_init_inline_encryption(struct mapped_device *md)
{
unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX];
/*
* Start out with all crypto mode support bits set. Any unsupported
* bits will be cleared later when calculating the device restrictions.
*/
memset(mode_masks, 0xFF, sizeof(mode_masks));
md->queue->ksm = keyslot_manager_create_passthrough(&dm_ksm_ll_ops,
mode_masks, md);
if (!md->queue->ksm)
return -ENOMEM;
return 0;
}
static void dm_destroy_inline_encryption(struct request_queue *q)
{
keyslot_manager_destroy(q->ksm);
q->ksm = NULL;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int dm_init_inline_encryption(struct mapped_device *md)
{
return 0;
}
static inline void dm_destroy_inline_encryption(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
/*
* Setup the DM device's queue based on md's type
*/
@ -2073,6 +2163,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
}
r = dm_init_inline_encryption(md);
if (r) {
DMERR("Cannot initialize inline encryption");
return r;
}
return 0;
}

@ -122,3 +122,12 @@ config SCSI_UFSHCD_CMD_LOGGING
Select this if you want above mentioned debug information captured.
If unsure, say N.
config SCSI_UFS_CRYPTO
bool "UFS Crypto Engine Support"
depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
help
Enable Crypto Engine Support in UFS.
Enabling this makes it possible for the kernel to use the crypto
capabilities of the UFS device (if present) to perform crypto
operations on data being transferred to/from the device.

@ -3,8 +3,10 @@
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-y := ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o

@ -1452,6 +1452,12 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
if (host->disable_lpm)
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
/*
* Inline crypto is currently broken with ufs-qcom at least because the
* device tree doesn't include the crypto registers. There are likely
* to be other issues that will need to be addressed too.
*/
//hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)

@ -0,0 +1,499 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
#include <linux/keyslot-manager.h>
#include "ufshcd.h"
#include "ufshcd-crypto.h"
static bool ufshcd_cap_idx_valid(struct ufs_hba *hba, unsigned int cap_idx)
{
return cap_idx < hba->crypto_capabilities.num_crypto_cap;
}
static u8 get_data_unit_size_mask(unsigned int data_unit_size)
{
if (data_unit_size < 512 || data_unit_size > 65536 ||
!is_power_of_2(data_unit_size))
return 0;
return data_unit_size / 512;
}
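/*
 * Note (derived from the code above and the sdus_mask convention used in this
 * file): bit j of the returned mask stands for a data unit size of
 * (512 << j) bytes, so 512 maps to 0x1, 4096 maps to 0x8, and any size outside
 * [512, 65536] or not a power of two maps to 0 (unsupported).
 */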
static size_t get_keysize_bytes(enum ufs_crypto_key_size size)
{
switch (size) {
case UFS_CRYPTO_KEY_SIZE_128:
return 16;
case UFS_CRYPTO_KEY_SIZE_192:
return 24;
case UFS_CRYPTO_KEY_SIZE_256:
return 32;
case UFS_CRYPTO_KEY_SIZE_512:
return 64;
default:
return 0;
}
}
int ufshcd_crypto_cap_find(struct ufs_hba *hba,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
enum ufs_crypto_alg ufs_alg;
u8 data_unit_mask;
int cap_idx;
enum ufs_crypto_key_size ufs_key_size;
union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
if (!ufshcd_hba_is_crypto_supported(hba))
return -EINVAL;
switch (crypto_mode) {
case BLK_ENCRYPTION_MODE_AES_256_XTS:
ufs_alg = UFS_CRYPTO_ALG_AES_XTS;
ufs_key_size = UFS_CRYPTO_KEY_SIZE_256;
break;
default:
return -EINVAL;
}
data_unit_mask = get_data_unit_size_mask(data_unit_size);
for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
cap_idx++) {
if (ccap_array[cap_idx].algorithm_id == ufs_alg &&
(ccap_array[cap_idx].sdus_mask & data_unit_mask) &&
ccap_array[cap_idx].key_size == ufs_key_size)
return cap_idx;
}
return -EINVAL;
}
EXPORT_SYMBOL(ufshcd_crypto_cap_find);
/**
* ufshcd_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry
*
* Writes the key with the appropriate format - for AES_XTS,
* the first half of the key is copied as is, the second half is
* copied with an offset halfway into the cfg->crypto_key array.
* For the other supported crypto algs, the key is just copied.
*
* @cfg: The crypto config to write to
* @key: The key to write
* @cap: The crypto capability (which specifies the crypto alg and key size)
*
* Returns 0 on success, or -EINVAL
*/
static int ufshcd_crypto_cfg_entry_write_key(union ufs_crypto_cfg_entry *cfg,
const u8 *key,
union ufs_crypto_cap_entry cap)
{
size_t key_size_bytes = get_keysize_bytes(cap.key_size);
if (key_size_bytes == 0)
return -EINVAL;
switch (cap.algorithm_id) {
case UFS_CRYPTO_ALG_AES_XTS:
key_size_bytes *= 2;
if (key_size_bytes > UFS_CRYPTO_KEY_MAX_SIZE)
return -EINVAL;
memcpy(cfg->crypto_key, key, key_size_bytes/2);
memcpy(cfg->crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
key + key_size_bytes/2, key_size_bytes/2);
return 0;
case UFS_CRYPTO_ALG_BITLOCKER_AES_CBC:
/* fall through */
case UFS_CRYPTO_ALG_AES_ECB:
/* fall through */
case UFS_CRYPTO_ALG_ESSIV_AES_CBC:
memcpy(cfg->crypto_key, key, key_size_bytes);
return 0;
}
return -EINVAL;
}
static int ufshcd_program_key(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
int err;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
if (hba->vops->program_key) {
err = hba->vops->program_key(hba, cfg, slot);
goto out;
}
/* Clear the dword 16 */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
/* Ensure that CFGE is cleared before programming the key */
wmb();
for (i = 0; i < 16; i++) {
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]),
slot_offset + i * sizeof(cfg->reg_val[0]));
/* Spec says each dword in key must be written sequentially */
wmb();
}
/* Write dword 17 */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]),
slot_offset + 17 * sizeof(cfg->reg_val[0]));
/* Dword 16 must be written last */
wmb();
/* Write dword 16 */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
wmb();
err = 0;
out:
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
return err;
}
static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
{
union ufs_crypto_cfg_entry cfg = { 0 };
int err;
err = ufshcd_program_key(hba, &cfg, slot);
WARN_ON_ONCE(err);
}
/* Clear all keyslots at driver init time */
static void ufshcd_clear_all_keyslots(struct ufs_hba *hba)
{
int slot;
for (slot = 0; slot < ufshcd_num_keyslots(hba); slot++)
ufshcd_clear_keyslot(hba, slot);
}
static int ufshcd_crypto_keyslot_program(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba = keyslot_manager_private(ksm);
int err = 0;
u8 data_unit_mask;
union ufs_crypto_cfg_entry cfg;
int cap_idx;
cap_idx = ufshcd_crypto_cap_find(hba, key->crypto_mode,
key->data_unit_size);
if (!ufshcd_is_crypto_enabled(hba) ||
!ufshcd_keyslot_valid(hba, slot) ||
!ufshcd_cap_idx_valid(hba, cap_idx))
return -EINVAL;
data_unit_mask = get_data_unit_size_mask(key->data_unit_size);
if (!(data_unit_mask & hba->crypto_cap_array[cap_idx].sdus_mask))
return -EINVAL;
memset(&cfg, 0, sizeof(cfg));
cfg.data_unit_size = data_unit_mask;
cfg.crypto_cap_idx = cap_idx;
cfg.config_enable |= UFS_CRYPTO_CONFIGURATION_ENABLE;
err = ufshcd_crypto_cfg_entry_write_key(&cfg, key->raw,
hba->crypto_cap_array[cap_idx]);
if (err)
return err;
err = ufshcd_program_key(hba, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
return err;
}
static int ufshcd_crypto_keyslot_evict(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba = keyslot_manager_private(ksm);
if (!ufshcd_is_crypto_enabled(hba) ||
!ufshcd_keyslot_valid(hba, slot))
return -EINVAL;
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
ufshcd_clear_keyslot(hba, slot);
return 0;
}
/* Functions implementing UFSHCI v2.1 specification behaviour */
void ufshcd_crypto_enable_spec(struct ufs_hba *hba)
{
if (!ufshcd_hba_is_crypto_supported(hba))
return;
hba->caps |= UFSHCD_CAP_CRYPTO;
/* Reset might clear all keys, so reprogram all the keys. */
keyslot_manager_reprogram_all_keys(hba->ksm);
}
EXPORT_SYMBOL_GPL(ufshcd_crypto_enable_spec);
void ufshcd_crypto_disable_spec(struct ufs_hba *hba)
{
hba->caps &= ~UFSHCD_CAP_CRYPTO;
}
EXPORT_SYMBOL_GPL(ufshcd_crypto_disable_spec);
static const struct keyslot_mgmt_ll_ops ufshcd_ksm_ops = {
.keyslot_program = ufshcd_crypto_keyslot_program,
.keyslot_evict = ufshcd_crypto_keyslot_evict,
};
enum blk_crypto_mode_num ufshcd_blk_crypto_mode_num_for_alg_dusize(
enum ufs_crypto_alg ufs_crypto_alg,
enum ufs_crypto_key_size key_size)
{
/*
* This is currently the only mode that UFS and blk-crypto both support.
*/
if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS &&
key_size == UFS_CRYPTO_KEY_SIZE_256)
return BLK_ENCRYPTION_MODE_AES_256_XTS;
return BLK_ENCRYPTION_MODE_INVALID;
}
/**
 * ufshcd_hba_init_crypto_spec - Read crypto capabilities, init crypto fields in hba
 * @hba: Per adapter instance
 * @ksm_ops: keyslot manager ops to use for this host's keyslot manager
*
* Return: 0 if crypto was initialized or is not supported, else a -errno value.
*/
int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
const struct keyslot_mgmt_ll_ops *ksm_ops)
{
int cap_idx = 0;
int err = 0;
unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
enum blk_crypto_mode_num blk_mode_num;
/* Default to disabling crypto */
hba->caps &= ~UFSHCD_CAP_CRYPTO;
/* Return 0 if crypto support isn't present */
if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) ||
(hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO))
goto out;
/*
* Crypto Capabilities should never be 0, because the
* config_array_ptr > 04h. So we use a 0 value to indicate that
 * crypto init failed and that crypto can't be enabled.
*/
hba->crypto_capabilities.reg_val =
cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
hba->crypto_cfg_register =
(u32)hba->crypto_capabilities.config_array_ptr * 0x100;
hba->crypto_cap_array =
devm_kcalloc(hba->dev,
hba->crypto_capabilities.num_crypto_cap,
sizeof(hba->crypto_cap_array[0]),
GFP_KERNEL);
if (!hba->crypto_cap_array) {
err = -ENOMEM;
goto out;
}
memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported));
/*
* Store all the capabilities now so that we don't need to repeatedly
* access the device each time we want to know its capabilities
*/
for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
cap_idx++) {
hba->crypto_cap_array[cap_idx].reg_val =
cpu_to_le32(ufshcd_readl(hba,
REG_UFS_CRYPTOCAP +
cap_idx * sizeof(__le32)));
blk_mode_num = ufshcd_blk_crypto_mode_num_for_alg_dusize(
hba->crypto_cap_array[cap_idx].algorithm_id,
hba->crypto_cap_array[cap_idx].key_size);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
crypto_modes_supported[blk_mode_num] |=
hba->crypto_cap_array[cap_idx].sdus_mask * 512;
}
ufshcd_clear_all_keyslots(hba);
hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops,
crypto_modes_supported, hba);
if (!hba->ksm) {
err = -ENOMEM;
goto out_free_caps;
}
return 0;
out_free_caps:
devm_kfree(hba->dev, hba->crypto_cap_array);
out:
/* Indicate that init failed by setting crypto_capabilities to 0 */
hba->crypto_capabilities.reg_val = 0;
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_init_crypto_spec);
void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q)
{
if (!ufshcd_hba_is_crypto_supported(hba) || !q)
return;
q->ksm = hba->ksm;
}
EXPORT_SYMBOL_GPL(ufshcd_crypto_setup_rq_keyslot_manager_spec);
void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q)
{
keyslot_manager_destroy(hba->ksm);
}
EXPORT_SYMBOL_GPL(ufshcd_crypto_destroy_rq_keyslot_manager_spec);
int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
struct bio_crypt_ctx *bc;
if (!bio_crypt_should_process(cmd->request)) {
lrbp->crypto_enable = false;
return 0;
}
bc = cmd->request->bio->bi_crypt_context;
if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) {
/*
* Upper layer asked us to do inline encryption
* but that isn't enabled, so we fail this request.
*/
return -EINVAL;
}
if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot))
return -EINVAL;
lrbp->crypto_enable = true;
lrbp->crypto_key_slot = bc->bc_keyslot;
lrbp->data_unit_num = bc->bc_dun[0];
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec);
/* Crypto Variant Ops Support */
void ufshcd_crypto_enable(struct ufs_hba *hba)
{
if (hba->crypto_vops && hba->crypto_vops->enable)
return hba->crypto_vops->enable(hba);
return ufshcd_crypto_enable_spec(hba);
}
void ufshcd_crypto_disable(struct ufs_hba *hba)
{
if (hba->crypto_vops && hba->crypto_vops->disable)
return hba->crypto_vops->disable(hba);
return ufshcd_crypto_disable_spec(hba);
}
int ufshcd_hba_init_crypto(struct ufs_hba *hba)
{
if (hba->crypto_vops && hba->crypto_vops->hba_init_crypto)
return hba->crypto_vops->hba_init_crypto(hba,
&ufshcd_ksm_ops);
return ufshcd_hba_init_crypto_spec(hba, &ufshcd_ksm_ops);
}
void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q)
{
if (hba->crypto_vops && hba->crypto_vops->setup_rq_keyslot_manager)
return hba->crypto_vops->setup_rq_keyslot_manager(hba, q);
return ufshcd_crypto_setup_rq_keyslot_manager_spec(hba, q);
}
void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q)
{
if (hba->crypto_vops && hba->crypto_vops->destroy_rq_keyslot_manager)
return hba->crypto_vops->destroy_rq_keyslot_manager(hba, q);
return ufshcd_crypto_destroy_rq_keyslot_manager_spec(hba, q);
}
int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
if (hba->crypto_vops && hba->crypto_vops->prepare_lrbp_crypto)
return hba->crypto_vops->prepare_lrbp_crypto(hba, cmd, lrbp);
return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp);
}
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
if (hba->crypto_vops && hba->crypto_vops->complete_lrbp_crypto)
return hba->crypto_vops->complete_lrbp_crypto(hba, cmd, lrbp);
return 0;
}
void ufshcd_crypto_debug(struct ufs_hba *hba)
{
if (hba->crypto_vops && hba->crypto_vops->debug)
hba->crypto_vops->debug(hba);
}
int ufshcd_crypto_suspend(struct ufs_hba *hba,
enum ufs_pm_op pm_op)
{
if (hba->crypto_vops && hba->crypto_vops->suspend)
return hba->crypto_vops->suspend(hba, pm_op);
return 0;
}
int ufshcd_crypto_resume(struct ufs_hba *hba,
enum ufs_pm_op pm_op)
{
if (hba->crypto_vops && hba->crypto_vops->resume)
return hba->crypto_vops->resume(hba, pm_op);
return 0;
}
void ufshcd_crypto_set_vops(struct ufs_hba *hba,
struct ufs_hba_crypto_variant_ops *crypto_vops)
{
hba->crypto_vops = crypto_vops;
}

@ -0,0 +1,167 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef _UFSHCD_CRYPTO_H
#define _UFSHCD_CRYPTO_H
#ifdef CONFIG_SCSI_UFS_CRYPTO
#include <linux/keyslot-manager.h>
#include "ufshcd.h"
#include "ufshci.h"
static inline int ufshcd_num_keyslots(struct ufs_hba *hba)
{
return hba->crypto_capabilities.config_count + 1;
}
static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, unsigned int slot)
{
/*
* The actual number of configurations supported is (CFGC+1), so slot
* numbers range from 0 to config_count inclusive.
*/
return slot < ufshcd_num_keyslots(hba);
}
static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba)
{
return hba->crypto_capabilities.reg_val != 0;
}
static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_CRYPTO;
}
/* Functions implementing UFSHCI v2.1 specification behaviour */
int ufshcd_crypto_cap_find(struct ufs_hba *hba,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size);
int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
void ufshcd_crypto_enable_spec(struct ufs_hba *hba);
void ufshcd_crypto_disable_spec(struct ufs_hba *hba);
struct keyslot_mgmt_ll_ops;
int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
const struct keyslot_mgmt_ll_ops *ksm_ops);
void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q);
void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q);
static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
{
return lrbp->crypto_enable;
}
/* Crypto Variant Ops Support */
void ufshcd_crypto_enable(struct ufs_hba *hba);
void ufshcd_crypto_disable(struct ufs_hba *hba);
int ufshcd_hba_init_crypto(struct ufs_hba *hba);
void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q);
void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q);
int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
void ufshcd_crypto_debug(struct ufs_hba *hba);
int ufshcd_crypto_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op);
int ufshcd_crypto_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op);
void ufshcd_crypto_set_vops(struct ufs_hba *hba,
struct ufs_hba_crypto_variant_ops *crypto_vops);
#else /* CONFIG_SCSI_UFS_CRYPTO */
static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba,
unsigned int slot)
{
return false;
}
static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba)
{
return false;
}
static inline bool ufshcd_is_crypto_enabled(struct ufs_hba *hba)
{
return false;
}
static inline void ufshcd_crypto_enable(struct ufs_hba *hba) { }
static inline void ufshcd_crypto_disable(struct ufs_hba *hba) { }
static inline int ufshcd_hba_init_crypto(struct ufs_hba *hba)
{
return 0;
}
static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q) { }
static inline void ufshcd_crypto_destroy_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q) { }
static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
return 0;
}
static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
{
return false;
}
static inline int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
return 0;
}
static inline void ufshcd_crypto_debug(struct ufs_hba *hba) { }
static inline int ufshcd_crypto_suspend(struct ufs_hba *hba,
enum ufs_pm_op pm_op)
{
return 0;
}
static inline int ufshcd_crypto_resume(struct ufs_hba *hba,
enum ufs_pm_op pm_op)
{
return 0;
}
static inline void ufshcd_crypto_set_vops(struct ufs_hba *hba,
struct ufs_hba_crypto_variant_ops *crypto_vops) { }
#endif /* CONFIG_SCSI_UFS_CRYPTO */
#endif /* _UFSHCD_CRYPTO_H */

@ -197,6 +197,7 @@ static void ufshcd_update_uic_error_cnt(struct ufs_hba *hba, u32 reg, int type)
break;
}
}
#include "ufshcd-crypto.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@ -918,6 +919,8 @@ static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
__ufshcd_print_host_regs(hba, false);
ufshcd_crypto_debug(hba);
}
static
@ -1409,6 +1412,11 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
u32 val = CONTROLLER_ENABLE;
if (ufshcd_hba_is_crypto_supported(hba)) {
ufshcd_crypto_enable(hba);
val |= CRYPTO_GENERAL_ENABLE;
}
ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
@ -3391,9 +3399,23 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
dword_0 |= UTP_REQ_DESC_INT_CMD;
/* Transfer request descriptor header fields */
if (ufshcd_lrbp_crypto_enabled(lrbp)) {
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
dword_0 |= lrbp->crypto_key_slot;
req_desc->header.dword_1 =
cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
req_desc->header.dword_3 =
cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
#endif /* CONFIG_SCSI_UFS_CRYPTO */
} else {
/* dword_1 and dword_3 are reserved, hence they are set to 0 */
req_desc->header.dword_1 = 0;
req_desc->header.dword_3 = 0;
}
req_desc->header.dword_0 = cpu_to_le32(dword_0);
/* dword_1 is reserved, hence it is set to 0 */
req_desc->header.dword_1 = 0;
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
@ -3401,8 +3423,6 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
*/
req_desc->header.dword_2 =
cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
/* dword_3 is reserved, hence it is set to 0 */
req_desc->header.dword_3 = 0;
req_desc->prd_table_length = 0;
@ -3780,6 +3800,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
lrbp->req_abort_skip = false;
err = ufshcd_comp_scsi_upiu(hba, lrbp);
@ -3843,6 +3870,9 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
lrbp->task_tag = tag;
lrbp->lun = 0; /* device management cmd is not specific to any LUN */
lrbp->intr_cmd = true; /* No interrupt aggregation */
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
lrbp->crypto_enable = false; /* No crypto operations */
#endif
hba->dev_cmd.type = cmd_type;
return ufshcd_comp_devman_upiu(hba, lrbp);
@ -5696,6 +5726,8 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
int err;
ufshcd_crypto_disable(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
@ -6067,8 +6099,8 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
@ -6080,6 +6112,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
sdev->use_rpm_auto = 1;
ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
return 0;
}
@ -6091,6 +6124,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
struct request_queue *q = sdev->request_queue;
hba = shost_priv(sdev->host);
/* Drop the reference as it won't be needed anymore */
@ -6101,6 +6135,8 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
hba->sdev_ufs_device = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
ufshcd_crypto_destroy_rq_keyslot_manager(hba, q);
}
/**
@ -6376,6 +6412,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
clear_bit_unlock(index, &hba->lrb_in_use);
lrbp->complete_time_stamp = ktime_get();
update_req_stats(hba, lrbp);
ufshcd_complete_lrbp_crypto(hba, cmd, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
@ -10105,6 +10142,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
ret = ufshcd_crypto_suspend(hba, pm_op);
if (ret)
goto out;
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@ -10225,6 +10266,7 @@ enable_gating:
hba->hibern8_on_idle.is_suspended = false;
hba->clk_gating.is_suspended = false;
ufshcd_release_all(hba);
ufshcd_crypto_resume(hba, pm_op);
out:
hba->pm_op_in_progress = 0;
@ -10248,9 +10290,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
enum uic_link_state old_link_state;
enum ufs_dev_pwr_mode old_pwr_mode;
hba->pm_op_in_progress = 1;
old_link_state = hba->uic_link_state;
old_pwr_mode = hba->curr_dev_pwr_mode;
ufshcd_hba_vreg_set_hpm(hba);
/* Make sure clocks are enabled before accessing controller */
@ -10327,6 +10371,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto set_old_link_state;
}
ret = ufshcd_crypto_resume(hba, pm_op);
if (ret)
goto set_old_dev_pwr_mode;
if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
ufshcd_enable_auto_bkops(hba);
else
@ -10347,6 +10395,9 @@ skip_dev_ops:
ufshcd_release_all(hba);
goto out;
set_old_dev_pwr_mode:
if (old_pwr_mode != hba->curr_dev_pwr_mode)
ufshcd_set_dev_pwr_mode(hba, old_pwr_mode);
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
if (ufshcd_is_link_hibern8(hba) &&
@ -11177,6 +11228,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (hba->force_g4)
hba->reinit_g4_rate_A = true;
/* Init crypto */
err = ufshcd_hba_init_crypto(hba);
if (err) {
dev_err(hba->dev, "crypto setup failed\n");
goto out_remove_scsi_host;
}
/* Host controller enable */
err = ufshcd_hba_enable(hba);

@ -197,6 +197,9 @@ struct ufs_pm_lvl_states {
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
* @issue_time_stamp: time stamp for debug purposes
* @complete_time_stamp: time stamp for statistics
* @crypto_enable: whether or not the request needs inline crypto operations
* @crypto_key_slot: the key slot to use for inline crypto
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
@ -221,6 +224,11 @@ struct ufshcd_lrb {
bool intr_cmd;
ktime_t issue_time_stamp;
ktime_t complete_time_stamp;
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
bool crypto_enable;
u8 crypto_key_slot;
u64 data_unit_num;
#endif /* CONFIG_SCSI_UFS_CRYPTO */
bool req_abort_skip;
};
@ -302,6 +310,8 @@ struct ufs_pwr_mode_info {
struct ufs_pa_layer_attr info;
};
union ufs_crypto_cfg_entry;
/**
* struct ufs_hba_variant_ops - variant specific callbacks
* @init: called when the driver is initialized
@ -332,6 +342,7 @@ struct ufs_pwr_mode_info {
* scale down
* @set_bus_vote: called to vote for the required bus bandwidth
* @phy_initialization: used to initialize phys
* @program_key: program an inline encryption key into a keyslot
*/
struct ufs_hba_variant_ops {
int (*init)(struct ufs_hba *);
@ -367,6 +378,8 @@ struct ufs_hba_variant_ops {
void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
void (*remove_debugfs)(struct ufs_hba *hba);
#endif
int (*program_key)(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot);
};
/**
@ -388,6 +401,28 @@ struct ufs_hba_variant {
struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
};
struct keyslot_mgmt_ll_ops;
struct ufs_hba_crypto_variant_ops {
void (*setup_rq_keyslot_manager)(struct ufs_hba *hba,
struct request_queue *q);
void (*destroy_rq_keyslot_manager)(struct ufs_hba *hba,
struct request_queue *q);
int (*hba_init_crypto)(struct ufs_hba *hba,
const struct keyslot_mgmt_ll_ops *ksm_ops);
void (*enable)(struct ufs_hba *hba);
void (*disable)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *hba, enum ufs_pm_op pm_op);
int (*resume)(struct ufs_hba *hba, enum ufs_pm_op pm_op);
int (*debug)(struct ufs_hba *hba);
int (*prepare_lrbp_crypto)(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
int (*complete_lrbp_crypto)(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp);
void *priv;
};
/* clock gating state */
enum clk_gating_state {
CLKS_OFF,
@ -749,6 +784,10 @@ enum ufshcd_card_state {
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
* @scsi_block_reqs_cnt: reference counting for scsi block requests
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
* @crypto_cfg_register: Start of the crypto cfg array
* @ksm: the keyslot manager tied to this hba
*/
struct ufs_hba {
void __iomem *mmio_base;
@ -794,6 +833,7 @@ struct ufs_hba {
struct ufs_hba_variant *var;
void *priv;
size_t sg_entry_size;
const struct ufs_hba_crypto_variant_ops *crypto_vops;
unsigned int irq;
bool is_irq_enabled;
bool crash_on_err;
@ -883,6 +923,12 @@ struct ufs_hba {
/* Auto hibern8 support is broken */
#define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 UFS_BIT(15)
/*
* This quirk needs to be enabled if the host controller advertises
* inline encryption support but it doesn't work correctly.
*/
#define UFSHCD_QUIRK_BROKEN_CRYPTO UFS_BIT(16)
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
wait_queue_head_t tm_wq;
@ -995,6 +1041,11 @@ struct ufs_hba {
* in hibern8 then enable this cap.
*/
#define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7)
/*
* This capability allows the host controller driver to use the
* inline crypto engine, if it is present
*/
#define UFSHCD_CAP_CRYPTO (1 << 8)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@ -1027,6 +1078,14 @@ struct ufs_hba {
bool force_g4;
/* distinguish between resume and restore */
bool restore;
#ifdef CONFIG_SCSI_UFS_CRYPTO
/* crypto */
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
u32 crypto_cfg_register;
struct keyslot_manager *ksm;
#endif /* CONFIG_SCSI_UFS_CRYPTO */
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)

@ -347,6 +347,61 @@ enum {
INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
/* CCAP - Crypto Capability 100h */
union ufs_crypto_capabilities {
__le32 reg_val;
struct {
u8 num_crypto_cap;
u8 config_count;
u8 reserved;
u8 config_array_ptr;
};
};
enum ufs_crypto_key_size {
UFS_CRYPTO_KEY_SIZE_INVALID = 0x0,
UFS_CRYPTO_KEY_SIZE_128 = 0x1,
UFS_CRYPTO_KEY_SIZE_192 = 0x2,
UFS_CRYPTO_KEY_SIZE_256 = 0x3,
UFS_CRYPTO_KEY_SIZE_512 = 0x4,
};
enum ufs_crypto_alg {
UFS_CRYPTO_ALG_AES_XTS = 0x0,
UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
UFS_CRYPTO_ALG_AES_ECB = 0x2,
UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
};
/* x-CRYPTOCAP - Crypto Capability X */
union ufs_crypto_cap_entry {
__le32 reg_val;
struct {
u8 algorithm_id;
u8 sdus_mask; /* Supported data unit size mask */
u8 key_size;
u8 reserved;
};
};
#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
#define UFS_CRYPTO_KEY_MAX_SIZE 64
/* x-CRYPTOCFG - Crypto Configuration X */
union ufs_crypto_cfg_entry {
__le32 reg_val[32];
struct {
u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
u8 data_unit_size;
u8 crypto_cap_idx;
u8 reserved_1;
u8 config_enable;
u8 reserved_multi_host;
u8 reserved_2;
u8 vsb[2];
u8 reserved_3[56];
};
};
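/*
 * Note (illustrative): for AES-256-XTS, ufshcd_crypto_cfg_entry_write_key()
 * places the first 32 key bytes at crypto_key[0] and the second 32 bytes at
 * crypto_key[UFS_CRYPTO_KEY_MAX_SIZE / 2], i.e. byte offset 32.
 */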
/*
* Request Descriptor Definitions
*/
@ -368,6 +423,7 @@ enum {
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
UTP_REQ_DESC_INT_CMD = 0x01000000,
UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */

@ -46,6 +46,7 @@
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@ -3172,6 +3173,8 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
if (wbc) {
wbc_init_bio(wbc, bio);
wbc_account_io(wbc, bh->b_page, bh->b_size);

@ -15,3 +15,9 @@ config FS_ENCRYPTION
efficient since it avoids caching the encrypted and
decrypted pages in the page cache. Currently Ext4,
F2FS and UBIFS make use of this feature.
config FS_ENCRYPTION_INLINE_CRYPT
bool "Enable fscrypt to use inline crypto"
depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION
help
Enable fscrypt to use inline encryption hardware if available.

@ -10,3 +10,4 @@ fscrypto-y := crypto.o \
policy.o
fscrypto-$(CONFIG_BLOCK) += bio.o
fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o

@ -46,26 +46,35 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode);
struct page *ciphertext_page;
struct bio *bio;
int ret, err = 0;
ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
if (!ciphertext_page)
return -ENOMEM;
if (inlinecrypt) {
ciphertext_page = ZERO_PAGE(0);
} else {
ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
if (!ciphertext_page)
return -ENOMEM;
}
while (len--) {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
blocksize, 0, GFP_NOFS);
if (err)
goto errout;
if (!inlinecrypt) {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
blocksize, 0, GFP_NOFS);
if (err)
goto errout;
}
bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
}
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@ -87,7 +96,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}
err = 0;
errout:
fscrypt_free_bounce_page(ciphertext_page);
if (!inlinecrypt)
fscrypt_free_bounce_page(ciphertext_page);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

@ -96,7 +96,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
int res = 0;
if (WARN_ON_ONCE(len <= 0))

@ -40,7 +40,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname,
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
int res;
@ -93,7 +93,7 @@ static int fname_decrypt(struct inode *inode,
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
int res;

@ -13,12 +13,14 @@
#include <linux/fscrypt.h>
#include <crypto/hash.h>
#include <linux/bio-crypt-ctx.h>
#define CONST_STRLEN(str) (sizeof(str) - 1)
#define FS_KEY_DERIVATION_NONCE_SIZE 16
#define FSCRYPT_MIN_KEY_SIZE 16
#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE 128
#define FSCRYPT_CONTEXT_V1 1
#define FSCRYPT_CONTEXT_V2 2
@ -151,6 +153,20 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;
/**
* struct fscrypt_prepared_key - a key prepared for actual encryption/decryption
* @tfm: crypto API transform object
* @blk_key: key for blk-crypto
*
* Normally only one of the fields will be non-NULL.
*/
struct fscrypt_prepared_key {
struct crypto_skcipher *tfm;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
struct fscrypt_blk_crypto_key *blk_key;
#endif
};
/*
* fscrypt_info - the "encryption key" for an inode
*
@ -159,15 +175,20 @@ struct fscrypt_symlink_data {
* inode is evicted.
*/
struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
/* The key in a form prepared for actual encryption/decryption */
struct fscrypt_prepared_key ci_key;
/* True if the key should be freed when this fscrypt_info is freed */
bool ci_owns_key;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
/*
* True if this inode will use inline encryption (blk-crypto) instead of
* the traditional filesystem-layer encryption.
*/
bool ci_inlinecrypt;
#endif
/*
* Encryption mode used for this inode. It corresponds to either the
* contents or filenames encryption mode, depending on the inode type.
@ -192,7 +213,7 @@ struct fscrypt_info {
/*
* If non-NULL, then encryption is done using the master key directly
* and ci_ctfm will equal ci_direct_key->dk_ctfm.
* and ci_key will equal ci_direct_key->dk_key.
*/
struct fscrypt_direct_key *ci_direct_key;
@ -257,6 +278,7 @@ union fscrypt_iv {
u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
};
u8 raw[FSCRYPT_MAX_IV_SIZE];
__le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)];
};
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
@ -296,6 +318,94 @@ extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci);
static inline bool
fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
{
return ci->ci_inlinecrypt;
}
extern int fscrypt_prepare_inline_crypt_key(
struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
unsigned int raw_key_size,
const struct fscrypt_info *ci);
extern void fscrypt_destroy_inline_crypt_key(
struct fscrypt_prepared_key *prep_key);
extern int fscrypt_derive_raw_secret(struct super_block *sb,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *raw_secret,
unsigned int raw_secret_size);
/*
* Check whether the crypto transform or blk-crypto key has been allocated in
* @prep_key, depending on which encryption implementation the file will use.
*/
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
const struct fscrypt_info *ci)
{
/*
* The READ_ONCE() here pairs with the smp_store_release() in
* fscrypt_prepare_key(). (This only matters for the per-mode keys,
* which are shared by multiple inodes.)
*/
if (fscrypt_using_inline_encryption(ci))
return READ_ONCE(prep_key->blk_key) != NULL;
return READ_ONCE(prep_key->tfm) != NULL;
}
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
}
static inline bool fscrypt_using_inline_encryption(
const struct fscrypt_info *ci)
{
return false;
}
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
const struct fscrypt_info *ci)
{
WARN_ON(1);
return -EOPNOTSUPP;
}
static inline void
fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
}
static inline int fscrypt_derive_raw_secret(struct super_block *sb,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *raw_secret,
unsigned int raw_secret_size)
{
fscrypt_warn(NULL,
"kernel built without support for hardware-wrapped keys");
return -EOPNOTSUPP;
}
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
const struct fscrypt_info *ci)
{
return READ_ONCE(prep_key->tfm) != NULL;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
/* keyring.c */
/*
@ -312,8 +422,15 @@ struct fscrypt_master_key_secret {
/* Size of the raw key in bytes. Set even if ->raw isn't set. */
u32 size;
/* For v1 policy keys: the raw key. Wiped for v2 policy keys. */
u8 raw[FSCRYPT_MAX_KEY_SIZE];
/* True if the key in ->raw is a hardware-wrapped key. */
bool is_hw_wrapped;
/*
* For v1 policy keys: the raw key. Wiped for v2 policy keys, unless
* ->is_hw_wrapped is true, in which case this contains the wrapped key
* rather than the key with which 'hkdf' was keyed.
*/
u8 raw[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE];
} __randomize_layout;
@ -385,14 +502,11 @@ struct fscrypt_master_key {
struct list_head mk_decrypted_inodes;
spinlock_t mk_decrypted_inodes_lock;
/* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
/* Per-mode keys for DIRECT_KEY policies, allocated on-demand */
struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1];
/*
* Crypto API transforms for filesystem-layer implementation of
* IV_INO_LBLK_64 policies, allocated on-demand.
*/
struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
/* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */
struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1];
} __randomize_layout;
@ -449,17 +563,22 @@ struct fscrypt_mode {
int keysize;
int ivsize;
int logged_impl_name;
enum blk_crypto_mode_num blk_crypto_mode;
};
extern struct fscrypt_mode fscrypt_modes[];
static inline bool
fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode)
{
return mode->ivsize >= offsetofend(union fscrypt_iv, nonce);
}
extern struct crypto_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode);
extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
const struct fscrypt_info *ci);
extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key);
extern int fscrypt_set_derived_key(struct fscrypt_info *ci,
const u8 *derived_key);

@ -0,0 +1,353 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Inline encryption support for fscrypt
*
* Copyright 2019 Google LLC
*/
/*
* With "inline encryption", the block layer handles the decryption/encryption
* as part of the bio, instead of the filesystem doing the crypto itself via
* crypto API. See Documentation/block/inline-encryption.rst. fscrypt still
* provides the key and IV to use.
*/
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>
#include <linux/overflow.h>
#include "fscrypt_private.h"
struct fscrypt_blk_crypto_key {
struct blk_crypto_key base;
int num_devs;
struct request_queue *devs[];
};
/* Enable inline encryption for this file if supported. */
void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
/* The file must need contents encryption, not filenames encryption */
if (!S_ISREG(inode->i_mode))
return;
/* blk-crypto must implement the needed encryption algorithm */
if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
return;
/* The filesystem must be mounted with -o inlinecrypt */
if (!sb->s_cop->inline_crypt_enabled ||
!sb->s_cop->inline_crypt_enabled(sb))
return;
ci->ci_inlinecrypt = true;
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
unsigned int raw_key_size,
const struct fscrypt_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
int num_devs = 1;
int queue_refs = 0;
struct fscrypt_blk_crypto_key *blk_key;
int err;
int i;
if (sb->s_cop->get_num_devices)
num_devs = sb->s_cop->get_num_devices(sb);
if (WARN_ON(num_devs < 1))
return -EINVAL;
blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
if (!blk_key)
return -ENOMEM;
blk_key->num_devs = num_devs;
if (num_devs == 1)
blk_key->devs[0] = bdev_get_queue(sb->s_bdev);
else
sb->s_cop->get_devices(sb, blk_key->devs);
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);
err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
crypto_mode, sb->s_blocksize);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
}
/*
* We have to start using blk-crypto on all the filesystem's devices.
* We also have to save all the request_queue's for later so that the
* key can be evicted from them. This is needed because some keys
* aren't destroyed until after the filesystem has been unmounted
* (namely, the per-mode keys in struct fscrypt_master_key).
*/
for (i = 0; i < num_devs; i++) {
if (!blk_get_queue(blk_key->devs[i])) {
fscrypt_err(inode, "couldn't get request_queue");
err = -EAGAIN;
goto fail;
}
queue_refs++;
err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize,
blk_key->devs[i]);
if (err) {
fscrypt_err(inode,
"error %d starting to use blk-crypto", err);
goto fail;
}
}
/*
* Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters
* for the per-mode keys, which are shared by multiple inodes.)
*/
smp_store_release(&prep_key->blk_key, blk_key);
return 0;
fail:
for (i = 0; i < queue_refs; i++)
blk_put_queue(blk_key->devs[i]);
kzfree(blk_key);
return err;
}
void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
int i;
if (blk_key) {
for (i = 0; i < blk_key->num_devs; i++) {
blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
blk_put_queue(blk_key->devs[i]);
}
kzfree(blk_key);
}
}
int fscrypt_derive_raw_secret(struct super_block *sb,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *raw_secret, unsigned int raw_secret_size)
{
struct request_queue *q;
q = sb->s_bdev->bd_queue;
if (!q->ksm)
return -EOPNOTSUPP;
return keyslot_manager_derive_raw_secret(q->ksm,
wrapped_key, wrapped_key_size,
raw_secret, raw_secret_size);
}
/**
* fscrypt_inode_uses_inline_crypto - test whether an inode uses inline
* encryption
* @inode: an inode
*
* Return: true if the inode requires file contents encryption and if the
* encryption should be done in the block layer via blk-crypto rather
* than in the filesystem layer.
*/
bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);
/**
* fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer
* encryption
* @inode: an inode
*
* Return: true if the inode requires file contents encryption and if the
* encryption should be done in the filesystem layer rather than in the
* block layer via blk-crypto.
*/
bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
{
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
!inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto);
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
union fscrypt_iv iv;
int i;
fscrypt_generate_iv(&iv, lblk_num, ci);
BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
dun[i] = le64_to_cpu(iv.dun[i]);
}
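
For illustration, the helper above just reinterprets the generated IV as an array of little-endian 64-bit words and zero-pads the rest of the DUN. A standalone sketch of that conversion (not part of the patch), assuming a 16-byte IV as with AES-256-XTS in fscrypt_modes above:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAX_IV_SIZE 32
#define DUN_WORDS   (MAX_IV_SIZE / sizeof(uint64_t))

/* Interpret the IV as little-endian 64-bit words, zero-padding the remaining
 * DUN words, mirroring fscrypt_generate_dun() above. */
static void iv_to_dun(const uint8_t iv[], size_t ivsize, uint64_t dun[DUN_WORDS])
{
	size_t i, b;

	memset(dun, 0, DUN_WORDS * sizeof(dun[0]));
	for (i = 0; i < ivsize / sizeof(uint64_t); i++) {
		uint64_t w = 0;

		for (b = 0; b < sizeof(uint64_t); b++)	/* little-endian load */
			w |= (uint64_t)iv[i * 8 + b] << (8 * b);
		dun[i] = w;
	}
}

int main(void)
{
	/* Hypothetical 16-byte IV with lblk_num = 5 in the low word. */
	uint8_t iv[16] = { 5 };
	uint64_t dun[DUN_WORDS];

	iv_to_dun(iv, sizeof(iv), dun);
	printf("dun[0]=%llu dun[1]=%llu\n",
	       (unsigned long long)dun[0], (unsigned long long)dun[1]);
	return 0;
}
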
/**
* fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption
* @bio: a bio which will eventually be submitted to the file
* @inode: the file's inode
* @first_lblk: the first file logical block number in the I/O
* @gfp_mask: memory allocation flags - these must be a waiting mask so that
* bio_crypt_set_ctx can't fail.
*
* If the contents of the file should be encrypted (or decrypted) with inline
* encryption, then assign the appropriate encryption context to the bio.
*
* Normally the bio should be newly allocated (i.e. no pages added yet), as
* otherwise fscrypt_mergeable_bio() won't work as intended.
*
* The encryption context will be freed automatically when the bio is freed.
*
* This function also handles setting bi_skip_dm_default_key when needed.
*/
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
{
const struct fscrypt_info *ci = inode->i_crypt_info;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (fscrypt_inode_should_skip_dm_default_key(inode))
bio_set_skip_dm_default_key(bio);
if (!fscrypt_inode_uses_inline_crypto(inode))
return;
fscrypt_generate_dun(ci, first_lblk, dun);
bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
const struct inode **inode_ret,
u64 *lblk_num_ret)
{
struct page *page = bh->b_page;
const struct address_space *mapping;
const struct inode *inode;
/*
* The ext4 journal (jbd2) can submit a buffer_head it directly created
* for a non-pagecache page. fscrypt doesn't care about these.
*/
mapping = page_mapping(page);
if (!mapping)
return false;
inode = mapping->host;
*inode_ret = inode;
*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
(bh_offset(bh) >> inode->i_blkbits);
return true;
}
/**
* fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline
* encryption
* @bio: a bio which will eventually be submitted to the file
* @first_bh: the first buffer_head for which I/O will be submitted
* @gfp_mask: memory allocation flags
*
* Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
* of an inode and block number directly.
*/
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
const struct buffer_head *first_bh,
gfp_t gfp_mask)
{
const struct inode *inode;
u64 first_lblk;
if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
/**
* fscrypt_mergeable_bio - test whether data can be added to a bio
* @bio: the bio being built up
* @inode: the inode for the next part of the I/O
* @next_lblk: the next file logical block number in the I/O
*
* When building a bio that may contain data requiring inline encryption
* (or decryption) via fscrypt, filesystems should call this function to
* ensure that the resulting bio contains only logically contiguous data.
* This will return false if the next part of the I/O cannot be merged with the
* bio because either the encryption key would be different or the encryption
* data unit numbers would be discontiguous.
*
* fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
*
* This function also returns false if the next part of the I/O would need to
* have a different value for the bi_skip_dm_default_key flag.
*
* Return: true iff the I/O is mergeable
*/
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
u64 next_lblk)
{
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
return false;
if (bio_should_skip_dm_default_key(bio) !=
fscrypt_inode_should_skip_dm_default_key(inode))
return false;
if (!bc)
return true;
/*
* Comparing the key pointers is good enough, as all I/O for each key
* uses the same pointer. I.e., there's currently no need to support
* merging requests where the keys are the same but the pointers differ.
*/
if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base)
return false;
fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
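
Leaving aside the bi_skip_dm_default_key check, the merge decision above boils down to "same key pointer and logically contiguous DUNs". A standalone sketch of that check (not part of the patch), with a simplified single-word DUN:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_key { int dummy; };

/* Simplified version of the checks in fscrypt_mergeable_bio(): data may be
 * appended only if it uses the same key and its DUN continues exactly where
 * the data already in the bio ends. */
static bool mergeable(const struct demo_key *bio_key, uint64_t bio_dun,
		      unsigned int bio_bytes, unsigned int data_unit_size,
		      const struct demo_key *next_key, uint64_t next_dun)
{
	if (bio_key != next_key)
		return false;
	return bio_dun + bio_bytes / data_unit_size == next_dun;
}

int main(void)
{
	struct demo_key k;

	/* 8 KiB already in the bio, 4 KiB data units, starting at DUN 100. */
	printf("contiguous: %d\n", mergeable(&k, 100, 8192, 4096, &k, 102));
	printf("gap:        %d\n", mergeable(&k, 100, 8192, 4096, &k, 103));
	return 0;
}
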
/**
* fscrypt_mergeable_bio_bh - test whether data can be added to a bio
* @bio: the bio being built up
* @next_bh: the next buffer_head for which I/O will be submitted
*
* Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
* an inode and block number directly.
*
* Return: true iff the I/O is mergeable
*/
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh)
{
const struct inode *inode;
u64 next_lblk;
if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
return !bio->bi_crypt_context &&
!bio_should_skip_dm_default_key(bio);
return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

@ -44,8 +44,8 @@ static void free_master_key(struct fscrypt_master_key *mk)
wipe_master_key_secret(&mk->mk_secret);
for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
crypto_free_skcipher(mk->mk_direct_tfms[i]);
crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]);
fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]);
}
key_put(mk->mk_users);
@ -469,8 +469,10 @@ static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep)
{
const struct fscrypt_provisioning_key_payload *payload = prep->data;
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < FSCRYPT_MAX_KEY_SIZE);
if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
prep->datalen > sizeof(*payload) + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE)
return -EINVAL;
if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
@ -567,6 +569,8 @@ out_put:
key_ref_put(ref);
return err;
}
/* Size of software "secret" derived from hardware-wrapped key */
#define RAW_SECRET_SIZE 32
/*
* Add a master encryption key to the filesystem, causing all files which were
@ -598,6 +602,9 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
struct fscrypt_add_key_arg __user *uarg = _uarg;
struct fscrypt_add_key_arg arg;
struct fscrypt_master_key_secret secret;
u8 _kdf_key[RAW_SECRET_SIZE];
u8 *kdf_key;
unsigned int kdf_key_size;
int err;
if (copy_from_user(&arg, uarg, sizeof(arg)))
@ -609,6 +616,9 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
return -EINVAL;
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE <
FSCRYPT_MAX_KEY_SIZE);
memset(&secret, 0, sizeof(secret));
if (arg.key_id) {
@ -617,16 +627,20 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
if (err)
goto out_wipe_secret;
err = -EINVAL;
if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) &&
secret.size > FSCRYPT_MAX_KEY_SIZE)
goto out_wipe_secret;
} else {
if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
arg.raw_size >
((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ?
FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE))
return -EINVAL;
secret.size = arg.raw_size;
err = -EFAULT;
if (copy_from_user(secret.raw, uarg->raw, secret.size)) {
if (copy_from_user(secret.raw, uarg->raw, secret.size))
goto out_wipe_secret;
}
}
switch (arg.key_spec.type) {
@ -639,17 +653,36 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
err = -EACCES;
if (!capable(CAP_SYS_ADMIN))
goto out_wipe_secret;
err = -EINVAL;
if (arg.__flags)
goto out_wipe_secret;
break;
case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size);
if (err)
err = -EINVAL;
if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
goto out_wipe_secret;
if (arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) {
kdf_key = _kdf_key;
kdf_key_size = RAW_SECRET_SIZE;
err = fscrypt_derive_raw_secret(sb, secret.raw,
secret.size,
kdf_key, kdf_key_size);
if (err)
goto out_wipe_secret;
secret.is_hw_wrapped = true;
} else {
kdf_key = secret.raw;
kdf_key_size = secret.size;
}
err = fscrypt_init_hkdf(&secret.hkdf, kdf_key, kdf_key_size);
/*
* Now that the HKDF context is initialized, the raw key is no
* longer needed.
* Now that the HKDF context is initialized, the raw HKDF
* key is no longer needed.
*/
memzero_explicit(secret.raw, secret.size);
memzero_explicit(kdf_key, kdf_key_size);
if (err)
goto out_wipe_secret;
/* Calculate the key identifier and return it to userspace. */
err = fscrypt_hkdf_expand(&secret.hkdf,

@ -13,12 +13,13 @@
#include "fscrypt_private.h"
static struct fscrypt_mode available_modes[] = {
struct fscrypt_mode fscrypt_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
.cipher_str = "xts(aes)",
.keysize = 64,
.ivsize = 16,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
},
[FSCRYPT_MODE_AES_256_CTS] = {
.friendly_name = "AES-256-CTS-CBC",
@ -31,6 +32,7 @@ static struct fscrypt_mode available_modes[] = {
.cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.ivsize = 16,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
},
[FSCRYPT_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
@ -43,6 +45,7 @@ static struct fscrypt_mode available_modes[] = {
.cipher_str = "adiantum(xchacha12,aes)",
.keysize = 32,
.ivsize = 32,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM,
},
[FSCRYPT_MODE_PRIVATE] = {
.friendly_name = "ICE",
@ -56,10 +59,10 @@ select_encryption_mode(const union fscrypt_policy *policy,
const struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return &available_modes[fscrypt_policy_contents_mode(policy)];
return &fscrypt_modes[fscrypt_policy_contents_mode(policy)];
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return &available_modes[fscrypt_policy_fnames_mode(policy)];
return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)];
WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
inode->i_ino, (inode->i_mode & S_IFMT));
@ -67,9 +70,9 @@ select_encryption_mode(const union fscrypt_policy *policy,
}
/* Create a symmetric cipher object for the given encryption mode and key */
struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
const u8 *raw_key,
const struct inode *inode)
static struct crypto_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode)
{
struct crypto_skcipher *tfm;
int err;
@ -109,30 +112,61 @@ err_free_tfm:
return ERR_PTR(err);
}
/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
/*
* Prepare the crypto transform object or blk-crypto key in @prep_key, given the
* raw key, encryption mode, and a flag indicating which encryption implementation
* (fs-layer or blk-crypto) will be used.
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, unsigned int raw_key_size,
const struct fscrypt_info *ci)
{
struct crypto_skcipher *tfm;
tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
if (fscrypt_using_inline_encryption(ci))
return fscrypt_prepare_inline_crypt_key(prep_key,
raw_key, raw_key_size, ci);
if (WARN_ON(raw_key_size != ci->ci_mode->keysize))
return -EINVAL;
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/*
* Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters
* for the per-mode keys, which are shared by multiple inodes.)
*/
smp_store_release(&prep_key->tfm, tfm);
return 0;
}
ci->ci_ctfm = tfm;
/* Destroy a crypto transform object and/or blk-crypto key. */
void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
{
crypto_free_skcipher(prep_key->tfm);
fscrypt_destroy_inline_crypt_key(prep_key);
}
/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
{
ci->ci_owns_key = true;
return 0;
return fscrypt_prepare_key(&ci->ci_key, derived_key,
ci->ci_mode->keysize, ci);
}
static int setup_per_mode_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk,
struct crypto_skcipher **tfms,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
{
static DEFINE_MUTEX(mode_key_setup_mutex);
const struct inode *inode = ci->ci_inode;
const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
u8 mode_num = mode - available_modes;
struct crypto_skcipher *tfm, *prev_tfm;
const u8 mode_num = mode - fscrypt_modes;
struct fscrypt_prepared_key *prep_key;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
unsigned int hkdf_infolen = 0;
@ -141,39 +175,65 @@ static int setup_per_mode_key(struct fscrypt_info *ci,
if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
return -EINVAL;
/* pairs with cmpxchg() below */
tfm = READ_ONCE(tfms[mode_num]);
if (likely(tfm != NULL))
goto done;
BUILD_BUG_ON(sizeof(mode_num) != 1);
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
hkdf_info[hkdf_infolen++] = mode_num;
if (include_fs_uuid) {
memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
sizeof(sb->s_uuid));
hkdf_infolen += sizeof(sb->s_uuid);
prep_key = &keys[mode_num];
if (fscrypt_is_key_prepared(prep_key, ci)) {
ci->ci_key = *prep_key;
return 0;
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
return err;
tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
memzero_explicit(mode_key, mode->keysize);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* pairs with READ_ONCE() above */
prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
if (prev_tfm != NULL) {
crypto_free_skcipher(tfm);
tfm = prev_tfm;
mutex_lock(&mode_key_setup_mutex);
if (fscrypt_is_key_prepared(prep_key, ci))
goto done_unlock;
if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) {
int i;
if (!fscrypt_using_inline_encryption(ci)) {
fscrypt_warn(ci->ci_inode,
"Hardware-wrapped keys require inline encryption (-o inlinecrypt)");
err = -EINVAL;
goto out_unlock;
}
for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
if (fscrypt_is_key_prepared(&keys[i], ci)) {
fscrypt_warn(ci->ci_inode,
"Each hardware-wrapped key can only be used with one encryption mode");
err = -EINVAL;
goto out_unlock;
}
}
err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw,
mk->mk_secret.size, ci);
if (err)
goto out_unlock;
} else {
BUILD_BUG_ON(sizeof(mode_num) != 1);
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
hkdf_info[hkdf_infolen++] = mode_num;
if (include_fs_uuid) {
memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
sizeof(sb->s_uuid));
hkdf_infolen += sizeof(sb->s_uuid);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
goto out_unlock;
err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize,
ci);
memzero_explicit(mode_key, mode->keysize);
if (err)
goto out_unlock;
}
done:
ci->ci_ctfm = tfm;
return 0;
done_unlock:
ci->ci_key = *prep_key;
err = 0;
out_unlock:
mutex_unlock(&mode_key_setup_mutex);
return err;
}
static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
@ -182,6 +242,13 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
int err;
if (mk->mk_secret.is_hw_wrapped &&
!(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)) {
fscrypt_warn(ci->ci_inode,
"Hardware-wrapped keys are only supported with IV_INO_LBLK_64 policies");
return -EINVAL;
}
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
* DIRECT_KEY: instead of deriving per-file keys, the per-file
@ -197,7 +264,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
ci->ci_mode->friendly_name);
return -EINVAL;
}
return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
return setup_per_mode_key(ci, mk, mk->mk_direct_keys,
HKDF_CONTEXT_DIRECT_KEY, false);
} else if (ci->ci_policy.v2.flags &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
@ -207,7 +274,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* the IVs. This format is optimized for use with inline
* encryption hardware compliant with the UFS or eMMC standards.
*/
return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys,
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
true);
}
@ -242,6 +309,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
struct fscrypt_key_specifier mk_spec;
int err;
fscrypt_select_encryption_impl(ci);
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
@ -334,7 +403,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
else if (ci->ci_owns_key)
crypto_free_skcipher(ci->ci_ctfm);
fscrypt_destroy_prepared_key(&ci->ci_key);
key = ci->ci_master_key;
if (key) {

@ -146,7 +146,7 @@ struct fscrypt_direct_key {
struct hlist_node dk_node;
refcount_t dk_refcount;
const struct fscrypt_mode *dk_mode;
struct crypto_skcipher *dk_ctfm;
struct fscrypt_prepared_key dk_key;
u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
u8 dk_raw[FSCRYPT_MAX_KEY_SIZE];
};
@ -154,7 +154,7 @@ struct fscrypt_direct_key {
static void free_direct_key(struct fscrypt_direct_key *dk)
{
if (dk) {
crypto_free_skcipher(dk->dk_ctfm);
fscrypt_destroy_prepared_key(&dk->dk_key);
kzfree(dk);
}
}
@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
continue;
if (ci->ci_mode != dk->dk_mode)
continue;
if (!fscrypt_is_key_prepared(&dk->dk_key, ci))
continue;
if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize))
continue;
/* using existing tfm with same (descriptor, mode, raw_key) */
@ -231,13 +233,10 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
return ERR_PTR(-ENOMEM);
refcount_set(&dk->dk_refcount, 1);
dk->dk_mode = ci->ci_mode;
dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key,
ci->ci_inode);
if (IS_ERR(dk->dk_ctfm)) {
err = PTR_ERR(dk->dk_ctfm);
dk->dk_ctfm = NULL;
err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize,
ci);
if (err)
goto err_free_dk;
}
memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor,
FSCRYPT_KEY_DESCRIPTOR_SIZE);
memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize);
@ -274,7 +273,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
if (IS_ERR(dk))
return PTR_ERR(dk);
ci->ci_direct_key = dk;
ci->ci_ctfm = dk->dk_ctfm;
ci->ci_key = dk->dk_key;
return 0;
}

@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
@ -431,6 +432,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
sector_t first_sector, int nr_vecs)
{
struct bio *bio;
struct inode *inode = dio->inode;
/*
* bio_alloc() is guaranteed to return a bio when called with
@ -438,6 +440,9 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
*/
bio = bio_alloc(GFP_KERNEL, nr_vecs);
fscrypt_set_bio_crypt_ctx(bio, inode,
sdio->cur_page_fs_offset >> inode->i_blkbits,
GFP_KERNEL);
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = first_sector;
bio_set_op_attrs(bio, dio->op, dio->op_flags);

@ -1155,6 +1155,7 @@ struct ext4_inode_info {
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
#define EXT4_MOUNT_INLINECRYPT 0x4000000 /* Inline encryption support */
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */

@ -1236,8 +1236,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = IS_ENCRYPTED(inode) &&
S_ISREG(inode->i_mode);
decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
}
}
/*
@ -3856,10 +3855,12 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
#ifdef CONFIG_FS_ENCRYPTION
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) {
if (!fscrypt_inode_uses_inline_crypto(inode) ||
!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter),
i_blocksize(inode)))
return 0;
}
if (fsverity_active(inode))
return 0;
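
The direct I/O path above stays on the inline-crypto path only when both the file position and the iov_iter alignment are multiples of the filesystem block size; ORing the two lets a single IS_ALIGNED() test cover both, since the OR of two values is block-aligned iff each one is. A standalone sketch of that check, not part of the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Both values are block-aligned iff their bitwise OR is block-aligned
 * (blocksize is a power of two), which is what the check above exploits. */
static bool dio_is_aligned(uint64_t pos, uint64_t iov_alignment, uint64_t blocksize)
{
	return ((pos | iov_alignment) & (blocksize - 1)) == 0;
}

int main(void)
{
	printf("%d\n", dio_is_aligned(8192, 4096, 4096)); /* 1: both aligned    */
	printf("%d\n", dio_is_aligned(8192,  512, 4096)); /* 0: iovec unaligned */
	return 0;
}
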
@ -4067,8 +4068,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
IS_ENCRYPTED(inode)) {
if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);

@ -366,6 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
@ -383,7 +384,8 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
{
int ret;
if (io->io_bio && bh->b_blocknr != io->io_next_block) {
if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
!fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
ext4_io_submit(io);
}
@ -469,7 +471,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
bh = head = page_buffers(page);
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
gfp_t gfp_flags = GFP_NOFS;
/*

@ -198,7 +198,7 @@ static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
unsigned int post_read_steps = 0;
struct bio_post_read_ctx *ctx = NULL;
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
if (fscrypt_inode_uses_fs_layer_crypto(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (ext4_need_verity(inode, first_idx))
@ -259,6 +259,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
const unsigned blkbits = inode->i_blkbits;
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t next_block;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
@ -290,7 +291,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (page_has_buffers(page))
goto confused;
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
block_in_file = next_block =
(sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
last_block_in_file = (ext4_readpage_limit(inode) +
blocksize - 1) >> blkbits;
@ -390,7 +392,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
if (bio && (last_block_in_bio != blocks[0] - 1)) {
if (bio && (last_block_in_bio != blocks[0] - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
submit_and_realloc:
ext4_submit_bio_read(bio);
bio = NULL;
@ -402,6 +405,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio)
goto set_error_page;
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ctx = get_bio_post_read_ctx(inode, bio, page->index);
if (IS_ERR(ctx)) {
bio_put(bio);

@ -1300,6 +1300,11 @@ static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
*lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}
static bool ext4_inline_crypt_enabled(struct super_block *sb)
{
return test_opt(sb, INLINECRYPT);
}
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@ -1309,6 +1314,7 @@ static const struct fscrypt_operations ext4_cryptops = {
.max_namelen = EXT4_NAME_LEN,
.has_stable_inodes = ext4_has_stable_inodes,
.get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
.inline_crypt_enabled = ext4_inline_crypt_enabled,
};
#endif
@ -1404,6 +1410,7 @@ enum {
Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
Opt_inlinecrypt,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
@ -1497,6 +1504,7 @@ static const match_table_t tokens = {
{Opt_noinit_itable, "noinit_itable"},
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_inlinecrypt, "inlinecrypt"},
{Opt_nombcache, "nombcache"},
{Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
@ -1706,6 +1714,11 @@ static const struct mount_opts {
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
{Opt_test_dummy_encryption, 0, MOPT_GTE0},
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
{Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_SET},
#else
{Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_NOSUPPORT},
#endif
{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
{Opt_err, 0, 0}
};

@ -467,6 +467,37 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
return bio;
}
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
pgoff_t first_idx,
const struct f2fs_io_info *fio,
gfp_t gfp_mask)
{
/*
* The f2fs garbage collector sets ->encrypted_page when it wants to
* read/write raw data without encryption.
*/
if (!fio || !fio->encrypted_page)
fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
else if (fscrypt_inode_should_skip_dm_default_key(inode))
bio_set_skip_dm_default_key(bio);
}
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
pgoff_t next_idx,
const struct f2fs_io_info *fio)
{
/*
* The f2fs garbage collector sets ->encrypted_page when it wants to
* read/write raw data without encryption.
*/
if (fio && fio->encrypted_page)
return !bio_has_crypt_ctx(bio) &&
(bio_should_skip_dm_default_key(bio) ==
fscrypt_inode_should_skip_dm_default_key(inode));
return fscrypt_mergeable_bio(bio, inode, next_idx);
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type)
{
@ -712,6 +743,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
fio->page->index, fio, GFP_NOIO);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
@ -895,7 +929,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
struct bio *bio = *fio->bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct inode *inode;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@ -904,14 +937,17 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
inode = fio->page->mapping->host;
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr) ||
!f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host,
fio->page->index, fio)))
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
fio->page->index, fio,
GFP_NOIO);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
@ -967,8 +1003,11 @@ next:
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
io->last_block_in_bio, fio->new_blkaddr))
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
!f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
fio->page->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
@ -980,6 +1019,9 @@ alloc_new:
goto skip;
}
io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
fio->page->index, fio,
GFP_NOIO);
io->fio = *fio;
}
@ -1024,11 +1066,14 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
for_write);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (f2fs_encrypted_file(inode))
if (fscrypt_inode_uses_fs_layer_crypto(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (f2fs_compressed_file(inode))
post_read_steps |= 1 << STEP_DECOMPRESS;
@ -2054,8 +2099,9 @@ zero_out:
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio,
*last_block_in_bio, block_nr)) {
if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
*last_block_in_bio, block_nr) ||
!f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
@ -2406,6 +2452,9 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
if (fscrypt_inode_uses_inline_crypto(inode))
return 0;
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
PAGE_SIZE, 0, gfp_flags);
@ -2579,7 +2628,7 @@ got_it:
f2fs_unlock_op(fio->sbi);
err = f2fs_inplace_write_data(fio);
if (err) {
if (f2fs_encrypted_file(inode))
if (fscrypt_inode_uses_fs_layer_crypto(inode))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
if (PageWriteback(page))
end_page_writeback(page);

@ -139,6 +139,9 @@ struct f2fs_mount_info {
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
bool test_dummy_encryption; /* test dummy encryption */
#ifdef CONFIG_FS_ENCRYPTION
bool inlinecrypt; /* inline encryption enabled */
#endif
block_t unusable_cap; /* Amount of space allowed to be
* unusable when disabling checkpoint
*/
@ -4035,7 +4038,13 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
if (f2fs_encrypted_file(inode))
if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) {
if (!fscrypt_inode_uses_inline_crypto(inode) ||
!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter),
F2FS_BLKSIZE))
return true;
}
if (fsverity_active(inode))
return true;
if (f2fs_is_multi_device(sbi))
return true;

@ -137,6 +137,7 @@ enum {
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
Opt_inlinecrypt,
Opt_checkpoint_disable,
Opt_checkpoint_disable_cap,
Opt_checkpoint_disable_cap_perc,
@ -202,6 +203,7 @@ static match_table_t f2fs_tokens = {
{Opt_alloc, "alloc_mode=%s"},
{Opt_fsync, "fsync_mode=%s"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_inlinecrypt, "inlinecrypt"},
{Opt_checkpoint_disable, "checkpoint=disable"},
{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
@ -787,6 +789,13 @@ static int parse_options(struct super_block *sb, char *options)
f2fs_info(sbi, "Test dummy encryption mode enabled");
#else
f2fs_info(sbi, "Test dummy encryption mount option ignored");
#endif
break;
case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
F2FS_OPTION(sbi).inlinecrypt = true;
#else
f2fs_info(sbi, "inline encryption not supported");
#endif
break;
case Opt_checkpoint_disable_cap_perc:
@ -1574,6 +1583,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
#ifdef CONFIG_FS_ENCRYPTION
if (F2FS_OPTION(sbi).test_dummy_encryption)
seq_puts(seq, ",test_dummy_encryption");
if (F2FS_OPTION(sbi).inlinecrypt)
seq_puts(seq, ",inlinecrypt");
#endif
if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
@ -1604,6 +1615,9 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).test_dummy_encryption = false;
#ifdef CONFIG_FS_ENCRYPTION
F2FS_OPTION(sbi).inlinecrypt = false;
#endif
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
@ -2456,6 +2470,30 @@ static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
*lblk_bits_ret = 8 * sizeof(block_t);
}
static bool f2fs_inline_crypt_enabled(struct super_block *sb)
{
return F2FS_OPTION(F2FS_SB(sb)).inlinecrypt;
}
static int f2fs_get_num_devices(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (f2fs_is_multi_device(sbi))
return sbi->s_ndevs;
return 1;
}
static void f2fs_get_devices(struct super_block *sb,
struct request_queue **devs)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
for (i = 0; i < sbi->s_ndevs; i++)
devs[i] = bdev_get_queue(FDEV(i).bdev);
}
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
@ -2465,6 +2503,9 @@ static const struct fscrypt_operations f2fs_cryptops = {
.max_namelen = F2FS_NAME_LEN,
.has_stable_inodes = f2fs_has_stable_inodes,
.get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
.inline_crypt_enabled = f2fs_inline_crypt_enabled,
.get_num_devices = f2fs_get_num_devices,
.get_devices = f2fs_get_devices,
};
#endif

@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
@ -825,10 +826,13 @@ static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
unsigned len)
{
struct inode *inode = file_inode(dio->iocb->ki_filp);
struct page *page = ZERO_PAGE(0);
struct bio *bio;
bio = bio_alloc(GFP_KERNEL, 1);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector =
iomap->blkno + ((pos - iomap->offset) >> 9);
@ -908,6 +912,8 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
return 0;
bio = bio_alloc(GFP_KERNEL, nr_pages);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector =
iomap->blkno + ((pos - iomap->offset) >> 9);

@ -0,0 +1,228 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef __LINUX_BIO_CRYPT_CTX_H
#define __LINUX_BIO_CRYPT_CTX_H
#include <linux/string.h>
enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_INVALID,
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
BLK_ENCRYPTION_MODE_ADIANTUM,
BLK_ENCRYPTION_MODE_MAX,
};
#ifdef CONFIG_BLOCK
#include <linux/blk_types.h>
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
#define BLK_CRYPTO_MAX_KEY_SIZE 64
#define BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE 128
/**
* struct blk_crypto_key - an inline encryption key
* @crypto_mode: encryption algorithm this key is for
* @data_unit_size: the data unit size for all encryption/decryptions with this
* key. This is the size in bytes of each individual plaintext and
* ciphertext. This is always a power of 2. It might be e.g. the
* filesystem block size or the disk sector size.
* @data_unit_size_bits: log2 of data_unit_size
* @size: size of this key in bytes (determined by @crypto_mode)
* @hash: hash of this key, for keyslot manager use only
* @raw: the raw bytes of this key. Only the first @size bytes are used.
*
* A blk_crypto_key is immutable once created, and many bios can reference it at
* the same time. It must not be freed until all bios using it have completed.
*/
struct blk_crypto_key {
enum blk_crypto_mode_num crypto_mode;
unsigned int data_unit_size;
unsigned int data_unit_size_bits;
unsigned int size;
unsigned int hash;
u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE];
};
#define BLK_CRYPTO_MAX_IV_SIZE 32
#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64))
/**
* struct bio_crypt_ctx - an inline encryption context
* @bc_key: the key, algorithm, and data unit size to use
* @bc_keyslot: the keyslot that has been assigned for this key in @bc_ksm,
* or -1 if no keyslot has been assigned yet.
* @bc_dun: the data unit number (starting IV) to use
* @bc_ksm: the keyslot manager into which the key has been programmed at
* slot @bc_keyslot, or NULL if the key hasn't been programmed yet.
*
* A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
* write requests) or decrypted (for read requests) inline by the storage device
* or controller, or by the crypto API fallback.
*/
struct bio_crypt_ctx {
const struct blk_crypto_key *bc_key;
int bc_keyslot;
/* Data unit number */
u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
/*
* The keyslot manager where the key has been programmed
* with keyslot.
*/
struct keyslot_manager *bc_ksm;
};
int bio_crypt_ctx_init(void);
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask);
void bio_crypt_free_ctx(struct bio *bio);
static inline bool bio_has_crypt_ctx(struct bio *bio)
{
return bio->bi_crypt_context;
}
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
static inline void bio_crypt_set_ctx(struct bio *bio,
const struct blk_crypto_key *key,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
gfp_t gfp_mask)
{
struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(gfp_mask);
bc->bc_key = key;
memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
bc->bc_ksm = NULL;
bc->bc_keyslot = -1;
bio->bi_crypt_context = bc;
}
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc);
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
struct keyslot_manager *ksm);
struct request;
bool bio_crypt_should_process(struct request *rq);
static inline bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
unsigned int bytes,
u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
int i = 0;
unsigned int inc = bytes >> bc->bc_key->data_unit_size_bits;
while (i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
if (bc->bc_dun[i] + inc != next_dun[i])
return false;
inc = ((bc->bc_dun[i] + inc) < inc);
i++;
}
return true;
}
static inline void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
unsigned int inc)
{
int i = 0;
while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
dun[i] += inc;
inc = (dun[i] < inc);
i++;
}
}
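
For illustration, the two helpers above treat the DUN array as one large little-endian multi-word counter. A standalone sketch of the carry propagation (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define DUN_WORDS 4

/* Add 'inc' data units to a multi-word DUN, propagating the carry word by
 * word, as bio_crypt_dun_increment() does above. */
static void dun_increment(uint64_t dun[DUN_WORDS], unsigned int inc)
{
	unsigned int carry = inc;
	int i;

	for (i = 0; carry && i < DUN_WORDS; i++) {
		dun[i] += carry;
		carry = (dun[i] < carry); /* wrapped around -> carry into next word */
	}
}

int main(void)
{
	uint64_t dun[DUN_WORDS] = { UINT64_MAX - 1, 7, 0, 0 };

	dun_increment(dun, 3); /* low word overflows, carry lands in dun[1] */
	printf("dun = { %llu, %llu, %llu, %llu }\n",
	       (unsigned long long)dun[0], (unsigned long long)dun[1],
	       (unsigned long long)dun[2], (unsigned long long)dun[3]);
	return 0;
}
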
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
if (!bc)
return;
bio_crypt_dun_increment(bc->bc_dun,
bytes >> bc->bc_key->data_unit_size_bits);
}
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2);
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
struct bio *b_2);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int bio_crypt_ctx_init(void)
{
return 0;
}
static inline bool bio_has_crypt_ctx(struct bio *bio)
{
return false;
}
static inline void bio_crypt_clone(struct bio *dst, struct bio *src,
gfp_t gfp_mask) { }
static inline void bio_crypt_free_ctx(struct bio *bio) { }
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { }
static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
return true;
}
static inline bool bio_crypt_ctx_mergeable(struct bio *b_1,
unsigned int b1_bytes,
struct bio *b_2)
{
return true;
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
static inline void bio_set_skip_dm_default_key(struct bio *bio)
{
bio->bi_skip_dm_default_key = true;
}
static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
{
return bio->bi_skip_dm_default_key;
}
static inline void bio_clone_skip_dm_default_key(struct bio *dst,
const struct bio *src)
{
dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
}
#else /* CONFIG_DM_DEFAULT_KEY */
static inline void bio_set_skip_dm_default_key(struct bio *bio)
{
}
static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
{
return false;
}
static inline void bio_clone_skip_dm_default_key(struct bio *dst,
const struct bio *src)
{
}
#endif /* !CONFIG_DM_DEFAULT_KEY */
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_CRYPT_CTX_H */

@ -22,6 +22,7 @@
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>
#include <linux/bio-crypt-ctx.h>
#ifdef CONFIG_BLOCK

@ -0,0 +1,66 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef __LINUX_BLK_CRYPTO_H
#define __LINUX_BLK_CRYPTO_H
#include <linux/bio.h>
#define SECTOR_SHIFT 9
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
int blk_crypto_submit_bio(struct bio **bio_ptr);
bool blk_crypto_endio(struct bio *bio);
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
const u8 *raw_key, unsigned int raw_key_size,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size);
int blk_crypto_evict_key(struct request_queue *q,
const struct blk_crypto_key *key);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int blk_crypto_submit_bio(struct bio **bio_ptr)
{
	return 0;
}

static inline bool blk_crypto_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
				unsigned int data_unit_size,
				struct request_queue *q);

int blk_crypto_fallback_init(void);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
static inline int
blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
			    unsigned int data_unit_size,
			    struct request_queue *q)
{
	return 0;
}

static inline int blk_crypto_fallback_init(void)
{
	return 0;
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
#endif /* __LINUX_BLK_CRYPTO_H */
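
As a usage sketch (not part of the patch): a submitter initializes a struct
blk_crypto_key once, attaches it to outgoing bios, and evicts it from the
queue's keyslots once the key is no longer needed. example_setup_key() is a
hypothetical helper, and the mode constant and 4096-byte data unit size are
assumptions made for illustration.

static int example_setup_key(struct request_queue *q,
			     struct blk_crypto_key *blk_key,
			     const u8 *raw_key, unsigned int raw_key_size)
{
	int err;

	err = blk_crypto_init_key(blk_key, raw_key, raw_key_size,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  4096 /* data_unit_size */);
	if (err)
		return err;

	/* ... attach blk_key to outgoing bios via their bio_crypt_ctx ... */

	/* When done with the key, make sure no keyslot still holds it. */
	return blk_crypto_evict_key(q, blk_key);
}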

diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
@@ -17,6 +17,7 @@ struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
/*
* Block error status values. See block/blk-core:blk_errors for the details.
@@ -95,6 +96,14 @@ struct bio {
	struct blk_issue_stat bi_issue_stat;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -43,6 +43,7 @@ struct pr_ops;
struct rq_wb;
struct blk_queue_stats;
struct blk_stat_callback;
struct keyslot_manager;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -545,6 +546,11 @@ struct request_queue {
	 */
	unsigned int request_fn_active;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	/* Inline crypto capabilities */
	struct keyslot_manager *ksm;
#endif

	unsigned int rq_timeout;
	int poll_nsec;

diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
@@ -321,6 +321,12 @@ struct dm_target {
	 * on max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if inline crypto capabilities from this target's underlying
	 * device(s) can be exposed via the device-mapper device.
	 */
	bool may_passthrough_inline_crypto:1;
};
/* Each target can link one of these into the table */
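
For reference, a simple pass-through style target could opt in to the new
flag from its constructor; the constructor below is hypothetical and reduced
to the relevant line:

/* Illustration only: a target advertising inline crypto passthrough. */
static int example_target_ctr(struct dm_target *ti, unsigned int argc,
			      char **argv)
{
	/* ... parse arguments, dm_get_device(), set up ti->private ... */

	ti->may_passthrough_inline_crypto = true;
	return 0;
}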

diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
@@ -65,6 +65,10 @@ struct fscrypt_operations {
	bool (*has_stable_inodes)(struct super_block *sb);
	void (*get_ino_and_lblk_bits)(struct super_block *sb,
				      int *ino_bits_ret, int *lblk_bits_ret);
	bool (*inline_crypt_enabled)(struct super_block *sb);
	int (*get_num_devices)(struct super_block *sb);
	void (*get_devices)(struct super_block *sb,
			    struct request_queue **devs);
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
@@ -533,6 +537,74 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
}
#endif /* !CONFIG_FS_ENCRYPTION */
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
extern bool fscrypt_inode_uses_inline_crypto(const struct inode *inode);
extern bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode);

extern void fscrypt_set_bio_crypt_ctx(struct bio *bio,
				      const struct inode *inode,
				      u64 first_lblk, gfp_t gfp_mask);

extern void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
					 const struct buffer_head *first_bh,
					 gfp_t gfp_mask);

extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				  u64 next_lblk);

extern bool fscrypt_mergeable_bio_bh(struct bio *bio,
				     const struct buffer_head *next_bh);
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return false;
}

static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
					     const struct inode *inode,
					     u64 first_lblk, gfp_t gfp_mask) { }

static inline void fscrypt_set_bio_crypt_ctx_bh(
					struct bio *bio,
					const struct buffer_head *first_bh,
					gfp_t gfp_mask) { }

static inline bool fscrypt_mergeable_bio(struct bio *bio,
					 const struct inode *inode,
					 u64 next_lblk)
{
	return true;
}

static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
					    const struct buffer_head *next_bh)
{
	return true;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
static inline bool
fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
#else
static inline bool
fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
{
	return false;
}
#endif
/**
 * fscrypt_require_key - require an inode's encryption key
 * @inode: the inode we need the key for
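
A sketch of how a filesystem's bio submission path is expected to use the
inline-crypt hooks declared above; example_prep_bio() is hypothetical and
error handling is omitted:

/*
 * Illustration only: reuse the pending bio while the key and DUN stay
 * contiguous, otherwise submit it and start a new one with a fresh
 * encryption context.
 */
static struct bio *example_prep_bio(struct bio *bio, struct inode *inode,
				    u64 lblk)
{
	if (bio && !fscrypt_mergeable_bio(bio, inode, lblk)) {
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
	}

	/* ... bio_add_page() the data for @lblk, then repeat or submit ... */
	return bio;
}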

diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef __LINUX_KEYSLOT_MANAGER_H
#define __LINUX_KEYSLOT_MANAGER_H
#include <linux/bio.h>
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct keyslot_manager;
/**
 * struct keyslot_mgmt_ll_ops - functions to manage keyslots in hardware
 * @keyslot_program:	Program the specified key into the specified slot in the
 *			inline encryption hardware.
 * @keyslot_evict:	Evict key from the specified keyslot in the hardware.
 *			The key is provided so that e.g. dm layers can evict
 *			keys from the devices that they map over.
 *			Returns 0 on success, -errno otherwise.
 * @derive_raw_secret:	(Optional) Derive a software secret from a
 *			hardware-wrapped key. Returns 0 on success, -EOPNOTSUPP
 *			if unsupported on the hardware, or another -errno code.
 *
 * This structure should be provided by storage device drivers when they set up
 * a keyslot manager - this structure holds the function ptrs that the keyslot
 * manager will use to manipulate keyslots in the hardware.
 */
struct keyslot_mgmt_ll_ops {
	int (*keyslot_program)(struct keyslot_manager *ksm,
			       const struct blk_crypto_key *key,
			       unsigned int slot);
	int (*keyslot_evict)(struct keyslot_manager *ksm,
			     const struct blk_crypto_key *key,
			     unsigned int slot);
	int (*derive_raw_secret)(struct keyslot_manager *ksm,
				 const u8 *wrapped_key,
				 unsigned int wrapped_key_size,
				 u8 *secret, unsigned int secret_size);
};
struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
		const struct keyslot_mgmt_ll_ops *ksm_ops,
		const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
		void *ll_priv_data);

int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
				     const struct blk_crypto_key *key);

void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot);

void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot);

bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
					   enum blk_crypto_mode_num crypto_mode,
					   unsigned int data_unit_size);

int keyslot_manager_evict_key(struct keyslot_manager *ksm,
			      const struct blk_crypto_key *key);

void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm);

void *keyslot_manager_private(struct keyslot_manager *ksm);

void keyslot_manager_destroy(struct keyslot_manager *ksm);

struct keyslot_manager *keyslot_manager_create_passthrough(
		const struct keyslot_mgmt_ll_ops *ksm_ops,
		const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
		void *ll_priv_data);

void keyslot_manager_intersect_modes(struct keyslot_manager *parent,
				     const struct keyslot_manager *child);

int keyslot_manager_derive_raw_secret(struct keyslot_manager *ksm,
				      const u8 *wrapped_key,
				      unsigned int wrapped_key_size,
				      u8 *secret, unsigned int secret_size);
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
#endif /* __LINUX_KEYSLOT_MANAGER_H */
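
Tying the pieces together, a storage driver with hardware keyslots fills in a
keyslot_mgmt_ll_ops and publishes the resulting manager on its request queue.
The sketch below is hypothetical: example_program_key(), example_evict_key(),
the mode constant, the supported data-unit-size mask, and the direct q->ksm
assignment are all assumptions made for illustration.

/* Stand-ins for the driver's real hardware accessors. */
static int example_program_key(struct keyslot_manager *ksm,
			       const struct blk_crypto_key *key,
			       unsigned int slot)
{
	/* ... write the key material into hardware keyslot @slot ... */
	return 0;
}

static int example_evict_key(struct keyslot_manager *ksm,
			     const struct blk_crypto_key *key,
			     unsigned int slot)
{
	/* ... clear hardware keyslot @slot ... */
	return 0;
}

static const struct keyslot_mgmt_ll_ops example_ksm_ops = {
	.keyslot_program	= example_program_key,
	.keyslot_evict		= example_evict_key,
	/* .derive_raw_secret is optional and omitted here. */
};

static int example_init_inline_crypto(struct request_queue *q,
				      unsigned int num_slots)
{
	/* Per-mode bitmask of supported data unit sizes (assumed values). */
	unsigned int modes[BLK_ENCRYPTION_MODE_MAX] = {
		[BLK_ENCRYPTION_MODE_AES_256_XTS] = 512 | 4096,
	};
	struct keyslot_manager *ksm;

	ksm = keyslot_manager_create(num_slots, &example_ksm_ops, modes, NULL);
	if (!ksm)
		return -ENOMEM;

	q->ksm = ksm;	/* expose the capabilities on the request queue */
	return 0;
}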

diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
@@ -126,6 +126,8 @@ struct fscrypt_add_key_arg {
	__u32 raw_size;
	__u32 key_id;
	__u32 __reserved[7];
/* N.B.: "temporary" flag, not reserved upstream */
#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED	0x00000001
	__u32 __flags;
	__u8 raw[];
};
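
From userspace, the new flag rides along with the existing
FS_IOC_ADD_ENCRYPTION_KEY ioctl. A rough sketch, assuming the UAPI header is
available as <linux/fscrypt.h>, that the key is identified by the upstream v2
key_spec fields, and that the wrapped-key blob comes from a vendor-specific
wrapping mechanism:

/* Illustration only: add a hardware-wrapped key to a filesystem. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

static int add_hw_wrapped_key(int mnt_fd, const void *wrapped_key,
			      __u32 wrapped_size)
{
	struct fscrypt_add_key_arg *arg;
	int ret;

	arg = calloc(1, sizeof(*arg) + wrapped_size);
	if (!arg)
		return -1;

	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg->raw_size = wrapped_size;
	arg->__flags = __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
	memcpy(arg->raw, wrapped_key, wrapped_size);

	ret = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
	free(arg);
	return ret;
}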
