ANDROID: dm: sync inline crypto support with patches going upstream

Replace the following patches with upstream versions
(well, almost upstream; as of 2021-02-12 they are queued for 5.12 at
https://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git/log/?h=for-next):

	ANDROID-dm-add-support-for-passing-through-inline-crypto-support.patch
	ANDROID-dm-enable-may_passthrough_inline_crypto-on-some-targets.patch
	ANDROID-block-Introduce-passthrough-keyslot-manager.patch

Also, resolve conflicts with the following non-upstream patches for
hardware-wrapped key support.  Notably, we need to handle the field
blk_keyslot_manager::features in a few places:

	ANDROID-block-add-hardware-wrapped-key-support.patch
	ANDROID-dm-add-support-for-passing-through-derive_raw_secret.patch
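
In practice, handling ->features just means treating it like the other
capability fields.  Abridged from the keyslot-manager.c changes below
(illustrative excerpt, not a complete function):

	/* blk_ksm_intersect_modes(): intersect the feature bits too */
	parent->features &= child->features;

	/* blk_ksm_is_superset(): the subset must not require extra features */
	if (ksm_subset->features & ~ksm_superset->features)
		return false;

	/* blk_ksm_update_capabilities(): carry the feature bits across */
	target_ksm->features = reference_ksm->features;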

Finally, update non-upstream device-mapper targets (dm-bow and
dm-default-key) to use the new way of specifying inline crypto
passthrough support (DM_TARGET_PASSES_CRYPTO) rather than the old way
(may_passthrough_inline_crypto).  These changes should be folded into:

	ANDROID-dm-bow-Add-dm-bow-feature.patch
	ANDROID-dm-add-dm-default-key-target-for-metadata-encryption.patch
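
For reference, the switch in each of these targets looks roughly like the
following (hypothetical "example" target, shown only to illustrate the old
vs. new opt-in; the real hunks are in the diff below):

	static int example_ctr(struct dm_target *ti, unsigned int argc,
			       char **argv)
	{
		/* Old way (removed): per-instance opt-in from the constructor. */
		/* ti->may_passthrough_inline_crypto = true; */
		return 0;
	}

	static struct target_type example_target = {
		.name     = "example",
		.version  = {1, 0, 0},
		/* New way: declare passthrough support as a target feature. */
		.features = DM_TARGET_PASSES_CRYPTO,
		.module   = THIS_MODULE,
		.ctr      = example_ctr,
	};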

Test: tested on db845c; verified that inline crypto support gets passed
      through over dm-linear.
Bug: 162257830
Change-Id: I5e3dea1aa09fc1215c90857b5b51d9e3720ef7db
Signed-off-by: Eric Biggers <ebiggers@google.com>
Eric Biggers authored on 2021-02-12 11:53:49 -08:00; committed by Greg Kroah-Hartman
parent a56f081c5b
commit 537d3bb974
11 changed files with 422 additions and 244 deletions
block/keyslot-manager.c (+84 -8)
@@ -386,7 +386,7 @@ void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm)
{
unsigned int slot;
if (WARN_ON(blk_ksm_is_passthrough(ksm)))
if (blk_ksm_is_passthrough(ksm))
return;
/* This is for device initialization, so don't resume the device */
@@ -430,7 +430,6 @@ void blk_ksm_unregister(struct request_queue *q)
{
q->ksm = NULL;
}
EXPORT_SYMBOL_GPL(blk_ksm_unregister);
/**
* blk_ksm_derive_raw_secret() - Derive software secret from wrapped key
@@ -490,21 +489,100 @@ void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
parent->max_dun_bytes_supported =
min(parent->max_dun_bytes_supported,
child->max_dun_bytes_supported);
parent->features &= child->features;
for (i = 0; i < ARRAY_SIZE(child->crypto_modes_supported);
i++) {
parent->crypto_modes_supported[i] &=
child->crypto_modes_supported[i];
}
parent->features &= child->features;
} else {
parent->max_dun_bytes_supported = 0;
parent->features = 0;
memset(parent->crypto_modes_supported, 0,
sizeof(parent->crypto_modes_supported));
parent->features = 0;
}
}
EXPORT_SYMBOL_GPL(blk_ksm_intersect_modes);
/**
* blk_ksm_is_superset() - Check if a KSM supports a superset of crypto modes
* and DUN bytes that another KSM supports. Here,
* "superset" refers to the mathematical meaning of the
* word - i.e. if two KSMs have the *same* capabilities,
* they *are* considered supersets of each other.
* @ksm_superset: The KSM that we want to verify is a superset
* @ksm_subset: The KSM that we want to verify is a subset
*
* Return: True if @ksm_superset supports a superset of the crypto modes and DUN
* bytes that @ksm_subset supports.
*/
bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
struct blk_keyslot_manager *ksm_subset)
{
int i;
if (!ksm_subset)
return true;
if (!ksm_superset)
return false;
for (i = 0; i < ARRAY_SIZE(ksm_superset->crypto_modes_supported); i++) {
if (ksm_subset->crypto_modes_supported[i] &
(~ksm_superset->crypto_modes_supported[i])) {
return false;
}
}
if (ksm_subset->max_dun_bytes_supported >
ksm_superset->max_dun_bytes_supported) {
return false;
}
if (ksm_subset->features & ~ksm_superset->features)
return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_ksm_is_superset);
/**
* blk_ksm_update_capabilities() - Update the restrictions of a KSM to those of
* another KSM
* @target_ksm: The KSM whose restrictions to update.
* @reference_ksm: The KSM to whose restrictions this function will update
* @target_ksm's restrictions to.
*
* Blk-crypto requires that crypto capabilities that were
* advertised when a bio was created continue to be supported by the
* device until that bio is ended. This in turn means that a device cannot
* shrink its advertised crypto capabilities without any explicit
* synchronization with upper layers. So if there's no such explicit
* synchronization, @reference_ksm must support all the crypto capabilities that
* @target_ksm does
* (i.e. we need blk_ksm_is_superset(@reference_ksm, @target_ksm) == true).
*
* Note also that as long as the crypto capabilities are being expanded, the
* order of updates becoming visible is not important because it's alright
* for blk-crypto to see stale values - they only cause blk-crypto to
* believe that a crypto capability isn't supported when it actually is (which
* might result in blk-crypto-fallback being used if available, or the bio being
* failed).
*/
void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
struct blk_keyslot_manager *reference_ksm)
{
memcpy(target_ksm->crypto_modes_supported,
reference_ksm->crypto_modes_supported,
sizeof(target_ksm->crypto_modes_supported));
target_ksm->max_dun_bytes_supported =
reference_ksm->max_dun_bytes_supported;
target_ksm->features = reference_ksm->features;
}
EXPORT_SYMBOL_GPL(blk_ksm_update_capabilities);
/**
* blk_ksm_init_passthrough() - Init a passthrough keyslot manager
* @ksm: The keyslot manager to init
@@ -512,10 +590,8 @@ EXPORT_SYMBOL_GPL(blk_ksm_intersect_modes);
* Initialize a passthrough keyslot manager.
* Called by e.g. storage drivers to set up a keyslot manager in their
* request_queue, when the storage driver wants to manage its keys by itself.
* This is useful for inline encryption hardware that don't have a small fixed
* number of keyslots, and for layered devices.
*
* See blk_ksm_init() for more details about the parameters.
* This is useful for inline encryption hardware that doesn't have the concept
* of keyslots, and for layered devices.
*/
void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm)
{
drivers/md/dm-bow.c (+1 -1)
@@ -788,7 +788,6 @@ static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rb_insert_color(&br->node, &bc->ranges);
ti->discards_supported = true;
ti->may_passthrough_inline_crypto = true;
return 0;
@@ -1267,6 +1266,7 @@ static int dm_bow_iterate_devices(struct dm_target *ti,
static struct target_type bow_target = {
.name = "bow",
.version = {1, 2, 0},
.features = DM_TARGET_PASSES_CRYPTO,
.module = THIS_MODULE,
.ctr = dm_bow_ctr,
.dtr = dm_bow_dtr,
drivers/md/dm-core.h (+4 -3)
@@ -53,9 +53,6 @@ struct mapped_device {
int numa_node_id;
struct request_queue *queue;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_keyslot_manager ksm;
#endif
atomic_t holders;
atomic_t open_count;
@@ -173,6 +170,10 @@ struct dm_table {
void *event_context;
struct dm_md_mempools *mempools;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_keyslot_manager *ksm;
#endif
};
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
drivers/md/dm-default-key.c (+1 -2)
@@ -255,8 +255,6 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->may_passthrough_inline_crypto = true;
err = 0;
goto out;
@@ -397,6 +395,7 @@ static void default_key_io_hints(struct dm_target *ti,
static struct target_type default_key_target = {
.name = "default-key",
.version = {2, 1, 0},
.features = DM_TARGET_PASSES_CRYPTO,
.module = THIS_MODULE,
.ctr = default_key_ctr,
.dtr = default_key_dtr,
drivers/md/dm-flakey.c (+3 -1)
@@ -482,8 +482,10 @@ static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
.report_zones = flakey_report_zones,
#else
.features = DM_TARGET_PASSES_CRYPTO,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
drivers/md/dm-linear.c (+3 -3)
@@ -62,7 +62,6 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_secure_erase_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->may_passthrough_inline_crypto = true;
ti->private = lc;
return 0;
@@ -230,10 +229,11 @@ static struct target_type linear_target = {
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
DM_TARGET_ZONED_HM,
DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
.report_zones = linear_report_zones,
#else
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
DM_TARGET_PASSES_CRYPTO,
#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
drivers/md/dm-table.c (+292 -52)
@@ -21,8 +21,6 @@
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
#include <linux/bio.h>
#include <linux/keyslot-manager.h>
#define DM_MSG_PREFIX "table"
@@ -189,6 +187,8 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
}
}
static void dm_table_destroy_keyslot_manager(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{
unsigned int i;
@@ -217,6 +217,8 @@ void dm_table_destroy(struct dm_table *t)
dm_free_md_mempools(t->mempools);
dm_table_destroy_keyslot_manager(t);
kfree(t);
}
@@ -1212,6 +1214,287 @@ static int dm_table_register_integrity(struct dm_table *t)
return 0;
}
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_keyslot_manager {
struct blk_keyslot_manager ksm;
struct mapped_device *md;
};
struct dm_keyslot_evict_args {
const struct blk_crypto_key *key;
int err;
};
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_keyslot_evict_args *args = data;
int err;
err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
if (!args->err)
args->err = err;
/* Always try to evict the key from all devices. */
return 0;
}
/*
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
const struct blk_crypto_key *key, unsigned int slot)
{
struct dm_keyslot_manager *dksm = container_of(ksm,
struct dm_keyslot_manager,
ksm);
struct mapped_device *md = dksm->md;
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
struct dm_derive_raw_secret_args {
const u8 *wrapped_key;
unsigned int wrapped_key_size;
u8 *secret;
unsigned int secret_size;
int err;
};
static int dm_derive_raw_secret_callback(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct dm_derive_raw_secret_args *args = data;
struct request_queue *q = bdev_get_queue(dev->bdev);
if (!args->err)
return 0;
if (!q->ksm) {
args->err = -EOPNOTSUPP;
return 0;
}
args->err = blk_ksm_derive_raw_secret(q->ksm, args->wrapped_key,
args->wrapped_key_size,
args->secret,
args->secret_size);
/* Try another device in case this fails. */
return 0;
}
/*
* Retrieve the raw_secret from the underlying device. Given that only one
* raw_secret can exist for a particular wrappedkey, retrieve it only from the
* first device that supports derive_raw_secret().
*/
static int dm_derive_raw_secret(struct blk_keyslot_manager *ksm,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *secret, unsigned int secret_size)
{
struct dm_keyslot_manager *dksm = container_of(ksm,
struct dm_keyslot_manager,
ksm);
struct mapped_device *md = dksm->md;
struct dm_derive_raw_secret_args args = {
.wrapped_key = wrapped_key,
.wrapped_key_size = wrapped_key_size,
.secret = secret,
.secret_size = secret_size,
.err = -EOPNOTSUPP,
};
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return -EOPNOTSUPP;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_derive_raw_secret_callback,
&args);
if (!args.err)
break;
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
static struct blk_ksm_ll_ops dm_ksm_ll_ops = {
.keyslot_evict = dm_keyslot_evict,
.derive_raw_secret = dm_derive_raw_secret,
};
static int device_intersect_crypto_modes(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct blk_keyslot_manager *parent = data;
struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
blk_ksm_intersect_modes(parent, child);
return 0;
}
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
{
struct dm_keyslot_manager *dksm = container_of(ksm,
struct dm_keyslot_manager,
ksm);
if (!ksm)
return;
blk_ksm_destroy(ksm);
kfree(dksm);
}
static void dm_table_destroy_keyslot_manager(struct dm_table *t)
{
dm_destroy_keyslot_manager(t->ksm);
t->ksm = NULL;
}
/*
* Constructs and initializes t->ksm with a keyslot manager that
* represents the common set of crypto capabilities of the devices
* described by the dm_table. However, if the constructed keyslot
* manager does not support a superset of the crypto capabilities
* supported by the current keyslot manager of the mapped_device,
* it returns an error instead, since we don't support restricting
* crypto capabilities on table changes. Finally, if the constructed
* keyslot manager doesn't actually support any crypto modes at all,
* it just returns NULL.
*/
static int dm_table_construct_keyslot_manager(struct dm_table *t)
{
struct dm_keyslot_manager *dksm;
struct blk_keyslot_manager *ksm;
struct dm_target *ti;
unsigned int i;
bool ksm_is_empty = true;
dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
if (!dksm)
return -ENOMEM;
dksm->md = t->md;
ksm = &dksm->ksm;
blk_ksm_init_passthrough(ksm);
ksm->ksm_ll_ops = dm_ksm_ll_ops;
ksm->max_dun_bytes_supported = UINT_MAX;
memset(ksm->crypto_modes_supported, 0xFF,
sizeof(ksm->crypto_modes_supported));
ksm->features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
blk_ksm_intersect_modes(ksm, NULL);
break;
}
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, device_intersect_crypto_modes,
ksm);
}
if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
dm_destroy_keyslot_manager(ksm);
return -EINVAL;
}
/*
* If the new KSM doesn't actually support any crypto modes, we may as
* well represent it with a NULL ksm.
*/
ksm_is_empty = true;
for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
if (ksm->crypto_modes_supported[i]) {
ksm_is_empty = false;
break;
}
}
if (ksm_is_empty) {
dm_destroy_keyslot_manager(ksm);
ksm = NULL;
}
/*
* t->ksm is only set temporarily while the table is being set
* up, and it gets set to NULL after the capabilities have
* been transferred to the request_queue.
*/
t->ksm = ksm;
return 0;
}
static void dm_update_keyslot_manager(struct request_queue *q,
struct dm_table *t)
{
if (!t->ksm)
return;
/* Make the ksm less restrictive */
if (!q->ksm) {
blk_ksm_register(t->ksm, q);
} else {
blk_ksm_update_capabilities(q->ksm, t->ksm);
dm_destroy_keyslot_manager(t->ksm);
}
t->ksm = NULL;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static int dm_table_construct_keyslot_manager(struct dm_table *t)
{
return 0;
}
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
{
}
static void dm_table_destroy_keyslot_manager(struct dm_table *t)
{
}
static void dm_update_keyslot_manager(struct request_queue *q,
struct dm_table *t)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
/*
* Prepares the table for use by building the indices,
* setting the type, and allocating mempools.
@@ -1238,6 +1521,12 @@ int dm_table_complete(struct dm_table *t)
return r;
}
r = dm_table_construct_keyslot_manager(t);
if (r) {
DMERR("could not construct keyslot manager.");
return r;
}
r = dm_table_alloc_md_mempools(t, t->md);
if (r)
DMERR("unable to allocate mempools");
@@ -1536,54 +1825,6 @@ static void dm_table_verify_integrity(struct dm_table *t)
}
}
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static int device_intersect_crypto_modes(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct blk_keyslot_manager *parent = data;
struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
blk_ksm_intersect_modes(parent, child);
return 0;
}
/*
* Update the inline crypto modes supported by 'q->ksm' to be the intersection
* of the modes supported by all targets in the table.
*
* For any mode to be supported at all, all targets must have explicitly
* declared that they can pass through inline crypto support. For a particular
* mode to be supported, all underlying devices must also support it.
*
* Assume that 'q->ksm' initially declares all modes to be supported.
*/
static void dm_calculate_supported_crypto_modes(struct dm_table *t,
struct request_queue *q)
{
struct dm_target *ti;
unsigned int i;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->may_passthrough_inline_crypto) {
blk_ksm_intersect_modes(q->ksm, NULL);
return;
}
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, device_intersect_crypto_modes,
q->ksm);
}
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_calculate_supported_crypto_modes(struct dm_table *t,
struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1918,8 +2159,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
dm_calculate_supported_crypto_modes(t, q);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
@@ -1950,6 +2189,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
#endif
dm_update_keyslot_manager(q, t);
blk_queue_update_readahead(q);
}
drivers/md/dm.c (+13 -160)
@@ -1723,7 +1723,18 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
static void dm_destroy_inline_encryption(struct request_queue *q);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
{
dm_destroy_keyslot_manager(q->ksm);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
static void cleanup_mapped_device(struct mapped_device *md)
{
@@ -1747,7 +1758,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
}
if (md->queue) {
dm_destroy_inline_encryption(md->queue);
dm_queue_destroy_keyslot_manager(md->queue);
blk_cleanup_queue(md->queue);
}
@@ -2098,161 +2109,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_keyslot_evict_args {
const struct blk_crypto_key *key;
int err;
};
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_keyslot_evict_args *args = data;
int err;
err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
if (!args->err)
args->err = err;
/* Always try to evict the key from all devices. */
return 0;
}
/*
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
const struct blk_crypto_key *key, unsigned int slot)
{
struct mapped_device *md = container_of(ksm, struct mapped_device, ksm);
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
struct dm_derive_raw_secret_args {
const u8 *wrapped_key;
unsigned int wrapped_key_size;
u8 *secret;
unsigned int secret_size;
int err;
};
static int dm_derive_raw_secret_callback(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct dm_derive_raw_secret_args *args = data;
struct request_queue *q = bdev_get_queue(dev->bdev);
if (!args->err)
return 0;
if (!q->ksm) {
args->err = -EOPNOTSUPP;
return 0;
}
args->err = blk_ksm_derive_raw_secret(q->ksm, args->wrapped_key,
args->wrapped_key_size,
args->secret,
args->secret_size);
/* Try another device in case this fails. */
return 0;
}
/*
* Retrieve the raw_secret from the underlying device. Given that
* only one raw_secret can exist for a particular wrappedkey,
* retrieve it only from the first device that supports derive_raw_secret()
*/
static int dm_derive_raw_secret(struct blk_keyslot_manager *ksm,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *secret, unsigned int secret_size)
{
struct mapped_device *md = container_of(ksm, struct mapped_device, ksm);
struct dm_derive_raw_secret_args args = {
.wrapped_key = wrapped_key,
.wrapped_key_size = wrapped_key_size,
.secret = secret,
.secret_size = secret_size,
.err = -EOPNOTSUPP,
};
struct dm_table *t;
int srcu_idx;
int i;
struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return -EOPNOTSUPP;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_derive_raw_secret_callback,
&args);
if (!args.err)
break;
}
dm_put_live_table(md, srcu_idx);
return args.err;
}
static struct blk_ksm_ll_ops dm_ksm_ll_ops = {
.keyslot_evict = dm_keyslot_evict,
.derive_raw_secret = dm_derive_raw_secret,
};
static void dm_init_inline_encryption(struct mapped_device *md)
{
blk_ksm_init_passthrough(&md->ksm);
md->ksm.ksm_ll_ops = dm_ksm_ll_ops;
/*
* Initially declare support for all crypto settings. Anything
* unsupported by a child device will be removed later when calculating
* the device restrictions.
*/
md->ksm.max_dun_bytes_supported = UINT_MAX;
md->ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
memset(md->ksm.crypto_modes_supported, 0xFF,
sizeof(md->ksm.crypto_modes_supported));
blk_ksm_register(&md->ksm, md->queue);
}
static void dm_destroy_inline_encryption(struct request_queue *q)
{
blk_ksm_destroy(q->ksm);
blk_ksm_unregister(q);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_init_inline_encryption(struct mapped_device *md)
{
}
static inline void dm_destroy_inline_encryption(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
/*
* Setup the DM device's queue based on md's type
*/
@@ -2284,9 +2140,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot calculate initial queue limits");
return r;
}
dm_init_inline_encryption(md);
dm_table_set_restrictions(t, md->queue, &limits);
blk_register_queue(md->disk);
include/linux/device-mapper.h (+11 -6)
@@ -257,6 +257,12 @@ struct target_type {
#define DM_TARGET_NOWAIT 0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
/*
* A target supports passing through inline crypto support.
*/
#define DM_TARGET_PASSES_CRYPTO 0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -325,12 +331,6 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
bool discards_supported:1;
/*
* Set if inline crypto capabilities from this target's underlying
* device(s) can be exposed via the device-mapper device.
*/
bool may_passthrough_inline_crypto:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
@@ -539,6 +539,11 @@ void dm_table_run_md_queue_async(struct dm_table *t);
struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
* Table keyslot manager functions
*/
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
/*
* A wrapper around vmalloc.
*/
include/linux/keyslot-manager.h (+8 -6)
@@ -18,8 +18,6 @@ enum {
BLK_CRYPTO_FEATURE_WRAPPED_KEYS = BIT(1),
};
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_keyslot_manager;
/**
@@ -127,16 +125,20 @@ void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm);
void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
const struct blk_keyslot_manager *child);
int blk_ksm_derive_raw_secret(struct blk_keyslot_manager *ksm,
const u8 *wrapped_key,
unsigned int wrapped_key_size,
u8 *secret, unsigned int secret_size);
void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
const struct blk_keyslot_manager *child);
void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm);
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
struct blk_keyslot_manager *ksm_subset);
void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
struct blk_keyslot_manager *reference_ksm);
#endif /* __LINUX_KEYSLOT_MANAGER_H */
include/uapi/linux/dm-ioctl.h (+2 -2)
@@ -272,9 +272,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 43
#define DM_VERSION_MINOR 44
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2020-10-01)"
#define DM_VERSION_EXTRA "-ioctl (2021-02-01)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */