@@ -15,7 +15,9 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -29,6 +31,29 @@ struct seqiv_ctx {
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
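
/*
 * Context for the new AEAD interface: the wrapped child transform, a
 * lock serialising one-time salt generation, a null skcipher used to
 * copy the plaintext when encrypting out of place, and the random
 * salt that is XORed into the sequence number to form the IV.
 */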
struct seqiv_aead_ctx {
        struct crypto_aead *child;
        spinlock_t lock;
        struct crypto_blkcipher *null;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
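
/* setkey and setauthsize pass straight through to the child AEAD. */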
static int seqiv_aead_setkey(struct crypto_aead *tfm,
                             const u8 *key, unsigned int keylen)
{
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int seqiv_aead_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
@@ -81,6 +106,33 @@ static void seqiv_aead_complete(struct crypto_async_request *base, int err)
        aead_givcrypt_complete(req, err);
}
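
/*
 * Completion handler for requests whose IV had to be bounced into an
 * aligned buffer: copy the generated IV back into req->iv, then free
 * the buffer with kzfree() so no IV material is left behind.
 */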
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
        struct aead_request *subreq = aead_request_ctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = crypto_aead_reqtfm(req);
        memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kzfree(subreq->iv);
}

static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
                                        int err)
{
        struct aead_request *req = base->data;

        seqiv_aead_encrypt_complete2(req, err);
        aead_request_complete(req, err);
}

static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
                        unsigned int ivsize)
{
@@ -186,6 +238,171 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
        return err;
}
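
/*
 * Compat encrypt path, used when the child still implements the old
 * AEAD interface.  The IV is formed by XORing the salt into the
 * sequence number supplied in req->iv and is written into the
 * destination buffer just ahead of the ciphertext.
 */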
static int seqiv_aead_encrypt_compat(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;
        info = req->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                memcpy(info, req->iv, ivsize);
                compl = seqiv_aead_encrypt_complete;
                data = req;
        }

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, info);
        aead_request_set_ad(subreq, req->assoclen, ivsize);

        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != req->iv))
                seqiv_aead_encrypt_complete2(req, err);
        return err;
}
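
/*
 * Encrypt path for a new-interface child.  The child operates in
 * place on req->dst, so out-of-place requests first copy the
 * plaintext across with the null skcipher.  The IV is again the
 * sequence number XORed with the salt, stored in front of the
 * ciphertext, and here it is authenticated as part of the AD.
 */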
static int seqiv_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;
        info = req->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (req->src != req->dst) {
                struct scatterlist src[2];
                struct scatterlist dst[2];
                struct blkcipher_desc desc = {
                        .tfm = ctx->null,
                };

                err = crypto_blkcipher_encrypt(
                        &desc,
                        scatterwalk_ffwd(dst, req->dst,
                                         req->assoclen + ivsize),
                        scatterwalk_ffwd(src, req->src,
                                         req->assoclen + ivsize),
                        req->cryptlen - ivsize);
                if (err)
                        return err;
        }

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                memcpy(info, req->iv, ivsize);
                compl = seqiv_aead_encrypt_complete;
                data = req;
        }

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen - ivsize, info);
        aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != req->iv))
                seqiv_aead_encrypt_complete2(req, err);
        return err;
}
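
/*
 * Compat decrypt path: the transmitted IV sits between the AD and the
 * ciphertext, so it is copied out of req->src into req->iv before the
 * child decrypt runs.
 */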
static int seqiv_aead_decrypt_compat(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;

        ivsize = crypto_aead_ivsize(geniv);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen, ivsize);

        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

        return crypto_aead_decrypt(subreq);
}
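
/*
 * Decrypt path for a new-interface child: the IV is treated as part
 * of the associated data, and for out-of-place requests it is also
 * copied into the destination buffer.
 */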
static int seqiv_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;

        ivsize = crypto_aead_ivsize(geniv);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
        if (req->src != req->dst)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->assoclen, ivsize, 1);

        return crypto_aead_decrypt(subreq);
}

static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -232,6 +449,52 @@ unlock:
        return seqiv_aead_givencrypt(req);
}
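
/*
 * The two first-use hooks below generate the salt from the default
 * RNG under ctx->lock on the first encryption, then switch
 * geniv->encrypt to the fast path so later calls skip this step.
 */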
static int seqiv_aead_encrypt_compat_first(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (geniv->encrypt != seqiv_aead_encrypt_compat_first)
                goto unlock;

        geniv->encrypt = seqiv_aead_encrypt_compat;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_encrypt_compat(req);
}

static int seqiv_aead_encrypt_first(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (geniv->encrypt != seqiv_aead_encrypt_first)
                goto unlock;

        geniv->encrypt = seqiv_aead_encrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_encrypt(req);
}

static int seqiv_init(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
@@ -244,7 +507,7 @@ static int seqiv_init(struct crypto_tfm *tfm)
        return skcipher_geniv_init(tfm);
}

static int seqiv_aead_init(struct crypto_tfm *tfm)
static int seqiv_old_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
@@ -257,6 +520,69 @@ static int seqiv_aead_init(struct crypto_tfm *tfm)
        return aead_geniv_init(tfm);
}
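
/*
 * Init for the compat instance: reserve request context space for the
 * subrequest, then take ownership of the child set up by
 * aead_geniv_init() and point geniv->child back at the geniv itself.
 */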
static int seqiv_aead_compat_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

        err = aead_geniv_init(tfm);

        ctx->child = geniv->child;
        geniv->child = geniv;

        return err;
}
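
/*
 * Init for the new-interface instance: as above, but also grabs the
 * default null skcipher used for the out-of-place copy in
 * seqiv_aead_encrypt(), releasing it again if aead_geniv_init() fails.
 */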
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

        ctx->null = crypto_get_default_null_skcipher();
        err = PTR_ERR(ctx->null);
        if (IS_ERR(ctx->null))
                goto out;

        err = aead_geniv_init(tfm);
        if (err)
                goto drop_null;

        ctx->child = geniv->child;
        geniv->child = geniv;

out:
        return err;

drop_null:
        crypto_put_default_null_skcipher();
        goto out;
}

static void seqiv_aead_compat_exit(struct crypto_tfm *tfm)
{
        struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void seqiv_aead_exit(struct crypto_tfm *tfm)
{
        struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
        crypto_put_default_null_skcipher();
}

static struct crypto_template seqiv_tmpl;

static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
@@ -280,35 +606,76 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
        inst->alg.cra_exit = skcipher_geniv_exit;

        inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

out:
        return inst;
}
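
/*
 * Build the instance on top of the old AEAD interface; retained for
 * algorithms that have not yet been converted to the new one.
 */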
static struct crypto_instance *seqiv_old_aead_alloc(struct aead_instance *aead)
{
        struct crypto_instance *inst = aead_crypto_instance(aead);

        if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
                aead_geniv_free(aead);
                return ERR_PTR(-EINVAL);
        }

        inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;

        inst->alg.cra_init = seqiv_old_aead_init;
        inst->alg.cra_exit = aead_geniv_exit;

        inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

        return inst;
}
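
/*
 * New-style allocation: hand old-interface instances to
 * seqiv_old_aead_alloc(), and select the compat ops when the instance
 * is new-style but the child algorithm is not.
 */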
static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;
        struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct aead_alg *alg;

        inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto out;

        if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
        if (inst->alg.base.cra_aead.encrypt)
                return seqiv_old_aead_alloc(inst);

        if (inst->alg.ivsize < sizeof(u64)) {
                aead_geniv_free(inst);
                inst = ERR_PTR(-EINVAL);
                goto out;
        }

        inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;

        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);

        inst->alg.cra_init = seqiv_aead_init;
        inst->alg.cra_exit = aead_geniv_exit;

        inst->alg.setkey = seqiv_aead_setkey;
        inst->alg.setauthsize = seqiv_aead_setauthsize;
        inst->alg.encrypt = seqiv_aead_encrypt_first;
        inst->alg.decrypt = seqiv_aead_decrypt;

        inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;

        inst->alg.base.cra_init = seqiv_aead_init;
        inst->alg.base.cra_exit = seqiv_aead_exit;

        inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

        if (alg->base.cra_aead.encrypt) {
                inst->alg.encrypt = seqiv_aead_encrypt_compat_first;
                inst->alg.decrypt = seqiv_aead_decrypt_compat;

                inst->alg.base.cra_init = seqiv_aead_compat_init;
                inst->alg.base.cra_exit = seqiv_aead_compat_exit;
        }

out:
        return inst;
        return aead_crypto_instance(inst);
}

static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
@@ -334,7 +701,6 @@ static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
                goto put_rng;

        inst->alg.cra_alignmask |= __alignof__(u32) - 1;

        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
out:
        return inst;
@@ -349,7 +715,7 @@ static void seqiv_free(struct crypto_instance *inst)
        if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                skcipher_geniv_free(inst);
        else
                aead_geniv_free(inst);
                aead_geniv_free(aead_instance(inst));
        crypto_put_default_rng();
}