crypto: x86/chacha - yield the FPU occasionally
author	Eric Biggers <ebiggers@google.com>	Wed, 5 Dec 2018 06:20:05 +0000 (22:20 -0800)
committer	Herbert Xu <herbert@gondor.apana.org.au>	Thu, 13 Dec 2018 10:24:58 +0000 (18:24 +0800)
To improve responsiveness, yield the FPU (temporarily re-enabling
preemption) every 4 KiB encrypted/decrypted, rather than keeping
preemption disabled during the entire encryption/decryption operation.

Alternatively we could do this for every skcipher_walk step, but steps
may be small in some cases, and yielding the FPU is expensive on x86.
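For context, here is a minimal sketch of the yield pattern outside the
skcipher_walk machinery. It is not part of this patch: process_block()
is a hypothetical stand-in for chacha_dosimd(), and the flat
dst/src/len loop replaces the walk. Note the yield is skipped once the
data is exhausted, since kernel_fpu_end() follows immediately anyway.

	#include <linux/types.h>
	#include <linux/kernel.h>	/* min_t() */
	#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

	/* hypothetical SIMD worker standing in for chacha_dosimd() */
	void process_block(u8 *dst, const u8 *src, unsigned int nbytes);

	static void xor_stream_with_yield(u8 *dst, const u8 *src, size_t len)
	{
		int budget = 4096;	/* bytes until the next FPU yield */

		kernel_fpu_begin();
		while (len > 0) {
			unsigned int n = min_t(size_t, len, 4096);

			process_block(dst, src, n);
			dst += n;
			src += n;
			len -= n;
			budget -= n;

			if (budget <= 0 && len > 0) {
				/* briefly re-enable preemption between chunks */
				kernel_fpu_end();
				kernel_fpu_begin();
				budget = 4096;
			}
		}
		kernel_fpu_end();
	}

The patch below uses the same back-to-back kernel_fpu_end() /
kernel_fpu_begin() pair to open a preemption window without
restructuring the existing walk loop.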

Suggested-by: Martin Willi <martin@strongswan.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index d19c290..9b1d3fa 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -132,6 +132,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
 {
        u32 *state, state_buf[16 + 2] __aligned(8);
        struct skcipher_walk walk;
+       int next_yield = 4096; /* bytes until next FPU yield */
        int err;
 
        BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
@@ -144,12 +145,21 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
        while (walk.nbytes > 0) {
                unsigned int nbytes = walk.nbytes;
 
-               if (nbytes < walk.total)
+               if (nbytes < walk.total) {
                        nbytes = round_down(nbytes, walk.stride);
+                       next_yield -= nbytes;
+               }
 
                chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes, ctx->nrounds);
 
+               if (next_yield <= 0) {
+                       /* temporarily allow preemption */
+                       kernel_fpu_end();
+                       kernel_fpu_begin();
+                       next_yield = 4096;
+               }
+
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }