@@ -898,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
@@ -977,7 +977,6 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	struct skcipher_request *req = context;
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -991,16 +990,17 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block when running in CBC mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
-		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
-					 ivsize, ivsize, 0);
+	if (ivsize) {
+		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+		       ivsize);
 
-	if (ivsize)
 		print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 				     edesc->src_nents > 1 ? 100 : ivsize, 1);
+	}
 
 	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
@@ -1027,8 +1027,20 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
 	skcipher_unmap(jrdev, edesc, req);
 
-	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	if (ivsize) {
+		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+		       ivsize);
+
+		print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+				     ivsize, 1);
+	}
+
 	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
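
For a sense of where the completion callbacks now read the IV from, here is a minimal userspace model (illustrative only; struct toy_edesc, its buffer sizes and toy_skcipher_done() are assumptions, not the driver's types): the IV buffer sits immediately after the HW S/G table inside the extended descriptor, and by completion time the accelerator has already written the updated IV there because the output table's last entry points at that buffer.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for the extended descriptor tail: [S/G table][IV buffer]. */
struct toy_edesc {
	int sec4_sg_bytes;              /* size of the S/G table */
	unsigned char sec4_sg[64 + 16]; /* table followed by the IV */
};

/* Same addressing as the patch: (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes */
static void toy_skcipher_done(struct toy_edesc *edesc, unsigned char *req_iv,
			      int ivsize)
{
	memcpy(req_iv, edesc->sec4_sg + edesc->sec4_sg_bytes, ivsize);
}

int main(void)
{
	struct toy_edesc edesc = { .sec4_sg_bytes = 64 };
	unsigned char req_iv[16];

	/* pretend the accelerator wrote the updated IV via the output S/G entry */
	memset(edesc.sec4_sg + edesc.sec4_sg_bytes, 0xab, sizeof(req_iv));
	toy_skcipher_done(&edesc, req_iv, sizeof(req_iv));
	printf("req->iv[0] = 0x%02x\n", req_iv[0]);
	return 0;
}
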
@@ -1260,15 +1272,15 @@ static void init_skcipher_job(struct skcipher_request *req,
 	if (likely(req->src == req->dst)) {
 		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
 		out_options = in_options;
-	} else if (edesc->mapped_dst_nents == 1) {
+	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
 		dst_dma = sg_dma_address(req->dst);
 	} else {
 		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
 			  sizeof(struct sec4_sg_entry);
 		out_options = LDST_SGF;
 	}
 
-	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
 }
 
 /*
@@ -1699,22 +1711,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	dst_sg_idx = sec4_sg_ents;
 
 	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
 	 * the end of the table by allocating more S/G entries. Logic:
-	 * if (src != dst && output S/G)
+	 * if (output S/G)
 	 *	pad output S/G, if needed
-	 * else if (src == dst && S/G)
-	 *	overlapping S/Gs; pad one of them
 	 * else if (input S/G) ...
 	 *	pad input S/G, if needed
 	 */
-	if (mapped_dst_nents > 1)
-		sec4_sg_ents += pad_sg_nents(mapped_dst_nents);
-	else if ((req->src == req->dst) && (mapped_src_nents > 1))
-		sec4_sg_ents = max(pad_sg_nents(sec4_sg_ents),
-				   !!ivsize + pad_sg_nents(mapped_src_nents));
-	else
+	if (ivsize || mapped_dst_nents > 1) {
+		if (req->src == req->dst)
+			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+		else
+			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
						     !!ivsize);
+	} else {
+		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+	}
 
 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
 
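
A minimal userspace sketch of the new entry-count logic, assuming pad_sg_nents() rounds up to a multiple of 4 (the hardware reads S/G entries four at a time); sec4_sg_ents arrives holding the input-side count, i.e. dst_sg_idx, and all names below are illustrative:

#include <stdio.h>

/* Assumption: align up to a multiple of 4, matching the 4-entry HW reads. */
static int pad_sg_nents(int nents)
{
	return (nents + 3) & ~3;
}

/* Mirrors the branch added above. */
static int size_sec4_sg(int sec4_sg_ents, int ivsize, int src_is_dst,
			int mapped_dst_nents)
{
	if (ivsize || mapped_dst_nents > 1) {
		if (src_is_dst)
			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
		else
			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
						     !!ivsize);
	} else {
		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
	}
	return sec4_sg_ents;
}

int main(void)
{
	/* src != dst, IV present, input table [IV, src] = 1 + 3 entries,
	 * output table [dst, IV] padded to pad_sg_nents(2 + 1) = 4,
	 * so 8 entries are allocated in total.
	 */
	printf("%d\n", size_sec4_sg(1 + 3, 16, 0, 2));
	return 0;
}
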
@@ -1740,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
 	/* Make sure IV is located in a DMAable area */
 	if (ivsize) {
-		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
 		memcpy(iv, req->iv, ivsize);
 
-		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(jrdev, iv_dma)) {
 			dev_err(jrdev, "unable to map IV\n");
 			caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1755,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
-				   !!ivsize, 0);
+		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+			      !!ivsize, 0);
 
-	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, req->cryptlen,
-				   edesc->sec4_sg + dst_sg_idx, 0);
-	}
+	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+			      dst_sg_idx, 0);
+
+	if (ivsize)
+		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+				   mapped_dst_nents, iv_dma, ivsize, 0);
+
+	if (ivsize || mapped_dst_nents > 1)
+		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+				    mapped_dst_nents);
 
 	if (sec4_sg_bytes) {
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
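
A toy model of how the tables end up laid out ([IV, src][dst, IV], both IV entries pointing at the same buffer, and the final output entry flagged as last); struct toy_sg_entry, the segment counts and the DMA addresses are made up for illustration and do not match the real sec4_sg_entry layout:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct sec4_sg_entry; the real HW layout differs. */
struct toy_sg_entry {
	uint64_t addr;
	uint32_t len;
	int last;   /* models the bit set by sg_to_sec4_set_last() */
};

int main(void)
{
	/* Example: src != dst, ivsize = 16, 2 src and 2 dst segments. */
	int ivsize = 16, mapped_src_nents = 2, mapped_dst_nents = 2;
	int dst_sg_idx = 1 + mapped_src_nents;      /* input table: [IV, src] */
	int nents = dst_sg_idx + mapped_dst_nents + 1;
	struct toy_sg_entry tbl[8] = { { 0 } };
	uint64_t iv_dma = 0x1000;                   /* fake DMA addresses */
	int i;

	tbl[0].addr = iv_dma;                       /* input IV entry */
	tbl[0].len = ivsize;
	for (i = 0; i < mapped_src_nents; i++) {    /* source data entries */
		tbl[1 + i].addr = 0x2000 + 0x100 * i;
		tbl[1 + i].len = 0x100;
	}
	for (i = 0; i < mapped_dst_nents; i++) {    /* destination data entries */
		tbl[dst_sg_idx + i].addr = 0x3000 + 0x100 * i;
		tbl[dst_sg_idx + i].len = 0x100;
	}
	tbl[nents - 1].addr = iv_dma;               /* output IV: same buffer */
	tbl[nents - 1].len = ivsize;
	tbl[nents - 1].last = 1;                    /* HW stops reading here */

	for (i = 0; i < nents; i++)
		printf("[%d] addr=0x%llx len=%u%s\n", i,
		       (unsigned long long)tbl[i].addr, tbl[i].len,
		       tbl[i].last ? " (last)" : "");
	return 0;
}
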
@@ -1824,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	int ret = 0;
@@ -1834,14 +1856,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block when running in CBC mode.
-	 */
-	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
-		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
-					 ivsize, ivsize, 0);
-
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
 	desc = edesc->hw_desc;
0 commit comments