diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-31 09:45:28 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-31 09:45:28 -0700 |
| commit | 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec (patch) | |
| tree | 504034f60c5510ebeb2c0d1d93a68fba999f2896 /drivers/crypto/ccree/cc_hash.c | |
| parent | Merge tag 'ipe-pr-20250728' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff) | |
| parent | crypto: keembay - Use min() to simplify ocs_create_linked_list_from_sg() (diff) | |
| download | linux-44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec.tar.gz linux-44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec.zip | |
Merge tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Allow hash drivers without fallbacks (e.g., hardware key)
Algorithms:
- Add hmac hardware key support (phmac) on s390
- Re-enable sha384 in FIPS mode
- Disable sha1 in FIPS mode
- Convert zstd to acomp
Drivers:
- Lower priority of qat skcipher and aead
- Convert aspeed to partial block API
- Add iMX8QXP support in caam
- Add rate limiting support for GEN6 devices in qat
- Enable telemetry for GEN6 devices in qat
- Implement full backlog mode for hisilicon/sec2"
* tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
crypto: keembay - Use min() to simplify ocs_create_linked_list_from_sg()
crypto: hisilicon/hpre - fix dma unmap sequence
crypto: qat - make adf_dev_autoreset() static
crypto: ccp - reduce stack usage in ccp_run_aes_gcm_cmd
crypto: qat - refactor ring-related debug functions
crypto: qat - fix seq_file position update in adf_ring_next()
crypto: qat - fix DMA direction for compression on GEN2 devices
crypto: jitter - replace ARRAY_SIZE definition with header include
crypto: engine - remove {prepare,unprepare}_crypt_hardware callbacks
crypto: engine - remove request batching support
crypto: qat - flush misc workqueue during device shutdown
crypto: qat - enable rate limiting feature for GEN6 devices
crypto: qat - add compression slice count for rate limiting
crypto: qat - add get_svc_slice_cnt() in device data structure
crypto: qat - add adf_rl_get_num_svc_aes() in rate limiting
crypto: qat - relocate service related functions
crypto: qat - consolidate service enums
crypto: qat - add decompression service for rate limiting
crypto: qat - validate service in rate limiting sysfs api
crypto: hisilicon/sec2 - implement full backlog mode for sec
...
Diffstat (limited to 'drivers/crypto/ccree/cc_hash.c')
| -rw-r--r-- | drivers/crypto/ccree/cc_hash.c | 30 |
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index d0612bec4d58..c6d085c8ff79 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -125,7 +125,7 @@ static int cc_map_result(struct device *dev, struct ahash_req_ctx *state, digestsize); return -ENOMEM; } - dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n", digestsize, state->digest_result_buff, &state->digest_result_dma_addr); @@ -184,11 +184,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state, dma_map_single(dev, state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_buff_dma_addr)) { - dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n", + dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n", ctx->inter_digestsize, state->digest_buff); return -EINVAL; } - dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n", ctx->inter_digestsize, state->digest_buff, &state->digest_buff_dma_addr); @@ -197,11 +197,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state, dma_map_single(dev, state->digest_bytes_len, HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) { - dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n", + dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n", HASH_MAX_LEN_SIZE, state->digest_bytes_len); goto unmap_digest_buf; } - dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n", HASH_MAX_LEN_SIZE, state->digest_bytes_len, &state->digest_bytes_len_dma_addr); } @@ -212,12 +212,12 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, 
state->opad_digest_dma_addr)) { - dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n", + dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n", ctx->inter_digestsize, state->opad_digest_buff); goto unmap_digest_len; } - dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n", ctx->inter_digestsize, state->opad_digest_buff, &state->opad_digest_dma_addr); } @@ -272,7 +272,7 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state, if (state->digest_result_dma_addr) { dma_unmap_single(dev, state->digest_result_dma_addr, digestsize, DMA_BIDIRECTIONAL); - dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n", + dev_dbg(dev, "unmpa digest result buffer va (%p) pa (%pad) len %u\n", state->digest_result_buff, &state->digest_result_dma_addr, digestsize); memcpy(result, state->digest_result_buff, digestsize); @@ -287,7 +287,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); - dev_dbg(dev, "req=%pK\n", req); + dev_dbg(dev, "req=%p\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ @@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err) struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); - dev_dbg(dev, "req=%pK\n", req); + dev_dbg(dev, "req=%p\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ @@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err) struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); - dev_dbg(dev, "req=%pK\n", req); + dev_dbg(dev, "req=%p\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ @@ -1077,11 +1077,11 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx) 
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) { - dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n", + dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n", sizeof(ctx->digest_buff), ctx->digest_buff); goto fail; } - dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n", sizeof(ctx->digest_buff), ctx->digest_buff, &ctx->digest_buff_dma_addr); @@ -1090,12 +1090,12 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx) sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) { - dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n", + dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n", sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff); goto fail; } - dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n", + dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n", sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff, &ctx->opad_tmp_keys_dma_addr); |
