crypto: rockchip: add procfs info

Running `cat /proc/rkcrypto` will show crypto debug info.

Change-Id: I1a295ccf5b8c71036892e866b46d5427d5e53e93
Signed-off-by: Lin Jinhan <troy.lin@rock-chips.com>
This commit is contained in:
Lin Jinhan
2022-07-21 15:03:28 +08:00
parent 9a4df920b5
commit 965f3dc100
5 changed files with 237 additions and 4 deletions
+2 -1
View File
@@ -3,7 +3,8 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk_crypto_core.o \
rk_crypto_utils.o \
rk_crypto_ahash_utils.o \
rk_crypto_skcipher_utils.o
rk_crypto_skcipher_utils.o \
procfs.o
rk_crypto-$(CONFIG_CRYPTO_DEV_ROCKCHIP_V1) += \
rk_crypto_v1.o \
+160
View File
@@ -0,0 +1,160 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Rockchip Electronics Co., Ltd. */
#include <linux/clk.h>
#include <linux/proc_fs.h>
#include <linux/sem.h>
#include <linux/seq_file.h>
#include "procfs.h"
#ifdef CONFIG_PROC_FS
/* Human-readable names for enum alg_type, indexed by the enum value. */
static const char *alg_type2name[ALG_TYPE_MAX] = {
	[ALG_TYPE_HASH] = "HASH",
	[ALG_TYPE_HMAC] = "HMAC",
	[ALG_TYPE_CIPHER] = "CIPHER",
	[ALG_TYPE_ASYM] = "ASYM",
	[ALG_TYPE_AEAD] = "AEAD",
};
/*
 * Print the id and current rate of every clock in the device's clk bulk.
 *
 * Fix: clk_get_rate() returns unsigned long, so the rate must be printed
 * with %lu; the previous %ld was a signed/unsigned format mismatch.
 */
static void crypto_show_clock(struct seq_file *p, struct clk_bulk_data *clk_bulks, int clks_num)
{
	int i;

	seq_puts(p, "clock info:\n");

	for (i = 0; i < clks_num; i++)
		seq_printf(p, "\t%-10s %lu\n", clk_bulks[i].id,
			   clk_get_rate(clk_bulks[i].clk));

	seq_puts(p, "\n");
}
static void crypto_show_stat(struct seq_file *p, struct rk_crypto_stat *stat)
{
/* show statistic info */
seq_puts(p, "Statistic info:\n");
seq_printf(p, "\tbusy_cnt : %llu\n", stat->busy_cnt);
seq_printf(p, "\tequeue_cnt : %llu\n", stat->equeue_cnt);
seq_printf(p, "\tdequeue_cnt : %llu\n", stat->dequeue_cnt);
seq_printf(p, "\tdone_cnt : %llu\n", stat->done_cnt);
seq_printf(p, "\tcomplete_cnt : %llu\n", stat->complete_cnt);
seq_printf(p, "\tfake_cnt : %llu\n", stat->fake_cnt);
seq_printf(p, "\tirq_cnt : %llu\n", stat->irq_cnt);
seq_printf(p, "\ttimeout_cnt : %llu\n", stat->timeout_cnt);
seq_printf(p, "\terror_cnt : %llu\n", stat->error_cnt);
seq_printf(p, "\tlast_error : %d\n", stat->last_error);
seq_puts(p, "\n");
}
/* Report current/maximum queue depth, the high-water mark and busy state. */
static void crypto_show_queue_info(struct seq_file *p, struct rk_crypto_dev *rk_dev)
{
	unsigned long irq_flags;
	u32 cur_len, cap;
	bool is_busy;

	/*
	 * Snapshot all three values under the device lock so they are
	 * mutually consistent before formatting.
	 */
	spin_lock_irqsave(&rk_dev->lock, irq_flags);
	cur_len = rk_dev->queue.qlen;
	cap = rk_dev->queue.max_qlen;
	is_busy = rk_dev->busy;
	spin_unlock_irqrestore(&rk_dev->lock, irq_flags);

	seq_printf(p, "Crypto queue usage [%u/%u], ever_max = %llu, status: %s\n",
		   cur_len, cap, rk_dev->stat.ever_queue_max,
		   is_busy ? "busy" : "idle");
	seq_puts(p, "\n");
}
/* Print one section header plus every registered algorithm of @type. */
static void crypto_show_valid_algo_single(struct seq_file *p, enum alg_type type,
					  struct rk_crypto_algt **algs, u32 algs_num)
{
	u32 i;

	seq_printf(p, "\t%s:\n", alg_type2name[type]);

	for (i = 0; i < algs_num; i++) {
		struct rk_crypto_algt *alg = algs[i];

		/* Skip entries that never registered or belong to another type. */
		if (alg->valid_flag && alg->type == type)
			seq_printf(p, "\t\t%s\n", alg->name);
	}

	seq_puts(p, "\n");
}
static void crypto_show_valid_algos(struct seq_file *p, struct rk_crypto_soc_data *soc_data)
{
u32 algs_num = 0;
struct rk_crypto_algt **algs;
seq_puts(p, "Valid algorithms:\n");
algs = soc_data->hw_get_algts(&algs_num);
if (!algs || algs_num == 0)
return;
crypto_show_valid_algo_single(p, ALG_TYPE_CIPHER, algs, algs_num);
crypto_show_valid_algo_single(p, ALG_TYPE_AEAD, algs, algs_num);
crypto_show_valid_algo_single(p, ALG_TYPE_HASH, algs, algs_num);
crypto_show_valid_algo_single(p, ALG_TYPE_HMAC, algs, algs_num);
crypto_show_valid_algo_single(p, ALG_TYPE_ASYM, algs, algs_num);
}
/*
 * seq_file show callback: render the whole /proc report — version,
 * aes192 fallback flag, clocks, algorithms, statistics and queue state.
 */
static int crypto_show_all(struct seq_file *p, void *v)
{
	struct rk_crypto_dev *rk_dev = p->private;
	struct rk_crypto_soc_data *soc_data = rk_dev->soc_data;

	seq_printf(p, "Rockchip Crypto Version: %s\n\n",
		   soc_data->crypto_ver);
	seq_printf(p, "use_soft_aes192 : %s\n\n", soc_data->use_soft_aes192 ? "true" : "false");

	crypto_show_clock(p, rk_dev->clk_bulks, rk_dev->clks_num);
	crypto_show_valid_algos(p, soc_data);
	crypto_show_stat(p, &rk_dev->stat);
	crypto_show_queue_info(p, rk_dev);

	return 0;
}
/* proc open: bind the device handle stored by proc_create_data(). */
static int crypto_open(struct inode *inode, struct file *file)
{
	return single_open(file, crypto_show_all, PDE_DATA(inode));
}
/* seq_file-backed proc_ops: each open starts a single_open() session. */
static const struct proc_ops ops = {
	.proc_open = crypto_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};
int rkcrypto_proc_init(struct rk_crypto_dev *rk_dev)
{
rk_dev->procfs = proc_create_data(rk_dev->name, 0, NULL, &ops, rk_dev);
if (!rk_dev->procfs)
return -EINVAL;
return 0;
}
/* Tear down the proc entry; harmless if init failed or already cleaned. */
void rkcrypto_proc_cleanup(struct rk_crypto_dev *rk_dev)
{
	if (!rk_dev->procfs)
		return;

	remove_proc_entry(rk_dev->name, NULL);
	rk_dev->procfs = NULL;
}
#endif /* CONFIG_PROC_FS */
+23
View File
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2022 Rockchip Electronics Co., Ltd. */
/*
 * /proc debug entry for the Rockchip crypto driver. When CONFIG_PROC_FS
 * is disabled, the helpers compile away to no-op stubs so callers never
 * need their own ifdefs.
 */
#ifndef _RKCRYPTO_PROCFS_H
#define _RKCRYPTO_PROCFS_H
#include "rk_crypto_core.h"
#ifdef CONFIG_PROC_FS
/* Create the per-device proc entry; returns 0 on success, negative on error. */
int rkcrypto_proc_init(struct rk_crypto_dev *dev);
/* Remove the entry created by rkcrypto_proc_init(); safe if init failed. */
void rkcrypto_proc_cleanup(struct rk_crypto_dev *dev);
#else
/* CONFIG_PROC_FS disabled: stubs that do nothing. */
static inline int rkcrypto_proc_init(struct rk_crypto_dev *dev)
{
	return 0;
}
static inline void rkcrypto_proc_cleanup(struct rk_crypto_dev *dev)
{
}
#endif
#endif
+33 -3
View File
@@ -26,6 +26,9 @@
#include "rk_crypto_v2.h"
#include "rk_crypto_v3.h"
#include "cryptodev_linux/rk_cryptodev.h"
#include "procfs.h"
#define CRYPTO_NAME "rkcrypto"
static struct rk_alg_ctx *rk_alg_ctx_cast(struct crypto_async_request *async_req)
{
@@ -271,6 +274,7 @@ static void rk_crypto_irq_timer_handle(struct timer_list *t)
struct rk_crypto_dev *rk_dev = from_timer(rk_dev, t, timer);
rk_dev->err = -ETIMEDOUT;
rk_dev->stat.timeout_cnt++;
tasklet_schedule(&rk_dev->done_task);
}
@@ -281,6 +285,8 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
spin_lock(&rk_dev->lock);
rk_dev->stat.irq_cnt++;
if (alg_ctx->ops.irq_handle)
alg_ctx->ops.irq_handle(irq, dev_id);
@@ -310,6 +316,7 @@ static int rk_start_op(struct rk_crypto_dev *rk_dev)
/* fake calculations are used to trigger the Done Task */
if (alg_ctx->total == 0) {
CRYPTO_TRACE("fake done_task");
rk_dev->stat.fake_cnt++;
tasklet_schedule(&rk_dev->done_task);
}
@@ -333,14 +340,19 @@ static void rk_complete_op(struct rk_crypto_dev *rk_dev, int err)
disable_irq(rk_dev->irq);
del_timer(&rk_dev->timer);
rk_dev->stat.complete_cnt++;
if (err) {
rk_dev->stat.error_cnt++;
rk_dev->stat.last_error = err;
dev_err(rk_dev->dev, "complete_op err = %d\n", err);
}
if (!alg_ctx || !alg_ctx->ops.complete)
return;
alg_ctx->ops.complete(rk_dev->async_req, err);
if (err)
dev_err(rk_dev->dev, "complete_op err = %d\n", err);
tasklet_schedule(&rk_dev->queue_task);
}
@@ -352,10 +364,17 @@ static int rk_crypto_enqueue(struct rk_crypto_dev *rk_dev,
spin_lock_irqsave(&rk_dev->lock, flags);
ret = crypto_enqueue_request(&rk_dev->queue, async_req);
if (rk_dev->queue.qlen > rk_dev->stat.ever_queue_max)
rk_dev->stat.ever_queue_max = rk_dev->queue.qlen;
if (rk_dev->busy) {
rk_dev->stat.busy_cnt++;
spin_unlock_irqrestore(&rk_dev->lock, flags);
return ret;
}
rk_dev->stat.equeue_cnt++;
rk_dev->busy = true;
spin_unlock_irqrestore(&rk_dev->lock, flags);
tasklet_schedule(&rk_dev->queue_task);
@@ -379,6 +398,7 @@ static void rk_crypto_queue_task_cb(unsigned long data)
spin_unlock_irqrestore(&rk_dev->lock, flags);
return;
}
rk_dev->stat.dequeue_cnt++;
spin_unlock_irqrestore(&rk_dev->lock, flags);
if (backlog) {
@@ -397,6 +417,8 @@ static void rk_crypto_done_task_cb(unsigned long data)
struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
rk_dev->stat.done_cnt++;
if (rk_dev->err)
goto exit;
@@ -500,6 +522,8 @@ static int rk_crypto_register(struct rk_crypto_dev *rk_dev)
if (err)
goto err_cipher_algs;
tmp_algs->valid_flag = true;
CRYPTO_TRACE("%s register OK!!!\n", *algs_name);
}
@@ -691,6 +715,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
goto err_crypto;
}
rk_dev->name = CRYPTO_NAME;
match = of_match_node(crypto_of_id_table, np);
soc_data = (struct rk_crypto_soc_data *)match->data;
rk_dev->soc_data = soc_data;
@@ -818,6 +844,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
rk_cryptodev_register_dev(rk_dev->dev, soc_data->crypto_ver);
rkcrypto_proc_init(rk_dev);
dev_info(dev, "%s Accelerator successfully registered\n", soc_data->crypto_ver);
return 0;
@@ -832,6 +860,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
{
struct rk_crypto_dev *rk_dev = platform_get_drvdata(pdev);
rkcrypto_proc_cleanup(rk_dev);
rk_cryptodev_unregister_dev(rk_dev->dev);
del_timer_sync(&rk_dev->timer);
+19
View File
@@ -48,6 +48,20 @@
#define RK_FLAG_FINAL BIT(0)
#define RK_FLAG_UPDATE BIT(1)
struct rk_crypto_stat {
unsigned long long busy_cnt;
unsigned long long equeue_cnt;
unsigned long long dequeue_cnt;
unsigned long long complete_cnt;
unsigned long long done_cnt;
unsigned long long fake_cnt;
unsigned long long irq_cnt;
unsigned long long timeout_cnt;
unsigned long long error_cnt;
unsigned long long ever_queue_max;
int last_error;
};
struct rk_crypto_dev {
struct device *dev;
struct reset_control *rst;
@@ -62,6 +76,9 @@ struct rk_crypto_dev {
struct rk_crypto_soc_data *soc_data;
int clks_num;
struct clk_bulk_data *clk_bulks;
const char *name;
struct proc_dir_entry *procfs;
struct rk_crypto_stat stat;
/* device lock */
spinlock_t lock;
@@ -208,6 +225,7 @@ enum alg_type {
ALG_TYPE_CIPHER,
ALG_TYPE_ASYM,
ALG_TYPE_AEAD,
ALG_TYPE_MAX,
};
struct rk_crypto_algt {
@@ -223,6 +241,7 @@ struct rk_crypto_algt {
u32 mode;
char *name;
bool use_soft_aes192;
bool valid_flag;
};
enum rk_hash_algo {