Merge tag 'nand/for-6.15' into mtd/next

* Raw NAND changes:
i.MX8 and i.MX31 now have their own compatibles, the Qcom driver got
cleaned up, and the Broadcom driver got fixed.

* SPI NAND changes:
Two main features have been added:
- OTP support has been added, and the ESMT and Micron manufacturer drivers
  implement it.
- Read retry support has been added, and the Macronix manufacturer driver
  implements it.

There is also a bunch of minor improvements and fixes in drivers and
bindings.
This commit is contained in:
Miquel Raynal
2025-03-26 17:49:15 +01:00
15 changed files with 893 additions and 57 deletions
@@ -42,7 +42,7 @@ required:
- clock-names
- interrupts
unevaluatedProperties: true
unevaluatedProperties: false
examples:
- |
@@ -29,7 +29,14 @@ properties:
- enum:
- fsl,imx8mm-gpmi-nand
- fsl,imx8mn-gpmi-nand
- fsl,imx8mp-gpmi-nand
- fsl,imx8mq-gpmi-nand
- const: fsl,imx7d-gpmi-nand
- items:
- enum:
- fsl,imx8dxl-gpmi-nand
- fsl,imx8qm-gpmi-nand
- const: fsl,imx8qxp-gpmi-nand
reg:
items:
@@ -14,8 +14,12 @@ allOf:
properties:
compatible:
const: fsl,imx27-nand
oneOf:
- const: fsl,imx27-nand
- items:
- enum:
- fsl,imx31-nand
- const: fsl,imx27-nand
reg:
maxItems: 1
+1 -1
View File
@@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev)
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
nand_reset_op(chip);
nand_reset(chip, 0);
}
return 0;
+2 -2
View File
@@ -1833,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
ddrbuf = kzalloc(len * 2, GFP_KERNEL);
ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
@@ -2203,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
* twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
ddrbuf = kzalloc(len * 2, GFP_KERNEL);
ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
+18 -18
View File
@@ -165,9 +165,9 @@ static void nandc_set_read_loc_first(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
((read_size) << READ_LOCATION_SIZE) |
((is_last_read_loc) << READ_LOCATION_LAST));
u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -197,9 +197,9 @@ static void nandc_set_read_loc_last(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
((read_size) << READ_LOCATION_SIZE) |
((is_last_read_loc) << READ_LOCATION_LAST));
u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -271,14 +271,14 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
if (host->use_ecc) {
cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
(num_cw - 1) << CW_PER_PAGE);
cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) |
FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1);
ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
(num_cw - 1) << CW_PER_PAGE);
cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) |
FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1_raw);
ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
@@ -882,12 +882,12 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
host->cw_data << UD_SIZE_BYTES;
host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) |
FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data);
host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1);
}
/* implements ecc->read_page() */
@@ -1531,7 +1531,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
if (!nandc->props->qpic_version2)
host->ecc_buf_cfg = 0x203 << NUM_STEPS;
host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -1817,7 +1817,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->regs->addr0 = q_op.addr1_reg;
nandc->regs->addr1 = q_op.addr2_reg;
nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK);
nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
} else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
@@ -1900,8 +1900,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_version2) {
nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
| NAND_CMD_PARAM << READ_ADDR);
nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) |
FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM));
}
nandc->regs->exec = cpu_to_le32(1);
+2 -1
View File
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs := core.o otp.o
spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
+75 -10
View File
@@ -534,10 +534,20 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_wait(struct spinand_device *spinand,
unsigned long initial_delay_us,
unsigned long poll_delay_us,
u8 *s)
/**
* spinand_wait() - Poll memory device status
* @spinand: the spinand device
* @initial_delay_us: delay in us before starting to poll
* @poll_delay_us: time to sleep between reads in us
* @s: the pointer to variable to store the value of REG_STATUS
*
* This function polls a status register (REG_STATUS) and returns when
* the STATUS_READY bit is 0 or when the timeout has expired.
*
* Return: 0 on success, a negative error code otherwise.
*/
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
@@ -604,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
static int spinand_read_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
/**
* spinand_read_page() - Read a page
* @spinand: the spinand device
* @req: the I/O request
*
* Return: 0 or a positive number of bitflips corrected on success.
* A negative error code otherwise.
*/
int spinand_read_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -635,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
/**
* spinand_write_page() - Write a page
* @spinand: the spinand device
* @req: the I/O request
*
 * Return: 0 on success, a negative error code otherwise.
*/
int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -674,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
unsigned int retry_mode = 0;
int ret;
old_stats = mtd->ecc_stats;
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
@@ -690,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
if (ret == -EBADMSG)
if (ret == -EBADMSG && spinand->set_read_retry) {
if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
ret = spinand->set_read_retry(spinand, retry_mode);
if (ret < 0) {
spinand->set_read_retry(spinand, 0);
return ret;
}
/* Reset ecc_stats; retry */
mtd->ecc_stats = old_stats;
goto read_retry;
} else {
/* No more retry modes; real failure */
ecc_failed = true;
}
} else if (ret == -EBADMSG) {
ecc_failed = true;
else
} else {
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
/* Reset to retry mode 0 */
if (retry_mode) {
retry_mode = 0;
ret = spinand->set_read_retry(spinand, retry_mode);
if (ret < 0)
return ret;
}
}
if (ecc_failed && !ret)
@@ -1292,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
spinand->set_cont_read = table[i].set_cont_read;
spinand->fact_otp = &table[i].fact_otp;
spinand->user_otp = &table[i].user_otp;
spinand->read_retries = table[i].read_retries;
spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1478,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
ret = spinand_set_mtd_otp_ops(spinand);
if (ret)
goto err_cleanup_ecc_engine;
}
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
+88 -2
View File
@@ -8,10 +8,15 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#include <linux/spi/spi-mem.h>
/* ESMT uses GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
@@ -102,6 +107,83 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.free = f50l1g41lb_ooblayout_free,
};
static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen, bool user)
{
if (len < sizeof(*buf))
return -EINVAL;
buf->locked = 0;
buf->start = 0;
buf->length = user ? spinand_user_otp_size(spinand) :
spinand_fact_otp_size(spinand);
*retlen = sizeof(*buf);
return 0;
}
static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen)
{
return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
}
static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen)
{
return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
}
static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
u8 status;
int ret;
ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
ESMT_F50L1G41LB_CFG_OTP_LOCK);
if (!ret)
return ret;
ret = spi_mem_exec_op(spinand->spimem, &write_op);
if (!ret)
goto out;
ret = spi_mem_exec_op(spinand->spimem, &exec_op);
if (!ret)
goto out;
ret = spinand_wait(spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_PROG_FAILED))
ret = -EIO;
out:
if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
dev_warn(&spinand_to_mtd(spinand)->dev,
"Can not disable OTP mode\n");
ret = -EIO;
}
return ret;
}
static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
.info = f50l1g41lb_user_otp_info,
.lock = f50l1g41lb_otp_lock,
.read = spinand_user_otp_read,
.write = spinand_user_otp_write,
};
static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
.info = f50l1g41lb_fact_otp_info,
.read = spinand_fact_otp_read,
};
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
@@ -112,7 +194,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
0x7f, 0x7f),
@@ -122,7 +206,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D2G41KA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
0x7f, 0x7f),
+64 -15
View File
@@ -14,6 +14,8 @@
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
#define MACRONIX_CFG_CONT_READ BIT(2)
#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
#define MACRONIX_NUM_READ_RETRY_MODES 5
#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
@@ -136,6 +138,23 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
return 0;
}
/**
 * macronix_set_read_retry() - Set the retry mode
* @spinand: SPI NAND device
* @retry_mode: Specify which retry mode to set
*
* Return: 0 on success, a negative error code otherwise.
*/
static int macronix_set_read_retry(struct spinand_device *spinand,
unsigned int retry_mode)
{
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
spinand->scratchbuf);
*spinand->scratchbuf = retry_mode;
return spi_mem_exec_op(spinand->spimem, &op);
}
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -168,7 +187,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_CONT_READ(macronix_set_cont_read),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
@@ -179,7 +200,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_CONT_READ(macronix_set_cont_read),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -188,7 +211,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
@@ -198,7 +223,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -207,7 +234,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
@@ -217,7 +246,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -226,7 +257,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -270,7 +303,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -280,7 +315,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -291,7 +328,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_CONT_READ(macronix_set_cont_read),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -314,7 +353,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -324,7 +365,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -335,7 +378,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_CONT_READ(macronix_set_cont_read),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -366,7 +411,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -377,7 +424,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_CONT_READ(macronix_set_cont_read),
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+134 -1
View File
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
@@ -28,6 +30,10 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
{
size_t bufsize = spinand_otp_page_size(spinand);
size_t retlen;
u8 *buf;
int ret;
buf = kmalloc(bufsize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = spinand_upd_cfg(spinand,
MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
if (ret)
goto free_buf;
ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
0)) {
dev_warn(&spinand_to_mtd(spinand)->dev,
"Can not disable OTP mode\n");
ret = -EIO;
}
if (ret)
goto free_buf;
/* If all zeros, then the OTP area is locked. */
if (mem_is_zero(buf, bufsize))
ret = 1;
free_buf:
kfree(buf);
return ret;
}
static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen,
bool user)
{
int locked;
if (len < sizeof(*buf))
return -EINVAL;
locked = mt29f2g01abagd_otp_is_locked(spinand);
if (locked < 0)
return locked;
buf->locked = locked;
buf->start = 0;
buf->length = user ? spinand_user_otp_size(spinand) :
spinand_fact_otp_size(spinand);
*retlen = sizeof(*buf);
return 0;
}
static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
size_t len, struct otp_info *buf,
size_t *retlen)
{
return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
}
static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
size_t len, struct otp_info *buf,
size_t *retlen)
{
return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
}
static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
u8 status;
int ret;
ret = spinand_upd_cfg(spinand,
MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
if (!ret)
return ret;
ret = spi_mem_exec_op(spinand->spimem, &write_op);
if (!ret)
goto out;
ret = spi_mem_exec_op(spinand->spimem, &exec_op);
if (!ret)
goto out;
ret = spinand_wait(spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_PROG_FAILED))
ret = -EIO;
out:
if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
dev_warn(&spinand_to_mtd(spinand)->dev,
"Can not disable OTP mode\n");
ret = -EIO;
}
return ret;
}
static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
.info = mt29f2g01abagd_user_otp_info,
.lock = mt29f2g01abagd_otp_lock,
.read = spinand_user_otp_read,
.write = spinand_user_otp_write,
};
static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
.info = mt29f2g01abagd_fact_otp_info,
.read = spinand_fact_otp_read,
};
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
micron_8_ecc_get_status),
SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+362
View File
@@ -0,0 +1,362 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2025, SaluteDevices. All Rights Reserved.
*
* Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
*/
#include <linux/mtd/mtd.h>
#include <linux/mtd/spinand.h>
/**
* spinand_otp_page_size() - Get SPI-NAND OTP page size
* @spinand: the spinand device
*
* Return: the OTP page size.
*/
size_t spinand_otp_page_size(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
}
static size_t spinand_otp_size(struct spinand_device *spinand,
const struct spinand_otp_layout *layout)
{
return layout->npages * spinand_otp_page_size(spinand);
}
/**
* spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
* @spinand: the spinand device
*
* Return: the OTP size.
*/
size_t spinand_fact_otp_size(struct spinand_device *spinand)
{
return spinand_otp_size(spinand, &spinand->fact_otp->layout);
}
/**
* spinand_user_otp_size() - Get SPI-NAND user OTP area size
* @spinand: the spinand device
*
* Return: the OTP size.
*/
size_t spinand_user_otp_size(struct spinand_device *spinand)
{
return spinand_otp_size(spinand, &spinand->user_otp->layout);
}
static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
size_t len,
const struct spinand_otp_layout *layout)
{
if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
return -EINVAL;
return 0;
}
static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
loff_t ofs, size_t len)
{
return spinand_otp_check_bounds(spinand, ofs, len,
&spinand->user_otp->layout);
}
static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, u8 *buf, bool is_write,
const struct spinand_otp_layout *layout)
{
struct nand_page_io_req req = {};
unsigned long long page;
size_t copied = 0;
size_t otp_pagesize = spinand_otp_page_size(spinand);
int ret;
if (!len)
return 0;
ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
if (ret)
return ret;
ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
if (ret)
return ret;
page = ofs;
req.dataoffs = do_div(page, otp_pagesize);
req.pos.page = page + layout->start_page;
req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
req.mode = MTD_OPS_RAW;
req.databuf.in = buf;
while (copied < len) {
req.datalen = min_t(unsigned int,
otp_pagesize - req.dataoffs,
len - copied);
if (is_write)
ret = spinand_write_page(spinand, &req);
else
ret = spinand_read_page(spinand, &req);
if (ret < 0)
break;
req.databuf.in += req.datalen;
req.pos.page++;
req.dataoffs = 0;
copied += req.datalen;
}
*retlen = copied;
if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
dev_warn(&spinand_to_mtd(spinand)->dev,
"Can not disable OTP mode\n");
ret = -EIO;
}
return ret;
}
/**
* spinand_fact_otp_read() - Read from OTP area
* @spinand: the spinand device
* @ofs: the offset to read
* @len: the number of data bytes to read
* @retlen: the pointer to variable to store the number of read bytes
* @buf: the buffer to store the read data
*
* Return: 0 on success, an error code otherwise.
*/
int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, u8 *buf)
{
return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
&spinand->fact_otp->layout);
}
/**
* spinand_user_otp_read() - Read from OTP area
* @spinand: the spinand device
* @ofs: the offset to read
* @len: the number of data bytes to read
* @retlen: the pointer to variable to store the number of read bytes
* @buf: the buffer to store the read data
*
* Return: 0 on success, an error code otherwise.
*/
int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, u8 *buf)
{
return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
&spinand->user_otp->layout);
}
/**
* spinand_user_otp_write() - Write to OTP area
* @spinand: the spinand device
* @ofs: the offset to write to
* @len: the number of bytes to write
* @retlen: the pointer to variable to store the number of written bytes
* @buf: the buffer with data to write
*
* Return: 0 on success, an error code otherwise.
*/
int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, const u8 *buf)
{
return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
&spinand->user_otp->layout);
}
static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf,
bool is_fact)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
int ret;
*retlen = 0;
mutex_lock(&spinand->lock);
if (is_fact)
ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
else
ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
}
static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
}
static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u8 *buf, bool is_fact)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
int ret;
*retlen = 0;
if (!len)
return 0;
ret = spinand_otp_check_bounds(spinand, ofs, len,
is_fact ? &spinand->fact_otp->layout :
&spinand->user_otp->layout);
if (ret)
return ret;
mutex_lock(&spinand->lock);
if (is_fact)
ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
buf);
else
ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
buf);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
size_t len, size_t *retlen, u8 *buf)
{
return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
}
static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
size_t len, size_t *retlen, u8 *buf)
{
return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
}
/*
 * MTD ->_write_user_prot_reg() hook: validate the request against the user
 * OTP area, then write through the manufacturer ops under the chip lock.
 */
static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
				      size_t len, size_t *retlen,
				      const u8 *buf)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	const struct spinand_user_otp_ops *otp_ops = spinand->user_otp->ops;
	int ret;

	*retlen = 0;

	/* Nothing to do for a zero-length write. */
	if (!len)
		return 0;

	ret = spinand_user_otp_check_bounds(spinand, ofs, len);
	if (ret)
		return ret;

	mutex_lock(&spinand->lock);
	ret = otp_ops->write(spinand, ofs, len, retlen, buf);
	mutex_unlock(&spinand->lock);

	return ret;
}
/*
 * MTD ->_erase_user_prot_reg() hook: erase a user OTP region through the
 * manufacturer ops, after bounds checking, under the chip lock.
 */
static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
				      size_t len)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	const struct spinand_user_otp_ops *otp_ops = spinand->user_otp->ops;
	int ret;

	/* A zero-length erase is a no-op. */
	if (!len)
		return 0;

	ret = spinand_user_otp_check_bounds(spinand, ofs, len);
	if (ret)
		return ret;

	mutex_lock(&spinand->lock);
	ret = otp_ops->erase(spinand, ofs, len);
	mutex_unlock(&spinand->lock);

	return ret;
}
/*
 * MTD ->_lock_user_prot_reg() hook: permanently lock a user OTP region
 * through the manufacturer ops, after bounds checking, under the chip lock.
 */
static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
				     size_t len)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	const struct spinand_user_otp_ops *otp_ops = spinand->user_otp->ops;
	int ret;

	/* A zero-length lock request is a no-op. */
	if (!len)
		return 0;

	ret = spinand_user_otp_check_bounds(spinand, ofs, len);
	if (ret)
		return ret;

	mutex_lock(&spinand->lock);
	ret = otp_ops->lock(spinand, ofs, len);
	mutex_unlock(&spinand->lock);

	return ret;
}
/**
 * spinand_set_mtd_otp_ops() - Setup OTP methods
 * @spinand: the spinand device
 *
 * Wire the MTD OTP callbacks to the SPI NAND OTP helpers, registering only
 * the operations the manufacturer driver actually implements.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	const struct spinand_user_otp_ops *uops = spinand->user_otp->ops;
	const struct spinand_fact_otp_ops *fops = spinand->fact_otp->ops;

	/* At least one OTP area must provide operations. */
	if (!uops && !fops)
		return -EINVAL;

	if (fops) {
		if (fops->info)
			mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
		if (fops->read)
			mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
	}

	if (uops) {
		if (uops->info)
			mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
		if (uops->read)
			mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
		if (uops->write)
			mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
		if (uops->lock)
			mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
		if (uops->erase)
			mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
	}

	return 0;
}
+5 -1
View File
@@ -108,7 +108,7 @@
#define ECC_FORCE_CLK_OPEN BIT(30)
/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0
#define READ_ADDR_MASK GENMASK(7, 0)
/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
@@ -119,6 +119,7 @@
/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0
#define NUM_STEPS_MASK GENMASK(9, 0)
/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
@@ -139,8 +140,11 @@
/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_OFFSET_MASK GENMASK(9, 0)
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_SIZE_MASK GENMASK(25, 16)
#define READ_LOCATION_LAST 31
#define READ_LOCATION_LAST_MASK BIT(31)
/* Version Mask */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
+1 -1
View File
@@ -21,7 +21,7 @@ struct nand_device;
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
* @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
* @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
+127 -2
View File
@@ -374,6 +374,67 @@ struct spinand_ondie_ecc_conf {
u8 status;
};
/**
* struct spinand_otp_layout - structure to describe the SPI NAND OTP area
* @npages: number of pages in the OTP
* @start_page: start page of the user/factory OTP area.
*/
struct spinand_otp_layout {
unsigned int npages;
unsigned int start_page;
};
/**
* struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area
* @info: get the OTP area information
* @read: read from the SPI NAND OTP area
*/
struct spinand_fact_otp_ops {
int (*info)(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen);
int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
size_t *retlen, u8 *buf);
};
/**
* struct spinand_user_otp_ops - SPI NAND OTP methods for user area
* @info: get the OTP area information
* @lock: lock an OTP region
* @erase: erase an OTP region
* @read: read from the SPI NAND OTP area
* @write: write to the SPI NAND OTP area
*/
struct spinand_user_otp_ops {
int (*info)(struct spinand_device *spinand, size_t len,
struct otp_info *buf, size_t *retlen);
int (*lock)(struct spinand_device *spinand, loff_t from, size_t len);
int (*erase)(struct spinand_device *spinand, loff_t from, size_t len);
int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
size_t *retlen, u8 *buf);
int (*write)(struct spinand_device *spinand, loff_t from, size_t len,
size_t *retlen, const u8 *buf);
};
/**
* struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area
* @layout: OTP region layout
* @ops: OTP access ops
*/
struct spinand_fact_otp {
const struct spinand_otp_layout layout;
const struct spinand_fact_otp_ops *ops;
};
/**
* struct spinand_user_otp - SPI NAND OTP grouping structure for user area
* @layout: OTP region layout
* @ops: OTP access ops
*/
struct spinand_user_otp {
const struct spinand_otp_layout layout;
const struct spinand_user_otp_ops *ops;
};
/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
@@ -389,6 +450,10 @@ struct spinand_ondie_ecc_conf {
* @select_target: function used to select a target/die. Required only for
* multi-die chips
* @set_cont_read: enable/disable continuous cached reads
* @fact_otp: SPI NAND factory OTP info.
* @user_otp: SPI NAND user OTP info.
* @read_retries: the number of read retry modes supported
* @set_read_retry: enable/disable read retry for data recovery
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -409,6 +474,11 @@ struct spinand_info {
unsigned int target);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
struct spinand_fact_otp fact_otp;
struct spinand_user_otp user_otp;
unsigned int read_retries;
int (*set_read_retry)(struct spinand_device *spinand,
unsigned int read_retry);
};
#define SPINAND_ID(__method, ...) \
@@ -432,10 +502,32 @@ struct spinand_info {
}
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
.select_target = __func
#define SPINAND_CONT_READ(__set_cont_read) \
.set_cont_read = __set_cont_read,
.set_cont_read = __set_cont_read
#define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \
.fact_otp = { \
.layout = { \
.npages = __npages, \
.start_page = __start_page, \
}, \
.ops = __ops, \
}
#define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \
.user_otp = { \
.layout = { \
.npages = __npages, \
.start_page = __start_page, \
}, \
.ops = __ops, \
}
#define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \
.read_retries = __read_retries, \
.set_read_retry = __set_read_retry
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
@@ -487,6 +579,10 @@ struct spinand_dirmap {
* actually relevant to enable this feature.
* @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
* @fact_otp: SPI NAND factory OTP info.
* @user_otp: SPI NAND user OTP info.
* @read_retries: the number of read retry modes supported
* @set_read_retry: Enable/disable the read retry feature
*/
struct spinand_device {
struct nand_device base;
@@ -519,6 +615,13 @@ struct spinand_device {
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
const struct spinand_fact_otp *fact_otp;
const struct spinand_user_otp *user_otp;
unsigned int read_retries;
int (*set_read_retry)(struct spinand_device *spinand,
unsigned int retry_mode);
};
/**
@@ -588,4 +691,26 @@ int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s);
int spinand_read_page(struct spinand_device *spinand,
const struct nand_page_io_req *req);
int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req);
size_t spinand_otp_page_size(struct spinand_device *spinand);
size_t spinand_fact_otp_size(struct spinand_device *spinand);
size_t spinand_user_otp_size(struct spinand_device *spinand);
int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, u8 *buf);
int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, u8 *buf);
int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
size_t len, size_t *retlen, const u8 *buf);
int spinand_set_mtd_otp_ops(struct spinand_device *spinand);
#endif /* __LINUX_MTD_SPINAND_H */