mmc: dw_mmc: Workaround for RV1106/1103 sdmmc

1. We always use DMA for sdmmc, and it cannot be used as SDIO
2. restore affected registers, including CTYPE and TIMEOUT
3. expose API for ISP/VICAP usage
4. remove SD3.0 support which won't affect write throughput
5. limit the ffaa0000 controller to be used only as an SD card before ECO
6. reserve a descriptor for dirty data, so that the DMA finishes the
prior descriptors while the dirty data in the last descriptor is never
sent at all.
7. use CMD12 plus a reset to clean the remaining dirty data and recover the IP

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
Change-Id: Ib1456c8f399cdeda391ba459b22097b0f5820e53
This commit is contained in:
Shawn Lin
2022-04-13 09:00:25 +08:00
committed by Tao Huang
parent d2786053e4
commit 6eca689b99
4 changed files with 126 additions and 5 deletions
+14
View File
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/pm_runtime.h>
#include <linux/rockchip/cpu.h>
#include <linux/slab.h>
#include "dw_mmc.h"
@@ -390,6 +391,19 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
"rockchip,rk3288-dw-mshc"))
host->bus_hz /= RK3288_CLKGEN_DIV;
if (of_device_is_compatible(host->dev->of_node,
"rockchip,rv1106-dw-mshc") &&
rockchip_get_cpu_version() == 0 &&
!strcmp(dev_name(host->dev), "ffaa0000.mmc")) {
if (device_property_read_bool(host->dev, "no-sd")) {
dev_err(host->dev, "Invalid usage, should be SD card only\n");
return -EINVAL;
}
host->is_rv1106_sd = true;
dev_info(host->dev, "is rv1106 sd\n");
}
host->need_xfer_timer = true;
return 0;
}
+98 -5
View File
@@ -36,6 +36,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/soc/rockchip/rk_sdmmc.h>
#include <linux/soc/rockchip/rockchip_decompress.h>
#include "dw_mmc.h"
@@ -105,6 +106,24 @@ struct idmac_desc {
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH 0x1000
#if IS_ENABLED(CONFIG_CPU_RV1106)
/*
 * Lock shared with the ISP/VICAP drivers. It stays NULL until the RV1106
 * sd host registers its own lock (see dw_mci_probe), so both helpers are
 * harmless no-ops before that point.
 */
static spinlock_t *g_sdmmc_ispvicap_lock;

/* Acquire the shared sdmmc lock on behalf of ISP/VICAP. */
void rv1106_sdmmc_get_lock(void)
{
	if (!g_sdmmc_ispvicap_lock)
		return;

	spin_lock(g_sdmmc_ispvicap_lock);
}
EXPORT_SYMBOL(rv1106_sdmmc_get_lock);

/* Release the shared sdmmc lock taken via rv1106_sdmmc_get_lock(). */
void rv1106_sdmmc_put_lock(void)
{
	if (!g_sdmmc_ispvicap_lock)
		return;

	spin_unlock(g_sdmmc_ispvicap_lock);
}
EXPORT_SYMBOL(rv1106_sdmmc_put_lock);
#endif
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -463,14 +482,20 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
/* Disable and reset the IDMAC interface */
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_USE_IDMAC;
if (!host->is_rv1106_sd)
temp &= ~SDMMC_CTRL_USE_IDMAC;
temp |= SDMMC_CTRL_DMA_RESET;
mci_writel(host, CTRL, temp);
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
temp |= SDMMC_IDMAC_SWRESET;
if (host->is_rv1106_sd) {
temp |= SDMMC_IDMAC_SWRESET;
} else {
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
temp |= SDMMC_IDMAC_SWRESET;
}
mci_writel(host, BMOD, temp);
}
@@ -711,6 +736,13 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
}
}
if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE)) {
desc->des0 = desc_last->des0;
desc->des2 = desc_last->des2;
desc->des1 = 0x8; /* Random dirty data for last one desc */
desc_last = desc;
}
/* Set first descriptor */
desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
@@ -895,7 +927,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD && !host->is_rv1106_sd)
return -EINVAL;
if (data->blksz & 3)
@@ -1308,10 +1340,16 @@ static void __dw_mci_start_request(struct dw_mci *host,
host->data_status = 0;
host->dir_status = 0;
if (host->is_rv1106_sd)
mci_writel(host, CTYPE, (slot->ctype << slot->id));
data = cmd->data;
if (data) {
mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE))
mci_writel(host, BYTCNT, 0);
else
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
@@ -1830,6 +1868,10 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
}
spin_unlock(&host->lock);
if (host->is_rv1106_sd)
dw_mci_reset(host);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
}
@@ -1871,6 +1913,9 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
u32 status = host->data_status;
if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE) && (status & SDMMC_INT_DATA_OVER))
goto finish;
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DRTO) {
data->error = -ETIMEDOUT;
@@ -1903,6 +1948,7 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
*/
dw_mci_reset(host);
} else {
finish:
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
}
@@ -2706,11 +2752,14 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
rv1106_sd:
spin_lock_irqsave(&host->irq_lock, irqflags);
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (host->is_rv1106_sd)
pending |= SDMMC_INT_DATA_OVER;
if (!host->data_status)
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
@@ -2780,6 +2829,18 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
host->dma_ops->complete((void *)host);
if (host->is_rv1106_sd && (pending & SDMMC_IDMAC_INT_TI)) {
/*
* The last desc contain dump data, controller won't issue,
* controller will auto gate clk and it will not affect FIFO
* status. So busy check here can mostly block 2 cycles of
* hclk from simulation, it's not a big deal.
*/
while ((mci_readl(host, STATUS) & BIT(2)) != BIT(2))
;
goto rv1106_sd;
}
}
}
@@ -2867,6 +2928,10 @@ static int dw_mci_init_slot(struct dw_mci *host)
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
/* Reserve last desc for dirty data */
if (host->is_rv1106_sd)
host->ring_size--;
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
mmc->max_seg_size = 0x1000;
@@ -3431,6 +3496,24 @@ int dw_mci_probe(struct dw_mci *host)
goto err_dmaunmap;
}
if (host->is_rv1106_sd) {
#if IS_ENABLED(CONFIG_CPU_RV1106)
g_sdmmc_ispvicap_lock = &host->lock;
#endif
/* Select IDMAC interface */
fifo_size = mci_readl(host, CTRL);
fifo_size |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, fifo_size);
fifo_size = mci_readl(host, INTMASK);
fifo_size &= ~SDMMC_INT_HTO;
mci_writel(host, INTMASK, fifo_size);
host->slot->mmc->caps &= ~(MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12);
}
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
@@ -3540,6 +3623,16 @@ int dw_mci_runtime_resume(struct device *dev)
DW_MCI_ERROR_FLAGS);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
if (host->is_rv1106_sd) {
/* Select IDMAC interface */
ret = mci_readl(host, CTRL);
ret |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, ret);
ret = mci_readl(host, INTMASK);
ret &= ~SDMMC_INT_HTO;
mci_writel(host, INTMASK, ret);
}
if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
+1
View File
@@ -232,6 +232,7 @@ struct dw_mci {
struct timer_list dto_timer;
bool need_xfer_timer;
struct timer_list xfer_timer;
bool is_rv1106_sd;
};
/* DMA ops for Internal/External DMAC interface */
+13
View File
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * rk_sdmmc.h - lock-sharing hooks between the Rockchip RV1106 dw_mmc host
 * and the ISP/VICAP drivers.
 *
 * The real implementations live in the dw_mmc driver; when that driver is
 * not reachable the calls compile to empty inline stubs, so callers need
 * no #ifdef guards of their own.
 */
#ifndef __RK_SDMMC_H
#define __RK_SDMMC_H
#if IS_ENABLED(CONFIG_CPU_RV1106) && IS_REACHABLE(CONFIG_MMC_DW)
/* Implemented in dw_mmc.c; take/release the sdmmc host lock registered at probe. */
void rv1106_sdmmc_get_lock(void);
void rv1106_sdmmc_put_lock(void);
#else
/* No-op stubs when the RV1106 dw_mmc driver is not built/reachable. */
static inline void rv1106_sdmmc_get_lock(void) {}
static inline void rv1106_sdmmc_put_lock(void) {}
#endif
#endif /* __RK_SDMMC_H */