diff --git a/arch/arm/configs/am3xxx_pfc_generic_defconfig b/arch/arm/configs/am3xxx_pfc_generic_defconfig index dbe7f862eba4..59225e957b81 100644 --- a/arch/arm/configs/am3xxx_pfc_generic_defconfig +++ b/arch/arm/configs/am3xxx_pfc_generic_defconfig @@ -256,6 +256,8 @@ CONFIG_I2C_MUX_PCA9541=y CONFIG_I2C_MUX_PCA954x=y CONFIG_SPI=y CONFIG_SPI_OMAP24XX=y +CONFIG_SPI_KBUS=y +CONFIG_SPI_SPIDEV=y CONFIG_PINCTRL_SINGLE=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_PCA953X=y diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ce0fd5df8e9..c8f70d771ba3 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -758,6 +758,22 @@ config SPI_TI_QSPI This device supports single, dual and quad read support, while it only supports single write mode. +config SPI_KBUS_OMAP_EXTENSION + bool "KBUS Extension for OMAP MCSPI Driver (Read Notes!)" + depends on SPI_OMAP24XX + help + KBUS Extension for the McSPI OMAP driver. + It implements the communication protocol for the Infineon XE164 chip + which does the communication with the KBUS logic. + ATTENTION: This disables the use of a worker thread (work queue). + In its current state only one userspace process is allowed. + +config SPI_KBUS + select SPI_KBUS_OMAP_EXTENSION + tristate "WAGO KBUS Driver" + help + This driver provides access to the KBUS interface. 
+ config SPI_ORION tristate "Orion SPI master" depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6af54842b9fa..b6d6f3c77533 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -152,6 +152,8 @@ obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o obj-$(CONFIG_SPI_AMD) += spi-amd.o +obj-$(CONFIG_SPI_KBUS) += spi-kbus.o +obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o # SPI slave protocol handlers obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o diff --git a/drivers/spi/spi-kbus.c b/drivers/spi/spi-kbus.c new file mode 100644 index 000000000000..878d344faa64 --- /dev/null +++ b/drivers/spi/spi-kbus.c @@ -0,0 +1,1265 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define PXC_SPI_KBUS_TRACER +#define CREATE_TRACE_POINTS +#include + +#define KBUS_DUMMY_BIT_PER_WORD 8 +#define KBUS_DUMMY_SPEED 1000000 + +#if KBUS_TESTING +struct wago_trace_data wago_ktest; +#endif + +static dev_t kbus_dev; +static struct cdev kbus_cdev; + +/* sysfs */ +extern struct class *wsysinit_sysfs_class; +extern struct device *wsysinit_sysfs_device; +static struct device *kbus_device; + +int kbus_wait_for_gpio(int gpio) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1); /* 1000 ? 
*/ + while (gpio_get_value(gpio)) { /* active low */ + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + return 0; +} + +int kbus_error(void) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + /* + * read error code from gpios + */ + kdrvdata->kbus_err = !gpiod_get_value(kdrvdata->gpio_nerr); + return kdrvdata->kbus_err ? -1 : 0; +} + +int kbus_wait_for_irq(void) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!kdrvdata->kbus_irq_state) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + kdrvdata->kbus_irq_state = 0; + return 0; +} + +int kbus_wait_for_event(int *event) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(*event)) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + *event = 0; + return 0; +} + +static irqreturn_t kbus_isr(int irq, void *dev) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + trace_pxc_kbus(__func__, "kbus-irq:in"); + kdrvdata->kbus_irq_state = 0; /* attention: changed polarisation */ + + /* wake_up_interruptible(&kdrvdata->kbus_irq_wq); */ + wake_up(&kdrvdata->kbus_irq_wq); + + trace_pxc_kbus(__func__, "kbus-irq:out"); + return IRQ_HANDLED; +} + +static ssize_t kbus_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + u32 irq_value = 0; + ssize_t status = 0; + + status = __get_user(irq_value, buf); + if (status == 0) { + if (irq_value) { + if (!kdrvdata->kbus_irq_enabled) + KBUS_ENABLE_IRQ(kdrvdata->kbus_irq); + kdrvdata->kbus_irq_enabled = 1; + trace_pxc_kbus(__func__, "turned-on irqs!"); + } else { + if (kdrvdata->kbus_irq_enabled) + KBUS_DISABLE_IRQ(kdrvdata->kbus_irq); + kdrvdata->kbus_irq_enabled = 0; + trace_pxc_kbus(__func__, "turned-off irqs!"); + } + } + + return status; +} + +static struct 
task_struct *find_dma_task(void) +{ + struct task_struct *p, *found = NULL; + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + read_lock(&tasklist_lock); + for_each_process(p) { + if (p->flags & PF_KTHREAD) { + if ((strcmp(p->comm, + kdrvdata->kbus_dma_boost_irq_thread) == + 0)) { + found = p; + break; + } + } + } + read_unlock(&tasklist_lock); + + return found; +} + +void kbus_boost_dma_task(u8 enable) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + struct sched_param boost_param = { + .sched_priority = enable ? kdrvdata->kbus_dma_boost_prio : + kdrvdata->kbus_dma_normal_prio + }; + + if (kdrvdata->dma_task) { + if ((enable && !kdrvdata->kbus_dma_boost_en) || + (!enable && kdrvdata->kbus_dma_boost_en)) { + kdrvdata->kbus_dma_boost_en = + !kdrvdata->kbus_dma_boost_en; + sched_setscheduler(kdrvdata->dma_task, SCHED_FIFO, + &boost_param); + } + } +} + +static int kbus_open(struct inode *node, struct file *file) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + +#if KBUS_TESTING + wago_tests_init(&wago_ktest, 1); +#endif + /* get pid of irq/12-DMA here + * and set it in kdrvdata + */ + if (!kdrvdata->dma_task) { + kdrvdata->dma_task = find_dma_task(); + if (kdrvdata->dma_task) + pr_info("dma-task pid is %u.\n", + kdrvdata->dma_task->pid); + else + printk("dma-task not found!\n"); + } + + return 0; +} + +static int kbus_close(struct inode *node, struct file *file) +{ +#if KBUS_TESTING + wago_tests_deinit(); +#endif + return 0; +} + +static void kbus_complete(void *arg) +{ + kbus_wago_mpoint(); /* MX */ + trace_pxc_kbus(__func__, "jump to complete()"); + complete(arg); +} + +static int kbus_spi_sync(struct spi_device *spi, struct spi_message *msg) +{ + DECLARE_COMPLETION_ONSTACK(done); + int status; + + msg->complete = kbus_complete; + msg->context = &done; + + trace_pxc_kbus(__func__, "jump to spi_async"); + status = spi_async(spi, msg); + if (status == 0) { + wait_for_completion(&done); + status = 
msg->status; + if (status == 0) + status = msg->actual_length; + } + + return status; +} + +static int kbus_spi_message(struct spi_ioc_transfer *u_xfers, unsigned n_xfers) +{ + struct spi_message msg; + struct spi_transfer *k_xfers; + struct spi_transfer *k_tmp; + struct spi_ioc_transfer *u_tmp; + unsigned int n, total; + u8 *buf; + int status = -EFAULT; + u32 bufsiz = KBUS__MAX_BUF_LEN; + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + spi_message_init(&msg); + k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL); + if (k_xfers == NULL) + return -ENOMEM; + + /* Construct spi_message, copying any tx data to bounce buffer. + * We walk the array of user-provided transfers, using each one + * to initialize a kernel version of the same transfer. + */ + buf = kdrvdata->tx_buf; /* use in this case the buffer for tx and rx */ + total = 0; + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; + n--, k_tmp++, u_tmp++) { + k_tmp->len = u_tmp->len; + + total += k_tmp->len; + if (total > bufsiz) { + status = -EMSGSIZE; + goto done; + } + + if (u_tmp->rx_buf) { + k_tmp->rx_buf = buf; + if (!access_ok((u8 __user *)(uintptr_t)u_tmp->rx_buf, + u_tmp->len)) + goto done; + } + if (u_tmp->tx_buf) { + k_tmp->tx_buf = buf; + if (copy_from_user( + buf, + (const u8 __user *)(uintptr_t)u_tmp->tx_buf, + u_tmp->len)) + goto done; + } + buf += k_tmp->len; + + k_tmp->cs_change = !!u_tmp->cs_change; + k_tmp->bits_per_word = u_tmp->bits_per_word; + k_tmp->delay.value = u_tmp->delay_usecs; + k_tmp->speed_hz = u_tmp->speed_hz; + + spi_message_add_tail(k_tmp, &msg); + } + + status = kbus_spi_sync(kdrvdata->spi, &msg); + if (status < 0) + goto done; + + /* copy any rx data out of bounce buffer */ + buf = kdrvdata->tx_buf; + for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + if (u_tmp->rx_buf) { + if (__copy_to_user((u8 __user *)(uintptr_t)u_tmp->rx_buf, + buf, u_tmp->len)) { + status = -EFAULT; + goto done; + } + } + buf += u_tmp->len; + } + status = total; + +done: + 
kfree(k_xfers); + return status; +} + +static void kbus_dump(char *prefix, char *buf, int len) +{ + int i; + + for (i = 0; i < len; i += 32) { + pr_info("DATADUMP(%s) copylen %4d buf %p" + "[%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x]\n", + prefix, (len - i), buf, buf[i + 0], buf[i + 1], + buf[i + 2], buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], buf[i + 9], + buf[i + 10], buf[i + 11], buf[i + 12], buf[i + 13], + buf[i + 14], buf[i + 15], buf[i + 16], buf[i + 17], + buf[i + 18], buf[i + 19], buf[i + 20], buf[i + 21], + buf[i + 22], buf[i + 23], buf[i + 24], buf[i + 25], + buf[i + 26], buf[i + 27], buf[i + 28], buf[i + 29], + buf[i + 30], buf[i + 31]); + } +} + +static int kbus_data_txrx(struct kbus_data *kdata) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + struct spi_message msg; + struct spi_transfer spi_t[] = { + { + .tx_buf = kdrvdata->tx_buf, + .len = kdata->byte_len, + .bits_per_word = KBUS_DUMMY_BIT_PER_WORD, + .speed_hz = KBUS_DUMMY_SPEED, + }, + { + .rx_buf = kdrvdata->rx_buf, + .len = kdata->byte_len, + .bits_per_word = KBUS_DUMMY_BIT_PER_WORD, + .speed_hz = KBUS_DUMMY_SPEED, + }, + }; + int status; + + trace_pxc_kbus(__func__, "enter"); + + if (!kdata->byte_len || kdata->byte_len > KBUS__MAX_BUF_LEN) + return -EINVAL; + + if (kdata->timeout_ms) + kdrvdata->timeout_ms = kdata->timeout_ms; + else + kdrvdata->timeout_ms = KBUS_IRQ_TIMEOUT; + + /* get the userspace data */ + if (copy_from_user(kdrvdata->tx_buf, kdata->tx_buf, kdata->byte_len)) + return -EFAULT; + + if (trace_pxc_buf32_enabled()) + kbus_dump("KTX", kdrvdata->tx_buf, kdata->byte_len); + + /* clear rx buf */ + memset(kdrvdata->rx_buf, 0, kdata->byte_len); + + /* + * set some valid dummy data. + * This configuration will not be valid during transfer. + * It is only set by kbus_spi_config(). 
+ * + */ + + kbus_wago_mpoint(); /* M1 */ + + /* create message and add transfers to it */ + spi_message_init(&msg); + + /* setup dma */ + if (kdrvdata->use_dma) { + trace_pxc_kbus(__func__, "dma transfer enabled"); + spi_t[0].tx_dma = kdrvdata->tx_buf_dma; + spi_t[1].rx_dma = kdrvdata->rx_buf_dma; + msg.is_dma_mapped = 1; + } + + spi_message_add_tail(&spi_t[0], &msg); + spi_message_add_tail(&spi_t[1], &msg); + + trace_pxc_kbusmsg(__func__, &msg, ""); + status = kbus_spi_sync(kdrvdata->spi, &msg); + if (status > 0) + if (copy_to_user(kdata->rx_buf, kdrvdata->rx_buf, + kdata->byte_len)) + return -EFAULT; + trace_pxc_kbusmsg(__func__, &msg, ""); + + /* tell the user about the error, if occured */ + if (status < 0 && kdrvdata->kbus_err) { + if (__put_user(kdrvdata->kbus_err, kdata->err)) + return -EFAULT; + } + + kbus_wago_mpoint(); /* M5 */ + + if (trace_pxc_buf32_enabled()) + kbus_dump("KRX", kdrvdata->rx_buf, kdata->byte_len); + + trace_pxc_kbus(__func__, "leave"); + + return status; +} + +static int kbus_binary_txrx(struct kbus_data *kbinary) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + struct spi_message msg; + struct spi_transfer spi_t[] = { + { + .len = kbinary->byte_len, + .bits_per_word = kdrvdata->spi->bits_per_word, + .speed_hz = kdrvdata->spi->max_speed_hz, + }, + }; + int status; + + trace_pxc_kbus(__func__, "enter"); + + if (!kbinary->byte_len || kbinary->byte_len > KBUS__MAX_BUF_LEN) + return -EINVAL; + + if (!kbinary->tx_buf && !kbinary->rx_buf) + return -EINVAL; + + if (kbinary->tx_buf) { + spi_t[0].tx_buf = kdrvdata->tx_buf; + /* get the userspace data */ + if (copy_from_user(kdrvdata->tx_buf, kbinary->tx_buf, + kbinary->byte_len)) + return -EFAULT; + } + + if (kbinary->rx_buf) { + spi_t[0].rx_buf = kdrvdata->rx_buf; + /* clear rx buf */ + memset(kdrvdata->rx_buf, 0, kbinary->byte_len); + } + + /* create message and add transfer to it */ + spi_message_init(&msg); + spi_message_add_tail(&spi_t[0], &msg); + + 
trace_pxc_kbusmsg(__func__, &msg, ""); + status = kbus_spi_sync(kdrvdata->spi, &msg); + if (status && kbinary->rx_buf) + if (copy_to_user(kbinary->rx_buf, kdrvdata->rx_buf, + kbinary->byte_len)) + return -EFAULT; + trace_pxc_kbusmsg(__func__, &msg, ""); + + trace_pxc_kbus(__func__, "leave"); + return status; +} + +static int kbus_cmd_txrx(struct kbus_cmd *kcmd) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + struct spi_message msg; + /* + * set some valid dummy data for bits_per_word and speed_hz. + * This configuration will not be valid during transfer. + * It is only set by kbus_spi_config(). */ + struct spi_transfer spi_t[] = { + { + /* TX */ + .tx_buf = kdrvdata->tx_buf, + .len = kcmd->byte_len_tx, + .bits_per_word = KBUS_DUMMY_BIT_PER_WORD, + .speed_hz = KBUS_DUMMY_SPEED, + }, + { + /* RX0 */ + .rx_buf = kdrvdata->rx_buf, + .len = 6, + .bits_per_word = KBUS_DUMMY_BIT_PER_WORD, + .speed_hz = KBUS_DUMMY_SPEED, + }, + { + /* RX1 */ + .rx_buf = kdrvdata->rx_buf + 6, + .len = kcmd->byte_len_rx - + 6, /* set remaining max bytes. + * RX0 will update it later. 
*/ + .bits_per_word = KBUS_DUMMY_BIT_PER_WORD, + .speed_hz = KBUS_DUMMY_SPEED, + }, + }; + int status; + + trace_pxc_kbus(__func__, "enter"); + + if ((!kcmd->byte_len_tx && !kcmd->byte_len_rx) || + kcmd->byte_len_tx > KBUS__MAX_BUF_LEN || + kcmd->byte_len_rx > KBUS__MAX_BUF_LEN) + return -EINVAL; + + if (kcmd->timeout_ms) + kdrvdata->timeout_ms = kcmd->timeout_ms; + else + kdrvdata->timeout_ms = KBUS_IRQ_TIMEOUT; + + /* get the userspace data */ + if (copy_from_user(kdrvdata->tx_buf, kcmd->tx_buf, kcmd->byte_len_tx)) + return -EFAULT; + + /* clear rx buf */ + memset(kdrvdata->rx_buf, 0, kcmd->byte_len_rx); + + kbus_wago_mpoint(); /* M1 */ + trace_pxc_kbus(__func__, "M1"); + + /* create message and add transfers to it */ + spi_message_init(&msg); + + /* setup dma */ + if (kdrvdata->use_dma) { + trace_pxc_kbus(__func__, "dma transfer enabled"); + spi_t[0].tx_dma = kdrvdata->tx_buf_dma; + spi_t[1].rx_dma = kdrvdata->rx_buf_dma; + spi_t[2].rx_dma = kdrvdata->rx_buf_dma + 6; + msg.is_dma_mapped = 1; + } + + spi_message_add_tail(&spi_t[0], &msg); + spi_message_add_tail(&spi_t[1], &msg); + spi_message_add_tail(&spi_t[2], &msg); + + /* do the actual spi msg transfer */ + trace_pxc_kbusmsg(__func__, &msg, ""); + status = kbus_spi_sync(kdrvdata->spi, &msg); + if (status) + if (copy_to_user(kcmd->rx_buf, kdrvdata->rx_buf, + spi_t[2].len + 6)) + return -EFAULT; + trace_pxc_kbusmsg(__func__, &msg, ""); + + /* tell the user about the error, if occured */ + if (status < 0 && kdrvdata->kbus_err) { + if (__put_user(kdrvdata->kbus_err, kcmd->err)) + return -EFAULT; + } + + kbus_wago_mpoint(); /* M5 */ + trace_pxc_kbus(__func__, "leave"); + + return status; +} + +static int kbus_spi_config(struct kbus_spi_config *kconfig) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + int retval = 0; + u8 save = 0; + + if (!kconfig->bits_per_word && !kconfig->mode) + return -EINVAL; + + /* update values */ + if (kconfig->bits_per_word) { + if (kconfig->bits_per_word != 16) + 
trace_pxc_kbus(__func__, + "WARNING: bits_per_word != 16Bit."); + kdrvdata->spi->bits_per_word = kconfig->bits_per_word; + } + if (kconfig->max_speed_hz) + kdrvdata->spi->max_speed_hz = kconfig->max_speed_hz; + if (kconfig->mode) { + if (kconfig->mode & ~SPI_MODE_MASK) + return -EINVAL; + save = kdrvdata->spi->mode; + kconfig->mode |= kdrvdata->spi->mode & ~SPI_MODE_MASK; + kdrvdata->spi->mode = (u8)kconfig->mode; + } + + /* do the actual spi setup */ + retval = spi_setup(kdrvdata->spi); + if (retval < 0) { /* restore mode if changed */ + if (save) + kdrvdata->spi->mode = save; + } else + kbus_dbg("%s: spi mode is updated: %02x\n", __func__, + kconfig->mode); + + return retval; +} + +static long kbus_ioctl(struct file *file, uint cmd, ulong arg) +{ + long ret = 0; + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + struct spi_device *spi = kdrvdata->spi; + + switch (cmd) { + case KBUS_IOC_CMD: /* FIXME */ + { + struct kbus_cmd kcmd; + struct kbus_cmd __user *kcmd_user; + + trace_pxc_kbus(__func__, + "KCMD: Enter: Set kbus_irq_state to 1."); + kbus_wago_mpoint(); /* M0 */ + + spi_set_drvdata(spi, kdrvdata); + kdrvdata->kbus_irq_state = + 1; /* will be set to 0 by kbus_isr() */ + + gpiod_set_value(kdrvdata->gpio_cmdsel, 0); + kdrvdata->cmdsel = 1; + /* Indicate Transfer Start */ + gpiod_set_value(kdrvdata->gpio_nirq, 0); + + kcmd_user = (struct kbus_cmd __user *)arg; + if (copy_from_user(&kcmd, kcmd_user, sizeof(kcmd))) { + /* release the irq pin */ + gpiod_set_value(kdrvdata->gpio_nirq, 1); + return -EFAULT; + } + + ret = kbus_cmd_txrx(&kcmd); + if (ret < 0) { + /* release the irq pin */ + gpiod_set_value(kdrvdata->gpio_nirq, 1); + trace_pxc_kbus( + __func__, + "KCMD: kbus_cmd_txrx() returned with error!"); + } + + /* FIXME: JUST FOR TESTING */ + gpiod_set_value(kdrvdata->gpio_cmdsel, 1); + + kbus_wago_mpoint(); /* M6 */ + trace_pxc_kbus(__func__, "KCMD: Leave"); + } break; + case KBUS_IOC_DATA: { + struct kbus_data kdata; + struct kbus_data __user 
*kdata_user; + + trace_pxc_kbus(__func__, + "KDATA: Enter: Set kbus_irq_state to 1."); + kbus_wago_mpoint(); /* M0 */ + + spi_set_drvdata(spi, kdrvdata); + kdrvdata->kbus_irq_state = + 1; /* will be set to 0 by kbus_isr() */ + + gpiod_set_value(kdrvdata->gpio_cmdsel, 1); + kdrvdata->cmdsel = 0; + /* Indicate Transfer Start */ + gpiod_set_value(kdrvdata->gpio_nirq, 0); + + kdata_user = (struct kbus_data __user *)arg; + + if (copy_from_user(&kdata, kdata_user, sizeof(kdata))) { + /* release the irq pin */ + gpiod_set_value(kdrvdata->gpio_nirq, 1); + return -EFAULT; + } + + ret = kbus_data_txrx(&kdata); + if (ret < 0) { + /* release the irq pin */ + gpiod_set_value(kdrvdata->gpio_nirq, 1); + trace_pxc_kbus( + __func__, + "KDATA: kbus_data_txrx() returned with error!"); + } + + /* FIXME: JUST FOR TESTING */ + gpiod_set_value(kdrvdata->gpio_cmdsel, 0); + + kbus_wago_mpoint(); /* M6 */ + trace_pxc_kbus(__func__, "KDATA: Leave"); + } break; + case KBUS_IOC_CONFIG: { + struct kbus_spi_config kconfig; + struct kbus_spi_config __user *kconfig_user; + + trace_pxc_kbus(__func__, "KCONFIG: Enter"); + + spi_set_drvdata(spi, kdrvdata); + kconfig_user = (struct kbus_spi_config __user *)arg; + + if (copy_from_user(&kconfig, kconfig_user, sizeof(kconfig))) + return -EFAULT; + + ret = kbus_spi_config(&kconfig); + if (ret < 0) + trace_pxc_kbus( + __func__, + "KCONFIG: kbus_spi_config() returned with error!"); + + trace_pxc_kbus(__func__, "KCONFIG: Leave"); + } break; + case KBUS_IOC_BINARY: { + struct kbus_data kbinary; + struct kbus_data __user *kbinary_user; + + trace_pxc_kbus(__func__, "KBINARY: Enter."); + + spi_set_drvdata( + spi, + NULL); /* don't use kbus algorithm in spi-omap2-mcspi.c */ + kbinary_user = (struct kbus_data __user *)arg; + if (copy_from_user(&kbinary, kbinary_user, sizeof(kbinary))) + return -EFAULT; + + ret = kbus_binary_txrx(&kbinary); + if (ret < 0) + trace_pxc_kbus( + __func__, + "KBINARY: kbus_binary_txrx() returned with error!"); + + 
trace_pxc_kbus(__func__, "KBINARY: Leave"); + } break; + default: /* FIXME: make it possible to use the spidev-way of communication. Not yet tested! */ + { + u32 tmp; + unsigned int n_ioc; + struct spi_ioc_transfer *ioc; + + spi_set_drvdata( + spi, + NULL); /* don't use kbus algorithm in spi-omap2-mcspi.c */ + /* segmented and/or full-duplex I/O request */ + if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) || + _IOC_DIR(cmd) != _IOC_WRITE) { + ret = -ENOTTY; + break; + } + + tmp = _IOC_SIZE(cmd); + if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { + ret = -EINVAL; + break; + } + n_ioc = tmp / sizeof(struct spi_ioc_transfer); + if (n_ioc == 0) + break; + + /* copy into scratch area */ + ioc = kmalloc(tmp, GFP_KERNEL); + if (!ioc) { + ret = -ENOMEM; + break; + } + if (__copy_from_user(ioc, (void __user *)arg, tmp)) { + kfree(ioc); + ret = -EFAULT; + break; + } + + /* translate to spi_message, execute */ + ret = kbus_spi_message(ioc, n_ioc); + kfree(ioc); + break; + } + } + + return ret; +} + +static struct file_operations kbus_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = kbus_ioctl, + .write = kbus_write, + .open = kbus_open, + .release = kbus_close, +}; + +static ssize_t kbus_sysfs_prio_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t kbus_sysfs_prio_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t kbus_sysfs_trig_reset(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); + +DEVICE_ATTR(kbus_dma_boost_prio, 0600, kbus_sysfs_prio_show, + kbus_sysfs_prio_set); + +DEVICE_ATTR(kbus_dma_normal_prio, 0600, kbus_sysfs_prio_show, + kbus_sysfs_prio_set); + +DEVICE_ATTR(kbus_trig_reset, 0200, NULL, kbus_sysfs_trig_reset); + +static ssize_t kbus_sysfs_prio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + u8 prio = 0; + + if (attr == 
&dev_attr_kbus_dma_boost_prio) + prio = kdrvdata->kbus_dma_boost_prio; /* boost */ + else if (attr == &dev_attr_kbus_dma_normal_prio) + prio = kdrvdata->kbus_dma_normal_prio; /* normal */ + + return sprintf(buf, "%d\n", (int)prio); +} + +static ssize_t kbus_sysfs_prio_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + u32 tmp = simple_strtoul(buf, NULL, 10); + + /* check if value is valid */ + if (tmp < 1 || tmp > 99) + return -EINVAL; + + if (attr == &dev_attr_kbus_dma_boost_prio) { + pr_info("dma-boost prio changed from %d to %d.\n", + (int)kdrvdata->kbus_dma_boost_prio, (int)tmp); + kdrvdata->kbus_dma_boost_prio = (u8)tmp; /* boost */ + } else if (attr == &dev_attr_kbus_dma_normal_prio) { + pr_info("dma-normal prio changed from %d to %d.\n", + (int)kdrvdata->kbus_dma_normal_prio, (int)tmp); + kdrvdata->kbus_dma_normal_prio = (u8)tmp; /* normal */ + } + + return count; +} + +static int kbus_trig_reset(struct kbus_drv_data *kdrvdata) +{ + /* check if value is valid */ + if (kdrvdata == NULL) + return -EINVAL; + + /* reset kbus slave cpu (Infineon XE164) */ + gpiod_set_value_cansleep(kdrvdata->gpio_nrst, 1); + udelay(100); + gpiod_set_value_cansleep(kdrvdata->gpio_nrst, 0); + + pr_info("PFCxxx-KBUS: Kbus Slave CPU Reset.\n"); + + return 0; +} + +static ssize_t kbus_sysfs_trig_reset(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + u32 val = simple_strtoul(buf, NULL, 10); + + /* check if value is valid */ + if (val != 1) + return -EINVAL; + + kbus_trig_reset(kdrvdata); + + return count; +} + +static ssize_t kbus_sysfs_boost_en_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + return sprintf(buf, "%d\n", (int)kdrvdata->kbus_dma_boost_en); +} + +static ssize_t 
kbus_sysfs_boost_en_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + u32 tmp = simple_strtoul(buf, NULL, 10); + + /* check if valid */ + if (tmp > 1) + return -EINVAL; + + /* get pid of irq/12-DMA here + * and set it in kdrvdata + */ + if (!kdrvdata->dma_task) { + kdrvdata->dma_task = find_dma_task(); + if (kdrvdata->dma_task) + pr_info("dma-task pid is %u.\n", + kdrvdata->dma_task->pid); + else + pr_info("dma-task not found!\n"); + } + + kbus_boost_dma_task((u8)tmp); + pr_info("dma-boost %s.\n", tmp ? "enabled" : "disabled"); + + return count; +} +DEVICE_ATTR(kbus_dma_boost_en, 0600, kbus_sysfs_boost_en_show, + kbus_sysfs_boost_en_set); + +static ssize_t kbus_sysfs_tty_device_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + return sprintf(buf, "%s\n", kdrvdata->kbus_tty_device_name); +} +DEVICE_ATTR(kbus_tty_device_name, 0444, kbus_sysfs_tty_device_name_show, NULL); + +static struct kbus_drv_data *kbus_probe_dt(struct spi_device *spi) +{ + struct kbus_drv_data *kdrvdata; + struct device_node *np = spi->dev.of_node; + u32 val; + int ret; + + pr_debug("%s: probing device tree node (%s)\n", __func__, np->name); + + /* alloc kernel space buffers */ + kdrvdata = devm_kmalloc(&spi->dev, sizeof(struct kbus_drv_data), + GFP_KERNEL); + + if (!kdrvdata) + return ERR_PTR(-ENOMEM); + + kdrvdata->dma_task = NULL; + + kdrvdata->use_dma = of_property_read_bool(np, "kbus,use-dma-always"); + kdrvdata->kbus_dma_boost_en = + of_property_read_bool(np, "kbus,dma-boost"); + if (kdrvdata->kbus_dma_boost_en) { + ret = of_property_read_u32(np, "kbus,dma-boost-prio", &val); + if (ret < 0) { + pr_err("%s: dt: dma-boost-prio must be set\n", + __func__); + goto out_err; + } + kdrvdata->kbus_dma_boost_prio = (u8)val; + ret = of_property_read_u32(np, "kbus,dma-default-prio", &val); + if 
(ret < 0) { + pr_err("%s: dt: dma-default-prio must be set\n", + __func__); + goto out_err; + } + kdrvdata->kbus_dma_normal_prio = (u8)val; + } + + ret = of_property_read_string(np, "kbus,tty-device", + &kdrvdata->kbus_tty_device_name); + if (ret < 0) { + pr_err("%s: dt: kbus tty-device must be set (e.g. ttyO4)\n", + __func__); + goto out_err; + } + + ret = of_property_read_string(np, "kbus,dma-boost-irq-thread", + &kdrvdata->kbus_dma_boost_irq_thread); + if (ret < 0) { + pr_err("%s: dt: kbus dma-irq-thread must be set (e.g. irq/19-edma)\n", + __func__); + goto out_err; + } + + /* get gpios from device tree */ + kdrvdata->gpio_nrst = + devm_gpiod_get(&spi->dev, "kbus,nrst", GPIOD_OUT_HIGH); + if (IS_ERR(kdrvdata->gpio_nrst)) { + ret = PTR_ERR(kdrvdata->gpio_nrst); + goto out_err; + } + + kdrvdata->gpio_nsync = + devm_gpiod_get(&spi->dev, "kbus,nsync", GPIOD_IN); + if (IS_ERR(kdrvdata->gpio_nsync)) { + ret = PTR_ERR(kdrvdata->gpio_nsync); + goto out_gpio_nrst; + } + + kdrvdata->gpio_cmdsel = + devm_gpiod_get(&spi->dev, "kbus,cmdsel", GPIOD_OUT_HIGH); + if (IS_ERR(kdrvdata->gpio_cmdsel)) { + ret = PTR_ERR(kdrvdata->gpio_cmdsel); + goto out_gpio_nsync; + } + + kdrvdata->gpio_nirq = + devm_gpiod_get(&spi->dev, "kbus,nirq", GPIOD_OUT_HIGH); + if (IS_ERR(kdrvdata->gpio_nirq)) { + ret = PTR_ERR(kdrvdata->gpio_nirq); + goto out_gpio_cmdsel; + } + + kdrvdata->gpio_nerr = + devm_gpiod_get(&spi->dev, "kbus,nerr", GPIOD_IN); + if (IS_ERR(kdrvdata->gpio_nerr)) { + ret = PTR_ERR(kdrvdata->gpio_nerr); + goto out_gpio_nirq; + } + + /* reset kbus slave cpu (Infineon XE164) */ + if (of_property_read_bool(np, "kbus,reset-on-boot")) + kbus_trig_reset(kdrvdata); + + /* get irq pin */ + kdrvdata->gpio_nrdy = + devm_gpiod_get(&spi->dev, "kbus,nrdy", GPIOD_IN); + ret = IS_ERR(kdrvdata->gpio_nrdy) ? 
PTR_ERR(kdrvdata->gpio_nrdy) : 0; + if (ret == -EPROBE_DEFER) + goto out_gpio_nerr; + else if (ret < 0) { + kdrvdata->kbus_irq = irq_of_parse_and_map(np, 0); + if (!kdrvdata->kbus_irq) { + pr_err("KBUS Probe: failed to get irq pin!\n"); + goto out_gpio_nerr; + } + } + + kdrvdata->kbus_irq = gpiod_to_irq(kdrvdata->gpio_nrdy); + kdrvdata->spi = spi; + + return kdrvdata; + +out_gpio_nerr: + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nerr); +out_gpio_nirq: + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nirq); +out_gpio_cmdsel: + devm_gpiod_put(&spi->dev, kdrvdata->gpio_cmdsel); +out_gpio_nsync: + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nsync); +out_gpio_nrst: + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nrst); +out_err: + if (ret != -EPROBE_DEFER) + pr_err("%s: failed to probe kbus oftree (%d)\n", __func__, ret); + devm_kfree(&spi->dev, kdrvdata); + return ERR_PTR(ret); +} + +static int kbus_probe(struct spi_device *spi) +{ + int ret = 0; + u8 save = 0; + struct kbus_drv_data *kdrvdata; + + if (!spi->dev.of_node) { + pr_err("WAGO KBUS Driver: No DT node found!\n"); + return -EFAULT; + } + + kdrvdata = kbus_probe_dt(spi); + if (IS_ERR(kdrvdata)) + return PTR_ERR(kdrvdata); + + init_waitqueue_head(&kdrvdata->kbus_irq_wq); + + ret = request_irq(kdrvdata->kbus_irq, kbus_isr, + IRQF_SHARED | IRQ_TYPE_EDGE_BOTH, + "kbus", &kbus_dev); + if (ret) { + pr_err("%s: could not request irq %d: ret=%d\n", __func__, + kdrvdata->kbus_irq, ret); + goto out3; + } + + /* disable kbus irq per default */ + KBUS_DISABLE_IRQ(kdrvdata->kbus_irq); + kdrvdata->kbus_irq_enabled = 0; + + /* allocate memory */ + if (kdrvdata->use_dma) { + spi->dev.coherent_dma_mask = ~0; /* why that? */ + + /* + * Minimum coherent DMA allocation is PAGE_SIZE, so allocate + * that much and share it between Tx and Rx DMA buffers. 
+ */ + kdrvdata->tx_buf = dma_alloc_coherent( + &spi->dev, PAGE_SIZE, &kdrvdata->tx_buf_dma, GFP_DMA); + + if (kdrvdata->tx_buf) { + kdrvdata->rx_buf = + (u8 *)(kdrvdata->tx_buf + (PAGE_SIZE / 2)); + /* set bus address based on allocated space */ + kdrvdata->rx_buf_dma = (dma_addr_t)( + kdrvdata->tx_buf_dma + (PAGE_SIZE / 2)); + kbus_dbg("%s: allocated dma space (%lu).", __func__, + PAGE_SIZE); + } else { + /* Fall back to non-DMA */ + kdrvdata->use_dma = 0; + kbus_dbg("%s: failed to allocate dma space (%lu).", + __func__, PAGE_SIZE); + } + } + + if (!kdrvdata->use_dma) { + kdrvdata->tx_buf = kmalloc(KBUS__MAX_BUF_LEN, GFP_KERNEL); + kdrvdata->rx_buf = kmalloc(KBUS__MAX_BUF_LEN, GFP_KERNEL); + } + + /* create device node in /dev */ + if (!wsysinit_sysfs_class) { + pr_err("PFCXXX: WAGO SYSFS class not defined!\n"); + ret = -EFAULT; + goto out1; + } + + kbus_device = device_create(wsysinit_sysfs_class, NULL, kbus_dev, NULL, + "kbus%d", MINOR(kbus_dev)); + dev_set_drvdata(kbus_device, kdrvdata); + spi_set_drvdata(spi, kdrvdata); + + /* create sysfs entries for dma boost support */ + device_create_file(wsysinit_sysfs_device, + &dev_attr_kbus_dma_normal_prio); + device_create_file(wsysinit_sysfs_device, + &dev_attr_kbus_dma_boost_prio); + device_create_file(wsysinit_sysfs_device, &dev_attr_kbus_dma_boost_en); + device_create_file(wsysinit_sysfs_device, &dev_attr_kbus_trig_reset); + + /* create sysfs entrie for tty device-name */ + device_create_file(wsysinit_sysfs_device, + &dev_attr_kbus_tty_device_name); + + /* do the initial spi setup - it can be updated through kbus_spi_config() */ + kdrvdata->spi->bits_per_word = 16; + kdrvdata->spi->max_speed_hz = KBUS__DEFAULT_SPEED; + save = kdrvdata->spi->mode; + kdrvdata->spi->mode &= (u8)~SPI_MODE_MASK; + kdrvdata->spi->mode |= SPI_CPHA; + ret = spi_setup(kdrvdata->spi); + if (ret < 0) + kdrvdata->spi->mode = save; + else + pr_info("%s: spi mode set to: %02x\n", __func__, + kdrvdata->spi->mode); + + pr_info("probe (%d)\n", 
ret); + + return ret; + +out1: + /* clean up */ + if (kdrvdata->use_dma) { + dma_free_coherent(&spi->dev, PAGE_SIZE, kdrvdata->tx_buf, + kdrvdata->tx_buf_dma); + } else { + kfree(kdrvdata->rx_buf); + kfree(kdrvdata->tx_buf); + } + +out3: + kfree(kdrvdata); + + return ret; +} + +static void kbus_remove(struct spi_device *spi) +{ + struct kbus_drv_data *kdrvdata = dev_get_drvdata(kbus_device); + + device_destroy(wsysinit_sysfs_class, kbus_dev); + free_irq(kdrvdata->kbus_irq, &kbus_dev); + + devm_gpiod_put(&spi->dev, kdrvdata->gpio_cmdsel); + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nerr); + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nirq); + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nrdy); + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nrst); + devm_gpiod_put(&spi->dev, kdrvdata->gpio_nsync); + + if (kdrvdata->use_dma) { + dma_free_coherent(&spi->dev, PAGE_SIZE, kdrvdata->tx_buf, + kdrvdata->tx_buf_dma); + } else { + kfree(kdrvdata->rx_buf); + kfree(kdrvdata->tx_buf); + } + + dev_set_drvdata(kbus_device, NULL); + spi_set_drvdata(spi, NULL); +} + +#ifdef CONFIG_OF +static const struct of_device_id kbus_spi_dt_ids[] = { + { .compatible = "wago,spi-kbus" }, + {} +}; +MODULE_DEVICE_TABLE(of, kbus_spi_dt_ids); +#endif + +static const struct spi_device_id kbus_spi_device_id = { + .name = "spi-kbus", +}; + +struct spi_driver kbus_driver = { + .id_table = &kbus_spi_device_id, + .driver = { + .name = "kbus-cpu", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(kbus_spi_dt_ids), + }, + .probe = kbus_probe, + .remove = kbus_remove, +}; + +static int __init kbus_init(void) +{ + int ret; + + kbus_dbg("%s ...\n", __func__); + + kbus_dev = MKDEV(KBUS_DRIVER_MAJOR, 0); + if ((ret = register_chrdev_region(kbus_dev, 1, "kbus")) < 0) { + pr_err("%s: register_chrdev_region(): ret=%d\n", __func__, ret); + return ret; + } + + cdev_init(&kbus_cdev, &kbus_fops); + if ((ret = cdev_add(&kbus_cdev, kbus_dev, 1)) < 0) { + pr_err("%s: cdev_add(): ret=%d\n", __func__, ret); + 
unregister_chrdev_region(kbus_dev, 1); + return ret; + } + + return spi_register_driver(&kbus_driver); +} + +static void __exit kbus_exit(void) +{ + spi_unregister_driver(&kbus_driver); + cdev_del(&kbus_cdev); + unregister_chrdev_region(kbus_dev, 1); +} + +module_init(kbus_init); +module_exit(kbus_exit); + +MODULE_DESCRIPTION("WAGO KBUS SPI Driver"); +MODULE_AUTHOR("Heinrich Toews "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index ddf1c684bcc7..eb4ad9a4b73c 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -29,6 +29,20 @@ #include +#include + +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION +#include +#endif + +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION +#include +#include +#define PXC_SPI_KBUS_TRACER +#include +static int kbusdelay = 0; /* KBUS Inter-Frame-Delay in nanoseconds */ +#endif + #define OMAP2_MCSPI_MAX_FREQ 48000000 #define OMAP2_MCSPI_MAX_DIVIDER 4096 #define OMAP2_MCSPI_MAX_FIFODEPTH 64 @@ -290,6 +304,25 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr) ctx->modulctrl = l; } +#ifdef CONFIG_SPI_KBUS_OMAP_SET_SPIDAT_DIR +static void omap2_mcspi_set_spidat_direction(struct spi_master *master) +{ + u32 l; + +#define OMAP2_MCSPI_SYST_SPIDATDIR0_INPUT_EN BIT(8) +#define OMAP2_MCSPI_SYST_SPIDATDIR1_INPUT_EN BIT(9) + l = mcspi_read_reg(master, OMAP2_MCSPI_SYST); + pr_info("%s: read-OMAP2_MCSPI_SYST: 0x%x\n", __func__, l); + l &= ~(OMAP2_MCSPI_SYST_SPIDATDIR0_INPUT_EN | OMAP2_MCSPI_SYST_SPIDATDIR1_INPUT_EN); + l |= OMAP2_MCSPI_SYST_SPIDATDIR0_INPUT_EN; + pr_info("%s: write-OMAP2_MCSPI_SYST: 0x%x\n", __func__, l); + mcspi_write_reg(master, OMAP2_MCSPI_SYST, l); + + l = mcspi_read_reg(master, OMAP2_MCSPI_SYST); + pr_info("%s: (update) read-OMAP2_MCSPI_SYST: 0x%x\n", __func__, l); +} +#endif + static void omap2_mcspi_set_fifo(const struct spi_device *spi, struct spi_transfer *t, int enable) { @@ -386,6 +419,11 @@ static void omap2_mcspi_rx_callback(void *data) /* We must 
disable the DMA RX request */ omap2_mcspi_set_dma_req(spi, 1, 0); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (spi->dev.driver == &kbus_driver.driver) + trace_pxc_kbus(__func__, "DMA: RX completed!"); +#endif + complete(&mcspi_dma->dma_rx_completion); } @@ -398,6 +436,11 @@ static void omap2_mcspi_tx_callback(void *data) /* We must disable the DMA TX request */ omap2_mcspi_set_dma_req(spi, 0, 0); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (spi->dev.driver == &kbus_driver.driver) + trace_pxc_kbus(__func__, "DMA: TX completed!"); +#endif + complete(&mcspi_dma->dma_tx_completion); } @@ -522,6 +565,11 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, return 0; } +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (spi->dev.driver == &kbus_driver.driver) + if (xfer->rx_buf && ((char *) xfer->rx_buf)[96-1] == 0x66) + trace_pxc_kbus(__func__, "DMA: RX Data MATCH (0x66)"); +#endif for (x = 0; x < nb_sizes; x++) kfree(sg_out[x]); @@ -592,6 +640,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) void __iomem *irqstat_reg; int wait_res; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbus(__func__, "enter"); + + kbus_boost_dma_task(1); +#endif + mcspi = spi_controller_get_devdata(spi->controller); mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; @@ -682,6 +736,11 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "EOT timed out\n"); } } + +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbus(__func__, "leave"); +#endif + return count; } @@ -696,11 +755,20 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) void __iomem *rx_reg; void __iomem *chstat_reg; int word_len; + struct omap2_mcspi_device_config *cd; + cd = spi->controller_data; count = xfer->len; c = count; word_len = cs->word_len; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbus(__func__, "enter"); + kbus_dbg("%s[%d]: count: %d\n", __func__,__LINE__, count); + + kbus_boost_dma_task(0); 
+#endif + l = mcspi_cached_chconf0(spi); /* We store the pre-calculated register addresses on stack to speed @@ -712,6 +780,10 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) if (c < (word_len>>3)) return 0; + dev_dbg(&spi->dev, "xx: %s-%d %d %s:%s\n", xfer->tx_buf ? "tx" : "rx", + word_len, count, cd->turbo_mode ? "turbo" : "-", + (l & OMAP2_MCSPI_CHCONF_TURBO) ? "1" : "-"); + if (word_len <= 8) { u8 *rx; const u8 *tx; @@ -722,6 +794,9 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) do { c -= 1; if (tx != NULL) { +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ +#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_TXS) < 0) { dev_err(&spi->dev, "TXS timed out\n"); @@ -729,9 +804,15 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } dev_vdbg(&spi->dev, "write-%d %02x\n", word_len, *tx); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "write", word_len, *tx); +#endif writel_relaxed(*tx++, tx_reg); } if (rx != NULL) { +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ +#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, "RXS timed out\n"); @@ -744,6 +825,9 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) *rx++ = readl_relaxed(rx_reg); dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "readtb", word_len, *(rx - 1)); +#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, @@ -756,8 +840,14 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } *rx++ = readl_relaxed(rx_reg); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ +#endif dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "read", word_len, *(rx - 1)); 
+#endif } /* Add word delay between each word */ spi_delay_exec(&xfer->word_delay, xfer); @@ -768,22 +858,34 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) rx = xfer->rx_buf; tx = xfer->tx_buf; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ +#endif do { c -= 2; if (tx != NULL) { if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_TXS) < 0) { +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbus(__func__, "TXS timed out"); +#endif dev_err(&spi->dev, "TXS timed out\n"); goto out; } dev_vdbg(&spi->dev, "write-%d %04x\n", word_len, *tx); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "write", word_len, *tx); +#endif writel_relaxed(*tx++, tx_reg); } if (rx != NULL) { if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, "RXS timed out\n"); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbus(__func__, "RXS timed out"); +#endif goto out; } @@ -793,6 +895,9 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) *rx++ = readl_relaxed(rx_reg); dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "readtb", word_len, *(rx - 1)); +#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, @@ -804,13 +909,30 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) omap2_mcspi_set_enable(spi, 0); } + /* INFO: + * We have a timing problem here!!! + * During extensive spi traffic some bytes were lost + * during read. Some tests also showed that the Infineon + * needs some more time between the spi words. + */ +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (kbusdelay) + ndelay(kbusdelay); /* 400: With this delay we did a test over 14h successfully! 
*/ +#endif + *rx++ = readl_relaxed(rx_reg); dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + trace_pxc_kbusdump(__func__, "read", word_len, *(rx - 1)); +#endif } /* Add word delay between each word */ spi_delay_exec(&xfer->word_delay, xfer); } while (c >= 2); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ +#endif } else if (word_len <= 32) { u32 *rx; const u32 *tx; @@ -840,6 +962,14 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) (l & OMAP2_MCSPI_CHCONF_TURBO)) { omap2_mcspi_set_enable(spi, 0); *rx++ = readl_relaxed(rx_reg); + + /* For some reason while beeing in + * turbo mode we need a short delay + * here. Otherwise it will hang if we + * try to disable and enable + * turbo mode again */ + ndelay(1); + dev_vdbg(&spi->dev, "read-%d %08x\n", word_len, *(rx - 1)); if (mcspi_wait_for_reg_bit(chstat_reg, @@ -879,6 +1009,10 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } out: omap2_mcspi_set_enable(spi, 1); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + kbus_wago_mpoint(); /* MX */ + trace_pxc_kbus(__func__, "leave"); +#endif return count - c; } @@ -936,6 +1070,9 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, l &= ~OMAP2_MCSPI_CHCONF_IS; l &= ~OMAP2_MCSPI_CHCONF_DPE1; l |= OMAP2_MCSPI_CHCONF_DPE0; +#ifdef CONFIG_SPI_KBUS_OMAP_SET_SPIDAT_DIR + omap2_mcspi_set_spidat_direction(spi->master); +#endif } else { l |= OMAP2_MCSPI_CHCONF_IS; l |= OMAP2_MCSPI_CHCONF_DPE1; @@ -1061,6 +1198,12 @@ static int omap2_mcspi_setup(struct spi_device *spi) struct omap2_mcspi_regs *ctx = &mcspi->ctx; struct omap2_mcspi_cs *cs = spi->controller_state; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (spi->max_speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15) || + spi->max_speed_hz > OMAP2_MCSPI_MAX_FREQ) + return -EINVAL; +#endif + if (!cs) { cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) @@ -1140,15 +1283,29 @@ static int omap2_mcspi_transfer_one(struct 
spi_controller *ctlr, struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs; struct omap2_mcspi_device_config *cd; - int par_override = 0; +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION + int par_override = 0; +#endif int status = 0; u32 chconf; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + struct kbus_drv_data *kdrvdata = NULL; + + trace_pxc_kbus(__func__, "enter"); + kbus_wago_mpoint(); /* M4 */ +#endif + mcspi = spi_controller_get_devdata(ctlr); mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0); cs = spi->controller_state; cd = spi->controller_data; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + if (spi->dev.driver == &kbus_driver.driver) + kdrvdata = spi_get_drvdata(spi); +#endif +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION /* * The target driver could have changed spi->mode in which case * it will be different from cs->mode (the current hardware setup). @@ -1158,12 +1315,14 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, */ if (spi->mode != cs->mode) par_override = 1; +#endif omap2_mcspi_set_enable(spi, 0); if (spi_get_csgpiod(spi, 0)) omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH); +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION if (par_override || (t->speed_hz != spi->max_speed_hz) || (t->bits_per_word != spi->bits_per_word)) { @@ -1175,6 +1334,8 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, t->bits_per_word == spi->bits_per_word) par_override = 0; } +#endif + if (cd && cd->cs_per_word) { chconf = mcspi->ctx.modulctrl; chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; @@ -1202,11 +1363,19 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, if (t->len) { unsigned count; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + int i; +#endif +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION + /* FIXME + At this moment keep fifo disabled due to some issues + that were coming up with large kbus nodes. 
*/ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && ctlr->cur_msg_mapped && ctlr->can_dma(ctlr, spi, t)) omap2_mcspi_set_fifo(spi, t, 1); +#endif omap2_mcspi_set_enable(spi, 1); @@ -1215,10 +1384,47 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, writel_relaxed(0, cs->base + OMAP2_MCSPI_TX0); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + /* + * kbus: + * release irq and check the sync signal + * before sending data + */ + if (kdrvdata && t->tx_buf) { + /* + * XE164 should be ready a long time ago (several us). + * Otherwise something is wrong with the controller! + */ + for (i = 0; i < PAC_KBUS_SYNC_CYCLES; i++) { + if (gpiod_get_value(kdrvdata->gpio_nsync)) /* active low */ + continue; + break; + } + + if (i >= PAC_KBUS_SYNC_CYCLES) { + trace_pxc_kbus(__func__, "err: sync pin is always high [-EBUSY(-16)]!"); + status = -EBUSY; + goto out; + } + + /* release the irq pin */ + gpiod_set_value(kdrvdata->gpio_nirq, 1); + + } +#endif + if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && ctlr->cur_msg_mapped && +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + ctlr->can_dma(ctlr, spi, t)) { + if (kdrvdata) + trace_pxc_kbus(__func__, "DMA: TXRX: Trigger DMA Transfer."); +#endif +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION ctlr->can_dma(ctlr, spi, t)) +#endif count = omap2_mcspi_txrx_dma(spi, t); + } else count = omap2_mcspi_txrx_pio(spi, t); @@ -1233,12 +1439,97 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, if (mcspi->fifo_depth > 0) omap2_mcspi_set_fifo(spi, t, 0); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + /* + * Special KBUS Treatment + * + */ + if (kdrvdata) { + static u16 *kcmd_txbuf; + int tmp_ret; + + kbus_wago_mpoint(); /* MX */ + if (t->tx_buf) { /* after tx transfer */ + kcmd_txbuf = (u16 *) t->tx_buf; + /* wait for READYn IRQ from xe164 */ + + trace_pxc_kbus(__func__, "TX0"); + kbus_dbg("%s: irq timeout is %dms\n", + __func__, kdrvdata->timeout_ms); + tmp_ret = wait_event_interruptible_timeout(kdrvdata->kbus_irq_wq, + kdrvdata->kbus_irq_state == 
0, + msecs_to_jiffies(kdrvdata->timeout_ms)); + + if (tmp_ret == 0 && kdrvdata->kbus_irq_state) { + status = -ETIMEDOUT; + trace_pxc_kbus(__func__, "TX0: IRQ timeout!"); + goto out; + } + if (kbus_error() < 0) { + status = -ENODATA; + trace_pxc_kbus(__func__, "TX0: KBUS ERROR."); + goto out; + } + trace_pxc_kbus(__func__, "TX0: IRQ received"); + } + + if (kdrvdata->cmdsel) { /* special treatment in command mode */ + /* We're getting the data in two nibbles. + * First: 6 bytes header where we get the length for the rest. + * Second: The remaining N bytes. + */ + if (t->rx_buf == kdrvdata->rx_buf) { /* This RX0 */ + u16 *kcmd_hdr = (u16 *) t->rx_buf; + u8 lb_cmd = kcmd_hdr[0] & 0xff; + u8 hb_cmd_inv = ~(kcmd_hdr[0] >> 8) & 0xff; + u8 lb_wlen = kcmd_hdr[2] & 0xff; + u8 hb_wlen_inv = ~(kcmd_hdr[2] >> 8) & 0xff; + unsigned int byte_len; + struct spi_transfer *kcmd_tnext; + + trace_pxc_kbus(__func__, "RX0"); + + /* validate header data here */ + if (lb_cmd != (kcmd_txbuf[0] & 0xff) || + hb_cmd_inv != lb_cmd || + hb_wlen_inv != lb_wlen) { + trace_pxc_kbus(__func__, "RX0: RX0 HDR not valid."); + kbus_dbg("%s[%d]: RX0 HDR not valid: 0x%.4x(TX:0x%.2x)|0x%.4x|0x%.4x\n", + __func__, __LINE__, kcmd_hdr[0], kcmd_txbuf[0] & 0xff, + kcmd_hdr[1], kcmd_hdr[2]); + status = -EPROTO; + goto out; + } + + /* get next transfer entry */ + kcmd_tnext = list_entry(t->transfer_list.next, + struct spi_transfer, transfer_list); + + /* regard word (16bit) count */ + byte_len = (kcmd_hdr[2] & 0xff) << 1; + if (byte_len < kcmd_tnext->len) { + kcmd_tnext->len = byte_len; + trace_pxc_kbus(__func__, "RX0: RX1 len updated."); + } + + kbus_dbg("%s[%d]: RX1 len set to: %d\n", + __func__, __LINE__, kcmd_tnext->len); + } else if (t->rx_buf != NULL) { + trace_pxc_kbus(__func__, "RX1"); + } + } + kbus_wago_mpoint(); /* MX */ + } +#endif + out: +#ifndef CONFIG_SPI_KBUS_OMAP_EXTENSION /* Restore defaults if they were overriden */ if (par_override) { par_override = 0; status = omap2_mcspi_setup_transfer(spi, 
NULL); } +#endif if (cd && cd->cs_per_word) { chconf = mcspi->ctx.modulctrl; @@ -1540,6 +1831,16 @@ static int omap2_mcspi_probe(struct platform_device *pdev) if (status < 0) goto disable_pm; +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION + dev_dbg(&pdev->dev, "kbusdelay=%d, %s interframe gap delay.\n", + kbusdelay, kbusdelay ? "using" : "NOT using"); +#endif + +#if 0 /* #ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION */ + if (omap2_mcspi_enable_clocks(mcspi) < 0) + goto free_master; +#endif + return status; disable_pm: @@ -1618,4 +1919,7 @@ static struct platform_driver omap2_mcspi_driver = { }; module_platform_driver(omap2_mcspi_driver); +#ifdef CONFIG_SPI_KBUS_OMAP_EXTENSION +core_param(kbusdelay, kbusdelay, int, 0000); +#endif MODULE_LICENSE("GPL"); diff --git a/include/linux/spi/kbus.h b/include/linux/spi/kbus.h new file mode 100644 index 000000000000..07e27b4cbd55 --- /dev/null +++ b/include/linux/spi/kbus.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/spi/kbus.h + * + * Copyright (C) 2020 WAGO + * Heinrich Toews + */ + +#ifndef KBUS_H +#define KBUS_H + +#include + +#define KBUS_DRIVER_MAJOR 240 +#define KBUS_IRQ_TIMEOUT 10 /* wait for a max. of 10 ms */ + +#define KBUS__DEFAULT_SPEED 12000000 +#define KBUS_READYN_IRQ +#define KBUS_DATA_FULLDPX + +#undef KBUS__WAIT_ACTIVE +#undef KBUS_IRQN_IRQ + +/*---------------------------------------------------------------------------*/ + +#define PAC_KBUS_SYNC_CYCLES 3 +#define SPI_MODE_MASK \ + (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_3WIRE | \ + SPI_LOOP | SPI_NO_CS | SPI_READY) + +#define KBUS__MAX_BUF_LEN PAGE_SIZE /* FIXME */ + +#define KBUS_TESTING 0 /* enable when gpio testing wanted */ + +#define KBUS__USE_DMA_ONLY 0 + +#if KBUS_TESTING +#define kbus_wago_mpoint(k) wago_mpoint() +#define kbus_dbg(format, arg...) pr_debug("kbus-dbg:" format, ##arg) +#else +#define kbus_wago_mpoint(k) \ + ({ \ + if (0) \ + wago_mpoint(); \ + 0; \ + }) +#define kbus_dbg(format, arg...) 
\ + ({ \ + if (0) \ + pr_debug("kbus-dbg:" format, ##arg); \ + 0; \ + }) +#endif + +#define KBUS_ENABLE_IRQ(irq) \ + do { \ + if (irq != -1) \ + enable_irq(irq); \ + } while (0) + +#define KBUS_DISABLE_IRQ(irq) \ + do { \ + if (irq != -1) \ + disable_irq(irq); \ + } while (0) + +struct kbus_drv_data { + u8 cmdsel; /* 0: data, 1: cmd mode */ + int kbus_err; + int kbus_err_state; + u8 *tx_buf; + u8 *rx_buf; + bool use_dma; + u32 timeout_ms; + dma_addr_t tx_buf_dma; + dma_addr_t rx_buf_dma; + int kbus_irq; + int kbus_irq_enabled; + int kbus_irq_state; + struct task_struct + *dma_task; /* task pointer to boost the dma task if necessary */ + bool kbus_dma_boost_en; + u8 kbus_dma_boost_prio; + const char *kbus_dma_boost_irq_thread; + u8 kbus_dma_normal_prio; + + /* gpios */ + struct gpio_desc *gpio_nrdy; + struct gpio_desc *gpio_nrst; + struct gpio_desc *gpio_nsync; + struct gpio_desc *gpio_cmdsel; + struct gpio_desc *gpio_nirq; + struct gpio_desc *gpio_nerr; + + wait_queue_head_t kbus_irq_wq; + struct spi_device *spi; + const char *kbus_tty_device_name; +}; + +/* For userspace ioctl communication */ +struct kbus_data { + __u8 __user *tx_buf; + __u8 __user *rx_buf; + __u32 byte_len; + __u8 __user *err; /* will only be set when err occurs! */ + __u8 __user *err_state; + __u32 timeout_ms; +}; + +struct kbus_cmd { + __u8 __user *tx_buf; + __u8 __user *rx_buf; + __u32 byte_len_tx; + __u32 byte_len_rx; + __u8 __user *err; /* will only be set when err occurs! 
*/ + __u8 __user *err_state; + __u32 timeout_ms; +}; + +struct kbus_spi_config { + __u8 bits_per_word; /* bits_per_word */ + __u8 mode; /* see SPI_ mode bits in spi.h */ + __u32 max_speed_hz; +}; + +extern int kbus_wait_for_irq(void); +extern int kbus_wait_for_event(int *event); +extern int kbus_error(void); +extern int kbus_wait_for_gpio(int gpio); +extern void kbus_boost_dma_task(u8 enable); + +/* IOCTL commands */ +#define KBUS_IOC_MAGIC 'K' +#define KBUS_IOC_DATA _IOW(KBUS_IOC_MAGIC, 1, struct kbus_data) +#define KBUS_IOC_CMD _IOW(KBUS_IOC_MAGIC, 2, struct kbus_cmd) +#define KBUS_IOC_CONFIG _IOW(KBUS_IOC_MAGIC, 3, struct kbus_spi_config) +#define KBUS_IOC_BINARY _IOW(KBUS_IOC_MAGIC, 4, struct kbus_data) + +extern struct spi_driver + kbus_driver; /* used by spi-omap2-mcspi to recognize the kbus device */ + +#endif /* KBUS_H */ diff --git a/include/misc/wago-tests.h b/include/misc/wago-tests.h new file mode 100644 index 000000000000..b95d8daf2e47 --- /dev/null +++ b/include/misc/wago-tests.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * + * Copyright (c) 2014 WAGO GmbH & Co. KG + * + * Author: Heinrich Toews + * + */ + +#ifndef _WAGO_TESTS_H_ +#define _WAGO_TESTS_H_ + +#define WAGO_TEST_DEBUG + +#ifdef WAGO_TEST_DEBUG +#define pac_kdebug(format, arg...) \ + printk(KERN_INFO "pac-kdebug: " format , ## arg) +#else +#define pac_kdebug(format, arg...) 
\ + ({ if (0) printk(KERN_INFO "pac-kdebug: " format , ## arg); 0; }) +#endif + +#include +#include + +#define WAGO_TEST__MAX_MEASUREMENTS 20 +#define WAGO_TEST__GPIO 175 /* FB-nINT_GPIO175 */ + +struct wago_trace_data { + struct timespec64 mpoints[WAGO_TEST__MAX_MEASUREMENTS]; + int mpoint_index; +}; + +extern void wago_tests_init(struct wago_trace_data *tdata, u8 gpios_enable); +extern void wago_tests_deinit(void); +extern void wago_measure_reset(void); +extern void wago_mpoint(void); + +#endif /* _WAGO_TESTS_H_ */ diff --git a/include/trace/events/pxc.h b/include/trace/events/pxc.h new file mode 100644 index 000000000000..91e5ac8b7920 --- /dev/null +++ b/include/trace/events/pxc.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * + * Copyright (c) 2015 WAGO GmbH & Co. KG + * + * Author: Heinrich Toews + * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pxc + +#if !defined(_TRACE_PXC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PXC_H + +#include + +#ifdef PFC_CLOCK_TRACER +TRACE_EVENT(pfc_clock, /* trace pfc clock events */ + + TP_PROTO(cycle_t cycle_now, cycle_t cycle_delta, struct timekeeper *tk, s64 nsec, char *msg), + + TP_ARGS(cycle_now, cycle_delta, tk, nsec, msg), + + TP_STRUCT__entry( + __field(cycle_t, cycle_now) + __field(cycle_t, cycle_delta) + __field(s64, nsec) + __field(u32, mult) + __field(u32, shift) + __field(u64, xtime_nsec) + __array(char, msg, 128) + ), + + TP_fast_assign( + __entry->cycle_now = cycle_now; + __entry->cycle_delta = cycle_delta; + __entry->nsec = nsec; + __entry->mult = tk->mult; + __entry->shift = tk->shift; + __entry->xtime_nsec = tk->xtime_nsec; + strncpy( __entry->msg, msg, 128); + ), + + TP_printk("%llu d:%llu->%lli|%u|%u|%llu|%s", + __entry->cycle_now, __entry->cycle_delta, __entry->nsec, + __entry->mult, __entry->shift, __entry->xtime_nsec, + __entry->msg) +); +#endif + +#if defined(PXC_ETH_EMAC) +TRACE_EVENT(pxc_eth_emac, /* trace davinci_emac events */ + + TP_PROTO(struct emac_priv 
*priv, char *msg), + + TP_ARGS(priv, msg), + + TP_STRUCT__entry( + __field(unsigned, link) + __field(unsigned, speed) + __field(unsigned, duplex) + __array(char, phy_id, 32) + __array(char, other_phy_id, 32) + __array(char, msg, 128) + __array(char, devname, 64) + ), + + TP_fast_assign( + __entry->link = priv->link; + __entry->speed = priv->speed; + __entry->duplex = priv->duplex; + strncpy( __entry->phy_id, priv->phy_id, 32); + strncpy(__entry->other_phy_id, priv->other_phy_id, 32); + strncpy( __entry->msg, msg, 128); + if (priv->ndev) + strncpy(__entry->devname, priv->ndev->name, 64); + ), + + TP_printk("%6s 1-%s 2-%s: %4s|%d/%4s %s", __entry->devname, __entry->phy_id, __entry->other_phy_id, + __entry->link ? "UP" : "DOWN", __entry->speed, __entry->duplex == 1 ? "Full" : "Half", + __entry->msg) +); + +TRACE_EVENT(pxc_eth_emac_phy, /* trace phy events */ + + TP_PROTO(struct phy_device *phydev, char *msg), + + TP_ARGS(phydev, msg), + + TP_STRUCT__entry( + __array(char, msg, 128) + __array(char, phyname, 16) + __field(int, link) + __field(int, speed) + __field(int, duplex) /* DUPLEX_HALF=0, DUPLEX_FULL=1 */ + __field(int, state) + __field(int, irq) + __array(char, devname, 64) + __field(u32, dev_flags) + ), + + TP_fast_assign( + if (phydev->attached_dev) + strncpy(__entry->devname, phydev->attached_dev->name, 64); + strncpy(__entry->msg, msg, 128); + strncpy(__entry->phyname, dev_name(&phydev->dev), 16); + __entry->link = phydev->link; + __entry->speed = phydev->speed; + __entry->duplex = phydev->duplex; + __entry->state = phydev->state; + __entry->irq = phydev->irq; + __entry->dev_flags = phydev->dev_flags; + ), + + TP_printk("%6s(%s) %4s|%d/%4s|%d|%d|%u %s", __entry->devname, __entry->phyname, + __entry->link ? "UP" : "DOWN", __entry->speed, __entry->duplex == 1 ? 
"Full" : "Half", + __entry->state, __entry->irq, __entry->dev_flags, __entry->msg) +); +#endif + +#if defined(PXC_SPI_TRACER) +TRACE_EVENT(pxc_spi, /* trace mcspi events */ + + TP_PROTO(char *msg, const char *func, int data), + + TP_ARGS(msg, func, data), + + TP_STRUCT__entry( + __array(char, msg, 128) + __array(char, func, 32) + __field(int, data) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + strncpy( __entry->func, func, 32); + __entry->data = data; + ), + + TP_printk("%s[%d]: %s", __entry->func, __entry->data, __entry->msg) +); + +TRACE_EVENT(pxc_spi_measure_a, /* trace mcspi events */ + + TP_PROTO(char *msg, struct wago_trace_data *tdata), + + TP_ARGS(msg, tdata), + + TP_STRUCT__entry( + __array(char, msg, 128) + __field(unsigned int, completion_delay) + __field(unsigned int, completion_delay_work) + __field(unsigned int, async_delay) + __field(unsigned int, enqueue_delay) + __field(unsigned int, work_delay) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + if (tdata->mpoint_index > 5) { + __entry->completion_delay = tdata->mpoints[5].tv_nsec - tdata->mpoints[1].tv_nsec; + __entry->completion_delay_work = tdata->mpoints[4].tv_nsec - tdata->mpoints[2].tv_nsec; + __entry->async_delay = tdata->mpoints[1].tv_nsec - tdata->mpoints[0].tv_nsec; + __entry->enqueue_delay = tdata->mpoints[2].tv_nsec - tdata->mpoints[1].tv_nsec; + __entry->work_delay = tdata->mpoints[3].tv_nsec - tdata->mpoints[2].tv_nsec; + } + ), + + TP_printk("%s: completion_delay=%u, completion_delay_work=%u, async_delay=%u, enqueue_delay=%u, work_delay=%u", + __entry->msg, __entry->completion_delay, __entry->completion_delay_work, + __entry->async_delay, __entry->enqueue_delay, __entry->work_delay) +); + +TRACE_EVENT(pxc_spi_measure_b, /* trace mcspi events */ + + TP_PROTO(char *msg, struct wago_trace_data *tdata), + + TP_ARGS(msg, tdata), + + TP_STRUCT__entry( + __array(char, msg, 128) + __field(unsigned int, completion_delay) + __field(unsigned int, 
completion_delay_work) + __field(unsigned int, async_delay) + __field(unsigned int, enqueue_delay) + __field(unsigned int, work_delay) + __field(unsigned int, delay1) + __field(unsigned int, delay2) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + if (tdata->mpoint_index > 8) { + __entry->completion_delay = tdata->mpoints[7].tv_nsec - tdata->mpoints[2].tv_nsec; + __entry->completion_delay_work = tdata->mpoints[6].tv_nsec - tdata->mpoints[4].tv_nsec; + __entry->async_delay = tdata->mpoints[2].tv_nsec - tdata->mpoints[1].tv_nsec; + __entry->enqueue_delay = tdata->mpoints[4].tv_nsec - tdata->mpoints[2].tv_nsec; + __entry->work_delay = tdata->mpoints[5].tv_nsec - tdata->mpoints[4].tv_nsec; + __entry->delay1 = tdata->mpoints[7].tv_nsec - tdata->mpoints[1].tv_nsec; + __entry->delay2 = tdata->mpoints[8].tv_nsec - tdata->mpoints[0].tv_nsec; + } + ), + + TP_printk("%s: completion_delay=%u, completion_delay_work=%u, async_delay=%u, enqueue_delay=%u, work_delay=%u, delay1=%u, delay2=%u", + __entry->msg, __entry->completion_delay, __entry->completion_delay_work, + __entry->async_delay, __entry->enqueue_delay, __entry->work_delay, __entry->delay1, __entry->delay2) +); +#endif /* PXC_SPI_TRACER */ + +#if defined(PXC_SPI_KBUS_TRACER) +TRACE_EVENT(pxc_kbus, /* trace kbus events */ + + TP_PROTO(const char *func, char *msg), + + TP_ARGS(func, msg), + + TP_STRUCT__entry( + __array(char, msg, 128) + __array(char, func, 32) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + strncpy( __entry->func, func, 32); + ), + + TP_printk("%s:%s", __entry->func, __entry->msg) +); + +TRACE_EVENT(pxc_kbusmsg, /* trace kbus events */ + + TP_PROTO(const char *func, struct spi_message *m, char *msg), + + TP_ARGS(func, m, msg), + + TP_STRUCT__entry( + __array(char, msg, 128) + __array(char, func, 32) + __field(int, status) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + strncpy( __entry->func, func, 32); + __entry->status = m->status; + ), + + TP_printk("%s:%s 
m->status=%d", __entry->func, __entry->msg, __entry->status) +); + +TRACE_EVENT(pxc_kbusdump, /* trace kbus events */ + + TP_PROTO(const char *func, char *msg, int word_len, u16 word), + + TP_ARGS(func, msg, word_len, word), + + TP_STRUCT__entry( + __array(char, func, 32) + __array(char, msg, 128) + __field( int, word_len) + __field( u16, word) + ), + + TP_fast_assign( + strncpy( __entry->func, func, 32); + strncpy( __entry->msg, msg, 128); + __entry->word_len = word_len; + __entry->word = word; + ), + + TP_printk("%s:%s-%d 0x%x", __entry->func, __entry->msg, __entry->word_len, __entry->word) /* 0x%04x */ +); + +TRACE_EVENT(pxc_buf32, + + TP_PROTO(const char *prefix, char *in_buf, int len, int offs), + + TP_ARGS(prefix, in_buf, len, offs), + + TP_STRUCT__entry( + __array(char, prefix, 32) + __field(int, copy_len) + __array(char, buf, 32) + __field(char *, in_buf_p) + ), + + TP_fast_assign( + strncpy(__entry->prefix, prefix, 32); + if (len < 32) + memset(__entry->buf, 0, 32); + strncpy(__entry->buf, in_buf + offs, len > 32 ? 
32 : len); + __entry->copy_len = len; + __entry->in_buf_p = in_buf; + ), + + TP_printk("DATADUMP(%s) copylen %4d (in_buf_p %p) " + "[%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" + "-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x-%.2x]", + __entry->prefix, + __entry->copy_len, + __entry->in_buf_p, + __entry->buf[0], __entry->buf[1], __entry->buf[2], __entry->buf[3], + __entry->buf[4], __entry->buf[5], __entry->buf[6], __entry->buf[7], + __entry->buf[8], __entry->buf[9], __entry->buf[10], __entry->buf[11], + __entry->buf[12], __entry->buf[13], __entry->buf[14], __entry->buf[15], + __entry->buf[16], __entry->buf[17], __entry->buf[18], __entry->buf[19], + __entry->buf[20], __entry->buf[21], __entry->buf[22], __entry->buf[23], + __entry->buf[24], __entry->buf[25], __entry->buf[26], __entry->buf[27], + __entry->buf[28], __entry->buf[29], __entry->buf[30], __entry->buf[31]) +); + +TRACE_EVENT(pxc_kbus_mdata, /* trace kbus measurement events */ + + TP_PROTO(const char *func, char *msg, struct wago_trace_data *tdata), + + TP_ARGS(func, msg, tdata), + + TP_STRUCT__entry( + __array(char, msg, 128) + __array(char, func, 32) + __field(unsigned int, delay1) + __field(unsigned int, delay2) + ), + + TP_fast_assign( + strncpy( __entry->msg, msg, 128); + strncpy( __entry->func, func, 32); + if (tdata->mpoint_index > 3) { + __entry->delay1 = tdata->mpoints[3].tv_nsec - tdata->mpoints[0].tv_nsec; + __entry->delay2 = tdata->mpoints[2].tv_nsec - tdata->mpoints[1].tv_nsec; + } + ), + + TP_printk("%s:%s delay1=%u delay2=%u", __entry->func, __entry->msg, + __entry->delay1, __entry->delay2) +); +#endif /* PXC_SPI_KBUS_TRACER */ + +#if defined(PXC_CAN_TRACER) +TRACE_EVENT(pxc_canpkt, + + TP_PROTO(struct can_frame *canframe), + + TP_ARGS(canframe), + + TP_STRUCT__entry( + __field(struct can_frame *, canframe) + ), + + TP_fast_assign( + __entry->canframe = canframe; + ), + + TP_printk("%s-0x%x: can_id=0x%x, len=%d, 
data:%.2x.%.2x.%.2x.%.2x.%.2x.%.2x.%.2x.%.2x", + (__entry->canframe->can_id & CAN_ERR_FLAG) ? " err" : "data", + __entry->canframe->can_id >> CAN_EFF_ID_BITS, + __entry->canframe->can_id & CAN_ERR_MASK, + __entry->canframe->can_dlc, + __entry->canframe->data[0], + __entry->canframe->data[1], + __entry->canframe->data[2], + __entry->canframe->data[3], + __entry->canframe->data[4], + __entry->canframe->data[5], + __entry->canframe->data[6], + __entry->canframe->data[7]) +); +#endif /* PXC_CAN_TRACER */ + +#endif /* if !defined(_TRACE_PXC_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include