- Move imx_rpmsg from arch/arm/ to drivers/rpmsg/.
- Use the new generic MU APIs in the RPMsg implementation.
- Validated with the pingpong test on both the i.MX6SX SDB and i.MX7D SDB boards.
Signed-off-by: Richard Zhu <hongxing.zhu@nxp.com>
--- /dev/null
+i.MX RPMSG platform implementations
+
+Required properties:
+- compatible : should be one of "fsl,imx6sx-rpmsg", "fsl,imx7d-rpmsg" or
+ "fsl,imx7ulp-rpmsg".
+- vdev-nums : The number of remote virtual devices.
+- reg : The reserved DDR physical memory region used to store the vring
+ descriptors (see the layout note below the example).
+
+Example:
+rpmsg: rpmsg {
+ compatible = "fsl,imx6sx-rpmsg";
+ status = "disabled";
+};
+
+&rpmsg {
+ vdev-nums = <1>;
+ reg = <0xbfff0000 0x10000>;
+ status = "okay";
+};
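+
+Each remote virtual device consumes a 0x10000 slice of the reserved region:
+the driver places vring0 at the slice base and vring1 at base + 0x8000
+(see set_vring_phy_buf()). For the example above (vdev-nums = <1>,
+reg = <0xbfff0000 0x10000>), vring0 sits at 0xbfff0000 and vring1 at
+0xbfff8000; each additional vdev needs another 0x10000 of reserved memory.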
obj-$(CONFIG_HAVE_IMX_DDRC) += ddrc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
obj-$(CONFIG_HAVE_IMX_MU) += mu.o
-obj-$(CONFIG_HAVE_IMX_RPMSG) += imx_rpmsg.o
ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_IMX7)$(CONFIG_SOC_LS1021A),)
AFLAGS_headsmp.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+++ /dev/null
-/*
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
- *
- * derived from the omap-rpmsg implementation.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/rpmsg.h>
-#include <linux/slab.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_ring.h>
-#include <linux/imx_rpmsg.h>
-
-struct imx_virdev {
- struct virtio_device vdev;
- unsigned int vring[2];
- struct virtqueue *vq[2];
- int base_vq_id;
- int num_of_vqs;
- struct notifier_block nb;
-};
-
-struct imx_rpmsg_vproc {
- char *rproc_name;
- struct mutex lock;
- int vdev_nums;
-#define MAX_VDEV_NUMS 5
- struct imx_virdev ivdev[MAX_VDEV_NUMS];
-};
-
-/*
- * For now, allocate 256 buffers of 512 bytes for each side. each buffer
- * will then have 16B for the msg header and 496B for the payload.
- * This will require a total space of 256KB for the buffers themselves, and
- * 3 pages for every vring (the size of the vring depends on the number of
- * buffers it supports).
- */
-#define RPMSG_NUM_BUFS (512)
-#define RPMSG_BUF_SIZE (512)
-#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
-
-/*
- * The alignment between the consumer and producer parts of the vring.
- * Note: this is part of the "wire" protocol. If you change this, you need
- * to update your BIOS image as well
- */
-#define RPMSG_VRING_ALIGN (4096)
-
-/* With 256 buffers, our vring will occupy 3 pages */
-#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
- RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
-
-#define to_imx_virdev(vd) container_of(vd, struct imx_virdev, vdev)
-#define to_imx_rpdev(vd, id) container_of(vd, struct imx_rpmsg_vproc, ivdev[id])
-
-struct imx_rpmsg_vq_info {
- __u16 num; /* number of entries in the virtio_ring */
- __u16 vq_id; /* a globaly unique index of this virtqueue */
- void *addr; /* address where we mapped the virtio ring */
- struct imx_rpmsg_vproc *rpdev;
-};
-
-static u64 imx_rpmsg_get_features(struct virtio_device *vdev)
-{
- /* VIRTIO_RPMSG_F_NS has been made private */
- return 1 << 0;
-}
-
-static int imx_rpmsg_finalize_features(struct virtio_device *vdev)
-{
- /* Give virtio_ring a chance to accept features */
- vring_transport_features(vdev);
- return 0;
-}
-
-/* kick the remote processor, and let it know which virtqueue to poke at */
-static bool imx_rpmsg_notify(struct virtqueue *vq)
-{
- int ret;
- unsigned int mu_rpmsg = 0;
- struct imx_rpmsg_vq_info *rpvq = vq->priv;
-
- mu_rpmsg = rpvq->vq_id << 16;
- mutex_lock(&rpvq->rpdev->lock);
- /* send the index of the triggered virtqueue as the mu payload */
- ret = imx_mu_rpmsg_send(mu_rpmsg);
- mutex_unlock(&rpvq->rpdev->lock);
- if (ret) {
- pr_err("ugh, imx_mu_rpmsg_send() failed: %d\n", ret);
- return false;
- }
-
- return true;
-}
-
-static int imx_mu_rpmsg_callback(struct notifier_block *this,
- unsigned long index, void *data)
-{
- u32 mu_msg = (u32) data;
- struct imx_virdev *virdev;
-
- virdev = container_of(this, struct imx_virdev, nb);
-
- pr_debug("%s mu_msg: 0x%x\n", __func__, mu_msg);
-
- /* ignore vq indices which are clearly not for us */
- mu_msg = mu_msg >> 16;
- if (mu_msg < virdev->base_vq_id || mu_msg > virdev->base_vq_id + 1) {
- pr_debug("mu_msg: 0x%x is invalid\n", mu_msg);
- return NOTIFY_DONE;
- }
-
- mu_msg -= virdev->base_vq_id;
-
- /*
- * Currently both PENDING_MSG and explicit-virtqueue-index
- * messaging are supported.
- * Whatever approach is taken, at this point 'mu_msg' contains
- * the index of the vring which was just triggered.
- */
- if (mu_msg < virdev->num_of_vqs)
- vring_interrupt(mu_msg, virdev->vq[mu_msg]);
-
- return NOTIFY_DONE;
-}
-
-static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name)
-{
- struct imx_virdev *virdev = to_imx_virdev(vdev);
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
- virdev->base_vq_id / 2);
- struct imx_rpmsg_vq_info *rpvq;
- struct virtqueue *vq;
- int err;
-
- rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
- if (!rpvq)
- return ERR_PTR(-ENOMEM);
-
- /* ioremap'ing normal memory, so we cast away sparse's complaints */
- rpvq->addr = (__force void *) ioremap_nocache(virdev->vring[index],
- RPMSG_RING_SIZE);
- if (!rpvq->addr) {
- err = -ENOMEM;
- goto free_rpvq;
- }
-
- memset(rpvq->addr, 0, RPMSG_RING_SIZE);
-
- pr_debug("vring%d: phys 0x%x, virt 0x%x\n", index, virdev->vring[index],
- (unsigned int) rpvq->addr);
-
- vq = vring_new_virtqueue(index, RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN,
- vdev, true, rpvq->addr, imx_rpmsg_notify, callback,
- name);
- if (!vq) {
- pr_err("vring_new_virtqueue failed\n");
- err = -ENOMEM;
- goto unmap_vring;
- }
-
- virdev->vq[index] = vq;
- vq->priv = rpvq;
- /* system-wide unique id for this virtqueue */
- rpvq->vq_id = virdev->base_vq_id + index;
- rpvq->rpdev = rpdev;
- mutex_init(&rpdev->lock);
-
- return vq;
-
-unmap_vring:
- /* iounmap normal memory, so make sparse happy */
- iounmap((__force void __iomem *) rpvq->addr);
-free_rpvq:
- kfree(rpvq);
- return ERR_PTR(err);
-}
-
-static void imx_rpmsg_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
- struct imx_virdev *virdev = to_imx_virdev(vdev);
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
- virdev->base_vq_id / 2);
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- struct imx_rpmsg_vq_info *rpvq = vq->priv;
- iounmap(rpvq->addr);
- vring_del_virtqueue(vq);
- kfree(rpvq);
- }
-
- if (&virdev->nb)
- imx_mu_rpmsg_unregister_nb((const char *)rpdev->rproc_name,
- &virdev->nb);
-}
-
-static int imx_rpmsg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[])
-{
- struct imx_virdev *virdev = to_imx_virdev(vdev);
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
- virdev->base_vq_id / 2);
- int i, err;
-
- /* we maintain two virtqueues per remote processor (for RX and TX) */
- if (nvqs != 2)
- return -EINVAL;
-
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- virdev->num_of_vqs = nvqs;
-
- virdev->nb.notifier_call = imx_mu_rpmsg_callback;
- imx_mu_rpmsg_register_nb((const char *)rpdev->rproc_name, &virdev->nb);
-
- return 0;
-
-error:
- imx_rpmsg_del_vqs(vdev);
- return err;
-}
-
-static void imx_rpmsg_reset(struct virtio_device *vdev)
-{
- dev_dbg(&vdev->dev, "reset !\n");
-}
-
-static u8 imx_rpmsg_get_status(struct virtio_device *vdev)
-{
- return 0;
-}
-
-static void imx_rpmsg_set_status(struct virtio_device *vdev, u8 status)
-{
- dev_dbg(&vdev->dev, "%s new status: %d\n", __func__, status);
-}
-
-static void imx_rpmsg_vproc_release(struct device *dev)
-{
- /* this handler is provided so driver core doesn't yell at us */
-}
-
-static struct virtio_config_ops imx_rpmsg_config_ops = {
- .get_features = imx_rpmsg_get_features,
- .finalize_features = imx_rpmsg_finalize_features,
- .find_vqs = imx_rpmsg_find_vqs,
- .del_vqs = imx_rpmsg_del_vqs,
- .reset = imx_rpmsg_reset,
- .set_status = imx_rpmsg_set_status,
- .get_status = imx_rpmsg_get_status,
-};
-
-static struct imx_rpmsg_vproc imx_rpmsg_vprocs[] = {
- {
- .rproc_name = "m4",
- },
-};
-
-static const struct of_device_id imx_rpmsg_dt_ids[] = {
- { .compatible = "fsl,imx6sx-rpmsg", },
- { .compatible = "fsl,imx7d-rpmsg", },
- { .compatible = "fsl,imx7ulp-rpmsg", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, imx_rpmsg_dt_ids);
-
-static int set_vring_phy_buf(struct platform_device *pdev,
- struct imx_rpmsg_vproc *rpdev, int vdev_nums)
-{
- struct resource *res;
- resource_size_t size;
- unsigned int start, end;
- int i, ret = 0;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
- size = resource_size(res);
- start = res->start;
- end = res->start + size;
- for (i = 0; i < vdev_nums; i++) {
- rpdev->ivdev[i].vring[0] = start;
- rpdev->ivdev[i].vring[1] = start +
- 0x8000;
- start += 0x10000;
- if (start > end) {
- pr_err("Too small memory size %x!\n", size);
- ret = -EINVAL;
- break;
- }
- }
- } else {
- return -ENOMEM;
- }
-
- return ret;
-}
-
-static int imx_rpmsg_probe(struct platform_device *pdev)
-{
- int i, j, ret = 0;
- struct device_node *np = pdev->dev.of_node;
-
- for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
- struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
-
- ret = of_property_read_u32_index(np, "vdev-nums", i,
- &rpdev->vdev_nums);
- if (ret)
- rpdev->vdev_nums = 1;
- if (rpdev->vdev_nums > MAX_VDEV_NUMS) {
- pr_err("vdev-nums exceed the max %d\n", MAX_VDEV_NUMS);
- return -EINVAL;
- }
-
- if (!strcmp(rpdev->rproc_name, "m4")) {
- ret = set_vring_phy_buf(pdev, rpdev,
- rpdev->vdev_nums);
- if (ret) {
- pr_err("No vring buffer.\n");
- return -ENOMEM;
- }
- } else {
- pr_err("No remote m4 processor.\n");
- return -ENODEV;
- }
-
- for (j = 0; j < rpdev->vdev_nums; j++) {
- pr_debug("%s rpdev%d vdev%d: vring0 0x%x, vring1 0x%x\n",
- __func__, i, rpdev->vdev_nums,
- rpdev->ivdev[j].vring[0],
- rpdev->ivdev[j].vring[1]);
- rpdev->ivdev[j].vdev.id.device = VIRTIO_ID_RPMSG;
- rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;
- rpdev->ivdev[j].vdev.dev.parent = &pdev->dev;
- rpdev->ivdev[j].vdev.dev.release = imx_rpmsg_vproc_release;
- rpdev->ivdev[j].base_vq_id = j * 2;
-
- ret = register_virtio_device(&rpdev->ivdev[j].vdev);
- if (ret) {
- pr_err("%s failed to register rpdev: %d\n",
- __func__, ret);
- return ret;
- }
-
- }
- }
-
- return ret;
-}
-
-static int imx_rpmsg_remove(struct platform_device *pdev)
-{
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
- struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
-
- for (j = 0; j < rpdev->vdev_nums; j++)
- unregister_virtio_device(&rpdev->ivdev[j].vdev);
- }
- return 0;
-}
-
-static struct platform_driver imx_rpmsg_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "imx-rpmsg",
- .of_match_table = imx_rpmsg_dt_ids,
- },
- .probe = imx_rpmsg_probe,
- .remove = imx_rpmsg_remove,
-};
-
-static int __init imx_rpmsg_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&imx_rpmsg_driver);
- if (ret)
- pr_err("Unable to initialize rpmsg driver\n");
- else
- pr_info("imx rpmsg driver is registered.\n");
-
- return ret;
-}
-
-static void __exit imx_rpmsg_exit(void)
-{
- pr_info("imx rpmsg driver is unregistered.\n");
- platform_driver_unregister(&imx_rpmsg_driver);
-}
-
-module_exit(imx_rpmsg_exit);
-module_init(imx_rpmsg_init);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("iMX remote processor messaging virtio device");
-MODULE_LICENSE("GPL v2");
select VIRTIO
select VIRTUALIZATION
+config HAVE_IMX_RPMSG
+ bool "i.MX RPMSG driver on AMP SoCs"
+ select RPMSG
+ select RPMSG_VIRTIO
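+ help
+   Virtio-based RPMSG transport for talking to the Cortex-M core on
+   i.MX6SX, i.MX7D and i.MX7ULP SoCs through the Messaging Unit (MU).
+   The vrings live in a reserved DDR region described in the device tree.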
+
config IMX_RPMSG_PINGPONG
tristate "IMX RPMSG pingpong driver -- loadable modules only"
default m
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
+obj-$(CONFIG_HAVE_IMX_RPMSG) += imx_rpmsg.o
obj-$(CONFIG_IMX_RPMSG_PINGPONG) += imx_rpmsg_pingpong.o
obj-$(CONFIG_IMX_RPMSG_TTY) += imx_rpmsg_tty.o
--- /dev/null
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * derived from the omap-rpmsg implementation.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <linux/imx_rpmsg.h>
+#include <linux/mx8_mu.h>
+
+enum imx_rpmsg_variants {
+ IMX6SX,
+ IMX7D,
+ IMX7ULP,
+};
+static enum imx_rpmsg_variants variant;
+
+struct imx_virdev {
+ struct virtio_device vdev;
+ unsigned int vring[2];
+ struct virtqueue *vq[2];
+ int base_vq_id;
+ int num_of_vqs;
+ struct notifier_block nb;
+};
+
+struct imx_rpmsg_vproc {
+ char *rproc_name;
+ struct mutex lock;
+ int vdev_nums;
+#define MAX_VDEV_NUMS 5
+ struct imx_virdev ivdev[MAX_VDEV_NUMS];
+};
+
+struct imx_mu_rpmsg_box {
+ const char *name;
+ struct blocking_notifier_head notifier;
+};
+
+static struct imx_mu_rpmsg_box mu_rpmsg_box = {
+ .name = "m4",
+};
+
+#define MAX_NUM 10 /* enlarge it if an overflow happens */
+
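+/*
+ * m4_message[] is a small ring buffer between the MU receive ISR and
+ * rpmsg_work_handler(): the ISR stores the raw MU word at in_idx and
+ * schedules the delayed work, which drains entries at out_idx and feeds
+ * them into the notifier chain. Both indices are protected by mu_lock.
+ */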
+static void __iomem *mu_base;
+static u32 m4_message[MAX_NUM];
+static u32 in_idx, out_idx;
+static DEFINE_SPINLOCK(mu_lock);
+static struct delayed_work rpmsg_work;
+
+/*
+ * For now, allocate 256 buffers of 512 bytes for each side. Each buffer
+ * will then have 16B for the msg header and 496B for the payload.
+ * This will require a total space of 256KB for the buffers themselves, and
+ * 3 pages for every vring (the size of the vring depends on the number of
+ * buffers it supports).
+ */
+#define RPMSG_NUM_BUFS (512)
+#define RPMSG_BUF_SIZE (512)
+#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
+
+/*
+ * The alignment between the consumer and producer parts of the vring.
+ * Note: this is part of the "wire" protocol. If you change this, you need
+ * to update your BIOS image as well
+ */
+#define RPMSG_VRING_ALIGN (4096)
+
+/* With 256 buffers, our vring will occupy 3 pages */
+#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
+ RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
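+/*
+ * Rough arithmetic behind the "3 pages" figure, assuming 4 KiB pages: the
+ * descriptor table is 16 B * 256 = 4 KiB, the avail ring is a few hundred
+ * bytes and is padded up to the 4 KiB vring alignment, and the used ring is
+ * roughly 2 KiB, so vring_size() lands just above two pages and
+ * DIV_ROUND_UP() rounds the result up to three.
+ */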
+
+#define to_imx_virdev(vd) container_of(vd, struct imx_virdev, vdev)
+#define to_imx_rpdev(vd, id) container_of(vd, struct imx_rpmsg_vproc, ivdev[id])
+
+struct imx_rpmsg_vq_info {
+ __u16 num; /* number of entries in the virtio_ring */
+ __u16 vq_id; /* a globally unique index of this virtqueue */
+ void *addr; /* address where we mapped the virtio ring */
+ struct imx_rpmsg_vproc *rpdev;
+};
+
+static u64 imx_rpmsg_get_features(struct virtio_device *vdev)
+{
+ /* VIRTIO_RPMSG_F_NS has been made private */
+ return 1 << 0;
+}
+
+static int imx_rpmsg_finalize_features(struct virtio_device *vdev)
+{
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+ return 0;
+}
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static bool imx_rpmsg_notify(struct virtqueue *vq)
+{
+ unsigned int mu_rpmsg = 0;
+ struct imx_rpmsg_vq_info *rpvq = vq->priv;
+
+ mu_rpmsg = rpvq->vq_id << 16;
+ mutex_lock(&rpvq->rpdev->lock);
+ /* send the index of the triggered virtqueue as the mu payload */
+ MU_SendMessage(mu_base, 1, mu_rpmsg);
+ mutex_unlock(&rpvq->rpdev->lock);
+
+ return true;
+}
+
+static int imx_mu_rpmsg_callback(struct notifier_block *this,
+ unsigned long index, void *data)
+{
+ u32 mu_msg = (phys_addr_t) data;
+ struct imx_virdev *virdev;
+
+ virdev = container_of(this, struct imx_virdev, nb);
+
+ pr_debug("%s mu_msg: 0x%x\n", __func__, mu_msg);
+ /* ignore vq indices which are clearly not for us */
+ mu_msg = mu_msg >> 16;
+ if (mu_msg < virdev->base_vq_id || mu_msg > virdev->base_vq_id + 1) {
+ pr_debug("mu_msg: 0x%x is invalid\n", mu_msg);
+ return NOTIFY_DONE;
+ }
+
+ mu_msg -= virdev->base_vq_id;
+
+ /*
+ * Currently both PENDING_MSG and explicit-virtqueue-index
+ * messaging are supported.
+ * Whatever approach is taken, at this point 'mu_msg' contains
+ * the index of the vring which was just triggered.
+ */
+ if (mu_msg < virdev->num_of_vqs)
+ vring_interrupt(mu_msg, virdev->vq[mu_msg]);
+
+ return NOTIFY_DONE;
+}
+
+int imx_mu_rpmsg_register_nb(const char *name, struct notifier_block *nb)
+{
+ if ((name == NULL) || (nb == NULL))
+ return -EINVAL;
+
+ if (!strcmp(mu_rpmsg_box.name, name))
+ blocking_notifier_chain_register(&(mu_rpmsg_box.notifier), nb);
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int imx_mu_rpmsg_unregister_nb(const char *name, struct notifier_block *nb)
+{
+ if ((name == NULL) || (nb == NULL))
+ return -EINVAL;
+
+ if (!strcmp(mu_rpmsg_box.name, name))
+ blocking_notifier_chain_unregister(&(mu_rpmsg_box.notifier),
+ nb);
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned int index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct imx_virdev *virdev = to_imx_virdev(vdev);
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
+ virdev->base_vq_id / 2);
+ struct imx_rpmsg_vq_info *rpvq;
+ struct virtqueue *vq;
+ int err;
+
+ rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
+ if (!rpvq)
+ return ERR_PTR(-ENOMEM);
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpvq->addr = (__force void *) ioremap_nocache(virdev->vring[index],
+ RPMSG_RING_SIZE);
+ if (!rpvq->addr) {
+ err = -ENOMEM;
+ goto free_rpvq;
+ }
+
+ memset(rpvq->addr, 0, RPMSG_RING_SIZE);
+
+ pr_debug("vring%d: phys 0x%x, virt 0x%p\n", index, virdev->vring[index],
+ rpvq->addr);
+
+ vq = vring_new_virtqueue(index, RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN,
+ vdev, true, rpvq->addr, imx_rpmsg_notify, callback,
+ name);
+ if (!vq) {
+ pr_err("vring_new_virtqueue failed\n");
+ err = -ENOMEM;
+ goto unmap_vring;
+ }
+
+ virdev->vq[index] = vq;
+ vq->priv = rpvq;
+ /* system-wide unique id for this virtqueue */
+ rpvq->vq_id = virdev->base_vq_id + index;
+ rpvq->rpdev = rpdev;
+ mutex_init(&rpdev->lock);
+
+ return vq;
+
+unmap_vring:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rpvq->addr);
+free_rpvq:
+ kfree(rpvq);
+ return ERR_PTR(err);
+}
+
+static void imx_rpmsg_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct imx_virdev *virdev = to_imx_virdev(vdev);
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
+ virdev->base_vq_id / 2);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ struct imx_rpmsg_vq_info *rpvq = vq->priv;
+
+ iounmap(rpvq->addr);
+ vring_del_virtqueue(vq);
+ kfree(rpvq);
+ }
+
+ if (&virdev->nb)
+ imx_mu_rpmsg_unregister_nb((const char *)rpdev->rproc_name,
+ &virdev->nb);
+}
+
+static int imx_rpmsg_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ struct imx_virdev *virdev = to_imx_virdev(vdev);
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(virdev,
+ virdev->base_vq_id / 2);
+ int i, err;
+
+ /* we maintain two virtqueues per remote processor (for RX and TX) */
+ if (nvqs != 2)
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ virdev->num_of_vqs = nvqs;
+
+ virdev->nb.notifier_call = imx_mu_rpmsg_callback;
+ imx_mu_rpmsg_register_nb((const char *)rpdev->rproc_name, &virdev->nb);
+
+ return 0;
+
+error:
+ imx_rpmsg_del_vqs(vdev);
+ return err;
+}
+
+static void imx_rpmsg_reset(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+static u8 imx_rpmsg_get_status(struct virtio_device *vdev)
+{
+ return 0;
+}
+
+static void imx_rpmsg_set_status(struct virtio_device *vdev, u8 status)
+{
+ dev_dbg(&vdev->dev, "%s new status: %d\n", __func__, status);
+}
+
+static void imx_rpmsg_vproc_release(struct device *dev)
+{
+ /* this handler is provided so driver core doesn't yell at us */
+}
+
+static struct virtio_config_ops imx_rpmsg_config_ops = {
+ .get_features = imx_rpmsg_get_features,
+ .finalize_features = imx_rpmsg_finalize_features,
+ .find_vqs = imx_rpmsg_find_vqs,
+ .del_vqs = imx_rpmsg_del_vqs,
+ .reset = imx_rpmsg_reset,
+ .set_status = imx_rpmsg_set_status,
+ .get_status = imx_rpmsg_get_status,
+};
+
+static struct imx_rpmsg_vproc imx_rpmsg_vprocs[] = {
+ {
+ .rproc_name = "m4",
+ },
+};
+
+static const struct of_device_id imx_rpmsg_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-rpmsg", },
+ { .compatible = "fsl,imx7d-rpmsg", },
+ { .compatible = "fsl,imx7ulp-rpmsg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rpmsg_dt_ids);
+
+static int set_vring_phy_buf(struct platform_device *pdev,
+ struct imx_rpmsg_vproc *rpdev, int vdev_nums)
+{
+ struct resource *res;
+ resource_size_t size;
+ unsigned int start, end;
+ int i, ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ size = resource_size(res);
+ start = res->start;
+ end = res->start + size;
+ for (i = 0; i < vdev_nums; i++) {
+ rpdev->ivdev[i].vring[0] = start;
+ rpdev->ivdev[i].vring[1] = start +
+ 0x8000;
+ start += 0x10000;
+ if (start > end) {
+ pr_err("Too small memory size %x!\n",
+ (u32)size);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ } else {
+ return -ENOMEM;
+ }
+
+ return ret;
+}
+
+static void rpmsg_work_handler(struct work_struct *work)
+{
+ u32 message;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mu_lock, flags);
+ /* handle all incoming MU messages */
+ while (in_idx != out_idx) {
+ message = m4_message[out_idx % MAX_NUM];
+ spin_unlock_irqrestore(&mu_lock, flags);
+
+ blocking_notifier_call_chain(&(mu_rpmsg_box.notifier), 4,
+ (void *)(phys_addr_t)message);
+
+ spin_lock_irqsave(&mu_lock, flags);
+ m4_message[out_idx % MAX_NUM] = 0;
+ out_idx++;
+ }
+ spin_unlock_irqrestore(&mu_lock, flags);
+}
+
+static irqreturn_t imx_mu_rpmsg_isr(int irq, void *param)
+{
+ u32 irqs, message;
+ unsigned long flags;
+
+ irqs = MU_ReadStatus(mu_base);
+
+ /* RPMSG */
+ if (irqs & (1 << 26)) {
+ spin_lock_irqsave(&mu_lock, flags);
+ /* get message from receive buffer */
+ MU_ReceiveMsg(mu_base, 1, &message);
+ m4_message[in_idx % MAX_NUM] = message;
+ in_idx++;
+ /*
+ * Too many MU messages arrived before the work handler could
+ * drain them; enlarge MAX_NUM if this happens.
+ */
+ if (in_idx == out_idx) {
+ spin_unlock_irqrestore(&mu_lock, flags);
+ pr_err("MU overflow!\n");
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&mu_lock, flags);
+
+ schedule_delayed_work(&rpmsg_work, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rpmsg_probe(struct platform_device *pdev)
+{
+ int i, j, ret = 0;
+ u32 irq;
+ struct clk *clk;
+ struct device_node *np_mu;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ variant = (enum imx_rpmsg_variants)of_device_get_match_data(dev);
+
+ /* Initialize the mu unit used by rpmsg */
+ np_mu = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-mu");
+ if (!np_mu)
+ pr_info("Cannot find MU-RPMSG entry in device tree\n");
+ mu_base = of_iomap(np_mu, 0);
+ WARN_ON(!mu_base);
+
+ if (variant == IMX7ULP)
+ irq = of_irq_get(np_mu, 1);
+ else
+ irq = of_irq_get(np_mu, 0);
+
+ ret = request_irq(irq, imx_mu_rpmsg_isr,
+ IRQF_EARLY_RESUME | IRQF_SHARED,
+ "imx-mu-rpmsg", &mu_rpmsg_box);
+ if (ret) {
+ pr_err("%s: register interrupt %d failed, rc %d\n",
+ __func__, irq, ret);
+ return ret;
+ }
+
+ if (variant == IMX7D) {
+ clk = of_clk_get(np_mu, 0);
+ if (IS_ERR(clk)) {
+ pr_err("mu clock source missing or invalid\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("unable to enable mu clock\n");
+ return ret;
+ }
+ }
+
+ INIT_DELAYED_WORK(&rpmsg_work, rpmsg_work_handler);
+ /*
+ * bit26 (RIE1) is used by the rpmsg channels.
+ * On i.MX7ULP, bit0 of the MX7ULP MU_CR is also set to let the M4
+ * know that the MU is ready now.
+ */
+ if (variant == IMX7ULP) {
+ MU_EnableRxFullInt(mu_base, 1);
+ MU_SetFn(mu_base, 1);
+ } else {
+ MU_EnableRxFullInt(mu_base, 1);
+ }
+ BLOCKING_INIT_NOTIFIER_HEAD(&(mu_rpmsg_box.notifier));
+
+ pr_info("MU is ready for cross core communication!\n");
+
+ for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
+ struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
+
+ ret = of_property_read_u32_index(np, "vdev-nums", i,
+ &rpdev->vdev_nums);
+ if (ret)
+ rpdev->vdev_nums = 1;
+ if (rpdev->vdev_nums > MAX_VDEV_NUMS) {
+ pr_err("vdev-nums exceed the max %d\n", MAX_VDEV_NUMS);
+ return -EINVAL;
+ }
+
+ if (!strcmp(rpdev->rproc_name, "m4")) {
+ ret = set_vring_phy_buf(pdev, rpdev,
+ rpdev->vdev_nums);
+ if (ret) {
+ pr_err("No vring buffer.\n");
+ return -ENOMEM;
+ }
+ } else {
+ pr_err("No remote m4 processor.\n");
+ return -ENODEV;
+ }
+
+ for (j = 0; j < rpdev->vdev_nums; j++) {
+ pr_debug("%s rpdev%d vdev%d: vring0 0x%x, vring1 0x%x\n",
+ __func__, i, j,
+ rpdev->ivdev[j].vring[0],
+ rpdev->ivdev[j].vring[1]);
+ rpdev->ivdev[j].vdev.id.device = VIRTIO_ID_RPMSG;
+ rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;
+ rpdev->ivdev[j].vdev.dev.parent = &pdev->dev;
+ rpdev->ivdev[j].vdev.dev.release = imx_rpmsg_vproc_release;
+ rpdev->ivdev[j].base_vq_id = j * 2;
+
+ ret = register_virtio_device(&rpdev->ivdev[j].vdev);
+ if (ret) {
+ pr_err("%s failed to register rpdev: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ }
+ }
+
+ return ret;
+}
+
+static int imx_rpmsg_remove(struct platform_device *pdev)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
+ struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
+
+ for (j = 0; j < rpdev->vdev_nums; j++)
+ unregister_virtio_device(&rpdev->ivdev[j].vdev);
+ }
+ return 0;
+}
+
+static struct platform_driver imx_rpmsg_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "imx-rpmsg",
+ .of_match_table = imx_rpmsg_dt_ids,
+ },
+ .probe = imx_rpmsg_probe,
+ .remove = imx_rpmsg_remove,
+};
+
+static int __init imx_rpmsg_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&imx_rpmsg_driver);
+ if (ret)
+ pr_err("Unable to initialize rpmsg driver\n");
+ else
+ pr_info("imx rpmsg driver is registered.\n");
+
+ return ret;
+}
+
+static void __exit imx_rpmsg_exit(void)
+{
+ pr_info("imx rpmsg driver is unregistered.\n");
+ platform_driver_unregister(&imx_rpmsg_driver);
+}
+
+module_exit(imx_rpmsg_exit);
+module_init(imx_rpmsg_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX remote processor messaging virtio device");
+MODULE_LICENSE("GPL v2");
obj-$(CONFIG_SOC_TI) += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
-obj-$(CONFIG_ARCH_FSL_IMX8QM) += imx8/
+obj-y += imx/
-obj-$(CONFIG_ARCH_FSL_IMX8QM) += mu/
+obj-$(CONFIG_HAVE_IMX_MU) += mu/
obj-$(CONFIG_ARCH_FSL_IMX8QM) += sc/
-obj-$(CONFIG_ARCH_FSL_IMX8QM) += rpmsg/
obj-$(CONFIG_ARCH_FSL_IMX8QM) += pm-domains.o soc-imx8.o
+++ /dev/null
-obj-y += imx_rpmsg.o
+++ /dev/null
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_reserved_mem.h>
-#include <linux/platform_device.h>
-#include <linux/rpmsg.h>
-#include <linux/slab.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_ring.h>
-#include "../mu/mx8_mu.h"
-
-#define MU_ARR1_OFFSET 0x14
-#define MU_ASR 0x20
-
-static void __iomem *mu_base;
-static u32 m4_message;
-static struct delayed_work rpmsg_work;
-
-struct imx_mu_rpmsg_box {
- const char *name;
- struct blocking_notifier_head notifier;
-};
-
-static struct imx_mu_rpmsg_box mu_rpmsg_box = {
- .name = "m4",
-};
-
-struct imx_rpmsg_vproc {
- struct virtio_device vdev;
- dma_addr_t vring[2];
- char *rproc_name;
- struct mutex lock;
- struct notifier_block nb;
- struct virtqueue *vq[2];
- int base_vq_id;
- int num_of_vqs;
-};
-
-/*
- * For now, allocate 256 buffers of 512 bytes for each side. each buffer
- * will then have 16B for the msg header and 496B for the payload.
- * This will require a total space of 256KB for the buffers themselves, and
- * 3 pages for every vring (the size of the vring depends on the number of
- * buffers it supports).
- */
-#define RPMSG_NUM_BUFS (512)
-#define RPMSG_BUF_SIZE (512)
-#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
-
-/*
- * The alignment between the consumer and producer parts of the vring.
- * Note: this is part of the "wire" protocol. If you change this, you need
- * to update your BIOS image as well
- */
-#define RPMSG_VRING_ALIGN (4096)
-
-/* With 256 buffers, our vring will occupy 3 pages */
-#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
- RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
-
-#define to_imx_rpdev(vd) container_of(vd, struct imx_rpmsg_vproc, vdev)
-
-struct imx_rpmsg_vq_info {
- __u16 num; /* number of entries in the virtio_ring */
- __u16 vq_id; /* a globaly unique index of this virtqueue */
- void *addr; /* address where we mapped the virtio ring */
- struct imx_rpmsg_vproc *rpdev;
-};
-
-static u64 imx_rpmsg_get_features(struct virtio_device *vdev)
-{
- return 1 << VIRTIO_RPMSG_F_NS;
-}
-
-static int imx_rpmsg_finalize_features(struct virtio_device *vdev)
-{
- /* Give virtio_ring a chance to accept features */
- vring_transport_features(vdev);
- return 0;
-}
-
-/* kick the remote processor, and let it know which virtqueue to poke at */
-static bool imx_rpmsg_notify(struct virtqueue *vq)
-{
- unsigned int mu_rpmsg = 0;
- struct imx_rpmsg_vq_info *rpvq = vq->priv;
-
- mu_rpmsg = rpvq->vq_id << 16;
- mutex_lock(&rpvq->rpdev->lock);
- /* send the index of the triggered virtqueue as the mu payload */
- MU_SendMessage(mu_base, 1, mu_rpmsg);
- mutex_unlock(&rpvq->rpdev->lock);
-
- return true;
-}
-
-static int imx_mu_rpmsg_callback(struct notifier_block *this,
- unsigned long index, void *data)
-{
- u32 mu_msg = (u64) data;
- struct imx_rpmsg_vproc *rpdev;
-
- rpdev = container_of(this, struct imx_rpmsg_vproc, nb);
-
- pr_debug("%s mu_msg: 0x%x\n", __func__, mu_msg);
-
- /* ignore vq indices which are clearly not for us */
- mu_msg = mu_msg >> 16;
- if (mu_msg < rpdev->base_vq_id)
- pr_err("mu_msg: 0x%x is invalid\n", mu_msg);
-
- mu_msg -= rpdev->base_vq_id;
-
- /*
- * Currently both PENDING_MSG and explicit-virtqueue-index
- * messaging are supported.
- * Whatever approach is taken, at this point 'mu_msg' contains
- * the index of the vring which was just triggered.
- */
- if (mu_msg < rpdev->num_of_vqs)
- vring_interrupt(mu_msg, rpdev->vq[mu_msg]);
-
- return NOTIFY_DONE;
-}
-
-int imx_mu_rpmsg_register_nb(const char *name, struct notifier_block *nb)
-{
- if ((name == NULL) || (nb == NULL))
- return -EINVAL;
-
- if (!strcmp(mu_rpmsg_box.name, name))
- blocking_notifier_chain_register(&(mu_rpmsg_box.notifier), nb);
- else
- return -ENOENT;
-
- return 0;
-}
-
-int imx_mu_rpmsg_unregister_nb(const char *name, struct notifier_block *nb)
-{
- if ((name == NULL) || (nb == NULL))
- return -EINVAL;
-
- if (!strcmp(mu_rpmsg_box.name, name))
- blocking_notifier_chain_unregister(&(mu_rpmsg_box.notifier),
- nb);
- else
- return -ENOENT;
-
- return 0;
-}
-
-static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name)
-{
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
- struct imx_rpmsg_vq_info *rpvq;
- struct virtqueue *vq;
- int err;
-
- rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
- if (!rpvq)
- return ERR_PTR(-ENOMEM);
-
- dma_alloc_from_coherent(vdev->dev.parent, 0x8000,
- &rpdev->vring[index], &rpvq->addr);
- if (!rpvq->addr) {
- err = -ENOMEM;
- goto free_rpvq;
- }
-
- pr_debug("vring%d: phys 0x%llx, virt 0x%p\n", index, rpdev->vring[index],
- rpvq->addr);
- vq = vring_new_virtqueue(index, RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN,
- vdev, true, rpvq->addr, imx_rpmsg_notify, callback,
- name);
- if (!vq) {
- pr_err("vring_new_virtqueue failed\n");
- err = -ENOMEM;
- goto unmap_vring;
- }
-
- rpdev->vq[index] = vq;
- vq->priv = rpvq;
- /* system-wide unique id for this virtqueue */
- rpvq->vq_id = rpdev->base_vq_id + index;
- rpvq->rpdev = rpdev;
- mutex_init(&rpdev->lock);
-
- return vq;
-
-unmap_vring:
- /* iounmap normal memory, so make sparse happy */
- iounmap((__force void __iomem *) rpvq->addr);
-free_rpvq:
- kfree(rpvq);
- return ERR_PTR(err);
-}
-
-static void imx_rpmsg_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- struct imx_rpmsg_vq_info *rpvq = vq->priv;
-
- iounmap(rpvq->addr);
- vring_del_virtqueue(vq);
- kfree(rpvq);
- }
-
- if (&rpdev->nb)
- imx_mu_rpmsg_unregister_nb((const char *)rpdev->rproc_name,
- &rpdev->nb);
-}
-
-static int imx_rpmsg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[])
-{
- struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
- int i, err;
-
- /* we maintain two virtqueues per remote processor (for RX and TX) */
- if (nvqs != 2)
- return -EINVAL;
-
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- rpdev->num_of_vqs = nvqs;
-
- rpdev->nb.notifier_call = imx_mu_rpmsg_callback;
- imx_mu_rpmsg_register_nb((const char *)rpdev->rproc_name, &rpdev->nb);
-
- return 0;
-
-error:
- imx_rpmsg_del_vqs(vdev);
- return err;
-}
-
-static void imx_rpmsg_reset(struct virtio_device *vdev)
-{
- dev_dbg(&vdev->dev, "reset !\n");
-}
-
-static u8 imx_rpmsg_get_status(struct virtio_device *vdev)
-{
- return 0;
-}
-
-static void imx_rpmsg_set_status(struct virtio_device *vdev, u8 status)
-{
- dev_dbg(&vdev->dev, "%s new status: %d\n", __func__, status);
-}
-
-static void imx_rpmsg_vproc_release(struct device *dev)
-{
- /* this handler is provided so driver core doesn't yell at us */
-}
-
-static struct virtio_config_ops imx_rpmsg_config_ops = {
- .get_features = imx_rpmsg_get_features,
- .finalize_features = imx_rpmsg_finalize_features,
- .find_vqs = imx_rpmsg_find_vqs,
- .del_vqs = imx_rpmsg_del_vqs,
- .reset = imx_rpmsg_reset,
- .set_status = imx_rpmsg_set_status,
- .get_status = imx_rpmsg_get_status,
-};
-
-static struct imx_rpmsg_vproc imx_rpmsg_vprocs[] = {
- {
- .vdev.id.device = VIRTIO_ID_RPMSG,
- .vdev.config = &imx_rpmsg_config_ops,
- .rproc_name = "m4",
- .base_vq_id = 0,
- },
-};
-
-static const struct of_device_id imx_rpmsg_dt_ids[] = {
- { .compatible = "fsl,imx8dv-rpmsg", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, imx_rpmsg_dt_ids);
-
-static void rpmsg_work_handler(struct work_struct *work)
-{
- u64 val = m4_message;
-
- blocking_notifier_call_chain(&(mu_rpmsg_box.notifier), 4, (void *)val);
- m4_message = 0;
-}
-
-static irqreturn_t imx8_mu_rpmsg_isr(int irq, void *param)
-{
- u32 irqs;
-
- irqs = readl_relaxed(mu_base + MU_ASR);
-
- /* RPMSG */
- if (irqs & (1 << 26)) {
- /* get message from receive buffer */
- MU_ReceiveMsg(mu_base, 1, &m4_message);
- schedule_delayed_work(&rpmsg_work, 0);
- }
-
- return IRQ_HANDLED;
-}
-
-static int imx_rpmsg_probe(struct platform_device *pdev)
-{
- int i, ret = 0;
- u32 irq, val;
- struct device_node *np;
-
- /* Initialize the mu unit used by rpmsg */
- np = of_find_compatible_node(NULL, NULL, "fsl,imx8-mu-rpmsg");
- if (!np)
- pr_info("Cannot find MU-RPMSG entry in device tree\n");
-
- /* Make check the MU is initialized by CM4_0 or not */
- if (of_property_read_u32(np, "mu_is_run", &val) < 0)
- return -ENODEV;
-
- mu_base = of_iomap(np, 0);
- WARN_ON(!mu_base);
-
- irq = of_irq_get(np, 0);
-
- ret = request_irq(irq, imx8_mu_rpmsg_isr, IRQF_EARLY_RESUME,
- "imx8_mu_rpmsg_isr", &mu_rpmsg_box);
- if (ret) {
- pr_info("imx8_mu_init :request_irq failed %d, ret = %d\n",
- irq, ret);
- }
-
- /* Init MU */
- MU_Init(mu_base);
- /* enable the bit26(RIE1) */
- MU_EnableRxFullInt(mu_base, 1);
-
- INIT_DELAYED_WORK(&rpmsg_work, rpmsg_work_handler);
- BLOCKING_INIT_NOTIFIER_HEAD(&(mu_rpmsg_box.notifier));
-
- pr_info("MU is ready for cross core communication!\n");
-
- /* register the virtual processor */
- np = pdev->dev.of_node;
- for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
- struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
-
- if (!strcmp(rpdev->rproc_name, "m4")) {
- ret = of_device_is_compatible(np, "fsl,imx8dv-rpmsg");
- if (ret) {
- of_reserved_mem_device_init(&pdev->dev);
- } else {
- /* hardcodes here now. */
- rpdev->vring[0] = 0xFFFF0000;
- rpdev->vring[1] = 0xFFFF8000;
- }
- } else {
- break;
- }
-
- pr_debug("%s rpdev%d: vring0 0x%llx, vring1 0x%llx\n", __func__,
- i, rpdev->vring[0], rpdev->vring[1]);
-
- rpdev->vdev.dev.parent = &pdev->dev;
- rpdev->vdev.dev.release = imx_rpmsg_vproc_release;
-
- ret = register_virtio_device(&rpdev->vdev);
- if (ret) {
- pr_err("%s failed to register rpdev: %d\n",
- __func__, ret);
- break;
- }
- }
-
- return ret;
-}
-
-static int imx_rpmsg_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
- struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
-
- unregister_virtio_device(&rpdev->vdev);
- }
- return 0;
-}
-
-static struct platform_driver imx_rpmsg_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "imx-rpmsg",
- .of_match_table = imx_rpmsg_dt_ids,
- },
- .probe = imx_rpmsg_probe,
- .remove = imx_rpmsg_remove,
-};
-
-static int __init imx_rpmsg_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&imx_rpmsg_driver);
- if (ret)
- pr_err("Unable to initialize rpmsg driver\n");
- else
- pr_info("imx rpmsg driver is registered.\n");
-
- return ret;
-}
-
-static void __exit imx_rpmsg_exit(void)
-{
- pr_info("imx rpmsg driver is unregistered.\n");
- platform_driver_unregister(&imx_rpmsg_driver);
-}
-
-module_exit(imx_rpmsg_exit);
-module_init(imx_rpmsg_init);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("iMX remote processor messaging virtio device");
-MODULE_LICENSE("GPL v2");