#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "dmaengine.h"
+#include "virt-dma.h"
/* SDMA registers */
#define SDMA_H_C0PTR 0x000
struct sdma_engine;
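+/**
+ * struct sdma_desc - per-transfer descriptor handed to the virt-dma core
+ *
+ * @vd descriptor embedded for the virt-dma framework
+ * @node entry in the channel's pending list (non-cyclic transfers only)
+ * @num_bd number of hardware buffer descriptors backing this transfer
+ * @bd_phys DMA address of the buffer descriptor array
+ * @bd_iram flag indicating the memory location of buffer descriptor
+ * @buf_tail index of the next buffer descriptor to complete (cyclic mode)
+ * @buf_ptail index of the previously completed buffer descriptor
+ * @sdmac back pointer to the owning channel
+ * @bd CPU address of the buffer descriptor array
+ */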
+struct sdma_desc {
+ struct virt_dma_desc vd;
+ struct list_head node;
+ unsigned int num_bd;
+ dma_addr_t bd_phys;
+ bool bd_iram;
+ unsigned int buf_tail;
+ unsigned int buf_ptail;
+ struct sdma_channel *sdmac;
+ struct sdma_buffer_descriptor *bd;
+};
+
/**
* struct sdma_channel - housekeeping for a SDMA channel
*
- * @bd_iram flag indicating the memory location of buffer descriptor
*/
struct sdma_channel {
+ struct virt_dma_chan vc;
+ struct list_head pending;
struct sdma_engine *sdma;
+ struct sdma_desc *desc;
unsigned int channel;
enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
enum dma_slave_buswidth word_size;
- unsigned int buf_tail;
- unsigned int buf_ptail;
- unsigned int num_bd;
unsigned int period_len;
- struct sdma_buffer_descriptor *bd;
- dma_addr_t bd_phys;
- bool bd_iram;
unsigned int pc_from_device, pc_to_device;
unsigned int device_to_device;
unsigned int pc_to_pc;
unsigned long event_mask[2];
unsigned long watermark_level;
u32 shp_addr, per_addr;
- struct dma_chan chan;
- spinlock_t lock;
- struct dma_async_tx_descriptor desc;
enum dma_status status;
+ struct imx_dma_data data;
unsigned int chn_count;
unsigned int chn_real_count;
- struct tasklet_struct tasklet;
- struct imx_dma_data data;
+ u32 bd_size_sum;
bool src_dualfifo;
bool dst_dualfifo;
};
u32 spba_end_addr;
unsigned int irq;
struct gen_pool *iram_pool;
+ /* channel0 bd */
+ dma_addr_t bd0_phys;
+ bool bd0_iram;
+ struct sdma_buffer_descriptor *bd0;
};
static struct sdma_driver_data sdma_imx31 = {
#define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
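+/* starts the next queued descriptor; defined after sdma_tx_status() */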
+static void sdma_start_desc(struct sdma_channel *sdmac);
+
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
u32 chnenbl0 = sdma->drvdata->chnenbl0;
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
u32 address)
{
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
void *buf_virt;
dma_addr_t buf_phys;
int ret;
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
struct sdma_buffer_descriptor *bd;
+ struct sdma_desc *desc = sdmac->desc;
int error = 0;
enum dma_status old_status = sdmac->status;
+ unsigned long flags;
/*
* loop mode. Iterate over descriptors, re-setup them and
* call callback function.
*/
- while (1) {
- bd = &sdmac->bd[sdmac->buf_tail];
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ while (desc) {
+ bd = &desc->bd[desc->buf_tail];
if (bd->mode.status & BD_DONE)
break;
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
- sdmac->buf_ptail = sdmac->buf_tail;
- sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
-
- /*
- * The callback is called from the interrupt context in order
- * to reduce latency and to avoid the risk of altering the
- * SDMA transaction status by the time the client tasklet is
- * executed.
- */
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+ desc->buf_ptail = desc->buf_tail;
+ desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
if (sdmac->peripheral_type == IMX_DMATYPE_UART) {
- /* restore mode.count after counter readed */
+ /* restore mode.count after the counter has been read */
if (error)
sdmac->status = old_status;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ /*
+ * The callback is called from the interrupt context in order
+ * to reduce latency and to avoid the risk of altering the
+ * SDMA transaction status by the time the client tasklet is
+ * executed.
+ */
+ dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
}
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
{
- struct sdma_channel *sdmac = (struct sdma_channel *) data;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
*/
- for (i = 0; i < sdmac->num_bd; i++) {
- bd = &sdmac->bd[i];
+ for (i = 0; i < sdmac->desc->num_bd; i++) {
+ bd = &sdmac->desc->bd[i];
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
sdmac->status = DMA_ERROR;
else
sdmac->status = DMA_COMPLETE;
-
- dma_cookie_complete(&sdmac->desc);
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
-
- if (sdmac->flags & IMX_DMA_SG_LOOP) {
- if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
- sdma_update_channel_loop(sdmac);
- else
- dmaengine_desc_get_callback_invoke(&sdmac->desc,
- NULL);
-
- } else
- tasklet_schedule(&sdmac->tasklet);
-
+ struct sdma_desc *desc;
+
+ spin_lock(&sdmac->vc.lock);
+ desc = sdmac->desc;
+ if (desc) {
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ sdma_update_channel_loop(sdmac);
+ else
+ vchan_cyclic_callback(&desc->vd);
+ } else {
+ mxc_sdma_handle_channel_normal(sdmac);
+ vchan_cookie_complete(&desc->vd);
+ if (!list_empty(&sdmac->pending))
+ list_del(&desc->node);
+ sdma_start_desc(sdmac);
+ }
+ }
__clear_bit(channel, &stat);
+ spin_unlock(&sdmac->vc.lock);
}
return IRQ_HANDLED;
int channel = sdmac->channel;
int load_address;
struct sdma_context_data *context = sdma->context;
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
int ret;
unsigned long flags;
static int sdma_save_restore_context(struct sdma_engine *sdma, bool save)
{
struct sdma_context_data *context = sdma->context;
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
unsigned long flags;
int ret;
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sdma_channel, chan);
+ return container_of(chan, struct sdma_channel, vc.chan);
}
static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
-static int sdma_request_channel(struct sdma_channel *sdmac)
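+/*
+ * Allocate the hardware buffer descriptors for one transfer, preferring the
+ * on-chip IRAM pool and falling back to coherent memory.
+ */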
+static int sdma_alloc_bd(struct sdma_desc *desc)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
- int ret = -EBUSY;
-
- sdmac->bd_iram = true;
- sdmac->bd = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdmac->bd_phys);
- if (!sdmac->bd) {
- sdmac->bd_iram = false;
- sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
- if (!sdmac->bd) {
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ int ret = -ENOMEM;
+ unsigned long flags;
+
+ desc->bd_iram = true;
+ desc->bd = gen_pool_dma_alloc(desc->sdmac->sdma->iram_pool, bd_size,
+ &desc->bd_phys);
+ if (!desc->bd) {
+ desc->bd_iram = false;
+ desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, GFP_ATOMIC);
+ if (!desc->bd)
+ return ret;
+ }
+ spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+ desc->sdmac->bd_size_sum += bd_size;
+ spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
+
+ memset(desc->bd, 0, bd_size);
+
+ return 0;
+}
+
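+/* Return a transfer's buffer descriptors to the IRAM pool or coherent memory. */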
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ unsigned long flags;
+
+ if (desc->bd) {
+ if (desc->bd_iram)
+ gen_pool_free(desc->sdmac->sdma->iram_pool,
+ (unsigned long)desc->bd, bd_size);
+ else
+ dma_free_coherent(NULL, bd_size, desc->bd,
+ desc->bd_phys);
+ spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+ desc->sdmac->bd_size_sum -= bd_size;
+ spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
+ }
+}
+
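+/*
+ * Channel 0 is owned by the driver itself (it is used to load scripts and
+ * channel contexts), so its single page of buffer descriptors lives in the
+ * engine rather than in a per-transfer sdma_desc.
+ */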
+static int sdma_request_channel0(struct sdma_engine *sdma)
+{
+ int ret = 0;
+
+ sdma->bd0_iram = true;
+ sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdma->bd0_phys);
+ if (!sdma->bd0) {
+ sdma->bd0_iram = false;
+ sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, GFP_KERNEL);
+ if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
}
}
- sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ memset(sdma->bd0, 0, PAGE_SIZE);
+
+ sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+ sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
- sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+ sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
return 0;
out:
return ret;
}
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct sdma_desc, vd.tx);
+}
+
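+/* Called by the virt-dma core once a descriptor can be reclaimed. */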
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+ struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+
+ sdma_free_bd(desc);
+ kfree(desc);
+}
+
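+/* Pause/resume are only supported for cyclic (loop) transfers. */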
+static int sdma_channel_pause(struct dma_chan *chan)
{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
unsigned long flags;
- struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
- dma_cookie_t cookie;
- spin_lock_irqsave(&sdmac->lock, flags);
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ return -EINVAL;
+
+ sdma_disable_channel(chan);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ sdmac->status = DMA_PAUSED;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return 0;
+}
- cookie = dma_cookie_assign(tx);
+static int sdma_channel_resume(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
- spin_unlock_irqrestore(&sdmac->lock, flags);
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ return -EINVAL;
+
+ sdma_enable_channel(sdmac->sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ sdmac->status = DMA_IN_PROGRESS;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return 0;
+}
+
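+/*
+ * Drop every queued descriptor: reclaim the ones still held by the virt-dma
+ * core, free the ones already on the pending list, then stop the channel.
+ */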
+static int sdma_terminate_all(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
+ while (!list_empty(&sdmac->pending)) {
+ struct sdma_desc *desc = list_first_entry(&sdmac->pending,
+ struct sdma_desc, node);
+
+ list_del(&desc->node);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ sdmac->vc.desc_free(&desc->vd);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ }
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdma_disable_channel(chan);
- return cookie;
+ return 0;
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto disable_clk_ipg;
- ret = sdma_request_channel(sdmac);
- if (ret)
- goto disable_clk_ahb;
-
ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
goto disable_clk_ahb;
- dma_async_tx_descriptor_init(&sdmac->desc, chan);
- sdmac->desc.tx_submit = sdma_tx_submit;
- /* txd.flags will be overwritten in prep funcs */
- sdmac->desc.flags = DMA_CTRL_ACK;
+ sdmac->bd_size_sum = 0;
return 0;
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel(chan);
+ sdma_terminate_all(chan);
sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_set_channel_priority(sdmac, 0);
- if (sdmac->bd_iram)
- gen_pool_free(sdma->iram_pool, (unsigned long)sdmac->bd, PAGE_SIZE);
- else
- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
}
-static int sdma_transfer_init(struct sdma_channel *sdmac,
- enum dma_transfer_direction direction)
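+/*
+ * Allocate and set up a per-transfer descriptor along with "bds" hardware
+ * buffer descriptors; returns NULL on failure.
+ */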
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ enum dma_transfer_direction direction, u32 bds)
{
- int ret = 0;
+ struct sdma_desc *desc;
+ /* Now allocate and setup the descriptor. */
+ desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+ if (!desc)
+ goto err_out;
sdmac->status = DMA_IN_PROGRESS;
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
- sdmac->flags = 0;
sdmac->direction = direction;
sdmac->flags = 0;
+ sdmac->chn_count = 0;
+ sdmac->chn_real_count = 0;
- ret = sdma_load_context(sdmac);
- if (ret)
- return ret;
+ desc->sdmac = sdmac;
+ desc->num_bd = bds;
+ INIT_LIST_HEAD(&desc->node);
- sdmac->chn_count = 0;
+ if (sdma_alloc_bd(desc))
+ goto err_desc_out;
- return ret;
+ return desc;
+err_desc_out:
+ kfree(desc);
+err_out:
+ return NULL;
}
static int check_bd_buswidth(struct sdma_buffer_descriptor *bd,
size_t count;
int i = 0, param;
struct sdma_buffer_descriptor *bd;
+ struct sdma_desc *desc;
- if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
+ if (!chan || !len)
return NULL;
- if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
- dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%zu > %d\n",
- channel, len, NUM_BD * SDMA_BD_MAX_CNT);
- goto err_out;
- }
-
dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
&dma_src, &dma_dst, len, channel);
- if (sdma_transfer_init(sdmac, DMA_MEM_TO_MEM))
+ desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM, len / SDMA_BD_MAX_CNT + 1);
+ if (!desc)
goto err_out;
do {
count = min_t(size_t, len, SDMA_BD_MAX_CNT);
- bd = &sdmac->bd[i];
+ bd = &desc->bd[i];
bd->buffer_addr = dma_src;
bd->ext_buffer_addr = dma_dst;
bd->mode.count = count;
+ sdmac->chn_count += count;
if (check_bd_buswidth(bd, sdmac, count, dma_dst, dma_src))
- goto err_out;
+ goto err_bd_out;
dma_src += count;
dma_dst += count;
param & BD_INTR ? " intr" : "");
bd->mode.status = param;
- sdmac->chn_count += count;
} while (len);
- sdmac->num_bd = i;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
- sdmac->status = DMA_ERROR;
return NULL;
}
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
- enum dma_transfer_direction direction)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int ret, i, count;
int channel = sdmac->channel;
struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
+ struct sdma_desc *desc;
- if (sdmac->status == DMA_IN_PROGRESS)
+ if (!chan)
return NULL;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
src_nents, channel);
- if (src_nents > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, src_nents, NUM_BD);
- ret = -EINVAL;
- goto err_out;
- }
-
- if (sdma_transfer_init(sdmac, direction))
+ desc = sdma_transfer_init(sdmac, direction, src_nents);
+ if (!desc)
goto err_out;
for_each_sg(src_sg, sg_src, src_nents, i) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = sg_src->dma_address;
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
channel, count, SDMA_BD_MAX_CNT);
ret = -EINVAL;
- goto err_out;
+ goto err_bd_out;
}
bd->mode.count = count;
ret = check_bd_buswidth(bd, sdmac, count, 0,
sg_src->dma_address);
if (ret)
- goto err_out;
+ goto err_bd_out;
param = BD_DONE | BD_EXTD | BD_CONT;
sg_dst = sg_next(sg_dst);
}
- sdmac->num_bd = src_nents;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- return &sdmac->desc;
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
- sdmac->status = DMA_ERROR;
+ dev_dbg(sdma->dev, "Can't get desc.\n");
return NULL;
}
unsigned long flags)
{
return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
- DMA_MEM_TO_MEM);
+ DMA_MEM_TO_MEM, flags);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
- return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
+ return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction, flags);
}
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- int ret, i = 0, buf = 0;
- int num_periods;
+ int i = 0, buf = 0;
+ int num_periods = 0;
+ struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
-
- sdmac->status = DMA_IN_PROGRESS;
+ num_periods = buf_len / period_len;
+ /* Now allocate and setup the descriptor. */
+ desc = sdma_transfer_init(sdmac, direction, num_periods);
+ if (!desc)
+ goto err_out;
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
-
sdmac->flags |= IMX_DMA_SG_LOOP;
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
+ /* HDMI audio transfers do not use buffer descriptors */
if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
- return &sdmac->desc;
- else
- num_periods = buf_len / period_len;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- if (num_periods > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, num_periods, NUM_BD);
- goto err_out;
- }
+ desc->buf_tail = 0;
+ desc->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
if (period_len > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
channel, period_len, SDMA_BD_MAX_CNT);
- goto err_out;
+ goto err_bd_out;
}
if (sdmac->peripheral_type == IMX_DMATYPE_UART)
sdmac->chn_count = period_len;
while (buf < buf_len) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = dma_addr;
bd->mode.count = period_len;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
- goto err_out;
+ goto err_bd_out;
if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
bd->mode.command = 0;
else
i++;
}
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- sdmac->num_bd = num_periods;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
- sdmac->status = DMA_ERROR;
return NULL;
}
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- tasklet_kill(&sdmac->tasklet);
+ tasklet_kill(&sdmac->vc.task);
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
u32 residue;
+ struct virt_dma_desc *vd;
+ struct sdma_desc *desc;
+ enum dma_status ret;
+ unsigned long flags;
- /*
- * For uart rx data may not receive fully, use old chn_real_count to
- * know the real rx count.
- */
- if ((sdmac->flags & IMX_DMA_SG_LOOP) &&
- sdmac->peripheral_type != IMX_DMATYPE_UART)
- residue = (sdmac->num_bd - sdmac->buf_ptail) *
- sdmac->period_len - sdmac->chn_real_count;
- else
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (!txstate)
+ return ret;
+
+ if (ret == DMA_COMPLETE) {
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ txstate->residue = sdmac->chn_count - sdmac->chn_real_count;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ return ret;
+ }
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vd = vchan_find_desc(&sdmac->vc, cookie);
+ if (vd) {
+ /* descriptor is still queued on this virtual channel */
+ desc = to_sdma_desc(&vd->tx);
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ if (sdmac->peripheral_type != IMX_DMATYPE_UART) {
+ residue = (desc->num_bd - desc->buf_ptail) *
+ sdmac->period_len - sdmac->chn_real_count;
+ } else {
+ /* uart rx may not receive data fully, use the real rx count */
+ residue = sdmac->chn_count - sdmac->chn_real_count;
+ }
+ } else
+ residue = sdmac->chn_count;
+ } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+ /* cookie belongs to the descriptor currently running on the channel */
+ residue = sdmac->chn_count - sdmac->chn_real_count;
+ } else {
+ residue = 0;
+ }
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
- residue);
+ txstate->residue = residue;
+ ret = sdmac->status;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
- return sdmac->status;
+ return ret;
+}
+
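+/*
+ * Pick the next issued descriptor, point the channel control block at its
+ * buffer descriptors and kick the hardware. Called with vc.lock held.
+ */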
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+ struct sdma_desc *desc;
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ if (!vd) {
+ sdmac->desc = NULL;
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+ /*
+ * Do not delete the node from the desc_issued list in cyclic mode;
+ * otherwise the allocated desc will never be freed by
+ * vchan_dma_desc_free_list().
+ */
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP)) {
+ list_add_tail(&sdmac->desc->node, &sdmac->pending);
+ list_del(&vd->node);
+ }
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+ sdma_enable_channel(sdma, sdmac->channel);
}
static void sdma_issue_pending(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_engine *sdma = sdmac->sdma;
+ unsigned long flags;
- if (sdmac->status == DMA_IN_PROGRESS)
- sdma_enable_channel(sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+ sdma_start_desc(sdmac);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
for (i = 0; i < MAX_DMA_CHANNELS; i++)
writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
- ret = sdma_request_channel(&sdma->channel[0]);
+ ret = sdma_request_channel0(sdma);
if (ret)
goto err_dma_alloc;
struct sdma_channel *sdmac = &sdma->channel[i];
sdmac->sdma = sdma;
- spin_lock_init(&sdmac->lock);
-
- sdmac->chan.device = &sdma->dma_device;
- dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
+ sdmac->status = DMA_IN_PROGRESS;
+ sdmac->vc.desc_free = sdma_desc_free;
+ INIT_LIST_HEAD(&sdmac->pending);
- tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
- (unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
* that channel 0 in dmaengine counting matches sdma channel 1.
*/
if (i)
- list_add_tail(&sdmac->chan.device_node,
- &sdma->dma_device.channels);
+ vchan_init(&sdmac->vc, &sdma->dma_device);
}
if (np)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel;
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
+ sdma->dma_device.device_pause = sdma_channel_pause;
+ sdma->dma_device.device_resume = sdma_channel_resume;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
sdma->dma_device.copy_align = 2;
- dma_set_max_seg_size(sdma->dma_device.dev, 65535);
platform_set_drvdata(pdev, sdma);
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
- tasklet_kill(&sdmac->tasklet);
+ tasklet_kill(&sdmac->vc.task);
+ sdma_free_chan_resources(&sdmac->vc.chan);
}
platform_set_drvdata(pdev, NULL);