volatile u8 *hwregs;
struct fasync_struct *async_queue;
+ unsigned int mirror_regs[512]; /* register snapshot filled in WaitEncReady() */
struct device *dev;
struct mutex dev_mutex;
} hx280enc_t;
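+/*
+ * Wait (up to 200 ms) for the encoder interrupt, snapshot the hardware
+ * registers into mirror_regs and acknowledge the interrupt status.
+ * Always returns 0; on timeout the snapshot is still taken.
+ */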
unsigned int WaitEncReady(hx280enc_t *dev)
{
- PDEBUG("WaitEncReady\n");
+ u32 irq_status, is_write1_clr;
+ int i;
+ long ret;
- if (wait_event_interruptible(enc_wait_queue, CheckEncIrq(dev))) {
- PDEBUG("ENC wait_event_interruptible interrupted\n");
- return -ERESTARTSYS;
- }
+ PDEBUG("%s\n", __func__);
+ ret = wait_event_timeout(enc_wait_queue, CheckEncIrq(dev), msecs_to_jiffies(200));
+ if (ret == 0)
+ pr_err("ENC wait_event_timeout() timed out\n");
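+ /* fall through even on timeout so the caller still gets a register snapshot */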
+
+ /* snapshot the hardware registers into the mirror array */
+ for (i = 0; i < dev->iosize; i += 4)
+ dev->mirror_regs[i / 4] = readl(dev->hwregs + i);
+
+ /*
+  * Acknowledge the interrupt: bit 23 of the register at offset 0x4a0
+  * flags whether the IRQ status register at offset 0x04 is
+  * write-1-to-clear.
+  */
+ is_write1_clr = (dev->mirror_regs[0x4a0 / 4] & 0x00800000);
+ irq_status = dev->mirror_regs[1];
+ if (is_write1_clr)
+ writel(irq_status, dev->hwregs + 0x04);
+ else
+ writel(irq_status & (~0xf7d), dev->hwregs + 0x04);
return 0;
}
spin_unlock_irqrestore(&owner_lock, flags);
wake_up_interruptible_all(&enc_hw_queue);
+}
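+/* Copy the register snapshot taken by WaitEncReady() out to user space. */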
+static long EncRefreshRegs(hx280enc_t *dev, unsigned int __user *regs)
+{
+ unsigned long ret;
+
+ ret = copy_to_user(regs, dev->mirror_regs, dev->iosize);
+ if (ret) {
+ PDEBUG("%s: copy_to_user failed, %lu bytes not copied\n", __func__, ret);
+ return -EFAULT;
+ }
+ return 0;
}
ReleaseEncoder(&hx280enc_data);
break;
case _IOC_NR(HX280ENC_IOCG_CORE_WAIT): {
- int ret;
+ unsigned int __user *regs = (unsigned int __user *)arg;
- ret = WaitEncReady(&hx280enc_data);
- return ret;
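+ /* block for the encoder IRQ, then refresh the caller's register copy */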
+ WaitEncReady(&hx280enc_data);
+ return EncRefreshRegs(&hx280enc_data, regs);
}
}
return 0;
dev->irq_status = irq_status & (~0x01);
spin_unlock_irqrestore(&owner_lock, flags);
- wake_up_interruptible_all(&enc_wait_queue);
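+ /*
+  * wait_event_timeout() sleeps uninterruptibly, so an interruptible
+  * wake-up would no longer reach the waiter.
+  */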
+ wake_up_all(&enc_wait_queue);
PDEBUG("IRQ handled!\n");
return IRQ_HANDLED;