riscv: add option to wait for ack from secondary harts in smp functions
author	Lukas Auer <lukas.auer@aisec.fraunhofer.de>
Sun, 8 Dec 2019 22:28:51 +0000 (23:28 +0100)
committer	Andes <uboot@andestech.com>
Tue, 10 Dec 2019 00:23:10 +0000 (08:23 +0800)
Add a wait option to smp_call_function() to wait for the secondary harts
to acknowledge the call-function request. The request is considered to
be acknowledged once each secondary hart has cleared the corresponding
IPI.
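
An illustrative caller (not part of this patch; the helper name below is
hypothetical) that must know every secondary hart has picked up the
request before it continues would pass a non-zero wait argument:

  #include <asm/smp.h>

  /* Block until all secondary harts have cleared their IPIs */
  static int park_secondary_harts(ulong park_addr)
  {
          /* wait != 0: return only after each hart acknowledges */
          return smp_call_function(park_addr, 0, 0, 1);
  }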

As part of the call-function request, the secondary harts invalidate the
instruction cache after clearing the IPI. This adds a delay between
acknowledgment (clear IPI) and fulfillment (call function) of the
request. We want to use the acknowledgment to judge when the request has
been completed. Remove the delay by clearing the IPI only after cache
invalidation, immediately before calling the requested function.
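
In outline (a condensed view of the handle_ipi() change in the smp.c
hunk below, error handling omitted), each secondary hart now runs:

  smp_function = (void (*)(ulong, ulong, ulong))gd->arch.ipi[hart].addr;
  invalidate_icache_all();   /* sync the icache before acknowledging */
  riscv_clear_ipi(hart);     /* acknowledge: the sender may stop waiting */
  smp_function(hart, gd->arch.ipi[hart].arg0, gd->arch.ipi[hart].arg1);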

Signed-off-by: Lukas Auer <lukas.auer@aisec.fraunhofer.de>
Reviewed-by: Rick Chen <rick@andestech.com>
Tested-by: Rick Chen <rick@andestech.com>
Reviewed-by: Anup Patel <anup.patel@wdc.com>
arch/riscv/cpu/start.S
arch/riscv/include/asm/smp.h
arch/riscv/lib/bootm.c
arch/riscv/lib/smp.c
arch/riscv/lib/spl.c
common/spl/spl_opensbi.c

diff --git a/arch/riscv/cpu/start.S b/arch/riscv/cpu/start.S
index ee6d471..1a55b7d 100644
@@ -197,6 +197,7 @@ spl_secondary_hart_stack_gd_setup:
        la      a0, secondary_hart_relocate
        mv      a1, s0
        mv      a2, s0
+       mv      a3, zero
        jal     smp_call_function
 
        /* hang if relocation of secondary harts has failed */
@@ -337,6 +338,7 @@ relocate_secondary_harts:
 
        mv      a1, s2
        mv      a2, s3
+       mv      a3, zero
        jal     smp_call_function
 
        /* hang if relocation of secondary harts has failed */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index bc863fd..74de92e 100644
@@ -46,8 +46,9 @@ void handle_ipi(ulong hart);
  * @addr: Address of function
  * @arg0: First argument of function
  * @arg1: Second argument of function
+ * @wait: Wait for harts to acknowledge request
  * @return 0 if OK, -ve on error
  */
-int smp_call_function(ulong addr, ulong arg0, ulong arg1);
+int smp_call_function(ulong addr, ulong arg0, ulong arg1, int wait);
 
 #endif
diff --git a/arch/riscv/lib/bootm.c b/arch/riscv/lib/bootm.c
index efbd3e2..e96137a 100644
@@ -99,7 +99,7 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
                if (IMAGE_ENABLE_OF_LIBFDT && images->ft_len) {
 #ifdef CONFIG_SMP
                        ret = smp_call_function(images->ep,
-                                               (ulong)images->ft_addr, 0);
+                                               (ulong)images->ft_addr, 0, 0);
                        if (ret)
                                hang();
 #endif
diff --git a/arch/riscv/lib/smp.c b/arch/riscv/lib/smp.c
index 188a7e3..17adb35 100644
@@ -44,11 +44,11 @@ extern int riscv_clear_ipi(int hart);
  */
 extern int riscv_get_ipi(int hart, int *pending);
 
-static int send_ipi_many(struct ipi_data *ipi)
+static int send_ipi_many(struct ipi_data *ipi, int wait)
 {
        ofnode node, cpus;
        u32 reg;
-       int ret;
+       int ret, pending;
 
        cpus = ofnode_path("/cpus");
        if (!ofnode_valid(cpus)) {
@@ -91,6 +91,15 @@ static int send_ipi_many(struct ipi_data *ipi)
                        pr_err("Cannot send IPI to hart %d\n", reg);
                        return ret;
                }
+
+               if (wait) {
+                       pending = 1;
+                       while (pending) {
+                               ret = riscv_get_ipi(reg, &pending);
+                               if (ret)
+                                       return ret;
+                       }
+               }
        }
 
        return 0;
@@ -104,21 +113,25 @@ void handle_ipi(ulong hart)
        if (hart >= CONFIG_NR_CPUS)
                return;
 
+       __smp_mb();
+
+       smp_function = (void (*)(ulong, ulong, ulong))gd->arch.ipi[hart].addr;
+       invalidate_icache_all();
+
+       /*
+        * Clear the IPI to acknowledge the request before jumping to the
+        * requested function.
+        */
        ret = riscv_clear_ipi(hart);
        if (ret) {
                pr_err("Cannot clear IPI of hart %ld\n", hart);
                return;
        }
 
-       __smp_mb();
-
-       smp_function = (void (*)(ulong, ulong, ulong))gd->arch.ipi[hart].addr;
-       invalidate_icache_all();
-
        smp_function(hart, gd->arch.ipi[hart].arg0, gd->arch.ipi[hart].arg1);
 }
 
-int smp_call_function(ulong addr, ulong arg0, ulong arg1)
+int smp_call_function(ulong addr, ulong arg0, ulong arg1, int wait)
 {
        int ret = 0;
        struct ipi_data ipi;
@@ -127,7 +140,7 @@ int smp_call_function(ulong addr, ulong arg0, ulong arg1)
        ipi.arg0 = arg0;
        ipi.arg1 = arg1;
 
-       ret = send_ipi_many(&ipi);
+       ret = send_ipi_many(&ipi, wait);
 
        return ret;
 }
diff --git a/arch/riscv/lib/spl.c b/arch/riscv/lib/spl.c
index a544df0..dc7577f 100644
@@ -41,7 +41,7 @@ void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
 
        debug("image entry point: 0x%lX\n", spl_image->entry_point);
 #ifdef CONFIG_SMP
-       ret = smp_call_function(spl_image->entry_point, (ulong)fdt_blob, 0);
+       ret = smp_call_function(spl_image->entry_point, (ulong)fdt_blob, 0, 0);
        if (ret)
                hang();
 #endif
diff --git a/common/spl/spl_opensbi.c b/common/spl/spl_opensbi.c
index fed41b1..58bf246 100644
@@ -78,7 +78,7 @@ void spl_invoke_opensbi(struct spl_image_info *spl_image)
 #ifdef CONFIG_SMP
        ret = smp_call_function((ulong)spl_image->entry_point,
                                (ulong)spl_image->fdt_addr,
-                               (ulong)&opensbi_info);
+                               (ulong)&opensbi_info, 0);
        if (ret)
                hang();
 #endif