GCC Code Coverage Report
Directory: ./
File: lib/libcompiler_rt/clear_cache.c
Date: 2017-11-07

            Exec  Total  Coverage
Lines:         0      0     0.0 %
Branches:      0      0     0.0 %

Source:
/* ===-- clear_cache.c - Implement __clear_cache ---------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

#include "int_lib.h"
#include <stddef.h>

#if __APPLE__
  #include <libkern/OSCacheControl.h>
#endif

#if defined(_WIN32)
/* Forward declare Win32 APIs since the GCC mode driver does not handle the
   newer SDKs as well as needed.  */
uint32_t FlushInstructionCache(uintptr_t hProcess, void *lpBaseAddress,
                               uintptr_t dwSize);
uintptr_t GetCurrentProcess(void);
#endif

#if (defined(__FreeBSD__) || defined(__Bitrig__)) && defined(__arm__)
  #include <sys/types.h>
  #include <machine/sysarch.h>
#endif

#if defined(__NetBSD__) && defined(__arm__)
  #include <machine/sysarch.h>
#endif

#if defined(__mips__)
  #include <sys/cachectl.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #if defined(__ANDROID__) && defined(__LP64__)
    /*
     * clear_mips_cache - Invalidates instruction cache for Mips.
     */
    static void clear_mips_cache(const void* Addr, size_t Size) {
      asm volatile (
        ".set push\n"
        ".set noreorder\n"
        ".set noat\n"
        "beq %[Size], $zero, 20f\n"          /* If size == 0, branch around. */
        "nop\n"
        "daddu %[Size], %[Addr], %[Size]\n"  /* Calculate end address + 1 */
        "rdhwr $v0, $1\n"                    /* Get step size for SYNCI.
                                                $1 is $HW_SYNCI_Step */
        "beq $v0, $zero, 20f\n"              /* If no caches require
                                                synchronization, branch
                                                around. */
        "nop\n"
        "10:\n"
        "synci 0(%[Addr])\n"                 /* Synchronize all caches around
                                                address. */
        "daddu %[Addr], %[Addr], $v0\n"      /* Add step size. */
        "sltu $at, %[Addr], %[Size]\n"       /* Compare current with end
                                                address. */
        "bne $at, $zero, 10b\n"              /* Branch if more to do. */
        "nop\n"
        "sync\n"                             /* Clear memory hazards. */
        "20:\n"
        "bal 30f\n"
        "nop\n"
        "30:\n"
        "daddiu $ra, $ra, 12\n"              /* $ra has a value of $pc here.
                                                Add offset of 12 to point to the
                                                instruction after the last nop.
                                              */
        "jr.hb $ra\n"                        /* Return, clearing instruction
                                                hazards. */
        "nop\n"
        ".set pop\n"
        : [Addr] "+r"(Addr), [Size] "+r"(Size)
        :: "at", "ra", "v0", "memory"
      );
    }
  #endif
#endif

#if defined(__linux__) && defined(__arm__)
  #include <asm/unistd.h>
#endif

/*
 * The compiler generates calls to __clear_cache() when creating
 * trampoline functions on the stack for use with nested functions.
 * It is expected to invalidate the instruction cache for the
 * specified range.
 */

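/*
 * For example (GNU C; a sketch for illustration, not part of the original
 * file): taking the address of a nested function materializes a trampoline
 * on the stack, and the code initializing it ends in a __clear_cache() call
 * covering the trampoline's bytes:
 *
 *   void outer(void) {
 *     int x = 0;
 *     void inner(void) { ++x; }
 *     use_callback(&inner);   // use_callback is a hypothetical consumer
 *   }
 */
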
void __clear_cache(void *start, void *end) {
#if __i386__ || __x86_64__
/*
 * Intel processors have a unified instruction and data cache
 * so there is nothing to do
 */
#elif defined(__arm__) && !defined(__APPLE__)
    #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__Bitrig__)
        struct arm_sync_icache_args arg;

        arg.addr = (uintptr_t)start;
        arg.len = (uintptr_t)end - (uintptr_t)start;

        sysarch(ARM_SYNC_ICACHE, &arg);
    #elif defined(__linux__)
         register int start_reg __asm("r0") = (int) (intptr_t) start;
         const register int end_reg __asm("r1") = (int) (intptr_t) end;
         const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
         __asm __volatile("svc 0x0"
                          : "=r"(start_reg)
                          : "r"(syscall_nr), "r"(start_reg), "r"(end_reg));
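         /* The kernel leaves the cacheflush status in r0; a non-zero
            value means the flush failed. */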
         if (start_reg != 0) {
             compilerrt_abort();
         }
    #elif defined(_WIN32)
        FlushInstructionCache(GetCurrentProcess(), start, end - start);
    #else
        compilerrt_abort();
    #endif
#elif defined(__mips__)
  const uintptr_t start_int = (uintptr_t) start;
  const uintptr_t end_int = (uintptr_t) end;
    #if defined(__ANDROID__) && defined(__LP64__)
        // Call synci implementation for short address range.
        const uintptr_t address_range_limit = 256;
        if ((end_int - start_int) <= address_range_limit) {
            clear_mips_cache(start, (end_int - start_int));
        } else {
            syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
        }
    #else
        syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
    #endif
#elif defined(__aarch64__) && !defined(__APPLE__)
  uint64_t xstart = (uint64_t)(uintptr_t) start;
  uint64_t xend = (uint64_t)(uintptr_t) end;
  uint64_t addr;

  // Get Cache Type Info
  uint64_t ctr_el0;
  __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));

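  /*
   * CTR_EL0 encodes the minimum i-cache line size in bits [3:0] (IminLine)
   * and the minimum d-cache line size in bits [19:16] (DminLine), each as
   * log2 of the size in 4-byte words, so `4 << field` below converts a
   * field to a line size in bytes.
   */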
  /*
   * dc & ic instructions must use 64-bit registers so we don't use
   * uintptr_t in case this runs in an ILP32 environment.
   */
  const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
  for (addr = xstart; addr < xend; addr += dcache_line_size)
    __asm __volatile("dc cvau, %0" :: "r"(addr));
  __asm __volatile("dsb ish");

  const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
  for (addr = xstart; addr < xend; addr += icache_line_size)
    __asm __volatile("ic ivau, %0" :: "r"(addr));
  __asm __volatile("isb sy");
#else
    #if __APPLE__
        /* On Darwin, sys_icache_invalidate() provides this functionality */
        sys_icache_invalidate(start, end-start);
    #else
        compilerrt_abort();
    #endif
#endif
}
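
For reference, a minimal caller sketch, not part of the measured file: user
code normally reaches __clear_cache() through GCC's __builtin___clear_cache()
or, as below, after copying machine code into executable memory. The
mmap()-based buffer, install_code(), and its code/size parameters are
assumptions for illustration; error handling is deliberately minimal.

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

extern void __clear_cache(void *start, void *end);

/* Copy pre-encoded machine code into fresh executable memory and make
   it safe to execute. */
static void *install_code(const void *code, size_t size) {
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return NULL;
    memcpy(buf, code, size);
    /* On split I/D-cache targets (ARM, AArch64, MIPS) the new bytes may
       still be invisible to instruction fetch; flush the range first. */
    __clear_cache(buf, (char *)buf + size);
    return buf;
}

On i386/x86_64 the call compiles to nothing, as the listing above shows, but
keeping it makes the same source correct on the split-cache architectures.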