summaryrefslogtreecommitdiffstats
path: root/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
blob: 73a5a6b6b5d6062fd0fae5f739bbf3d1f034c953 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
/** @file
  RISC-V specific functionality for cache.

  Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
  Copyright (c) 2023, Rivos Inc. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent
**/

#include <Base.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>

//
// TODO: Grab cache block size and make Cache Management Operation
// enabling decision based on RISC-V CPU HOB in
// future when it is available and convert PcdRiscVFeatureOverride
// PCD to a pointer that contains pointer to bitmap structure
// which can be operated more elegantly.
//
#define RISCV_CACHE_BLOCK_SIZE         64
#define RISCV_CPU_FEATURE_CMO_BITMASK  0x1

//
// Cache Management Operation selector used by CacheOpCacheRange () to pick
// which per-cache-line asm primitive to issue.
//
typedef enum {
  CacheOpClean,    // dispatched to RiscVCpuCacheCleanCmoAsm ()
  CacheOpFlush,    // dispatched to RiscVCpuCacheFlushCmoAsm ()
  CacheOpInvld,    // dispatched to RiscVCpuCacheInvalCmoAsm ()
} CACHE_OP;

/**
  Report whether Cache Management Operations (CMO) may be issued on this
  platform.

  TODO: Derive this from the RISC-V CPU HOB once it is available; for now
  the answer comes solely from the PcdRiscVFeatureOverride bitmap.

  @retval TRUE   CMO instructions may be used.
  @retval FALSE  CMO instructions must not be used.

**/
STATIC
BOOLEAN
RiscVIsCMOEnabled (
  VOID
  )
{
  UINT64  FeatureOverride;

  //
  // If CMO is disabled in HW, skip Override check.
  // Otherwise this PCD can override settings.
  //
  FeatureOverride = PcdGet64 (PcdRiscVFeatureOverride);

  return (FeatureOverride & RISCV_CPU_FEATURE_CMO_BITMASK) != 0;
}

/**
  Performs the required operation on cache lines in the cache coherency domain
  of the calling CPU. If Address is not aligned on a cache line boundary, then
  the entire cache line containing Address is operated on. If Address + Length
  is not aligned on a cache line boundary, then the entire cache line
  containing Address + Length - 1 is operated on.

  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

  @param  Address The base address of the cache lines to operate on.
  @param  Length  The number of bytes to operate on.
  @param  Op      Type of CMO operation to be performed.

**/
STATIC
VOID
CacheOpCacheRange (
  IN VOID      *Address,
  IN UINTN     Length,
  IN CACHE_OP  Op
  )
{
  UINTN  CacheLineSize;
  UINTN  Start;
  UINTN  NumberOfLines;

  if (Length == 0) {
    return;
  }

  if ((Op != CacheOpInvld) && (Op != CacheOpFlush) && (Op != CacheOpClean)) {
    return;
  }

  ASSERT ((Length - 1) <= (MAX_ADDRESS - (UINTN)Address));

  CacheLineSize = RISCV_CACHE_BLOCK_SIZE;

  //
  // Align the start address down to a cache line boundary and count the
  // cache lines covered by [Address, Address + Length - 1]. Counting lines
  // (rather than looping until an aligned end address is reached) avoids
  // the wrap-around that "End = AlignUp (Address + Length)" suffers when
  // the range ends at the top of the address space, which would make a
  // "Start != End" loop run over the entire address space.
  //
  Start         = (UINTN)Address & ~((UINTN)CacheLineSize - 1);
  NumberOfLines = (((UINTN)Address + Length - 1) - Start) / CacheLineSize + 1;

  DEBUG (
    (DEBUG_VERBOSE,
     "CacheOpCacheRange: Performing Cache Management Operation %d \n", Op)
    );

  while (NumberOfLines > 0) {
    switch (Op) {
      case CacheOpInvld:
        RiscVCpuCacheInvalCmoAsm (Start);
        break;
      case CacheOpFlush:
        RiscVCpuCacheFlushCmoAsm (Start);
        break;
      case CacheOpClean:
        RiscVCpuCacheCleanCmoAsm (Start);
        break;
      default:
        break;
    }

    Start += CacheLineSize;
    NumberOfLines--;
  }
}

/**
  Invalidates the entire instruction cache in cache coherency domain of the
  calling CPU. Risc-V does not have currently an CBO implementation which can
  invalidate the entire I-cache. Hence using Fence instruction for now. P.S.
  Fence instruction may or may not implement full I-cache invd functionality
  on all implementations.

**/
VOID
EFIAPI
InvalidateInstructionCache (
  VOID
  )
{
  // fence.i-based helper; see the caveat above about per-implementation
  // I-cache invalidation coverage.
  RiscVInvalidateInstCacheFenceAsm ();
}

/**
  Invalidates a range of instruction cache lines in the cache coherency domain
  of the calling CPU.

  CMO instructions are defined to operate only on the copies of a cache block
  held in caches reached by explicit memory accesses, so they are not
  applicable to the instruction cache. The whole instruction cache is
  therefore invalidated (fence.i) in place of the requested range.

  @param  Address The base address of the instruction cache lines to
                  invalidate. If the CPU is in a physical addressing mode, then
                  Address is a physical address. If the CPU is in a virtual
                  addressing mode, then Address is a virtual address.
  @param  Length  The number of bytes to invalidate from the instruction cache.

  @return Address.

**/
VOID *
EFIAPI
InvalidateInstructionCacheRange (
  IN VOID   *Address,
  IN UINTN  Length
  )
{
  DEBUG ((
    DEBUG_VERBOSE,
    "InvalidateInstructionCacheRange: RISC-V unsupported function.\n"
    "Invalidating the whole instruction cache instead.\n"
    ));

  InvalidateInstructionCache ();

  return Address;
}

/**
  Writes back and invalidates the entire data cache in cache coherency domain
  of the calling CPU.

  Writes back and invalidates the entire data cache in cache coherency domain
  of the calling CPU. This function guarantees that all dirty cache lines are
  written back to system memory, and also invalidates all the data cache lines
  in the cache coherency domain of the calling CPU.

  Not implemented for RISC-V: the call is reported and then asserted.

**/
VOID
EFIAPI
WriteBackInvalidateDataCache (
  VOID
  )
{
  //
  // Emit the diagnostic before asserting: on DEBUG builds ASSERT (FALSE)
  // may dead-loop, which would suppress the message entirely.
  //
  DEBUG ((
    DEBUG_ERROR,
    "WriteBackInvalidateDataCache: RISC-V unsupported function.\n"
    ));
  ASSERT (FALSE);
}

/**
  Writes back and invalidates a range of data cache lines in the cache
  coherency domain of the calling CPU.

  Writes back and invalidates the data cache lines specified by Address and
  Length. If Address is not aligned on a cache line boundary, then entire data
  cache line containing Address is written back and invalidated. If Address +
  Length is not aligned on a cache line boundary, then the entire data cache
  line containing Address + Length -1 is written back and invalidated. If
  Length is 0, then no data cache lines are written back and invalidated.
  Address is returned.

  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

  @param  Address The base address of the data cache lines to write back and
                  invalidate. If the CPU is in a physical addressing mode, then
                  Address is a physical address. If the CPU is in a virtual
                  addressing mode, then Address is a virtual address.
  @param  Length  The number of bytes to write back and invalidate from the
                  data cache.

  @return Address of cache invalidation.

**/
VOID *
EFIAPI
WriteBackInvalidateDataCacheRange (
  IN      VOID   *Address,
  IN      UINTN  Length
  )
{
  //
  // Without CMO support there is no way to flush a data-cache range.
  //
  if (!RiscVIsCMOEnabled ()) {
    ASSERT (FALSE);
    return Address;
  }

  CacheOpCacheRange (Address, Length, CacheOpFlush);

  return Address;
}

/**
  Writes back the entire data cache in cache coherency domain of the calling
  CPU.

  Writes back the entire data cache in cache coherency domain of the calling
  CPU. This function guarantees that all dirty cache lines are written back to
  system memory. This function may also invalidate all the data cache lines in
  the cache coherency domain of the calling CPU.

  Not implemented for RISC-V: the call is reported and then asserted.

**/
VOID
EFIAPI
WriteBackDataCache (
  VOID
  )
{
  //
  // Report the unsupported call before asserting, consistent with
  // WriteBackInvalidateDataCache ().
  //
  DEBUG ((
    DEBUG_ERROR,
    "WriteBackDataCache: RISC-V unsupported function.\n"
    ));
  ASSERT (FALSE);
}

/**
  Writes back a range of data cache lines in the cache coherency domain of the
  calling CPU.

  Writes back the data cache lines specified by Address and Length. If Address
  is not aligned on a cache line boundary, then entire data cache line
  containing Address is written back. If Address + Length is not aligned on a
  cache line boundary, then the entire data cache line containing Address +
  Length -1 is written back. If Length is 0, then no data cache lines are
  written back. Address is returned.

  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

  @param  Address The base address of the data cache lines to write back.
  @param  Length  The number of bytes to write back from the data cache.

  @return Address of cache written in main memory.

**/
VOID *
EFIAPI
WriteBackDataCacheRange (
  IN      VOID   *Address,
  IN      UINTN  Length
  )
{
  //
  // Without CMO support there is no way to clean a data-cache range.
  //
  if (!RiscVIsCMOEnabled ()) {
    ASSERT (FALSE);
    return Address;
  }

  CacheOpCacheRange (Address, Length, CacheOpClean);

  return Address;
}

/**
  Invalidates the entire data cache in cache coherency domain of the calling
  CPU.

  Invalidates the entire data cache in cache coherency domain of the calling
  CPU. This function must be used with care because dirty cache lines are not
  written back to system memory. It is typically used for cache diagnostics. If
  the CPU does not support invalidation of the entire data cache, then a write
  back and invalidate operation should be performed on the entire data cache.

**/
VOID
EFIAPI
InvalidateDataCache (
  VOID
  )
{
  // Fence-based helper; NOTE(review): a data fence orders accesses rather
  // than invalidating cache contents on all implementations — confirm the
  // asm helper's semantics against the platform.
  RiscVInvalidateDataCacheFenceAsm ();
}

/**
  Invalidates a range of data cache lines in the cache coherency domain of the
  calling CPU.

  Invalidates the data cache lines specified by Address and Length. If Address
  is not aligned on a cache line boundary, then entire data cache line
  containing Address is invalidated. If Address + Length is not aligned on a
  cache line boundary, then the entire data cache line containing Address +
  Length -1 is invalidated. If Length is 0, then no data cache lines are
  invalidated. Address is returned. This function must be used with care
  because dirty cache lines are not written back to system memory. It is
  typically used for cache diagnostics. When Zicbom is unavailable the whole
  data cache is invalidated in place of the requested range.

  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

  @param  Address The base address of the data cache lines to invalidate.
  @param  Length  The number of bytes to invalidate from the data cache.

  @return Address.

**/
VOID *
EFIAPI
InvalidateDataCacheRange (
  IN      VOID   *Address,
  IN      UINTN  Length
  )
{
  if (RiscVIsCMOEnabled ()) {
    CacheOpCacheRange (Address, Length, CacheOpInvld);
    return Address;
  }

  DEBUG ((
    DEBUG_VERBOSE,
    "InvalidateDataCacheRange: Zicbom not supported.\n"
    "Invalidating the whole Data cache instead.\n"
    ));

  InvalidateDataCache ();

  return Address;
}