/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;
enum kvm_mr_change;

#include <linux/types.h>

#include <asm/kvm_types.h>

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;

#define GPA_INVALID (~(gpa_t)0)

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;

typedef hfn_t kvm_pfn_t;
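
/*
 * Illustrative sketch (not part of this header): gpa and gfn are related by
 * a simple PAGE_SHIFT conversion.  KVM's real helpers of this shape are
 * declared in <linux/kvm_host.h>; the versions below are examples only.
 *
 *	static inline gfn_t gpa_to_gfn(gpa_t gpa)
 *	{
 *		return (gfn_t)(gpa >> PAGE_SHIFT);
 *	}
 *
 *	static inline gpa_t gfn_to_gpa(gfn_t gfn)
 *	{
 *		return (gpa_t)((u64)gfn << PAGE_SHIFT);
 *	}
 */
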
/*
 * Caches the result of a gpa -> hva translation so that repeated accesses
 * to the same guest memory skip the memslot lookup; @generation is checked
 * against the memslots generation to revalidate the cache.
 */
struct gfn_to_hva_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};
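
/*
 * Usage sketch (illustrative, assuming the cached-access helpers declared
 * in <linux/kvm_host.h>): the cache is initialized once for a guest
 * physical range and then reused, so the fast path avoids a fresh memslot
 * lookup on every access.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */
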
/*
 * Caches a gfn -> pfn translation; @dirty tracks whether the cached page
 * has been written and must be marked dirty.
 */
struct gfn_to_pfn_cache {
	u64 generation;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool dirty;
};

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers. Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks. Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	gfp_t gfp_zero;
	struct kmem_cache *kmem_cache;
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};
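
/*
 * Usage sketch (illustrative; "cache" and "min" are stand-ins): the topup
 * and alloc helpers implemented in virt/kvm/kvm_main.c are split so that
 * allocation happens in a sleepable context while consumption can occur
 * under the MMU lock.  Objects come from @kmem_cache when it is set, else
 * from the page allocator, with @gfp_zero ORed into the GFP flags.
 *
 *	r = kvm_mmu_topup_memory_cache(cache, min);	// may sleep
 *	if (r)
 *		return r;
 *	spin_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(cache);	// draws from the cache
 *	spin_unlock(&kvm->mmu_lock);
 */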
#endif

#endif /* __KVM_TYPES_H__ */