/******************************************************************************
 * tmem-xen.c
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>
#include <xen/cpu.h>
#include <xen/init.h>

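/* Runtime controls, set with the "tmem" and "tmem_compress" command line options. */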
bool __read_mostly opt_tmem;
boolean_param("tmem", opt_tmem);

bool __read_mostly opt_tmem_compress;
boolean_param("tmem_compress", opt_tmem_compress);

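/* Count of tmem-held pages that may be relinquished when Xen needs memory. */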
atomic_t freeable_page_count = ATOMIC_INIT(0);

/*
 * These per-CPU buffers could be allocated dynamically, and only when
 * opt_tmem_compress is enabled.
 */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);

#if defined(CONFIG_ARM)
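/* Tmem is not implemented on ARM: these stubs must never be reached. */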
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                 struct page_info **pcli_pfp, bool cli_write)
{
    ASSERT_UNREACHABLE();
    return NULL;
}

static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                unsigned long cli_mfn, bool mark_dirty)
{
    ASSERT_UNREACHABLE();
}
#else
#include <asm/p2m.h>

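/*
 * Map the client page with guest frame number cmfn, taking a general
 * reference (and a writable type reference if cli_write).  Returns the
 * mapped virtual address, or NULL if the page is absent or is not
 * ordinary read/write RAM.
 */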
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                 struct page_info **pcli_pfp, bool cli_write)
{
    p2m_type_t t;
    struct page_info *page;

    page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
    if ( !page || t != p2m_ram_rw )
    {
        if ( page )
            put_page(page);
        return NULL;
    }

    if ( cli_write && !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        return NULL;
    }

    *pcli_mfn = page_to_mfn(page);
    *pcli_pfp = page;
    return map_domain_page(_mfn(*pcli_mfn));
}

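/*
 * Release the references taken by cli_get_page() and unmap the page.
 * If the page was written to, also mark it dirty for log-dirty tracking.
 */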
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                unsigned long cli_mfn, bool mark_dirty)
{
    if ( mark_dirty )
    {
        put_page_and_type(cli_pfp);
        paging_mark_dirty(current->domain, _mfn(cli_mfn));
    }
    else
        put_page(cli_pfp);
    unmap_domain_page(cli_va);
}
#endif

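/*
 * Copy one page of data from the client frame cmfn into the tmem page
 * pfp.  Returns 1 on success, -EFAULT if the client page cannot be
 * mapped, or -EINVAL if a non-null guest buffer is passed, which this
 * path does not support.
 */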
int tmem_copy_from_client(struct page_info *pfp,
    xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    char *tmem_va, *cli_va = NULL;
    struct page_info *cli_pfp = NULL;
    int rc = 1;

    ASSERT(pfp != NULL);
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(_mfn(tmem_mfn));
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, false);
        if ( cli_va == NULL )
        {
            unmap_domain_page(tmem_va);
            return -EFAULT;
        }
    }
    smp_mb();
    if ( cli_va )
    {
        memcpy(tmem_va, cli_va, PAGE_SIZE);
        cli_put_page(cli_va, cli_pfp, cli_mfn, false);
    }
    else
        rc = -EINVAL;
    unmap_domain_page(tmem_va);
    return rc;
}

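/*
 * Compress one client page (from frame cmfn, or from the guest buffer
 * clibuf) into this CPU's dstmem buffer using LZO1X.  On success returns
 * 1 and sets *out_va/*out_len; returns 0 when no per-CPU buffers are
 * available, so the caller can fall back to an uncompressed copy, or
 * -EFAULT on a copy/mapping failure.
 */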
int tmem_compress_from_client(xen_pfn_t cmfn,
    void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf)
{
    int ret = 0;
    unsigned char *dmem = this_cpu(dstmem);
    unsigned char *wmem = this_cpu(workmem);
    char *scratch = this_cpu(scratch_page);
    struct page_info *cli_pfp = NULL;
    unsigned long cli_mfn = 0;
    void *cli_va = NULL;

    if ( dmem == NULL || wmem == NULL )
        return 0; /* no buffer, so can't compress */
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, false);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    else if ( !scratch )
        return 0;
    else if ( copy_from_guest(scratch, clibuf, PAGE_SIZE) )
        return -EFAULT;
    smp_mb();
    ret = lzo1x_1_compress(cli_va ?: scratch, PAGE_SIZE, dmem, out_len, wmem);
    ASSERT(ret == LZO_E_OK);
    *out_va = dmem;
    if ( cli_va )
        cli_put_page(cli_va, cli_pfp, cli_mfn, false);
    return 1;
}

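/*
 * Copy one page of data from the tmem page pfp back to the client frame
 * cmfn.  Returns 1 on success, -EFAULT if the client page cannot be
 * mapped for writing, or -EINVAL if a non-null guest buffer is passed.
 */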
int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
    tmem_cli_va_param_t clibuf)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    char *tmem_va, *cli_va = NULL;
    struct page_info *cli_pfp = NULL;
    int rc = 1;

    ASSERT(pfp != NULL);
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, true);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(_mfn(tmem_mfn));
    if ( cli_va )
    {
        memcpy(cli_va, tmem_va, PAGE_SIZE);
        cli_put_page(cli_va, cli_pfp, cli_mfn, true);
    }
    else
        rc = -EINVAL;
    unmap_domain_page(tmem_va);
    smp_mb();
    return rc;
}

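/*
 * Decompress a previously compressed tmem page (size bytes at tmem_va)
 * into the client frame cmfn or the guest buffer clibuf.  Returns 1 on
 * success, 0 if no per-CPU scratch page is available, or -EFAULT on a
 * copy/mapping failure.
 */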
int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
    size_t size, tmem_cli_va_param_t clibuf)
{
    unsigned long cli_mfn = 0;
    struct page_info *cli_pfp = NULL;
    void *cli_va = NULL;
    char *scratch = this_cpu(scratch_page);
    size_t out_len = PAGE_SIZE;
    int ret;

    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, true);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    else if ( !scratch )
        return 0;
    ret = lzo1x_decompress_safe(tmem_va, size, cli_va ?: scratch, &out_len);
    ASSERT(ret == LZO_E_OK);
    ASSERT(out_len == PAGE_SIZE);
    if ( cli_va )
        cli_put_page(cli_va, cli_pfp, cli_mfn, true);
    else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) )
        return -EFAULT;
    smp_mb();
    return 1;
}

/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/
static int dstmem_order, workmem_order;

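/*
 * CPU notifier: allocate the per-CPU compression buffers as a CPU comes
 * up, and free them again when it dies or its bring-up is cancelled.
 * Allocation failures are tolerated here; the compression paths check
 * for NULL buffers and simply decline to compress.
 */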
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE: {
        if ( per_cpu(dstmem, cpu) == NULL )
            per_cpu(dstmem, cpu) = alloc_xenheap_pages(dstmem_order, 0);
        if ( per_cpu(workmem, cpu) == NULL )
            per_cpu(workmem, cpu) = alloc_xenheap_pages(workmem_order, 0);
        if ( per_cpu(scratch_page, cpu) == NULL )
            per_cpu(scratch_page, cpu) = alloc_xenheap_page();
        break;
    }
    case CPU_DEAD:
    case CPU_UP_CANCELED: {
        if ( per_cpu(dstmem, cpu) != NULL )
        {
            free_xenheap_pages(per_cpu(dstmem, cpu), dstmem_order);
            per_cpu(dstmem, cpu) = NULL;
        }
        if ( per_cpu(workmem, cpu) != NULL )
        {
            free_xenheap_pages(per_cpu(workmem, cpu), workmem_order);
            per_cpu(workmem, cpu) = NULL;
        }
        if ( per_cpu(scratch_page, cpu) != NULL )
        {
            free_xenheap_page(per_cpu(scratch_page, cpu));
            per_cpu(scratch_page, cpu) = NULL;
        }
        break;
    }
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

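/*
 * Allocate per-CPU buffers for all CPUs already online, then register
 * the notifier so later hot-plugged CPUs get theirs too.
 */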
int __init tmem_init(void)
{
    unsigned int cpu;

    dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
    workmem_order = get_order_from_bytes(LZO_WORKMEM_BYTES);

    for_each_online_cpu ( cpu )
    {
        void *hcpu = (void *)(long)cpu;
        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    }

    register_cpu_notifier(&cpu_nfb);

    return 1;
}