1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <xen/device_tree.h>
4 #include <xen/fdt-domain-build.h>
5 #include <xen/libfdt/libfdt.h>
6 #include <xen/rangeset.h>
7 #include <xen/sched.h>
8 #include <xen/static-memory.h>
9 #include <xen/static-shmem.h>
10 
11 #include <asm/setup.h>
12 
/*
 * Context passed through allocate_domheap_memory() to its callback
 * (save_map_heap_pages()) when carving a static shared memory bank out of
 * the Xen heap.
 */
typedef struct {
    struct domain *d;              /* domain to assign/map the pages to */
    const char *role_str;          /* "role" DT property value, NULL if absent */
    paddr_t gbase;                 /* next guest physical address to map at */
    struct shmem_membank_extra *bank_extra_info; /* shm_id/borrower bookkeeping */
} alloc_heap_pages_cb_extra;
19 
/*
 * Record of the static shared memory banks that were allocated from the
 * Xen heap, i.e. banks whose host physical address was not supplied by the
 * user in the device tree (bootinfo bank start == INVALID_PADDR).
 */
static struct {
    struct membanks_hdr common;
    struct membank bank[NR_SHMEM_BANKS];
} shm_heap_banks __initdata = {
    .common.max_banks = NR_SHMEM_BANKS,
    .common.type = STATIC_SHARED_MEMORY
};
27 
get_shmem_heap_banks(void)28 static inline struct membanks *get_shmem_heap_banks(void)
29 {
30     return container_of(&shm_heap_banks.common, struct membanks, common);
31 }
32 
/*
 * Compile-time layout checks for the aliasing of struct membanks and
 * struct shared_meminfo done elsewhere in this file.
 */
static void __init __maybe_unused build_assertions(void)
{
    /*
     * Check that no padding is between struct membanks "bank" flexible array
     * member and struct shared_meminfo "bank" member
     */
    BUILD_BUG_ON((offsetof(struct membanks, bank) !=
                 offsetof(struct shared_meminfo, bank)));
}
42 
43 static const struct membank __init *
find_shm_bank_by_id(const struct membanks * shmem,const char * shm_id)44 find_shm_bank_by_id(const struct membanks *shmem, const char *shm_id)
45 {
46     unsigned int bank;
47 
48     for ( bank = 0 ; bank < shmem->nr_banks; bank++ )
49     {
50         if ( strcmp(shm_id, shmem->bank[bank].shmem_extra->shm_id) == 0 )
51             break;
52     }
53 
54     if ( bank == shmem->nr_banks )
55         return NULL;
56 
57     return &shmem->bank[bank];
58 }
59 
/*
 * This function checks whether the static shared memory region is
 * already allocated to dom_io.
 *
 * Returns true only when the first page of the region is owned by dom_io.
 * Returns false when the page has no owner yet (the caller will then assign
 * the region to dom_io), or - with an error message - when the page already
 * belongs to some other domain.
 */
static bool __init is_shm_allocated_to_domio(paddr_t pbase)
{
    struct page_info *page;
    struct domain *d;

    page = maddr_to_page(pbase);
    /* Take a temporary reference so the owner cannot disappear under us. */
    d = page_get_owner_and_reference(page);
    if ( d == NULL )
        return false;
    put_page(page);

    if ( d != dom_io )
    {
        printk(XENLOG_ERR
               "shm memory node has already been allocated to a specific owner %pd, Please check your configuration\n",
               d);
        return false;
    }

    return true;
}
85 
/*
 * Make the @psize bytes of static shared memory starting at host physical
 * address @pbase belong to domain @d.
 *
 * When @bank_from_heap is true the pages were freshly taken from the Xen
 * heap and are assigned to @d; otherwise they are acquired as static
 * memory. In both cases d->max_pages is grown by the page count first and
 * rolled back on failure.
 *
 * Returns the base MFN of the bank on success, INVALID_MFN on error.
 */
static mfn_t __init acquire_shared_memory_bank(struct domain *d,
                                               paddr_t pbase, paddr_t psize,
                                               bool bank_from_heap)
{
    mfn_t smfn;
    unsigned long nr_pfns;
    int res;

    /*
     * Pages of statically shared memory shall be included
     * into domain_tot_pages().
     */
    nr_pfns = PFN_DOWN(psize);
    /* Guard the (apparently 32-bit) max_pages counter against overflow. */
    if ( (UINT_MAX - d->max_pages) < nr_pfns )
    {
        printk(XENLOG_ERR "%pd: Over-allocation for d->max_pages: %lu.\n",
               d, nr_pfns);
        return INVALID_MFN;
    }
    d->max_pages += nr_pfns;

    smfn = maddr_to_mfn(pbase);
    if ( bank_from_heap )
        /*
         * When host address is not provided, static shared memory is
         * allocated from heap and shall be assigned to owner domain.
         */
        res = assign_pages(maddr_to_page(pbase), nr_pfns, d, 0);
    else
        res = acquire_domstatic_pages(d, smfn, nr_pfns, 0);

    if ( res )
    {
        printk(XENLOG_ERR "%pd: failed to %s static memory: %d.\n", d,
               bank_from_heap ? "assign" : "acquire", res);
        goto fail;
    }

    return smfn;

 fail:
    /* Undo the max_pages bump done above. */
    d->max_pages -= nr_pfns;
    return INVALID_MFN;
}
130 
assign_shared_memory(struct domain * d,paddr_t gbase,bool bank_from_heap,const struct membank * shm_bank)131 static int __init assign_shared_memory(struct domain *d, paddr_t gbase,
132                                        bool bank_from_heap,
133                                        const struct membank *shm_bank)
134 {
135     mfn_t smfn;
136     int ret = 0;
137     unsigned long nr_pages, nr_borrowers, i;
138     struct page_info *page;
139     paddr_t pbase, psize;
140 
141     pbase = shm_bank->start;
142     psize = shm_bank->size;
143     nr_borrowers = shm_bank->shmem_extra->nr_shm_borrowers;
144 
145     smfn = acquire_shared_memory_bank(d, pbase, psize, bank_from_heap);
146     if ( mfn_eq(smfn, INVALID_MFN) )
147         return -EINVAL;
148 
149     /*
150      * DOMID_IO is not auto-translated (i.e. it sees RAM 1:1). So we do not need
151      * to create mapping in the P2M.
152      */
153     nr_pages = PFN_DOWN(psize);
154     if ( d != dom_io )
155     {
156         ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn,
157                                       PFN_DOWN(psize));
158         if ( ret )
159         {
160             printk(XENLOG_ERR "Failed to map shared memory to %pd.\n", d);
161             return ret;
162         }
163     }
164 
165     /*
166      * Instead of letting borrower domain get a page ref, we add as many
167      * additional reference as the number of borrowers when the owner
168      * is allocated, since there is a chance that owner is created
169      * after borrower.
170      * So if the borrower is created first, it will cause adding pages
171      * in the P2M without reference.
172      */
173     page = mfn_to_page(smfn);
174     for ( i = 0; i < nr_pages; i++ )
175     {
176         if ( !get_page_nr(page + i, d, nr_borrowers) )
177         {
178             printk(XENLOG_ERR
179                    "Failed to add %lu references to page %"PRI_mfn".\n",
180                    nr_borrowers, mfn_x(smfn) + i);
181             goto fail;
182         }
183     }
184 
185     return 0;
186 
187  fail:
188     while ( --i >= 0 )
189         put_page_nr(page + i, nr_borrowers);
190     return ret;
191 }
192 
193 static int __init
append_shm_bank_to_domain(struct kernel_info * kinfo,paddr_t start,paddr_t size,const char * shm_id)194 append_shm_bank_to_domain(struct kernel_info *kinfo, paddr_t start,
195                           paddr_t size, const char *shm_id)
196 {
197     struct membanks *shm_mem = kernel_info_get_shm_mem(kinfo);
198     struct shmem_membank_extra *shm_mem_extra;
199 
200     if ( shm_mem->nr_banks >= shm_mem->max_banks )
201         return -ENOMEM;
202 
203     shm_mem_extra = &kinfo->shm_mem.extra[shm_mem->nr_banks];
204 
205     shm_mem->bank[shm_mem->nr_banks].start = start;
206     shm_mem->bank[shm_mem->nr_banks].size = size;
207     safe_strcpy(shm_mem_extra->shm_id, shm_id);
208     shm_mem->bank[shm_mem->nr_banks].shmem_extra = shm_mem_extra;
209     shm_mem->nr_banks++;
210 
211     return 0;
212 }
213 
/*
 * Set up one static shared memory bank @shm_bank for domain @d at guest
 * physical address @gbase.
 *
 * @role_str is the optional "role" DT property ("owner" or "borrower"),
 * NULL when absent; in the latter case dom_io implicitly owns the bank and
 * @d is treated as a borrower. @bank_from_heap tells whether the bank pages
 * come from the Xen heap rather than a user-supplied host address.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */
static int __init handle_shared_mem_bank(struct domain *d, paddr_t gbase,
                                         const char *role_str,
                                         bool bank_from_heap,
                                         const struct membank *shm_bank)
{
    bool owner_dom_io = true;
    paddr_t pbase, psize;
    int ret;

    pbase = shm_bank->start;
    psize = shm_bank->size;

    /*
     * "role" property is optional and if it is defined explicitly,
     * then the owner domain is not the default "dom_io" domain.
     */
    if ( role_str != NULL )
        owner_dom_io = false;

    /*
     * DOMID_IO is a fake domain and is not described in the Device-Tree.
     * Therefore when the owner of the shared region is DOMID_IO, we will
     * only find the borrowers.
     */
    /*
     * Note: role_str is only dereferenced when owner_dom_io is false, i.e.
     * when it is non-NULL, thanks to short-circuit evaluation.
     */
    if ( (owner_dom_io && !is_shm_allocated_to_domio(pbase)) ||
         (!owner_dom_io && strcmp(role_str, "owner") == 0) )
    {
        /*
         * We found the first borrower of the region, the owner was not
         * specified, so they should be assigned to dom_io.
         */
        ret = assign_shared_memory(owner_dom_io ? dom_io : d, gbase,
                                   bank_from_heap, shm_bank);
        if ( ret )
            return ret;
    }

    if ( owner_dom_io || (strcmp(role_str, "borrower") == 0) )
    {
        /*
         * Set up P2M foreign mapping for borrower domain.
         * NOTE(review): gbase/pbase look page-aligned by the checks done at
         * parse time, making PFN_UP equivalent to PFN_DOWN here - confirm.
         */
        ret = map_regions_p2mt(d, _gfn(PFN_UP(gbase)), PFN_DOWN(psize),
                               _mfn(PFN_UP(pbase)), p2m_map_foreign_rw);
        if ( ret )
            return ret;
    }

    return 0;
}
262 
save_map_heap_pages(struct domain * d,struct page_info * pg,unsigned int order,void * extra)263 static bool __init save_map_heap_pages(struct domain *d, struct page_info *pg,
264                                        unsigned int order, void *extra)
265 {
266     alloc_heap_pages_cb_extra *b_extra = (alloc_heap_pages_cb_extra *)extra;
267     int idx = shm_heap_banks.common.nr_banks;
268     int ret = -ENOSPC;
269 
270     BUG_ON(!b_extra);
271 
272     if ( idx < shm_heap_banks.common.max_banks )
273     {
274         shm_heap_banks.bank[idx].start = page_to_maddr(pg);
275         shm_heap_banks.bank[idx].size = (1ULL << (PAGE_SHIFT + order));
276         shm_heap_banks.bank[idx].shmem_extra = b_extra->bank_extra_info;
277         shm_heap_banks.common.nr_banks++;
278 
279         ret = handle_shared_mem_bank(b_extra->d, b_extra->gbase,
280                                      b_extra->role_str, true,
281                                      &shm_heap_banks.bank[idx]);
282         if ( !ret )
283         {
284             /* Increment guest physical address for next mapping */
285             b_extra->gbase += shm_heap_banks.bank[idx].size;
286             return true;
287         }
288     }
289 
290     printk("Failed to allocate static shared memory from Xen heap: (%d)\n",
291            ret);
292 
293     return false;
294 }
295 
/*
 * Process every "xen,domain-shared-memory-v1" child of @node for domain @d:
 * assign ownership of each static shared memory bank (to @d or dom_io), map
 * it into borrowers, and record it in @kinfo so the guest device tree can
 * later expose it.
 *
 * @node is the domain's configuration node; NULL means the hardware domain,
 * whose shm nodes live directly under /chosen.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */
int __init process_shm(struct domain *d, struct kernel_info *kinfo,
                       const struct dt_device_node *node)
{
    struct dt_device_node *shm_node;

    /* Hwdom case - shm node under /chosen */
    if ( !node )
    {
        node = dt_find_node_by_path("/chosen");
        BUG_ON(!node);
    }

    dt_for_each_child_node(node, shm_node)
    {
        const struct membank *boot_shm_bank;
        const struct dt_property *prop;
        const __be32 *cells;
        uint32_t addr_cells;
        paddr_t gbase, pbase, psize;
        int ret = 0;
        unsigned int i;
        const char *role_str;
        const char *shm_id;

        if ( !dt_device_is_compatible(shm_node, "xen,domain-shared-memory-v1") )
            continue;

        if ( dt_property_read_string(shm_node, "xen,shm-id", &shm_id) )
        {
            printk("%pd: invalid \"xen,shm-id\" property", d);
            return -EINVAL;
        }
        /* Non-empty id shorter than MAX_SHM_ID_LENGTH, as enforced at boot. */
        BUG_ON((strlen(shm_id) <= 0) || (strlen(shm_id) >= MAX_SHM_ID_LENGTH));

        /* The bank must have been registered by process_shm_node() earlier. */
        boot_shm_bank = find_shm_bank_by_id(bootinfo_get_shmem(), shm_id);
        if ( !boot_shm_bank )
        {
            printk("%pd: static shared memory bank not found: '%s'", d, shm_id);
            return -ENOENT;
        }

        pbase = boot_shm_bank->start;
        psize = boot_shm_bank->size;

        /* "role" property is optional */
        if ( dt_property_read_string(shm_node, "role", &role_str) != 0 )
            role_str = NULL;

        /*
         * xen,shared-mem = <[pbase,] gbase, size>;
         * pbase is optional.
         */
        addr_cells = dt_n_addr_cells(shm_node);
        prop = dt_find_property(shm_node, "xen,shared-mem", NULL);
        BUG_ON(!prop);
        cells = (const __be32 *)prop->value;

        if ( pbase != INVALID_PADDR )
        {
            /* guest phys address is after host phys address */
            gbase = dt_read_paddr(cells + addr_cells, addr_cells);

            if ( is_domain_direct_mapped(d) && (pbase != gbase) )
            {
                printk("%pd: physical address 0x%"PRIpaddr" and guest address 0x%"PRIpaddr" are not direct-mapped.\n",
                       d, pbase, gbase);
                return -EINVAL;
            }

            /* Every page of the bank must be backed by valid RAM. */
            for ( i = 0; i < PFN_DOWN(psize); i++ )
                if ( !mfn_valid(mfn_add(maddr_to_mfn(pbase), i)) )
                {
                    printk("%pd: invalid physical address 0x%"PRI_mfn"\n",
                        d, mfn_x(mfn_add(maddr_to_mfn(pbase), i)));
                    return -EINVAL;
                }

            /* The host physical address is supplied by the user */
            ret = handle_shared_mem_bank(d, gbase, role_str, false,
                                         boot_shm_bank);
            if ( ret )
                return ret;
        }
        else
        {
            /*
             * The host physical address is not supplied by the user, so it
             * means that the banks needs to be allocated from the Xen heap,
             * look into the already allocated banks from the heap.
             */
            const struct membank *alloc_bank =
                find_shm_bank_by_id(get_shmem_heap_banks(), shm_id);

            if ( is_domain_direct_mapped(d) )
            {
                printk("%pd: host and guest physical address must be supplied for direct-mapped domains\n",
                       d);
                return -EINVAL;
            }

            /* guest phys address is right at the beginning */
            gbase = dt_read_paddr(cells, addr_cells);

            if ( !alloc_bank )
            {
                alloc_heap_pages_cb_extra cb_arg = { d, role_str, gbase,
                    boot_shm_bank->shmem_extra };

                /* shm_id identified bank is not yet allocated */
                if ( !allocate_domheap_memory(NULL, psize, save_map_heap_pages,
                                              &cb_arg) )
                {
                    printk(XENLOG_ERR
                           "Failed to allocate (%"PRIpaddr"KB) pages as static shared memory from heap\n",
                           psize >> 10);
                    return -EINVAL;
                }
            }
            else
            {
                /* shm_id identified bank is already allocated */
                const struct membank *end_bank =
                        &shm_heap_banks.bank[shm_heap_banks.common.nr_banks];
                paddr_t gbase_bank = gbase;

                /*
                 * Static shared memory banks that are taken from the Xen heap
                 * are allocated sequentially in shm_heap_banks, so starting
                 * from the first bank found identified by shm_id, the code can
                 * just advance by one bank at the time until it reaches the end
                 * of the array or it finds another bank NOT identified by
                 * shm_id
                 */
                for ( ; alloc_bank < end_bank; alloc_bank++ )
                {
                    if ( strcmp(shm_id, alloc_bank->shmem_extra->shm_id) != 0 )
                        break;

                    ret = handle_shared_mem_bank(d, gbase_bank, role_str, true,
                                                 alloc_bank);
                    if ( ret )
                        return ret;

                    /* Increment guest physical address for next mapping */
                    gbase_bank += alloc_bank->size;
                }
            }
        }

        /*
         * Record static shared memory region info for later setting
         * up shm-node in guest device tree.
         */
        ret = append_shm_bank_to_domain(kinfo, gbase, psize, shm_id);
        if ( ret )
            return ret;
    }

    return 0;
}
456 
make_shm_resv_memory_node(const struct kernel_info * kinfo,int addrcells,int sizecells)457 int __init make_shm_resv_memory_node(const struct kernel_info *kinfo,
458                                      int addrcells, int sizecells)
459 {
460     const struct membanks *mem = kernel_info_get_shm_mem_const(kinfo);
461     void *fdt = kinfo->fdt;
462     unsigned int i = 0;
463     int res = 0;
464 
465     if ( mem->nr_banks == 0 )
466         return 0;
467 
468     /*
469      * For each shared memory region, a range is exposed under
470      * the /reserved-memory node as a child node. Each range sub-node is
471      * named xen-shmem@<address>.
472      */
473     dt_dprintk("Create xen-shmem node\n");
474 
475     for ( ; i < mem->nr_banks; i++ )
476     {
477         uint64_t start = mem->bank[i].start;
478         uint64_t size = mem->bank[i].size;
479         const char compat[] = "xen,shared-memory-v1";
480         /* Worst case addrcells + sizecells */
481         __be32 reg[GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS];
482         __be32 *cells;
483         unsigned int len = (addrcells + sizecells) * sizeof(__be32);
484 
485         res = domain_fdt_begin_node(fdt, "xen-shmem", mem->bank[i].start);
486         if ( res )
487             return res;
488 
489         res = fdt_property(fdt, "compatible", compat, sizeof(compat));
490         if ( res )
491             return res;
492 
493         cells = reg;
494         dt_child_set_range(&cells, addrcells, sizecells, start, size);
495 
496         res = fdt_property(fdt, "reg", reg, len);
497         if ( res )
498             return res;
499 
500         dt_dprintk("Shared memory bank %u: %#"PRIx64"->%#"PRIx64"\n",
501                    i, start, start + size);
502 
503         res = fdt_property_string(fdt, "xen,id",
504                                   mem->bank[i].shmem_extra->shm_id);
505         if ( res )
506             return res;
507 
508         /*
509          * TODO:
510          * - xen,offset: (borrower VMs only)
511          *   64 bit integer offset within the owner virtual machine's shared
512          *   memory region used for the mapping in the borrower VM
513          */
514         res = fdt_property_u64(fdt, "xen,offset", 0);
515         if ( res )
516             return res;
517 
518         res = fdt_end_node(fdt);
519         if ( res )
520             return res;
521     }
522 
523     return res;
524 }
525 
/*
 * Parse one host device tree "xen,domain-shared-memory-v1" node at boot and
 * record the bank it describes in the bootinfo shared memory list.
 *
 * Validates "xen,shm-id", the optional "role" property (must be "owner" or
 * "borrower" when present), and the "xen,shared-mem" <[paddr,] gaddr, size>
 * cells: alignment, non-zero size, overflow, and consistency against the
 * banks recorded so far. A node either matches an existing bank exactly or
 * appends a new one; when the host address is omitted, the bank start is
 * stored as INVALID_PADDR and memory is later taken from the Xen heap.
 * The bank's borrower count is bumped unless this node is the owner.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */
int __init process_shm_node(const void *fdt, int node, uint32_t address_cells,
                            uint32_t size_cells)
{
    const struct fdt_property *prop, *prop_id, *prop_role;
    const __be32 *cell;
    paddr_t paddr = INVALID_PADDR;
    paddr_t gaddr, size, end;
    struct membanks *mem = bootinfo_get_shmem();
    struct shmem_membank_extra *shmem_extra = bootinfo_get_shmem_extra();
    unsigned int i;
    int len;
    bool owner = false;
    const char *shm_id;

    if ( address_cells < 1 || size_cells < 1 )
    {
        printk("fdt: invalid #address-cells or #size-cells for static shared memory node.\n");
        return -EINVAL;
    }

    /*
     * "xen,shm-id" property holds an arbitrary string with a strict limit
     * on the number of characters, MAX_SHM_ID_LENGTH
     */
    prop_id = fdt_get_property(fdt, node, "xen,shm-id", NULL);
    if ( !prop_id )
        return -ENOENT;
    shm_id = (const char *)prop_id->data;
    if ( strnlen(shm_id, MAX_SHM_ID_LENGTH) == MAX_SHM_ID_LENGTH )
    {
        printk("fdt: invalid xen,shm-id %s, it must be limited to %u characters\n",
               shm_id, MAX_SHM_ID_LENGTH);
        return -EINVAL;
    }

    /*
     * "role" property is optional and if it is defined explicitly,
     * it must be either `owner` or `borrower`.
     */
    prop_role = fdt_get_property(fdt, node, "role", NULL);
    if ( prop_role )
    {
        if ( !strcmp(prop_role->data, "owner") )
            owner = true;
        else if ( strcmp(prop_role->data, "borrower") )
        {
            printk("fdt: invalid `role` property for static shared memory node.\n");
            return -EINVAL;
        }
    }

    /*
     * xen,shared-mem = <paddr, gaddr, size>;
     * Memory region starting from physical address #paddr of #size shall
     * be mapped to guest physical address #gaddr as static shared memory
     * region.
     */
    prop = fdt_get_property(fdt, node, "xen,shared-mem", &len);
    if ( !prop )
        return -ENOENT;

    cell = (const __be32 *)prop->data;
    if ( len != dt_cells_to_size(address_cells + size_cells + address_cells) )
    {
        /* Two-tuple form: <gaddr, size>, host address left INVALID_PADDR. */
        if ( len == dt_cells_to_size(address_cells + size_cells) )
            device_tree_get_reg(&cell, address_cells, size_cells, &gaddr,
                                &size);
        else
        {
            printk("fdt: invalid `xen,shared-mem` property.\n");
            return -EINVAL;
        }
    }
    else
    {
        /* Three-tuple form: <paddr, gaddr, size>. */
        device_tree_get_reg(&cell, address_cells, address_cells, &paddr,
                            &gaddr);
        size = dt_next_cell(size_cells, &cell);

        if ( !IS_ALIGNED(paddr, PAGE_SIZE) )
        {
            printk("fdt: physical address 0x%"PRIpaddr" is not suitably aligned.\n",
                paddr);
            return -EINVAL;
        }

        end = paddr + size;
        if ( end <= paddr )
        {
            printk("fdt: static shared memory region %s overflow\n", shm_id);
            return -EINVAL;
        }
    }

    if ( !IS_ALIGNED(gaddr, PAGE_SIZE) )
    {
        printk("fdt: guest address 0x%"PRIpaddr" is not suitably aligned.\n",
               gaddr);
        return -EINVAL;
    }

    if ( !size )
    {
        printk("fdt: the size for static shared memory region can not be zero\n");
        return -EINVAL;
    }

    if ( !IS_ALIGNED(size, PAGE_SIZE) )
    {
        printk("fdt: size 0x%"PRIpaddr" is not suitably aligned\n", size);
        return -EINVAL;
    }

    for ( i = 0; i < mem->nr_banks; i++ )
    {
        /*
         * Meet the following check:
         * - when host address is provided:
         *   1) The shm ID matches and the region exactly match
         *   2) The shm ID doesn't match and the region doesn't overlap
         *      with an existing one
         * - when host address is not provided:
         *   1) The shm ID matches and the region size exactly match
         */
        bool paddr_assigned = (INVALID_PADDR != paddr);

        if ( strncmp(shm_id, shmem_extra[i].shm_id, MAX_SHM_ID_LENGTH) == 0 )
        {
            /*
             * Regions have same shm_id (cases):
             * 1) physical host address is supplied:
             *    - OK:   paddr is equal and size is equal (same region)
             *    - Fail: paddr doesn't match or size doesn't match (there
             *            cannot exists two shmem regions with same shm_id)
             * 2) physical host address is NOT supplied:
             *    - OK:   size is equal (same region)
             *    - Fail: size is not equal (same shm_id must identify only one
             *            region, there can't be two different regions with same
             *            shm_id)
             */
            bool start_match = paddr_assigned ? (paddr == mem->bank[i].start) :
                                                true;

            if ( start_match && (size == mem->bank[i].size) )
                break;
            else
            {
                printk("fdt: different shared memory region could not share the same shm ID %s\n",
                       shm_id);
                return -EINVAL;
            }
        }

        /*
         * Regions have different shm_id (cases):
         * 1) physical host address is supplied:
         *    - OK:   paddr different, or size different (case where paddr
         *            is equal but psize is different are wrong, but they
         *            are handled later when checking for overlapping)
         *    - Fail: paddr equal and size equal (the same region can't be
         *            identified with different shm_id)
         * 2) physical host address is NOT supplied:
         *    - OK:   Both have different shm_id so even with same size they
         *            can exists
         */
        if ( !paddr_assigned || (paddr != mem->bank[i].start) ||
             (size != mem->bank[i].size) )
            continue;
        else
        {
            printk("fdt: xen,shm-id %s does not match for all the nodes using the same region\n",
                   shm_id);
            return -EINVAL;
        }
    }

    /* No matching bank was found above: record a new one. */
    if ( i == mem->nr_banks )
    {
        if (i < mem->max_banks)
        {
            if ( (paddr != INVALID_PADDR) &&
                 check_reserved_regions_overlap(paddr, size, false) )
                return -EINVAL;

            /* Static shared memory shall be reserved from any other use. */
            safe_strcpy(shmem_extra[mem->nr_banks].shm_id, shm_id);
            mem->bank[mem->nr_banks].start = paddr;
            mem->bank[mem->nr_banks].size = size;
            mem->bank[mem->nr_banks].shmem_extra = &shmem_extra[mem->nr_banks];
            mem->nr_banks++;
        }
        else
        {
            printk("Warning: Max number of supported memory regions reached.\n");
            return -ENOSPC;
        }
    }
    /*
     * keep a count of the number of borrowers, which later may be used
     * to calculate the reference count.
     */
    if ( !owner )
        shmem_extra[i].nr_shm_borrowers++;

    return 0;
}
732 
make_resv_memory_node(const struct kernel_info * kinfo,int addrcells,int sizecells)733 int __init make_resv_memory_node(const struct kernel_info *kinfo, int addrcells,
734                                  int sizecells)
735 {
736     const struct membanks *mem = kernel_info_get_shm_mem_const(kinfo);
737     void *fdt = kinfo->fdt;
738     int res = 0;
739     /* Placeholder for reserved-memory\0 */
740     const char resvbuf[16] = "reserved-memory";
741 
742     if ( mem->nr_banks == 0 )
743         /* No shared memory provided. */
744         return 0;
745 
746     dt_dprintk("Create reserved-memory node\n");
747 
748     res = fdt_begin_node(fdt, resvbuf);
749     if ( res )
750         return res;
751 
752     res = fdt_property(fdt, "ranges", NULL, 0);
753     if ( res )
754         return res;
755 
756     res = fdt_property_cell(fdt, "#address-cells", addrcells);
757     if ( res )
758         return res;
759 
760     res = fdt_property_cell(fdt, "#size-cells", sizecells);
761     if ( res )
762         return res;
763 
764     res = make_shm_resv_memory_node(kinfo, addrcells, sizecells);
765     if ( res )
766         return res;
767 
768     res = fdt_end_node(fdt);
769 
770     return res;
771 }
772 
early_print_info_shmem(void)773 void __init early_print_info_shmem(void)
774 {
775     const struct membanks *shmem = bootinfo_get_shmem();
776     unsigned int bank;
777     unsigned int printed = 0;
778 
779     for ( bank = 0; bank < shmem->nr_banks; bank++, printed++ )
780         if ( shmem->bank[bank].start != INVALID_PADDR )
781             printk(" SHMEM[%u]: %"PRIpaddr" - %"PRIpaddr"\n", printed,
782                 shmem->bank[bank].start,
783                 shmem->bank[bank].start + shmem->bank[bank].size - 1);
784 }
785 
init_sharedmem_pages(void)786 void __init init_sharedmem_pages(void)
787 {
788     const struct membanks *shmem = bootinfo_get_shmem();
789     unsigned int bank;
790 
791     for ( bank = 0 ; bank < shmem->nr_banks; bank++ )
792         if ( shmem->bank[bank].start != INVALID_PADDR )
793             init_staticmem_bank(&shmem->bank[bank]);
794 }
795 
remove_shm_from_rangeset(const struct kernel_info * kinfo,struct rangeset * rangeset)796 int __init remove_shm_from_rangeset(const struct kernel_info *kinfo,
797                                     struct rangeset *rangeset)
798 {
799     const struct membanks *shm_mem = kernel_info_get_shm_mem_const(kinfo);
800     unsigned int i;
801 
802     /* Remove static shared memory regions */
803     for ( i = 0; i < shm_mem->nr_banks; i++ )
804     {
805         paddr_t start, end;
806         int res;
807 
808         start = shm_mem->bank[i].start;
809         end = shm_mem->bank[i].start + shm_mem->bank[i].size;
810         res = rangeset_remove_range(rangeset, PFN_DOWN(start),
811                                     PFN_DOWN(end - 1));
812         if ( res )
813         {
814             printk(XENLOG_ERR
815                    "Failed to remove: %#"PRIpaddr"->%#"PRIpaddr", error: %d\n",
816                    start, end, res);
817             return -EINVAL;
818         }
819     }
820 
821     return 0;
822 }
823 
/*
 * Append the (start, size) range of every static shared memory bank of
 * @kinfo to the "reg"-style cell array @reg, advancing *nr_cells by
 * (addrcells + sizecells) per bank. Panics (BUG_ON) if the ranges would not
 * fit in DT_MEM_NODE_REG_RANGE_SIZE cells.
 */
void __init shm_mem_node_fill_reg_range(const struct kernel_info *kinfo,
                                        __be32 *reg, int *nr_cells,
                                        int addrcells, int sizecells)
{
    const struct membanks *mem = kernel_info_get_shm_mem_const(kinfo);
    unsigned int i;
    __be32 *cells;

    BUG_ON(!nr_cells || !reg);

    /* Continue writing after the cells the caller already filled in. */
    cells = &reg[*nr_cells];
    for ( i = 0; i < mem->nr_banks; i++ )
    {
        paddr_t start = mem->bank[i].start;
        paddr_t size = mem->bank[i].size;

        *nr_cells += addrcells + sizecells;
        /*
         * NOTE(review): ">=" also rejects a bank that would fill the buffer
         * exactly - presumably a deliberate safety margin; confirm against
         * the other DT_MEM_NODE_REG_RANGE_SIZE users.
         */
        BUG_ON(*nr_cells >= DT_MEM_NODE_REG_RANGE_SIZE);
        dt_child_set_range(&cells, addrcells, sizecells, start, size);
    }
}
845 
846 /*
847  * Local variables:
848  * mode: C
849  * c-file-style: "BSD"
850  * c-basic-offset: 4
851  * tab-width: 4
852  * indent-tabs-mode: nil
853  * End:
854  */
855