/******************************************************************************
 * Original code extracted from arch/x86/x86_64/mm.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/bitops.h>

/* Parameters for PFN/MADDR compression. */
unsigned long __read_mostly max_pdx;
unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
unsigned long __read_mostly pfn_top_mask = 0;
unsigned long __read_mostly ma_top_mask = 0;
unsigned long __read_mostly pfn_hole_mask = 0;
unsigned int __read_mostly pfn_pdx_hole_shift = 0;

unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };

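/*
 * An MFN is valid if it lies below max_page, does not fall into the
 * compressed-away PFN hole, and its PDX group has been marked present
 * via set_pdx_range().
 */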
bool __mfn_valid(unsigned long mfn)
{
    return likely(mfn < max_page) &&
           likely(!(mfn & pfn_hole_mask)) &&
           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
                           pdx_group_valid));
}

/* Sets all bits from the most-significant 1-bit down to the LSB */
static u64 __init fill_mask(u64 mask)
{
    while (mask & (mask + 1))
        mask |= mask + 1;
    return mask;
}

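/*
 * Initial mask for PFN compression: all bits from the most significant
 * set bit of (base_addr - 1) down to bit zero.  Bits set in this mask
 * are never compressed away.
 */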
u64 __init pdx_init_mask(u64 base_addr)
{
    return fill_mask(base_addr - 1);
}

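/*
 * Mask of the address bits that vary inside the region [base, base + len),
 * extended down to bit zero; these bits cannot be compressed away.
 */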
u64 __init pdx_region_mask(u64 base, u64 len)
{
    return fill_mask(base ^ (base + len - 1));
}

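/*
 * Mark all PDX groups covering the MFN range [smfn, emfn) as valid, so
 * that __mfn_valid() accepts MFNs within it.
 */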
void set_pdx_range(unsigned long smfn, unsigned long emfn)
{
    unsigned long idx, eidx;

    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;

    for ( ; idx < eidx; ++idx )
        __set_bit(idx, pdx_group_valid);
}

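/*
 * Locate the widest run of zero bits in the accumulated address mask
 * (above the low-order bits reserved below) and derive the PFN<->PDX
 * compression masks and shift from it.  If no such run exists, the
 * identity setup of the masks above is left untouched.
 */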
void __init pfn_pdx_hole_setup(unsigned long mask)
{
    unsigned int i, j, bottom_shift = 0, hole_shift = 0;

    /*
     * We skip the first MAX_ORDER bits, as we never want to compress them.
     * This guarantees that page-pointer arithmetic remains valid within
     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
     * buddy allocator relies on this assumption.
     */
    for ( j = MAX_ORDER-1; ; )
    {
        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
        j = find_next_bit(&mask, BITS_PER_LONG, i);
        if ( j >= BITS_PER_LONG )
            break;
        if ( j - i > hole_shift )
        {
            hole_shift = j - i;
            bottom_shift = i;
        }
    }
    if ( !hole_shift )
        return;

    printk(KERN_INFO "PFN compression on bits %u...%u\n",
           bottom_shift, bottom_shift + hole_shift - 1);

    pfn_pdx_hole_shift  = hole_shift;
    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */