1 #include <xen/init.h>
2 #include <xen/lib.h>
3 #include <xen/mm.h>
4 #include <xen/compat.h>
5 #include <xen/dmi.h>
6 #include <xen/pfn.h>
7 #include <asm/e820.h>
8 #include <asm/page.h>
9 #include <asm/processor.h>
10 #include <asm/mtrr.h>
11 #include <asm/msr.h>
12 #include <asm/guest.h>
13
/*
 * opt_mem: Limit maximum address of physical RAM.
 * Any RAM beyond this address limit is ignored.
 */
static unsigned long long __initdata opt_mem;
size_param("mem", opt_mem);

/*
 * opt_availmem: Limit maximum usable amount of physical RAM.
 * Any RAM beyond this limited amount is ignored.
 */
static unsigned long long __initdata opt_availmem;
size_param("availmem", opt_availmem);

/* opt_nomtrr_check: Don't clip ram to highest cacheable MTRR. */
/* -1 = "auto": decided by CPU vendor check in mtrr_top_of_ram(). */
static s8 __initdata e820_mtrr_clip = -1;
boolean_param("e820-mtrr-clip", e820_mtrr_clip);

/* opt_e820_verbose: Be verbose about clipping, the original e820, &c */
static bool __initdata e820_verbose;
boolean_param("e820-verbose", e820_verbose);

/* Final, sanitized machine memory map used by the rest of Xen. */
struct e820map e820;
/* Boot-time copy of the raw firmware map (init-time only). */
struct e820map __initdata e820_raw;
38
39 /*
40 * This function checks if the entire range <start,end> is mapped with type.
41 *
42 * Note: this function only works correct if the e820 table is sorted and
43 * not-overlapping, which is the case
44 */
e820_all_mapped(u64 start,u64 end,unsigned type)45 int __init e820_all_mapped(u64 start, u64 end, unsigned type)
46 {
47 int i;
48
49 for (i = 0; i < e820.nr_map; i++) {
50 struct e820entry *ei = &e820.map[i];
51
52 if (type && ei->type != type)
53 continue;
54 /* is the region (part) in overlap with the current region ?*/
55 if (ei->addr >= end || ei->addr + ei->size <= start)
56 continue;
57
58 /* if the region is at the beginning of <start,end> we move
59 * start to the end of the region since it's ok until there
60 */
61 if (ei->addr <= start)
62 start = ei->addr + ei->size;
63 /*
64 * if start is now at or beyond end, we're done, full
65 * coverage
66 */
67 if (start >= end)
68 return 1;
69 }
70 return 0;
71 }
72
add_memory_region(unsigned long long start,unsigned long long size,int type)73 static void __init add_memory_region(unsigned long long start,
74 unsigned long long size, int type)
75 {
76 int x;
77
78 x = e820.nr_map;
79
80 if (x == ARRAY_SIZE(e820.map)) {
81 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
82 return;
83 }
84
85 e820.map[x].addr = start;
86 e820.map[x].size = size;
87 e820.map[x].type = type;
88 e820.nr_map++;
89 }
90
print_e820_memory_map(struct e820entry * map,unsigned int entries)91 static void __init print_e820_memory_map(struct e820entry *map, unsigned int entries)
92 {
93 unsigned int i;
94
95 for (i = 0; i < entries; i++) {
96 printk(" %016Lx - %016Lx ",
97 (unsigned long long)(map[i].addr),
98 (unsigned long long)(map[i].addr + map[i].size));
99 switch (map[i].type) {
100 case E820_RAM:
101 printk("(usable)\n");
102 break;
103 case E820_RESERVED:
104 printk("(reserved)\n");
105 break;
106 case E820_ACPI:
107 printk("(ACPI data)\n");
108 break;
109 case E820_NVS:
110 printk("(ACPI NVS)\n");
111 break;
112 case E820_UNUSABLE:
113 printk("(unusable)\n");
114 break;
115 default:
116 printk("type %u\n", map[i].type);
117 break;
118 }
119 }
120 }
121
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
 */
struct change_member {
	struct e820entry *pbios; /* pointer to original bios entry */
	unsigned long long addr; /* address for this change point */
};
/*
 * Scratch space for sanitize_e820_map().  Static (not stack) because of
 * size; __initdata since sanitizing only happens at boot.  Each region
 * contributes two change points (start and end), hence 2*E820MAX.
 */
static struct change_member change_point_list[2*E820MAX] __initdata;
static struct change_member *change_point[2*E820MAX] __initdata;
static struct e820entry *overlap_list[E820MAX] __initdata;
static struct e820entry new_bios[E820MAX] __initdata;
137
/*
 * Sort and de-overlap the @biosmap table of *@pnr_map entries in place,
 * updating *@pnr_map with the new entry count.
 *
 * Returns 0 on success, -1 if the map has fewer than two entries or
 * contains a region whose addr+size wraps past 2^64 (the map is then
 * left untouched).
 *
 * Method: each non-empty region contributes two "change points" (its
 * start and end address).  These are sorted by address, then swept low
 * to high while maintaining the set of regions overlapping the current
 * point; the effective type at any point is the numerically largest
 * type in that set.
 */
int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	    Visually we're performing the following (1,2,3,4 = memory types)...

	    Sample memory map (w/overlaps):
	       ____22__________________
	       ______________________4_
	       ____1111________________
	       _44_____________________
	       11111111________________
	       ____________________33__
	       ___________44___________
	       __________33333_________
	       ______________22________
	       ___________________2222_
	       _________111111111______
	       _____________________11_
	       _________________4______

	    Sanitized equivalent (no overlap):
	       1_______________________
	       _44_____________________
	       ___1____________________
	       ____22__________________
	       ______11________________
	       _________1______________
	       __________3_____________
	       ___________44___________
	       _____________33_________
	       _______________2________
	       ________________1_______
	       _________________4______
	       ___________________2____
	       ____________________33__
	       ______________________4_
	*/

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i=0; i<old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i=0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i=0; i < old_nr; i++)	{
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;    	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	/* Bubble sort: fine here, the table is at most 2*E820MAX entries. */
	still_changing = 1;
	while (still_changing)	{
		still_changing = 0;
		for (i=1; i < chg_nr; i++)  {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
			   )
			{
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing=1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries=0;	 /* number of entries in the overlap table */
	new_bios_entry=0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */
	/* loop through change-points, determining affect on the new bios map */
	for (chgidx=0; chgidx < chg_nr; chgidx++)
	{
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
		{
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
		}
		else
		{
			/* remove entry from list (order independent, so swap with last) */
			for (i=0; i<overlap_entries; i++)
			{
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i=0; i<overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type)	{
			if (last_type != 0)	 {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= ARRAY_SIZE(new_bios))
						break; 	/* no more space left for new bios entries */
			}
			if (current_type != 0)	{
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr=change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;   /* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
290
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 *
 * Appends the @nr_map entries of @biosmap to the global 'e820' via
 * add_memory_region(), trimming RAM claims in the 640k-1M hole.
 * Returns 0 on success, -1 if the map is too small or malformed.
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long long start = biosmap->addr;
		unsigned long long size = biosmap->size;
		unsigned long long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				/* Keep only the portion below 640k... */
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				/* ...and, if the region extends past 1M, the rest. */
				if (end <= 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}
		add_memory_region(start, size, type);
	} while (biosmap++,--nr_map);
	return 0;
}
341
342
343 /*
344 * Find the highest page frame number we have available
345 */
find_max_pfn(void)346 static unsigned long __init find_max_pfn(void)
347 {
348 int i;
349 unsigned long max_pfn = 0;
350
351 for (i = 0; i < e820.nr_map; i++) {
352 unsigned long start, end;
353 /* RAM? */
354 if (e820.map[i].type != E820_RAM)
355 continue;
356 start = PFN_UP(e820.map[i].addr);
357 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
358 if (start >= end)
359 continue;
360 if (end > max_pfn)
361 max_pfn = end;
362 }
363
364 return max_pfn;
365 }
366
clip_to_limit(uint64_t limit,char * warnmsg)367 static void __init clip_to_limit(uint64_t limit, char *warnmsg)
368 {
369 int i;
370 char _warnmsg[160];
371 uint64_t old_limit = 0;
372
373 for ( ; ; )
374 {
375 /* Find a RAM region needing clipping. */
376 for ( i = 0; i < e820.nr_map; i++ )
377 if ( (e820.map[i].type == E820_RAM) &&
378 ((e820.map[i].addr + e820.map[i].size) > limit) )
379 break;
380
381 /* If none found, we are done. */
382 if ( i == e820.nr_map )
383 break;
384
385 old_limit = max_t(
386 uint64_t, old_limit, e820.map[i].addr + e820.map[i].size);
387
388 /* We try to convert clipped RAM areas to E820_UNUSABLE. */
389 if ( e820_change_range_type(&e820, max(e820.map[i].addr, limit),
390 e820.map[i].addr + e820.map[i].size,
391 E820_RAM, E820_UNUSABLE) )
392 continue;
393
394 /*
395 * If the type change fails (e.g., not space in table) then we clip or
396 * delete the region as appropriate.
397 */
398 if ( e820.map[i].addr < limit )
399 {
400 e820.map[i].size = limit - e820.map[i].addr;
401 }
402 else
403 {
404 memmove(&e820.map[i], &e820.map[i+1],
405 (e820.nr_map - i - 1) * sizeof(struct e820entry));
406 e820.nr_map--;
407 }
408 }
409
410 if ( old_limit )
411 {
412 if ( warnmsg )
413 {
414 snprintf(_warnmsg, sizeof(_warnmsg), warnmsg, (long)(limit>>30));
415 printk("WARNING: %s\n", _warnmsg);
416 }
417 printk("Truncating RAM from %lukB to %lukB\n",
418 (unsigned long)(old_limit >> 10), (unsigned long)(limit >> 10));
419 }
420 }
421
/*
 * Conservative estimate of top-of-RAM by looking for MTRR WB regions.
 *
 * Returns the end address (exclusive) of the highest write-back MTRR
 * range, or 0 when MTRR-based clipping is disabled or unavailable
 * (non-Intel system with auto mode, no MTRR feature, MTRRs disabled,
 * or default memory type already write-back).
 */
static uint64_t __init mtrr_top_of_ram(void)
{
    uint32_t eax, ebx, ecx, edx;
    uint64_t mtrr_cap, mtrr_def, addr_mask, base, mask, top;
    unsigned int i, phys_bits = 36;  /* fallback physical address width */

    /* By default we check only Intel systems. */
    if ( e820_mtrr_clip == -1 )
    {
        char vendor[13];
        /* CPUID leaf 0: vendor string is returned in EBX:EDX:ECX order. */
        cpuid(0x00000000, &eax,
              (uint32_t *)&vendor[0],
              (uint32_t *)&vendor[8],
              (uint32_t *)&vendor[4]);
        vendor[12] = '\0';
        e820_mtrr_clip = !strcmp(vendor, "GenuineIntel");
    }

    if ( !e820_mtrr_clip )
        return 0;

    if ( e820_verbose )
        printk("Checking MTRR ranges...\n");

    /* Does the CPU support architectural MTRRs? */
    cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
    if ( !test_bit(X86_FEATURE_MTRR & 31, &edx) )
        return 0;

    /* Find the physical address size for this CPU. */
    eax = cpuid_eax(0x80000000);
    if ( (eax >> 16) == 0x8000 && eax >= 0x80000008 )
    {
        phys_bits = (uint8_t)cpuid_eax(0x80000008);
        if ( phys_bits > PADDR_BITS )
            phys_bits = PADDR_BITS;
    }
    /* Mask covering the page-aligned physical address bits. */
    addr_mask = ((1ull << phys_bits) - 1) & ~((1ull << 12) - 1);

    rdmsrl(MSR_MTRRcap, mtrr_cap);
    rdmsrl(MSR_MTRRdefType, mtrr_def);

    if ( e820_verbose )
        printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def);

    /* MTRRs enabled, and default memory type is not writeback? */
    /* (bit 11 of MTRRdefType is the MTRR enable bit; low byte the type.) */
    if ( !test_bit(11, &mtrr_def) || ((uint8_t)mtrr_def == MTRR_TYPE_WRBACK) )
        return 0;

    /*
     * Find end of highest WB-type range. This is a conservative estimate
     * of the highest WB address since overlapping UC/WT ranges dominate.
     */
    top = 0;
    /* Low byte of MTRRcap holds the variable-range MTRR count. */
    for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
    {
        rdmsrl(MSR_IA32_MTRR_PHYSBASE(i), base);
        rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);

        if ( e820_verbose )
            printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n",
                   i, base, mask);

        /* Skip invalid ranges (mask bit 11) and non-WB types. */
        if ( !test_bit(11, &mask) || ((uint8_t)base != MTRR_TYPE_WRBACK) )
            continue;
        base &= addr_mask;
        mask &= addr_mask;
        /* End of range: set all address bits not fixed by the mask. */
        top = max_t(uint64_t, top, ((base | ~mask) & addr_mask) + PAGE_SIZE);
    }

    return top;
}
495
reserve_dmi_region(void)496 static void __init reserve_dmi_region(void)
497 {
498 for ( ; ; )
499 {
500 paddr_t base;
501 u32 len;
502 const char *what = dmi_get_table(&base, &len);
503
504 if ( !what )
505 break;
506 if ( ((base + len) > base) &&
507 reserve_e820_ram(&e820, base, base + len) )
508 printk("WARNING: %s table located in E820 RAM %"PRIpaddr"-%"PRIpaddr". Fixed.\n",
509 what, base, base + len);
510 }
511 }
512
/*
 * Build the global 'e820' map from the raw firmware map @raw:
 * sanitize and copy it, apply the "mem="/"availmem=" command line
 * clamps, clip to what the MPT virtual ranges can cover, reserve the
 * DMI tables, and optionally clip to the MTRR-derived top of RAM.
 */
static void __init machine_specific_memory_setup(struct e820map *raw)
{
    unsigned long mpt_limit, ro_mpt_limit;
    uint64_t top_of_ram, size;
    int i;

    sanitize_e820_map(raw->map, &raw->nr_map);
    copy_e820_map(raw->map, raw->nr_map);

    /* "mem=" clips by absolute address. */
    if ( opt_mem )
        clip_to_limit(opt_mem, NULL);

    /* "availmem=" clips by cumulative amount of RAM. */
    if ( opt_availmem )
    {
        /* Accumulate RAM sizes until the budget is exceeded... */
        for ( i = size = 0; (i < e820.nr_map) && (size <= opt_availmem); i++ )
            if ( e820.map[i].type == E820_RAM )
                size += e820.map[i].size;
        /* ...then clip within the entry that crossed the budget. */
        if ( size > opt_availmem )
            clip_to_limit(
                e820.map[i-1].addr + e820.map[i-1].size - (size-opt_availmem),
                NULL);
    }

    /*
     * Limit derived from the RDWR/RO MPT virtual ranges: one
     * 'unsigned long' entry per page; the smaller of the two applies.
     */
    mpt_limit = ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START)
                 / sizeof(unsigned long)) << PAGE_SHIFT;
    ro_mpt_limit = ((RO_MPT_VIRT_END - RO_MPT_VIRT_START)
                    / sizeof(unsigned long)) << PAGE_SHIFT;
    if ( mpt_limit > ro_mpt_limit )
        mpt_limit = ro_mpt_limit;
    clip_to_limit(mpt_limit,
                  "Only the first %lu GB of the physical "
                  "memory map can be accessed by Xen.");

    reserve_dmi_region();

    top_of_ram = mtrr_top_of_ram();
    if ( top_of_ram )
        clip_to_limit(top_of_ram, "MTRRs do not cover all of memory.");
}
552
/* This function relies on the passed in e820->map[] being sorted. */
/*
 * Insert the range [s, e) of @type into @e820, merging with an
 * adjacent entry of the same type where possible.
 *
 * Returns 1 on success, 0 if the range partially overlaps an existing
 * entry or the map is already full.
 */
int __init e820_add_range(
    struct e820map *e820, uint64_t s, uint64_t e, uint32_t type)
{
    unsigned int i;

    for ( i = 0; i < e820->nr_map; ++i )
    {
        uint64_t rs = e820->map[i].addr;
        uint64_t re = rs + e820->map[i].size;

        /* New range ends exactly where this entry starts: grow it down. */
        if ( rs == e && e820->map[i].type == type )
        {
            e820->map[i].addr = s;
            return 1;
        }

        /*
         * New range starts exactly where this entry ends, and does not
         * run into the following entry: grow this entry upwards.
         */
        if ( re == s && e820->map[i].type == type &&
             (i + 1 == e820->nr_map || e820->map[i + 1].addr >= e) )
        {
            e820->map[i].size += e - s;
            return 1;
        }

        /* Map is sorted: all later entries lie above e, insert at i. */
        if ( rs >= e )
            break;

        /* Partial overlap with an existing entry: refuse. */
        if ( re > s )
            return 0;
    }

    if ( e820->nr_map >= ARRAY_SIZE(e820->map) )
    {
        printk(XENLOG_WARNING "E820: overflow while adding region"
               " %"PRIx64"-%"PRIx64"\n", s, e);
        return 0;
    }

    /* Make room at index i and fill in the new entry. */
    memmove(e820->map + i + 1, e820->map + i,
            (e820->nr_map - i) * sizeof(*e820->map));

    e820->nr_map++;
    e820->map[i].addr = s;
    e820->map[i].size = e - s;
    e820->map[i].type = type;

    return 1;
}
601
/*
 * Change the type of the range [s, e) from @orig_type to @new_type in
 * @e820.  The range must lie entirely within a single existing entry
 * of type @orig_type.  Entries are split as needed (growing the map by
 * up to two entries), and adjacent same-type entries are merged again
 * afterwards.
 *
 * Returns 1 on success, 0 if no suitable entry contains the range or
 * the map would overflow.
 */
int __init e820_change_range_type(
    struct e820map *e820, uint64_t s, uint64_t e,
    uint32_t orig_type, uint32_t new_type)
{
    uint64_t rs = 0, re = 0;
    int i;

    for ( i = 0; i < e820->nr_map; i++ )
    {
        /* Have we found the e820 region that includes the specified range? */
        rs = e820->map[i].addr;
        re = rs + e820->map[i].size;
        if ( (s >= rs) && (e <= re) )
            break;
    }

    if ( (i == e820->nr_map) || (e820->map[i].type != orig_type) )
        return 0;

    if ( (s == rs) && (e == re) )
    {
        /* Exact match: just flip the type in place. */
        e820->map[i].type = new_type;
    }
    else if ( (s == rs) || (e == re) )
    {
        /* Range touches one end of the entry: split into two entries. */
        if ( (e820->nr_map + 1) > ARRAY_SIZE(e820->map) )
            goto overflow;

        memmove(&e820->map[i+1], &e820->map[i],
                (e820->nr_map-i) * sizeof(e820->map[0]));
        e820->nr_map++;

        if ( s == rs )
        {
            /* New-type piece first, remainder of the old entry after. */
            e820->map[i].size = e - s;
            e820->map[i].type = new_type;
            e820->map[i+1].addr = e;
            e820->map[i+1].size = re - e;
        }
        else
        {
            /* Old-type head first, new-type piece at the end. */
            e820->map[i].size = s - rs;
            e820->map[i+1].addr = s;
            e820->map[i+1].size = e - s;
            e820->map[i+1].type = new_type;
        }
    }
    else
    {
        /* Range is strictly inside the entry: split into three. */
        if ( (e820->nr_map + 2) > ARRAY_SIZE(e820->map) )
            goto overflow;

        memmove(&e820->map[i+2], &e820->map[i],
                (e820->nr_map-i) * sizeof(e820->map[0]));
        e820->nr_map += 2;

        e820->map[i].size = s - rs;
        e820->map[i+1].addr = s;
        e820->map[i+1].size = e - s;
        e820->map[i+1].type = new_type;
        e820->map[i+2].addr = e;
        e820->map[i+2].size = re - e;
    }

    /* Finally, look for any opportunities to merge adjacent e820 entries. */
    for ( i = 0; i < (e820->nr_map - 1); i++ )
    {
        if ( (e820->map[i].type != e820->map[i+1].type) ||
             ((e820->map[i].addr + e820->map[i].size) != e820->map[i+1].addr) )
            continue;
        e820->map[i].size += e820->map[i+1].size;
        memmove(&e820->map[i+1], &e820->map[i+2],
                (e820->nr_map-i-2) * sizeof(e820->map[0]));
        e820->nr_map--;
        i--;  /* re-check the merged entry against its new neighbour */
    }

    return 1;

 overflow:
    printk("Overflow in e820 while reserving region %"PRIx64"-%"PRIx64"\n",
           s, e);
    return 0;
}
686
/* Set E820_RAM area (@s,@e) as RESERVED in specified e820 map. */
/* Returns 1 on success, 0 on failure (see e820_change_range_type()). */
int __init reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e)
{
    return e820_change_range_type(e820, s, e, E820_RAM, E820_RESERVED);
}
692
/*
 * Build the final machine memory map from the firmware-provided @raw
 * map and return the highest RAM page frame number.  @str names the
 * map in log output; both the raw and final maps are printed when
 * "e820-verbose" is given, otherwise only the final map.
 */
unsigned long __init init_e820(const char *str, struct e820map *raw)
{
    if ( e820_verbose )
    {
        printk("Initial %s RAM map:\n", str);
        print_e820_memory_map(raw->map, raw->nr_map);
    }

    machine_specific_memory_setup(raw);

    /* Running as a guest: apply hypervisor-specific map fixups. */
    if ( xen_guest )
        hypervisor_fixup_e820(&e820);

    printk("%s RAM map:\n", str);
    print_e820_memory_map(e820.map, e820.nr_map);

    return find_max_pfn();
}
711