// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/mman.h>
#include <linux/pgtable.h>

#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/ioasid.h>
#include <asm/mmu.h>

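/*
 * Architectures can supply extra initializers for init_mm by defining
 * INIT_MM_CONTEXT (typically via <asm/mmu.h>, included above); fall back
 * to a no-op when they do not.
 */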
#ifndef INIT_MM_CONTEXT
#define INIT_MM_CONTEXT(name)
#endif

/*
 * For dynamically allocated mm_structs, there is a dynamically sized cpumask
 * at the end of the structure, the size of which depends on the maximum CPU
 * number the system can see. That way we allocate only as much memory for
 * mm_cpumask() as needed for the hundreds or thousands of processes that
 * a system typically runs.
 *
 * Since there is only one init_mm in the entire system, keep it simple
 * and size this cpu_bitmap to NR_CPUS.
 */
struct mm_struct init_mm = {
	.mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
	.pgd = swapper_pg_dir,
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(init_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
	.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
	.user_ns = &init_user_ns,
	.cpu_bitmap = CPU_BITS_NONE,
#ifdef CONFIG_IOMMU_SVA
	.pasid = INVALID_IOASID,
#endif
	INIT_MM_CONTEXT(init_mm)
};
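/*
 * Illustrative sketch (not part of this file) of how the trailing
 * cpu_bitmap is consumed: mm_cpumask() in <linux/mm_types.h> hands the
 * embedded bitmap back as a struct cpumask, roughly:
 *
 *	static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 *	{
 *		unsigned long cpu_bitmap = (unsigned long)mm;
 *
 *		cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
 *		return (struct cpumask *)cpu_bitmap;
 *	}
 */

/**
 * setup_initial_init_mm - record the kernel image layout in init_mm
 * @start_code: start of kernel text
 * @end_code: end of kernel text
 * @end_data: end of initialized kernel data
 * @brk: end of the kernel image
 *
 * Typically called once from architecture setup code during early boot.
 */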
void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk)
{
	init_mm.start_code = (unsigned long)start_code;
	init_mm.end_code = (unsigned long)end_code;
	init_mm.end_data = (unsigned long)end_data;
	init_mm.brk = (unsigned long)brk;
}

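/*
 * Example caller (a sketch; the exact linker symbols vary by
 * architecture):
 *
 *	setup_initial_init_mm(_stext, _etext, _edata, _end);
 */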