1 /*
2 * Copyright (C) 2009, Mukesh Rathor, Oracle Corp. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
15 */
16
17 #include <xen/sched.h>
18 #include <xen/compile.h>
19 #include <xen/mm.h>
20 #include <xen/domain_page.h>
21 #include <xen/guest_access.h>
22 #include <asm/debugger.h>
23 #include <asm/p2m.h>
24
/*
 * This file is for general routines common to more than one debugger, like
 * kdb, gdbsx, etc..
 */
29
30 #ifdef XEN_KDB_CONFIG
31 #include "../kdb/include/kdbdefs.h"
32 #include "../kdb/include/kdbproto.h"
33 #define DBGP(...) {(kdbdbg) ? kdbp(__VA_ARGS__):0;}
34 #define DBGP1(...) {(kdbdbg>1) ? kdbp(__VA_ARGS__):0;}
35 #define DBGP2(...) {(kdbdbg>2) ? kdbp(__VA_ARGS__):0;}
36 #else
37 #define DBGP1(...) ((void)0)
38 #define DBGP2(...) ((void)0)
39 #endif
40
41 typedef unsigned long dbgva_t;
42 typedef unsigned char dbgbyte_t;
43
44 /* Returns: mfn for the given (hvm guest) vaddr */
45 static mfn_t
dbg_hvm_va2mfn(dbgva_t vaddr,struct domain * dp,int toaddr,gfn_t * gfn)46 dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
47 {
48 mfn_t mfn;
49 uint32_t pfec = PFEC_page_present;
50 p2m_type_t gfntype;
51
52 DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);
53
54 *gfn = _gfn(paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec));
55 if ( gfn_eq(*gfn, INVALID_GFN) )
56 {
57 DBGP2("kdb:bad gfn from gva_to_gfn\n");
58 return INVALID_MFN;
59 }
60
61 mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);
62 if ( p2m_is_readonly(gfntype) && toaddr )
63 {
64 DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
65 mfn = INVALID_MFN;
66 }
67 else
68 DBGP2("X: vaddr:%lx domid:%d mfn:%#"PRI_mfn"\n",
69 vaddr, dp->domain_id, mfn_x(mfn));
70
71 if ( mfn_eq(mfn, INVALID_MFN) )
72 {
73 put_gfn(dp, gfn_x(*gfn));
74 *gfn = INVALID_GFN;
75 }
76
77 return mfn;
78 }
79
/*
 * pgd3val: this is the value of init_mm.pgd[3] in a PV guest.  It is
 *          optional, and is there to assist debugging of modules in the
 *          guest.  The kernel address space seems to always be mapped, but
 *          modules are not necessarily mapped in any arbitrary guest cr3
 *          that we pick if pgd3val is 0.  Modules should always be
 *          addressable if we use cr3 from init_mm.  Since pgd3val is
 *          already a pgd value, cr3->pgd[3], we just need to do 2 level
 *          lookups.
 *
 * NOTE: 4 level paging works for 32 PAE guests also because the cpu runs in
 *       IA32-e mode.
 * Returns: mfn for the given (pv guest) vaddr
 */
93 static mfn_t
dbg_pv_va2mfn(dbgva_t vaddr,struct domain * dp,uint64_t pgd3val)94 dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
95 {
96 l4_pgentry_t l4e, *l4t;
97 l3_pgentry_t l3e, *l3t;
98 l2_pgentry_t l2e, *l2t;
99 l1_pgentry_t l1e, *l1t;
100 unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
101 mfn_t mfn = maddr_to_mfn(cr3);
102
103 DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
104 cr3, pgd3val);
105
106 if ( pgd3val == 0 )
107 {
108 l4t = map_domain_page(mfn);
109 l4e = l4t[l4_table_offset(vaddr)];
110 unmap_domain_page(l4t);
111 mfn = l4e_get_mfn(l4e);
112 DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#"PRI_mfn"\n", l4t,
113 l4_table_offset(vaddr), l4e, mfn_x(mfn));
114 if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
115 {
116 DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
117 return INVALID_MFN;
118 }
119
120 l3t = map_domain_page(mfn);
121 l3e = l3t[l3_table_offset(vaddr)];
122 unmap_domain_page(l3t);
123 mfn = l3e_get_mfn(l3e);
124 DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#"PRI_mfn"\n", l3t,
125 l3_table_offset(vaddr), l3e, mfn_x(mfn));
126 if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
127 (l3e_get_flags(l3e) & _PAGE_PSE) )
128 {
129 DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
130 return INVALID_MFN;
131 }
132 }
133
134 l2t = map_domain_page(mfn);
135 l2e = l2t[l2_table_offset(vaddr)];
136 unmap_domain_page(l2t);
137 mfn = l2e_get_mfn(l2e);
138 DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#"PRI_mfn"\n",
139 l2t, l2_table_offset(vaddr), l2e, mfn_x(mfn));
140 if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
141 (l2e_get_flags(l2e) & _PAGE_PSE) )
142 {
143 DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
144 return INVALID_MFN;
145 }
146 l1t = map_domain_page(mfn);
147 l1e = l1t[l1_table_offset(vaddr)];
148 unmap_domain_page(l1t);
149 mfn = l1e_get_mfn(l1e);
150 DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr),
151 l1e, mfn_x(mfn));
152
153 return mfn_valid(mfn) ? mfn : INVALID_MFN;
154 }
155
156 /* Returns: number of bytes remaining to be copied */
dbg_rw_guest_mem(struct domain * dp,void * __user gaddr,void * __user buf,unsigned int len,bool toaddr,uint64_t pgd3)157 static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
158 void * __user buf, unsigned int len,
159 bool toaddr, uint64_t pgd3)
160 {
161 while ( len > 0 )
162 {
163 char *va;
164 unsigned long addr = (unsigned long)gaddr;
165 mfn_t mfn;
166 gfn_t gfn = INVALID_GFN;
167 unsigned long pagecnt;
168
169 pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
170
171 mfn = (is_hvm_domain(dp)
172 ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
173 : dbg_pv_va2mfn(addr, dp, pgd3));
174 if ( mfn_eq(mfn, INVALID_MFN) )
175 break;
176
177 va = map_domain_page(mfn);
178 va = va + (addr & (PAGE_SIZE-1));
179
180 if ( toaddr )
181 {
182 copy_from_user(va, buf, pagecnt); /* va = buf */
183 paging_mark_dirty(dp, mfn);
184 }
185 else
186 {
187 copy_to_user(buf, va, pagecnt); /* buf = va */
188 }
189
190 unmap_domain_page(va);
191 if ( !gfn_eq(gfn, INVALID_GFN) )
192 put_gfn(dp, gfn_x(gfn));
193
194 addr += pagecnt;
195 buf += pagecnt;
196 len -= pagecnt;
197 }
198
199 return len;
200 }
201
202 /*
203 * addr is hypervisor addr if domid == DOMID_IDLE, else it's guest addr
204 * buf is debugger buffer.
205 * if toaddr, then addr = buf (write to addr), else buf = addr (rd from guest)
206 * pgd3: value of init_mm.pgd[3] in guest. see above.
207 * Returns: number of bytes remaining to be copied.
208 */
dbg_rw_mem(void * __user addr,void * __user buf,unsigned int len,domid_t domid,bool toaddr,uint64_t pgd3)209 unsigned int dbg_rw_mem(void * __user addr, void * __user buf,
210 unsigned int len, domid_t domid, bool toaddr,
211 uint64_t pgd3)
212 {
213 DBGP2("gmem:addr:%lx buf:%p len:$%u domid:%d toaddr:%x\n",
214 addr, buf, len, domid, toaddr);
215
216 if ( domid == DOMID_IDLE )
217 {
218 if ( toaddr )
219 len = __copy_to_user(addr, buf, len);
220 else
221 len = __copy_from_user(buf, addr, len);
222 }
223 else
224 {
225 struct domain *d = get_domain_by_id(domid);
226
227 if ( d )
228 {
229 if ( !d->is_dying )
230 len = dbg_rw_guest_mem(d, addr, buf, len, toaddr, pgd3);
231 put_domain(d);
232 }
233 }
234
235 DBGP2("gmem:exit:len:$%d\n", len);
236 return len;
237 }
238
239 /*
240 * Local variables:
241 * mode: C
242 * c-file-style: "BSD"
243 * c-basic-offset: 4
244 * indent-tabs-mode: nil
245 * End:
246 */
247