// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
int i40e_add_sd_table_entry(struct i40e_hw *hw,
			    struct i40e_hmc_info *hmc_info,
			    u32 sd_index,
			    enum i40e_sd_entry_type type,
			    u64 direct_mode_sz)
{
	enum i40e_memory_type mem_type __always_unused;
	struct i40e_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	int ret_code = 0;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (!hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
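	/* allocate backing memory only on first use of this SD slot;
	 * repeat calls only adjust reference counts
	 */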
	if (!sd_entry->valid) {
		if (type == I40E_SD_TYPE_PAGED) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
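		/* a paged SD needs software tracking state for its PDs:
		 * one 4K PD page holds 512 eight-byte hardware descriptors
		 */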
		if (type == I40E_SD_TYPE_PAGED) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			sd_entry->u.pd_table.pd_page_addr = mem;
		} else {
			sd_entry->u.bp.addr = mem;
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (sd_entry->entry_type == I40E_SD_TYPE_DIRECT)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code && dma_mem_alloc_done)
		i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
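
/* Usage sketch (illustrative only; sd_index is hypothetical): a direct-mode
 * caller supplies the 2M backing-page size, which is only consulted for
 * I40E_SD_TYPE_DIRECT entries:
 *
 *	ret = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
 *				      I40E_SD_TYPE_DIRECT,
 *				      I40E_HMC_DIRECT_BP_SIZE);
 */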

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 **/
int i40e_add_pd_table_entry(struct i40e_hw *hw,
			    struct i40e_hmc_info *hmc_info,
			    u32 pd_index,
			    struct i40e_dma_mem *rsrc_pg)
{
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	int ret_code = 0;
	u64 page_desc;
	u64 *pd_addr;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    I40E_SD_TYPE_PAGED)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
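	/* set the entry up only on first use; repeat calls for the same
	 * pd_index just take another backing-page reference below
	 */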
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		pd_entry->bp.addr = *page;
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		memcpy(pd_addr, &page_desc, sizeof(u64));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
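
/* Usage sketch (illustrative; pd_index is hypothetical): passing NULL for
 * rsrc_pg makes this function allocate and own the 4K page, while passing a
 * preallocated i40e_dma_mem leaves ownership with the caller:
 *
 *	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, NULL);
 */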

/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
int i40e_remove_pd_bp(struct i40e_hw *hw,
		      struct i40e_hmc_info *hmc_info,
		      u32 idx)
{
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	int ret_code = 0;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40E_SD_TYPE_PAGED) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
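	/* invalidate the hardware's cached copy of this PD via PMPDINV */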
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free the backing page unless the caller owns it (rsrc_pg) */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
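
/* Usage sketch (illustrative; indices and error handling are hypothetical):
 * object teardown typically walks every PD index the object mapped and
 * removes each backing page in turn.
 *
 *	for (i = first_pd; i <= last_pd; i++)
 *		i40e_remove_pd_bp(hw, hmc_info, i);
 */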

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
			   u32 idx)
{
	struct i40e_hmc_sd_entry *sd_entry;
	int ret_code = 0;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
int i40e_remove_sd_bp_new(struct i40e_hw *hw,
			  struct i40e_hmc_info *hmc_info,
			  u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry, clear it in hardware and free its backing page;
	 * the ref count was already dropped by i40e_prep_remove_sd_bp
	 */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}
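
/* Teardown sketch (illustrative): only clear and free the backing page once
 * i40e_prep_remove_sd_bp reports that the last reference is gone.
 *
 *	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
 *		ret = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
 */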

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
			     u32 idx)
{
	struct i40e_hmc_sd_entry *sd_entry;
	int ret_code = 0;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from a sd entry
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
int i40e_remove_pd_page_new(struct i40e_hw *hw,
			    struct i40e_hmc_info *hmc_info,
			    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
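
/* Teardown sketch (illustrative): mirrors the SD backing-page case above;
 * the PD page is only cleared and freed once its table holds no valid PDs.
 *
 *	if (!i40e_prep_remove_pd_page(hmc_info, idx))
 *		ret = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
 */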