// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * NETC NTMP (NETC Table Management Protocol) 2.0 Library
 * Copyright 2025 NXP
 */

#include <linux/dma-mapping.h>
#include <linux/fsl/netc_global.h>
#include <linux/iopoll.h>

#include "ntmp_private.h"

#define NETC_CBDR_TIMEOUT		1000 /* us */
#define NETC_CBDR_DELAY_US		10
#define NETC_CBDR_MR_EN			BIT(31)

#define NTMP_BASE_ADDR_ALIGN		128
#define NTMP_DATA_ADDR_ALIGN		32

/* Define NTMP Table ID */
#define NTMP_MAFT_ID			1
#define NTMP_RSST_ID			3

/* Generic Update Actions for most tables */
#define NTMP_GEN_UA_CFGEU		BIT(0)
#define NTMP_GEN_UA_STSEU		BIT(1)

#define NTMP_ENTRY_ID_SIZE		4
#define RSST_ENTRY_NUM			64
#define RSST_STSE_DATA_SIZE(n)		((n) * 8)
#define RSST_CFGE_DATA_SIZE(n)		(n)

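/**
 * ntmp_init_cbdr - allocate and enable a control BD ring
 * @cbdr: the control BD ring to set up
 * @dev: device used for the coherent DMA allocation
 * @regs: register block (base address, PIR, CIR, length and mode) of the ring
 *
 * Allocate the ring memory with enough slack to align the base address
 * to 128 bytes, then program the ring registers and enable the ring.
 *
 * Return: 0 on success, -ENOMEM if the DMA allocation fails.
 */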
int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
		   const struct netc_cbdr_regs *regs)
{
	int cbd_num = NETC_CBDR_BD_NUM;
	size_t size;

	size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
	cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
					     GFP_KERNEL);
	if (!cbdr->addr_base)
		return -ENOMEM;

	cbdr->dma_size = size;
	cbdr->bd_num = cbd_num;
	cbdr->regs = *regs;
	cbdr->dev = dev;

	/* The base address of the Control BD Ring must be 128-byte aligned */
	cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
	cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
					  NTMP_BASE_ADDR_ALIGN);

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	spin_lock_init(&cbdr->ring_lock);

	/* Step 1: Configure the base address of the Control BD Ring */
	netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
	netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));

	/* Step 2: Configure the producer index register */
	netc_write(cbdr->regs.pir, cbdr->next_to_clean);

	/* Step 3: Configure the consumer index register */
	netc_write(cbdr->regs.cir, cbdr->next_to_use);

	/* Step 4: Configure the number of BDs of the Control BD Ring */
	netc_write(cbdr->regs.lenr, cbdr->bd_num);

	/* Step 5: Enable the Control BD Ring */
	netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);

	return 0;
}
EXPORT_SYMBOL_GPL(ntmp_init_cbdr);

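/**
 * ntmp_free_cbdr - disable a control BD ring and free its memory
 * @cbdr: the control BD ring to tear down
 */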
void ntmp_free_cbdr(struct netc_cbdr *cbdr)
{
	/* Disable the Control BD Ring */
	netc_write(cbdr->regs.mr, 0);
	dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
			  cbdr->dma_base);
	memset(cbdr, 0, sizeof(*cbdr));
}
EXPORT_SYMBOL_GPL(ntmp_free_cbdr);

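/* Number of free BDs in the ring. One slot is deliberately kept empty
 * so that a full ring (producer one slot behind the consumer) can be
 * distinguished from an empty one (producer == consumer), hence "- 1".
 */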
static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
{
	return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
		cbdr->bd_num) % cbdr->bd_num;
}

static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
{
	return &((union netc_cbd *)(cbdr->addr_base_align))[index];
}

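/* Zero out all BDs that hardware has consumed since the last clean,
 * i.e. everything between next_to_clean and the consumer index.
 */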
static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
{
	union netc_cbd *cbd;
	int i;

	i = cbdr->next_to_clean;
	while (netc_read(cbdr->regs.cir) != i) {
		cbd = ntmp_get_cbd(cbdr, i);
		memset(cbd, 0, sizeof(*cbd));
		i = (i + 1) % cbdr->bd_num;
	}

	cbdr->next_to_clean = i;
}

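/* Copy @cbd into the next free slot of the ring, advance the producer
 * index, then busy-poll the consumer index until hardware has processed
 * the command. On completion, @cbd is overwritten with the writeback BD
 * so that the caller can inspect the full response header.
 */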
static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
{
	union netc_cbd *cur_cbd;
	struct netc_cbdr *cbdr;
	int i, err;
	u16 status;
	u32 val;

	/* Currently only i.MX95 ENETC is supported, and it only has one
	 * command BD ring
	 */
	cbdr = &user->ring[0];

	spin_lock_bh(&cbdr->ring_lock);

	if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
		ntmp_clean_cbdr(cbdr);

	i = cbdr->next_to_use;
	cur_cbd = ntmp_get_cbd(cbdr, i);
	*cur_cbd = *cbd;
	dma_wmb();

	/* Update the producer index of both software and hardware */
	i = (i + 1) % cbdr->bd_num;
	cbdr->next_to_use = i;
	netc_write(cbdr->regs.pir, i);

	err = read_poll_timeout_atomic(netc_read, val, val == i,
				       NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
				       true, cbdr->regs.cir);
	if (unlikely(err))
		goto cbdr_unlock;

	dma_rmb();
	/* Copy the writeback command BD back to the caller, which may
	 * need to check other fields of the response header.
	 */
	*cbd = *cur_cbd;

	/* Check the writeback error status */
	status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
	if (unlikely(status)) {
		err = -EIO;
		dev_err(user->dev, "Command BD error: 0x%04x\n", status);
	}

	ntmp_clean_cbdr(cbdr);
	dma_wmb();

cbdr_unlock:
	spin_unlock_bh(&cbdr->ring_lock);

	return err;
}

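/* Allocate a coherent buffer with enough slack to hand hardware an
 * address aligned to NTMP_DATA_ADDR_ALIGN bytes; the raw (unaligned)
 * pointer is kept in @data for the matching free.
 */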
static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
{
	void *buf;

	buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
				 &data->dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	data->buf = buf;
	*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);

	return 0;
}

static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
{
	dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
			  data->buf, data->dma);
}

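/* Fill the NTMP 2.0 request header of a command BD. Note that @len is
 * the combined encoding of the request and response buffer lengths as
 * produced by NTMP_LEN().
 */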
static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
				  int len, int table_id, int cmd,
				  int access_method)
{
	dma_addr_t dma_align;

	memset(cbd, 0, sizeof(*cbd));
	dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
	cbd->req_hdr.addr = cpu_to_le64(dma_align);
	cbd->req_hdr.len = cpu_to_le32(len);
	cbd->req_hdr.cmd = cmd;
	cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
						access_method);
	cbd->req_hdr.table_id = table_id;
	cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
					     NTMP_HDR_VER2);
	/* For NTMP version 2.0 or later */
	cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
}

static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
			  u8 qa, u16 ua)
{
	crd->update_act = cpu_to_le16(ua);
	crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
}

static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
			      u8 qa, u16 ua, u32 entry_id)
{
	ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
	rbe->entry_id = cpu_to_le32(entry_id);
}

static const char *ntmp_table_name(int tbl_id)
{
	switch (tbl_id) {
	case NTMP_MAFT_ID:
		return "MAC Address Filter Table";
	case NTMP_RSST_ID:
		return "RSS Table";
	default:
		return "Unknown Table";
	}
}

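/* Delete a table entry addressed by its entry ID. The data buffer is
 * sized to hold the larger of the request and the response.
 */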
static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
				   u8 tbl_ver, u32 entry_id, u32 req_len,
				   u32 resp_len)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = max(req_len, resp_len),
	};
	struct ntmp_req_by_eid *req;
	union netc_cbd cbd;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
			      tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);

	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev,
			"Failed to delete entry 0x%x of %s, err: %pe\n",
			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}

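/* Query a table entry by its entry ID. If any update action is set in
 * the request, a combined query-and-update command is issued instead.
 * When @compare_eid is true, the entry ID echoed in the response is
 * checked against the requested one.
 */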
static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
				  u32 len, struct ntmp_req_by_eid *req,
				  dma_addr_t dma, bool compare_eid)
{
	struct ntmp_cmn_resp_query *resp;
	int cmd = NTMP_CMD_QUERY;
	union netc_cbd cbd;
	u32 entry_id;
	int err;

	entry_id = le32_to_cpu(req->entry_id);
	if (le16_to_cpu(req->crd.update_act))
		cmd = NTMP_CMD_QU;

	/* Request header */
	ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err) {
		dev_err(user->dev,
			"Failed to query entry 0x%x of %s, err: %pe\n",
			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
		return err;
	}

	/* A few tables do not have entry_id as the first field of their
	 * response data, so the check below cannot be applied to them;
	 * return success directly.
	 */
	if (!compare_eid)
		return 0;

	resp = (struct ntmp_cmn_resp_query *)req;
	if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
		dev_err(user->dev,
			"%s: query EID 0x%x doesn't match response EID 0x%x\n",
			ntmp_table_name(tbl_id), entry_id,
			le32_to_cpu(resp->entry_id));
		return -EIO;
	}

	return 0;
}

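/**
 * ntmp_maft_add_entry - add a MAC Address Filter Table entry
 * @user: the NTMP user owning the command BD ring
 * @entry_id: ID to assign to the new entry
 * @maft: key and configuration data of the entry
 *
 * Return: 0 on success, a negative error code otherwise.
 */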
int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
			struct maft_entry_data *maft)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = sizeof(struct maft_req_add),
	};
	struct maft_req_add *req;
	union netc_cbd cbd;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the MAC address filter table request data buffer */
	ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
	req->keye = maft->keye;
	req->cfge = maft->cfge;

	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
			      NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
			entry_id, ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);

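/**
 * ntmp_maft_query_entry - query a MAC Address Filter Table entry
 * @user: the NTMP user owning the command BD ring
 * @entry_id: ID of the entry to look up
 * @maft: filled with the key and configuration data on success
 *
 * Return: 0 on success, a negative error code otherwise.
 */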
int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
			  struct maft_entry_data *maft)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = sizeof(struct maft_resp_query),
	};
	struct maft_resp_query *resp;
	struct ntmp_req_by_eid *req;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
	err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
				     NTMP_LEN(sizeof(*req), data.size),
				     req, data.dma, true);
	if (err)
		goto end;

	resp = (struct maft_resp_query *)req;
	maft->keye = resp->keye;
	maft->cfge = resp->cfge;

end:
	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);

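/**
 * ntmp_maft_delete_entry - delete a MAC Address Filter Table entry
 * @user: the NTMP user owning the command BD ring
 * @entry_id: ID of the entry to delete
 *
 * Return: 0 on success, a negative error code otherwise.
 */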
int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
{
	return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
				       entry_id, NTMP_EID_REQ_LEN, 0);
}
EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);

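/**
 * ntmp_rsst_update_entry - update the RSS table
 * @user: the NTMP user owning the command BD ring
 * @table: group indices to program; only the low byte of each is used
 * @count: number of entries in @table, must be RSST_ENTRY_NUM (64)
 *
 * Return: 0 on success, a negative error code otherwise.
 */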
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
			   int count)
{
	struct ntmp_dma_buf data = {.dev = user->dev};
	struct rsst_req_update *req;
	union netc_cbd cbd;
	int err, i;

	/* HW only accepts a full 64-entry table */
	if (count != RSST_ENTRY_NUM)
		return -EINVAL;

	data.size = struct_size(req, groups, count);
	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the request data buffer */
	ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
			  NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
	for (i = 0; i < count; i++)
		req->groups[i] = (u8)(table[i]);

	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
			      NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);

	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
			ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);

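/**
 * ntmp_rsst_query_entry - read back the RSS table
 * @user: the NTMP user owning the command BD ring
 * @table: output array receiving one group index per entry
 * @count: number of entries in @table, must be RSST_ENTRY_NUM (64)
 *
 * The response buffer carries the entry ID, the STSE data and then the
 * CFGE data (the group indices); only the CFGE part is copied to @table.
 *
 * Return: 0 on success, a negative error code otherwise.
 */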
int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
{
	struct ntmp_dma_buf data = {.dev = user->dev};
	struct ntmp_req_by_eid *req;
	union netc_cbd cbd;
	int err, i;
	u8 *group;

	/* HW only accepts a full 64-entry table */
	if (count != RSST_ENTRY_NUM)
		return -EINVAL;

	data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
		    RSST_CFGE_DATA_SIZE(count);
	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the request data buffer */
	ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
			      NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err) {
		dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
			ERR_PTR(err));
		goto end;
	}

	group = (u8 *)req;
	group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
	for (i = 0; i < count; i++)
		table[i] = group[i];

end:
	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);


MODULE_DESCRIPTION("NXP NETC Library");
MODULE_LICENSE("Dual BSD/GPL");