/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"

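/* Soft-command responses are tracked on per-device response lists:
 * ORDERED holds requests awaiting a response or timeout, DONE holds
 * finished requests whose owners have not yet reclaimed them, and
 * ZOMBIE holds timed-out requests that cannot be freed yet.  A delayed
 * work item polls the ORDERED list while requests are pending.
 */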
static void oct_poll_req_completion(struct work_struct *work);

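/**
 * octeon_setup_response_list - initialize response lists and the poller
 * @oct: octeon device
 *
 * Initializes every response list with its lock and pending-request
 * counter, and allocates the delayed workqueue that drives
 * oct_poll_req_completion().
 *
 * Return: 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */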
int octeon_setup_response_list(struct octeon_device *oct)
{
	int i, ret = 0;
	struct cavium_wq *cwq;

	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
		INIT_LIST_HEAD(&oct->response_list[i].head);
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}
	spin_lock_init(&oct->cmd_resp_wqlock);

	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
	if (!oct->dma_comp_wq.wq) {
		dev_err(&oct->pci_dev->dev, "failed to create dma-comp workqueue\n");
		return -ENOMEM;
	}

	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	oct->cmd_resp_state = OCT_DRV_ONLINE;

	return ret;
}

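/**
 * octeon_delete_response_list - tear down the completion poller
 * @oct: octeon device
 *
 * Cancels any pending poll work and destroys the workqueue created by
 * octeon_setup_response_list().
 */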
void octeon_delete_response_list(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
	destroy_workqueue(oct->dma_comp_wq.wq);
}

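/**
 * lio_process_ordered_list - reap completed or timed-out soft commands
 * @octeon_dev: octeon device
 * @force_quit: when non-zero, treat every pending request as timed out
 *
 * Walks the ORDERED list from the head, moving each finished request to
 * the DONE list (or invoking its callback) until an unfinished request
 * or the per-call processing cap is reached.
 *
 * Return: 1 if the ORDERED list is empty, 0 otherwise.
 */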
int lio_process_ordered_list(struct octeon_device *octeon_dev,
			     u32 force_quit)
{
	struct octeon_response_list *ordered_sc_list;
	struct octeon_soft_command *sc;
	int request_complete = 0;
	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
	u32 status;
	u64 status64;

	octeon_free_sc_done_list(octeon_dev);

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

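	/* Requests on the ORDERED list complete in submission order, so
	 * only the head entry needs to be examined; if it is still
	 * pending, so is everything behind it.
	 */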
	do {
		spin_lock_bh(&ordered_sc_list->lock);

		if (list_empty(&ordered_sc_list->head)) {
			spin_unlock_bh(&ordered_sc_list->lock);
			return 1;
		}

		sc = list_first_entry(&ordered_sc_list->head,
				      struct octeon_soft_command, node);

		status = OCTEON_REQUEST_PENDING;

		/* Check whether Octeon has finished DMAing a response
		 * to the location rptr points to.
		 */
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 16-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
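			/* COMPLETION_WORD_INIT is all-FF; a non-FF byte at
			 * both ends of the word distinguishes a completely
			 * written response from a partially DMA'd one.
			 */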
			if ((status64 & 0xff) != 0xff) {
				octeon_swap_8B_data(&status64, 1);
				if ((status64 & 0xff) != 0xff) {
					/* retrieve 16-bit firmware status */
					status = (u32)(status64 & 0xffffULL);
					if (status) {
						status =
							FIRMWARE_STATUS_CODE(status);
					} else {
						/* i.e. no error */
						status = OCTEON_REQUEST_DONE;
					}
				}
			}
		} else if (unlikely(force_quit) || (sc->expiry_time &&
			   time_after(jiffies, (unsigned long)sc->expiry_time))) {
			struct octeon_instr_irh *irh =
				(struct octeon_instr_irh *)&sc->cmd.cmd3.irh;

			dev_err(&octeon_dev->pci_dev->dev,
				"%s: cmd %x/%x/%llx/%llx failed, timeout (%ld, %ld)\n",
				__func__, irh->opcode, irh->subcode,
				sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1],
				(long)jiffies, (long)sc->expiry_time);
			status = OCTEON_REQUEST_TIMEOUT;
		}

		if (status != OCTEON_REQUEST_PENDING) {
			sc->sc_status = status;

			/* We have a response or have timed out; remove the
			 * node from the list.
			 */
			list_del(&sc->node);
			atomic_dec(&octeon_dev->response_list
				   [OCTEON_ORDERED_SC_LIST].
				   pending_req_count);

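			/* No completion callback: move the sc to the DONE
			 * list.  The owner either sleeps on sc->complete or
			 * sets caller_is_done, in which case the sc is
			 * reclaimed later by octeon_free_sc_done_list().
			 */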
			if (!sc->callback) {
				atomic_inc(&octeon_dev->response_list
					   [OCTEON_DONE_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node,
					      &octeon_dev->response_list
					      [OCTEON_DONE_SC_LIST].head);

				if (unlikely(READ_ONCE(sc->caller_is_done))) {
					/* caller does not wait for response
					 * from firmware
					 */
					if (status != OCTEON_REQUEST_DONE) {
						struct octeon_instr_irh *irh;

						irh = (struct octeon_instr_irh *)
						      &sc->cmd.cmd3.irh;
						dev_dbg(&octeon_dev->pci_dev->dev,
							"%s: sc failed: opcode=%x, subcode=%x, ossp[0]=%llx, ossp[1]=%llx, status=%d\n",
							__func__, irh->opcode,
							irh->subcode,
							sc->cmd.cmd3.ossp[0],
							sc->cmd.cmd3.ossp[1],
							status);
					}
				} else {
					complete(&sc->complete);
				}

				spin_unlock_bh(&ordered_sc_list->lock);
			} else {
				/* sc with callback function */
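				/* A timed-out sc cannot be freed yet: the
				 * device may still DMA its response later.
				 * Park it on the ZOMBIE list so the buffers
				 * stay valid until then.
				 */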
				if (status == OCTEON_REQUEST_TIMEOUT) {
					atomic_inc(&octeon_dev->response_list
						   [OCTEON_ZOMBIE_SC_LIST].
						   pending_req_count);
					list_add_tail(&sc->node,
						      &octeon_dev->response_list
						      [OCTEON_ZOMBIE_SC_LIST].
						      head);
				}

				spin_unlock_bh(&ordered_sc_list->lock);

				sc->callback(octeon_dev, status,
					     sc->callback_arg);
				/* sc is freed by caller */
			}

			request_complete++;

		} else {
			/* no response yet */
			request_complete = 0;
			spin_unlock_bh(&ordered_sc_list->lock);
		}

		/* Cap the number of requests processed per call; without an
		 * upper limit this function could consume the entire CPU.
		 * Any remaining requests are handled the next time the poll
		 * work runs.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}

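/**
 * oct_poll_req_completion - delayed-work poller for ordered requests
 * @work: embedded work_struct (first member of struct cavium_wk, so the
 *        cast below is equivalent to container_of())
 *
 * Reaps completed requests and re-arms itself while ordered requests
 * remain outstanding.
 */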
static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);

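	/* Re-arm the poll only while ordered requests are outstanding;
	 * the 1 ms period bounds how long a completed request waits to
	 * be reaped.
	 */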
	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
}