1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2022 Linaro Ltd.
4 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
5 */
6
7 #include <linux/mhi_ep.h>
8 #include "internal.h"
9
mhi_ep_ring_addr2offset(struct mhi_ep_ring * ring,u64 ptr)10 size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
11 {
12 return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
13 }
14
mhi_ep_ring_num_elems(struct mhi_ep_ring * ring)15 static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
16 {
17 __le64 rlen;
18
19 memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
20
21 return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
22 }
23
mhi_ep_ring_inc_index(struct mhi_ep_ring * ring)24 void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
25 {
26 ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
27 }
28
/*
 * Copy host ring elements into the local ring cache, from the currently
 * cached wr_offset up to (but not including) @end. Handles the wrap-around
 * case with two reads: tail up to the end of the ring, then head from the
 * ring base.
 */
static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	const size_t el_size = sizeof(struct mhi_ring_element);
	size_t start, copy_size;
	int ret;

	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
	if (ring->type == RING_TYPE_ER)
		return 0;

	start = ring->wr_offset;

	/* No need to cache the ring if write pointer is unmodified */
	if (start == end)
		return 0;

	if (start < end) {
		/* Contiguous span: a single read from the host covers it */
		copy_size = (end - start) * el_size;
		ret = mhi_cntrl->read_from_host(mhi_cntrl,
						ring->rbase + (start * el_size),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;
	} else {
		/* Wrapped span: first read the tail, up to the end of the ring */
		copy_size = (ring->ring_size - start) * el_size;
		ret = mhi_cntrl->read_from_host(mhi_cntrl,
						ring->rbase + (start * el_size),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;

		/* Then the head, from the ring base (skipped when end == 0) */
		if (end) {
			ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
							&ring->ring_cache[0],
							end * el_size);
			if (ret < 0)
				return ret;
		}
	}

	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);

	return 0;
}
73
/* Refresh the local ring cache up to the host write pointer @wr_ptr */
static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
{
	size_t wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
	int ret;

	/* Cache the host ring till write offset */
	ret = __mhi_ep_cache_ring(ring, wr_offset);
	if (ret)
		return ret;

	ring->wr_offset = wr_offset;

	return 0;
}
90
mhi_ep_update_wr_offset(struct mhi_ep_ring * ring)91 int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
92 {
93 u64 wr_ptr;
94
95 wr_ptr = mhi_ep_mmio_get_db(ring);
96
97 return mhi_ep_cache_ring(ring, wr_ptr);
98 }
99
100 /* TODO: Support for adding multiple ring elements to the ring */
mhi_ep_ring_add_element(struct mhi_ep_ring * ring,struct mhi_ring_element * el)101 int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
102 {
103 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
104 struct device *dev = &mhi_cntrl->mhi_dev->dev;
105 size_t old_offset = 0;
106 u32 num_free_elem;
107 __le64 rp;
108 int ret;
109
110 ret = mhi_ep_update_wr_offset(ring);
111 if (ret) {
112 dev_err(dev, "Error updating write pointer\n");
113 return ret;
114 }
115
116 if (ring->rd_offset < ring->wr_offset)
117 num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
118 else
119 num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
120
121 /* Check if there is space in ring for adding at least an element */
122 if (!num_free_elem) {
123 dev_err(dev, "No space left in the ring\n");
124 return -ENOSPC;
125 }
126
127 old_offset = ring->rd_offset;
128 mhi_ep_ring_inc_index(ring);
129
130 dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
131
132 /* Update rp in ring context */
133 rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
134 memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
135
136 ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
137 sizeof(*el));
138 if (ret < 0)
139 return ret;
140
141 return 0;
142 }
143
/* Record the ring type and the doorbell register offsets it will poll */
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
{
	ring->type = type;

	switch (type) {
	case RING_TYPE_CMD:
		ring->db_offset_h = EP_CRDB_HIGHER;
		ring->db_offset_l = EP_CRDB_LOWER;
		break;
	case RING_TYPE_CH:
		ring->db_offset_h = CHDB_HIGHER_n(id);
		ring->db_offset_l = CHDB_LOWER_n(id);
		ring->ch_id = id;
		break;
	default:
		/* RING_TYPE_ER */
		ring->db_offset_h = ERDB_HIGHER_n(id);
		ring->db_offset_l = ERDB_LOWER_n(id);
		break;
	}
}
159
mhi_ep_ring_start(struct mhi_ep_cntrl * mhi_cntrl,struct mhi_ep_ring * ring,union mhi_ep_ring_ctx * ctx)160 int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
161 union mhi_ep_ring_ctx *ctx)
162 {
163 struct device *dev = &mhi_cntrl->mhi_dev->dev;
164 __le64 val;
165 int ret;
166
167 ring->mhi_cntrl = mhi_cntrl;
168 ring->ring_ctx = ctx;
169 ring->ring_size = mhi_ep_ring_num_elems(ring);
170 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
171 ring->rbase = le64_to_cpu(val);
172
173 if (ring->type == RING_TYPE_CH)
174 ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
175
176 if (ring->type == RING_TYPE_ER)
177 ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
178
179 /* During ring init, both rp and wp are equal */
180 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
181 ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
182 ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
183
184 /* Allocate ring cache memory for holding the copy of host ring */
185 ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
186 if (!ring->ring_cache)
187 return -ENOMEM;
188
189 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
190 ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
191 if (ret) {
192 dev_err(dev, "Failed to cache ring\n");
193 kfree(ring->ring_cache);
194 return ret;
195 }
196
197 ring->started = true;
198
199 return 0;
200 }
201
mhi_ep_ring_reset(struct mhi_ep_cntrl * mhi_cntrl,struct mhi_ep_ring * ring)202 void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
203 {
204 ring->started = false;
205 kfree(ring->ring_cache);
206 ring->ring_cache = NULL;
207 }
208