// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <hw/arch_ops.h>
#include <limits>
#include <string.h>
#include <utility>
#include <zircon/syscalls.h>

#include <intel-hda/utils/utils.h>

#include "debug-logging.h"
#include "intel-hda-codec.h"
#include "intel-hda-stream.h"
#include "utils.h"

namespace audio {
namespace intel_hda {

constexpr size_t IntelHDAStream::MAX_BDL_LENGTH;

namespace {
// Note: these timeouts are arbitrary; the spec provides no guidance here.
// That said, it is hard to imagine it taking more than a single audio
// frame's worth of time, so 10mSec should be more than generous enough.
static constexpr zx_time_t IHDA_SD_MAX_RESET_TIME_NSEC  = 10000000u;  // 10mSec
static constexpr zx_time_t IHDA_SD_RESET_POLL_TIME_NSEC = 100000u;    // 100uSec
static constexpr zx_time_t IHDA_SD_STOP_HOLD_TIME_NSEC  = 100000u;
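// Alignment used for the BDL itself, for the start of each buffer a BDL entry
// describes, and for rounding interrupt positions when building the BDL. The
// 128 byte value matches the alignment restrictions imposed by the HDA spec
// (see the sanity checks in Initialize() below).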
constexpr uint32_t DMA_ALIGN = 128;
constexpr uint32_t DMA_ALIGN_MASK = DMA_ALIGN - 1;
}  // namespace

fbl::RefPtr<IntelHDAStream> IntelHDAStream::Create(
        Type type,
        uint16_t id,
        hda_stream_desc_regs_t* regs,
        const fbl::RefPtr<RefCountedBti>& pci_bti) {
    fbl::AllocChecker ac;
    auto ret = fbl::AdoptRef(new (&ac) IntelHDAStream(type, id, regs, pci_bti));
    if (!ac.check()) {
        return nullptr;
    }

    zx_status_t res = ret->Initialize();
    if (res != ZX_OK) {
        // Initialize should have already logged the warning with the proper
        // debug prefix for the stream. Don't bother to do so here.
        return nullptr;
    }

    return ret;
}

IntelHDAStream::IntelHDAStream(Type type,
                               uint16_t id,
                               hda_stream_desc_regs_t* regs,
                               const fbl::RefPtr<RefCountedBti>& pci_bti)
    : type_(type),
      id_(id),
      regs_(regs),
      pci_bti_(pci_bti) {
    snprintf(log_prefix_, sizeof(log_prefix_), "IHDA_SD #%u", id_);
}

IntelHDAStream::~IntelHDAStream() {
    ZX_DEBUG_ASSERT(!running_);
}

zx_status_t IntelHDAStream::Initialize() {
    // BDL entries should be 16 bytes long, meaning that we should be able to
    // fit 256 of them perfectly into a single 4k page.
    constexpr size_t MAX_BDL_BYTES = sizeof(IntelHDABDLEntry) * MAX_BDL_LENGTH;
    static_assert(MAX_BDL_BYTES <= PAGE_SIZE, "A max length BDL must fit inside a single page!");
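    // Worked out explicitly (assuming the 16 byte entries and 256 entry limit
    // described above): 16 bytes/entry * 256 entries = 4096 bytes, exactly one
    // 4KiB page, so the static_assert above holds with no room to spare.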

    // Create a VMO made of a single page and map it for read/write so the CPU
    // has access to it.
    constexpr uint32_t CPU_MAP_FLAGS = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    zx::vmo bdl_vmo;
    zx_status_t res;
    res = bdl_cpu_mem_.CreateAndMap(PAGE_SIZE,
                                    CPU_MAP_FLAGS,
                                    DriverVmars::registers(),
                                    &bdl_vmo,
                                    ZX_RIGHT_SAME_RIGHTS,
                                    ZX_CACHE_POLICY_UNCACHED_DEVICE);
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to create and map %u bytes for stream BDL! (res %d)\n", PAGE_SIZE, res);
        return res;
    }

    // Pin this VMO and grant the controller access to it. The controller
    // should only need read access to buffer descriptor lists.
    constexpr uint32_t HDA_MAP_FLAGS = ZX_BTI_PERM_READ;
    ZX_DEBUG_ASSERT(pci_bti_ != nullptr);
    res = bdl_hda_mem_.Pin(bdl_vmo, pci_bti_->initiator(), HDA_MAP_FLAGS);
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to pin pages for stream BDL! (res %d)\n", res);
        return res;
    }

    // Sanity checks. At this point, everything should be allocated, mapped,
    // and should obey the alignment restrictions imposed by the HDA spec.
    ZX_DEBUG_ASSERT(bdl_cpu_mem_.start() != 0);
    ZX_DEBUG_ASSERT(!(reinterpret_cast<uintptr_t>(bdl_cpu_mem_.start()) & DMA_ALIGN_MASK));
    ZX_DEBUG_ASSERT(bdl_hda_mem_.region_count() == 1);
    ZX_DEBUG_ASSERT(!(bdl_hda_mem_.region(0).phys_addr & DMA_ALIGN_MASK));

    return ZX_OK;
}

void IntelHDAStream::EnsureStopped(hda_stream_desc_regs_t* regs) {
    // Stop the stream, but do not place it into reset. Ack any lingering IRQ
    // status bits in the process.
    REG_CLR_BITS(&regs->ctl_sts.w, HDA_SD_REG_CTRL_RUN);
    hw_wmb();
    zx_nanosleep(zx_deadline_after(IHDA_SD_STOP_HOLD_TIME_NSEC));

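    // Note: the stream status bits in this register are write-1-to-clear, so
    // "setting" the ACK bits below clears any pending interrupt status rather
    // than asserting it, while the interrupt enables are cleared outright.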
    constexpr uint32_t SET = HDA_SD_REG_STS32_ACK;
    constexpr uint32_t CLR = HDA_SD_REG_CTRL_IOCE |
                             HDA_SD_REG_CTRL_FEIE |
                             HDA_SD_REG_CTRL_DEIE;
    REG_MOD(&regs->ctl_sts.w, CLR, SET);
    hw_wmb();
}

void IntelHDAStream::Reset(hda_stream_desc_regs_t* regs) {
    // Enter the reset state. To do this, we...
    // 1) Clear the RUN bit if it was set.
    // 2) Set the SRST bit to 1.
    // 3) Poll until the hardware acks by setting the SRST bit to 1.
    if (REG_RD(&regs->ctl_sts.w) & HDA_SD_REG_CTRL_RUN) {
        EnsureStopped(regs);
    }

    REG_WR(&regs->ctl_sts.w, HDA_SD_REG_CTRL_SRST); // Set the reset bit.
    hw_mb();  // Make sure that all writes have gone through before we start to read.

    // Wait until the hardware acks the reset.
    zx_status_t res;
    res = WaitCondition(
            IHDA_SD_MAX_RESET_TIME_NSEC,
            IHDA_SD_RESET_POLL_TIME_NSEC,
            [&regs]() -> bool {
                auto val = REG_RD(&regs->ctl_sts.w);
                return (val & HDA_SD_REG_CTRL_SRST) != 0;
            });

    if (res != ZX_OK) {
        GLOBAL_LOG(ERROR, "Failed to place stream descriptor HW into reset! (res %d)\n", res);
    }

    // Leave the reset state. To do this, we...
    // 1) Set the SRST bit to 0.
    // 2) Poll until the hardware acks by setting the SRST bit back to 0.
    REG_WR(&regs->ctl_sts.w, 0u);
    hw_mb();  // Make sure that all writes have gone through before we start to read.

    // Wait until the hardware acks the release from reset.
    res = WaitCondition(
            IHDA_SD_MAX_RESET_TIME_NSEC,
            IHDA_SD_RESET_POLL_TIME_NSEC,
            [&regs]() -> bool {
                auto val = REG_RD(&regs->ctl_sts.w);
                return (val & HDA_SD_REG_CTRL_SRST) == 0;
            });

    if (res != ZX_OK) {
        GLOBAL_LOG(ERROR, "Failed to release stream descriptor HW from reset! (res %d)\n", res);
    }
}

void IntelHDAStream::Configure(Type type, uint8_t tag) {
    if (type == Type::INVALID) {
        ZX_DEBUG_ASSERT(tag == 0);
    } else {
        ZX_DEBUG_ASSERT(type != Type::BIDIR);
        ZX_DEBUG_ASSERT((tag != 0) && (tag < 16));
    }

    configured_type_ = type;
    tag_ = tag;
}

zx_status_t IntelHDAStream::SetStreamFormat(const fbl::RefPtr<dispatcher::ExecutionDomain>& domain,
                                            uint16_t encoded_fmt,
                                            zx::channel* client_endpoint_out) {
    if ((domain == nullptr) || (client_endpoint_out == nullptr))
        return ZX_ERR_INVALID_ARGS;

    // We are being given a new format. Reset any client connection we may have
    // and stop the hardware.
    Deactivate();

    // Attempt to create a channel and activate it, binding it to our Codec
    // owner in the process, but dispatching requests to us. Binding the
    // channel to our Codec will cause it to exist in the same serialization
    // domain as all of the other channels being serviced by this codec owner.
    dispatcher::Channel::ProcessHandler phandler(
        [stream = fbl::WrapRefPtr(this)](dispatcher::Channel* channel) -> zx_status_t {
            return stream->ProcessClientRequest(channel);
        });

    dispatcher::Channel::ChannelClosedHandler chandler(
        [stream = fbl::WrapRefPtr(this)](const dispatcher::Channel* channel) -> void {
            stream->ProcessClientDeactivate(channel);
        });

    zx_status_t res;
    fbl::RefPtr<dispatcher::Channel> local_endpoint;
    res = CreateAndActivateChannel(domain,
                                   std::move(phandler),
                                   std::move(chandler),
                                   &local_endpoint,
                                   client_endpoint_out);
    if (res != ZX_OK) {
        LOG(TRACE, "Failed to create and activate ring buffer channel during SetStreamFormat "
                   "(res %d)\n", res);
        return res;
    }

    // Record and program the stream format, then record the fifo depth we get
    // based on this format selection.
    encoded_fmt_ = encoded_fmt;
    REG_WR(&regs_->fmt, encoded_fmt_);
    hw_mb();
    fifo_depth_ = REG_RD(&regs_->fifod);

    LOG(TRACE, "Stream format set 0x%04hx; fifo is %hu bytes deep\n", encoded_fmt_, fifo_depth_);

    // Record our new client channel
    fbl::AutoLock channel_lock(&channel_lock_);
    channel_ = std::move(local_endpoint);
    bytes_per_frame_ = StreamFormat(encoded_fmt).bytes_per_frame();

    return ZX_OK;
}

void IntelHDAStream::Deactivate() {
    fbl::AutoLock channel_lock(&channel_lock_);
    DeactivateLocked();
}

#define HANDLE_REQ(_ioctl, _payload, _handler, _allow_noack)     \
case _ioctl:                                                     \
    if (req_size != sizeof(req._payload)) {                      \
        LOG(TRACE, "Bad " #_ioctl                                \
                   " request length (%u != %zu)\n",              \
                   req_size, sizeof(req._payload));              \
        return ZX_ERR_INVALID_ARGS;                              \
    }                                                            \
    if (!_allow_noack && (req.hdr.cmd & AUDIO_FLAG_NO_ACK)) {    \
        LOG(TRACE, "NO_ACK flag not allowed for " #_ioctl "\n"); \
        return ZX_ERR_INVALID_ARGS;                              \
    }                                                            \
    return _handler(req._payload);
zx_status_t IntelHDAStream::ProcessClientRequest(dispatcher::Channel* channel) {
    zx_status_t res;
    uint32_t req_size;
    zx::handle rxed_handle;
    union {
        audio_proto::CmdHdr hdr;
        audio_proto::RingBufGetFifoDepthReq get_fifo_depth;
        audio_proto::RingBufGetBufferReq get_buffer;
        audio_proto::RingBufStartReq start;
        audio_proto::RingBufStopReq stop;
    } req;
    // TODO(johngro) : How large is too large?
    static_assert(sizeof(req) <= 256, "Request buffer is too large to hold on the stack!");

    // Is this request from our currently active channel? If not, make sure the
    // channel has been de-activated and ignore the request.
    fbl::AutoLock channel_lock(&channel_lock_);
    if (channel_.get() != channel) {
        channel->Deactivate();
        return ZX_OK;
    }

    // Read the client request.
    ZX_DEBUG_ASSERT(channel != nullptr);
    res = channel->Read(&req, sizeof(req), &req_size);
    if (res != ZX_OK) {
        LOG(TRACE, "Failed to read client request (res %d)\n", res);
        return res;
    }

    // Sanity check the request, then dispatch it to the appropriate handler.
    if (req_size < sizeof(req.hdr)) {
        LOG(TRACE, "Client request too small to contain header (%u < %zu)\n",
            req_size, sizeof(req.hdr));
        return ZX_ERR_INVALID_ARGS;
    }

    LOG(SPEW, "Client Request (cmd 0x%04x tid %u) len %u\n",
        req.hdr.cmd,
        req.hdr.transaction_id,
        req_size);

    if (req.hdr.transaction_id == AUDIO_INVALID_TRANSACTION_ID)
        return ZX_ERR_INVALID_ARGS;

    // Strip the NO_ACK flag from the request before deciding the dispatch target.
    auto cmd = static_cast<audio_proto::Cmd>(req.hdr.cmd & ~AUDIO_FLAG_NO_ACK);
    switch (cmd) {
        HANDLE_REQ(AUDIO_RB_CMD_GET_FIFO_DEPTH, get_fifo_depth, ProcessGetFifoDepthLocked, false);
        HANDLE_REQ(AUDIO_RB_CMD_GET_BUFFER, get_buffer, ProcessGetBufferLocked, false);
        HANDLE_REQ(AUDIO_RB_CMD_START, start, ProcessStartLocked, false);
        HANDLE_REQ(AUDIO_RB_CMD_STOP, stop, ProcessStopLocked, false);
        default:
            LOG(TRACE, "Unrecognized command ID 0x%04x\n", req.hdr.cmd);
            return ZX_ERR_INVALID_ARGS;
    }
}
#undef HANDLE_REQ

void IntelHDAStream::ProcessClientDeactivate(const dispatcher::Channel* channel) {
    // Is the channel being closed our currently active channel? If so, go
    // ahead and deactivate this DMA stream. Otherwise, just ignore this
    // request.
    fbl::AutoLock channel_lock(&channel_lock_);
    if (channel == channel_.get()) {
        LOG(TRACE, "Client closed channel to stream\n");
        DeactivateLocked();
    }
}

void IntelHDAStream::ProcessStreamIRQ() {
    // Regardless of whether we are currently active or not, make sure we ack any
    // pending IRQs so we don't accidentally spin out of control. Writing the
    // status bits back to the register clears them (they are write-1-to-clear).
    uint8_t sts = REG_RD(&regs_->ctl_sts.b.sts);
    REG_WR(&regs_->ctl_sts.b.sts, sts);

    // Enter the lock and check to see if we should still be sending update
    // notifications. If our channel has been nulled out, then this stream was
    // stopped after the IRQ fired but before it was handled. Don't send any
    // notifications in this case.
    fbl::AutoLock notif_lock(&notif_lock_);

    // TODO(johngro): Deal with FIFO errors or descriptor errors. There is no
    // good way to recover from such a thing. If it happens, we need to shut
    // the stream down and send the client an error notification informing them
    // that their stream was ruined and that they need to restart it.
    if (sts & (HDA_SD_REG_STS8_FIFOE | HDA_SD_REG_STS8_DESE)) {
        REG_CLR_BITS(&regs_->ctl_sts.w, HDA_SD_REG_CTRL_RUN);
        LOG(ERROR, "Fatal stream error, shutting down DMA! (IRQ status 0x%02x)\n", sts);
    }

    if (irq_channel_ == nullptr)
        return;

    if (sts & HDA_SD_REG_STS8_BCIS) {
        audio_proto::RingBufPositionNotify msg;
        msg.hdr.cmd = AUDIO_RB_POSITION_NOTIFY;
        msg.hdr.transaction_id = AUDIO_INVALID_TRANSACTION_ID;
        msg.ring_buffer_pos = REG_RD(&regs_->lpib);
        irq_channel_->Write(&msg, sizeof(msg));
    }
}

void IntelHDAStream::DeactivateLocked() {
    // Prevent the IRQ thread from sending channel notifications by making sure
    // the irq_channel_ reference has been cleared.
    {
        fbl::AutoLock notif_lock(&notif_lock_);
        irq_channel_ = nullptr;
    }

    // If we have a connection to a client, close it.
    if (channel_ != nullptr) {
        channel_->Deactivate();
        channel_ = nullptr;
    }

    // Make sure that the stream has been stopped.
    EnsureStoppedLocked();

    // We are now stopped and unconfigured.
    running_ = false;
    fifo_depth_ = 0;
    bytes_per_frame_ = 0;

    // Release any assigned ring buffer.
    ReleaseRingBufferLocked();

    LOG(TRACE, "Stream deactivated\n");
}

zx_status_t IntelHDAStream::ProcessGetFifoDepthLocked(
        const audio_proto::RingBufGetFifoDepthReq& req) {
    ZX_DEBUG_ASSERT(channel_ != nullptr);

    audio_proto::RingBufGetFifoDepthResp resp = { };
    resp.hdr = req.hdr;

    // We don't know what our FIFO depth is going to be if our format has not
    // been set yet.
    if (bytes_per_frame_ == 0) {
        LOG(TRACE, "Bad state (not configured) while getting fifo depth.\n");
        resp.result = ZX_ERR_BAD_STATE;
        resp.fifo_depth = 0;
    } else {
        resp.result = ZX_OK;
        resp.fifo_depth = fifo_depth_;
    }

    return channel_->Write(&resp, sizeof(resp));
}

zx_status_t IntelHDAStream::ProcessGetBufferLocked(const audio_proto::RingBufGetBufferReq& req) {
    zx::vmo ring_buffer_vmo;
    zx::vmo client_rb_handle;
    audio_proto::RingBufGetBufferResp resp = { };
    uint64_t tmp;
    uint32_t rb_size;

    ZX_DEBUG_ASSERT(channel_ != nullptr);

    resp.hdr = req.hdr;
    resp.result = ZX_ERR_INTERNAL;

    // We cannot change buffers while we are running, and we cannot create a
    // buffer if our format has not been set yet.
    if (running_ || (bytes_per_frame_ == 0)) {
        LOG(TRACE, "Bad state %s%s while setting buffer.\n",
            running_ ? "(running)" : "",
            bytes_per_frame_ == 0 ? "(not configured)" : "");
        resp.result = ZX_ERR_BAD_STATE;
        goto finished;
    }

    // The request arguments are invalid if any of the following are true...
    //
    // 1) The user's minimum ring buffer size in frames is 0.
    // 2) The user's minimum ring buffer size in bytes is too large to hold in a 32 bit integer.
    // 3) The user wants more notifications per ring than we have BDL entries.
    tmp = static_cast<uint64_t>(req.min_ring_buffer_frames) * bytes_per_frame_;
    if ((req.min_ring_buffer_frames == 0) ||
        (tmp > std::numeric_limits<uint32_t>::max()) ||
        (req.notifications_per_ring > MAX_BDL_LENGTH)) {
        LOG(TRACE, "Invalid client args while setting buffer "
                   "(min frames %u, notif/ring %u)\n",
            req.min_ring_buffer_frames,
            req.notifications_per_ring);
        resp.result = ZX_ERR_INVALID_ARGS;
        goto finished;
    }
    rb_size = static_cast<uint32_t>(tmp);

    // If we have an existing buffer, let go of it now.
    ReleaseRingBufferLocked();

    // Attempt to allocate a VMO for the ring buffer.
    resp.result = zx::vmo::create(rb_size, 0, &ring_buffer_vmo);
    if (resp.result != ZX_OK) {
        LOG(TRACE, "Failed to create %u byte VMO for ring buffer (res %d)\n",
            rb_size, resp.result);
        goto finished;
    }

    // Commit and pin the pages for this VMO so that our HW DMA can access them.
    uint32_t hda_rights;
    hda_rights = (configured_type() == Type::INPUT)
               ? ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE
               : ZX_BTI_PERM_READ;

    resp.result = pinned_ring_buffer_.Pin(ring_buffer_vmo, pci_bti_->initiator(), hda_rights);
    if (resp.result != ZX_OK) {
        LOG(TRACE, "Failed to commit and pin pages for %u bytes in ring buffer VMO (res %d)\n",
            rb_size, resp.result);
        goto finished;
    }

    ZX_DEBUG_ASSERT(pinned_ring_buffer_.region_count() >= 1);
    if (pinned_ring_buffer_.region_count() > MAX_BDL_LENGTH) {
        LOG(ERROR,
            "IntelHDA stream ring buffer is too fragmented (%u regions) to construct a valid BDL\n",
            pinned_ring_buffer_.region_count());
        resp.result = ZX_ERR_INTERNAL;
        goto finished;
    }

    // Create the client's copy of this VMO with some restricted rights.
    //
    // TODO(johngro) : strip the transfer right when we move this handle.
    // Clients have no reason to be allowed to transfer the VMO to anyone else.
    //
    // TODO(johngro) : clients should not be able to change the size of the VMO,
    // but giving them the WRITE property (needed for them to be able to map the
    // VMO for write) also gives them permission to change the size of the VMO.
    resp.result = ring_buffer_vmo.duplicate(
            ZX_RIGHT_TRANSFER |
            ZX_RIGHT_MAP |
            ZX_RIGHT_READ |
            (configured_type() == Type::OUTPUT ? ZX_RIGHT_WRITE : 0),
            &client_rb_handle);

    if (resp.result != ZX_OK) {
        LOG(TRACE, "Failed to duplicate ring buffer VMO handle! (res %d)\n", resp.result);
        goto finished;
    }

    // Program the buffer descriptor list. Mark BDL entries as needed to
    // generate interrupts with the frequency requested by the user.
    uint32_t nominal_irq_spacing;
    nominal_irq_spacing = req.notifications_per_ring
                        ? (rb_size + req.notifications_per_ring - 1) /
                          req.notifications_per_ring
                        : 0;
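    // For example (values are illustrative only): a 48000 byte ring with
    // notifications_per_ring = 4 yields nominal_irq_spacing =
    // (48000 + 3) / 4 = 12000 bytes, roughly one IRQ per quarter of the ring.
    // The division rounds up, so the spacing errs on the side of fewer, not
    // more, interrupts than the client requested.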

    uint32_t next_irq_pos;
    uint32_t amt_done;
    uint32_t region_num, region_offset;
    uint32_t entry;
    uint32_t irqs_inserted;

    next_irq_pos = nominal_irq_spacing;
    amt_done = 0;
    region_num = 0;
    region_offset = 0;
    irqs_inserted = 0;

    for (entry = 0; (entry < MAX_BDL_LENGTH) && (amt_done < rb_size); ++entry) {
        const auto& r = pinned_ring_buffer_.region(region_num);

        if (r.size > std::numeric_limits<uint32_t>::max()) {
            LOG(TRACE, "VMO region too large! (%" PRIu64 " bytes)\n", r.size);
            resp.result = ZX_ERR_INTERNAL;
            goto finished;
        }

        ZX_DEBUG_ASSERT(region_offset < r.size);
        uint32_t amt_left = rb_size - amt_done;
        uint32_t region_left = static_cast<uint32_t>(r.size) - region_offset;
        uint32_t todo = fbl::min(amt_left, region_left);

        ZX_DEBUG_ASSERT(region_left >= DMA_ALIGN);
        bdl()[entry].flags = 0;

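        // Decide whether this BDL entry should interrupt on completion. The
        // next IRQ position is rounded up to the DMA alignment boundary so
        // that entry lengths stay 128 byte aligned; e.g. a nominal position of
        // 12000 rounds up to 12032 (94 * 128).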
        if (nominal_irq_spacing) {
            uint32_t ipos = (next_irq_pos + DMA_ALIGN - 1) & ~DMA_ALIGN_MASK;

            if ((amt_done + todo) >= ipos) {
                bdl()[entry].flags = IntelHDABDLEntry::IOC_FLAG;
                next_irq_pos += nominal_irq_spacing;
                ++irqs_inserted;

                if (ipos <= amt_done)
                    todo = fbl::min(todo, DMA_ALIGN);
                else
                    todo = fbl::min(todo, ipos - amt_done);
            }
        }

        ZX_DEBUG_ASSERT(!(todo & DMA_ALIGN_MASK) || (todo == amt_left));

        bdl()[entry].address = r.phys_addr + region_offset;
        bdl()[entry].length = todo;

        ZX_DEBUG_ASSERT(!(bdl()[entry].address & DMA_ALIGN_MASK));

        amt_done += todo;
        region_offset += todo;

        if (region_offset >= r.size) {
            ZX_DEBUG_ASSERT(region_offset == r.size);
            region_offset = 0;
            region_num++;
        }
    }

    ZX_DEBUG_ASSERT(entry > 0);
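    // If alignment rounding caused us to insert fewer IRQs than the client
    // requested, make sure the final BDL entry interrupts so the client still
    // sees at least one position notification per trip around the ring.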
    if (irqs_inserted < req.notifications_per_ring) {
        bdl()[entry - 1].flags = IntelHDABDLEntry::IOC_FLAG;
    }

    if (zxlog_level_enabled(TRACE)) {
        LOG(TRACE, "DMA Scatter/Gather used %u entries for %u/%u bytes of ring buffer\n",
            entry, amt_done, rb_size);
        for (uint32_t i = 0; i < entry; ++i) {
            LOG(TRACE, "[%2u] : %016" PRIx64 " - 0x%04x %sIRQ\n",
                i,
                bdl()[i].address,
                bdl()[i].length,
                bdl()[i].flags ? "" : "NO ");
        }
    }

    if (amt_done < rb_size) {
        ZX_DEBUG_ASSERT(entry == MAX_BDL_LENGTH);
        LOG(TRACE, "Ran out of BDL entries after %u/%u bytes of ring buffer\n",
            amt_done, rb_size);
        resp.result = ZX_ERR_INTERNAL;
        goto finished;
    }

    // TODO(johngro) : Force writeback of the cache to make sure that the BDL
    // has hit physical memory?

    // Record the cyclic buffer length and the BDL last valid index.
    ZX_DEBUG_ASSERT(entry > 0);
    cyclic_buffer_length_ = rb_size;
    bdl_last_valid_index_ = static_cast<uint16_t>(entry - 1);

    ZX_DEBUG_ASSERT((rb_size % bytes_per_frame_) == 0);
    resp.num_ring_buffer_frames = rb_size / bytes_per_frame_;

finished:
    if (resp.result == ZX_OK) {
        // Success. DMA is set up and ready to go.
        return channel_->Write(&resp, sizeof(resp), std::move(client_rb_handle));
    } else {
        ReleaseRingBufferLocked();
        return channel_->Write(&resp, sizeof(resp));
    }
}

zx_status_t IntelHDAStream::ProcessStartLocked(const audio_proto::RingBufStartReq& req) {
    audio_proto::RingBufStartResp resp = { };
    uint32_t ctl_val;
    const auto bdl_phys = bdl_hda_mem_.region(0).phys_addr;

    resp.hdr = req.hdr;
    resp.result = ZX_OK;

    // We cannot start unless we have configured the ring buffer and are not already started.
    bool ring_buffer_valid = pinned_ring_buffer_.region_count() >= 1;
    if (!ring_buffer_valid || running_) {
        LOG(TRACE, "Bad state during start request %s%s.\n",
            !ring_buffer_valid ? "(ring buffer not configured)" : "",
            running_ ? "(already running)" : "");
        resp.result = ZX_ERR_BAD_STATE;
        goto finished;
    }

    // Make sure that the stream DMA channel has been fully reset.
    Reset();

    // Now program all of the relevant registers before beginning operation.
    // Program the cyclic buffer length and the BDL last valid index.
    ZX_DEBUG_ASSERT((configured_type_ == Type::INPUT) || (configured_type_ == Type::OUTPUT));
    ctl_val = HDA_SD_REG_CTRL_STRM_TAG(tag_)
            | HDA_SD_REG_CTRL_STRIPE1
            | (configured_type_ == Type::INPUT ? HDA_SD_REG_CTRL_DIR_IN
                                               : HDA_SD_REG_CTRL_DIR_OUT);
    REG_WR(&regs_->ctl_sts.w, ctl_val);
    REG_WR(&regs_->fmt, encoded_fmt_);
    // The 64-bit BDL physical address is split across the lower/upper 32-bit registers.
    REG_WR(&regs_->bdpl, static_cast<uint32_t>(bdl_phys & 0xFFFFFFFFu));
    REG_WR(&regs_->bdpu, static_cast<uint32_t>((bdl_phys >> 32) & 0xFFFFFFFFu));
    REG_WR(&regs_->cbl, cyclic_buffer_length_);
    REG_WR(&regs_->lvi, bdl_last_valid_index_);
    hw_wmb();

    // Make a copy of our reference to our channel which can be used by the IRQ
    // thread to deliver notifications to the application.
    {
        fbl::AutoLock notif_lock(&notif_lock_);
        ZX_DEBUG_ASSERT(irq_channel_ == nullptr);
        irq_channel_ = channel_;

        // Set the RUN bit in our control register. Mark the time that we did
        // so. Do this from within the notification lock so that there is no
        // chance of us fighting with the IRQ thread over the ctl/sts register.
        // After this point in time, we may not write to the ctl/sts register
        // unless we have nerfed IRQ thread callbacks by clearing irq_channel_
        // from within the notif_lock_.
        //
        // TODO(johngro) : Do a better job of estimating when the first frame gets
        // clocked out. For outputs, using the SSYNC register to hold off the
        // stream until the DMA has filled the FIFO could help. There may also be a
        // way to use the WALLCLK register to determine exactly when the next HDA
        // frame will begin transmission. Compensating for the external codec FIFO
        // delay would be a good idea as well.
        //
        // For now, we just assume that transmission starts "very soon" after we
        // whack the bit.
        constexpr uint32_t SET = HDA_SD_REG_CTRL_RUN |
                                 HDA_SD_REG_CTRL_IOCE |
                                 HDA_SD_REG_CTRL_FEIE |
                                 HDA_SD_REG_CTRL_DEIE |
                                 HDA_SD_REG_STS32_ACK;
        REG_SET_BITS(&regs_->ctl_sts.w, SET);
        hw_wmb();
        resp.start_time = zx_clock_get_monotonic();
    }

    // Success, we are now running.
    running_ = true;

finished:
    return channel_->Write(&resp, sizeof(resp));
}

zx_status_t IntelHDAStream::ProcessStopLocked(const audio_proto::RingBufStopReq& req) {
    audio_proto::RingBufStopResp resp = { };
    resp.hdr = req.hdr;

    if (running_) {
        // Start by preventing the IRQ thread from processing status interrupts.
        // After we have done this, it should be safe to manipulate the ctl/sts
        // register.
        {
            fbl::AutoLock notif_lock(&notif_lock_);
            ZX_DEBUG_ASSERT(irq_channel_ != nullptr);
            irq_channel_ = nullptr;
        }

        // Make sure that we have been stopped and that all interrupts have been acked.
        EnsureStoppedLocked();
        running_ = false;
        resp.result = ZX_OK;
    } else {
        resp.result = ZX_ERR_BAD_STATE;
    }

    return channel_->Write(&resp, sizeof(resp));
}

void IntelHDAStream::ReleaseRingBufferLocked() {
    pinned_ring_buffer_.Unpin();
    memset(bdl_cpu_mem_.start(), 0, bdl_cpu_mem_.size());
}

}  // namespace intel_hda
}  // namespace audio