/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23 #include "qmgr.h"
24
25 struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr * qmgr)26 nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
27 {
28 const struct nvkm_subdev *subdev = qmgr->falcon->owner;
29 struct nvkm_falcon_qmgr_seq *seq;
30 u32 index;
31
32 mutex_lock(&qmgr->seq.mutex);
33 index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
34 if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
35 nvkm_error(subdev, "no free sequence available\n");
36 mutex_unlock(&qmgr->seq.mutex);
37 return ERR_PTR(-EAGAIN);
38 }
39
40 set_bit(index, qmgr->seq.tbl);
41 mutex_unlock(&qmgr->seq.mutex);
42
43 seq = &qmgr->seq.id[index];
44 seq->state = SEQ_STATE_PENDING;
45 return seq;
46 }
47
48 void
nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr * qmgr,struct nvkm_falcon_qmgr_seq * seq)49 nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
50 struct nvkm_falcon_qmgr_seq *seq)
51 {
52 /* no need to acquire seq.mutex since clear_bit is atomic */
53 seq->state = SEQ_STATE_FREE;
54 seq->callback = NULL;
55 reinit_completion(&seq->done);
56 clear_bit(seq->id, qmgr->seq.tbl);
57 }
58
59 void
nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr ** pqmgr)60 nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
61 {
62 struct nvkm_falcon_qmgr *qmgr = *pqmgr;
63 if (qmgr) {
64 kfree(*pqmgr);
65 *pqmgr = NULL;
66 }
67 }
68
69 int
nvkm_falcon_qmgr_new(struct nvkm_falcon * falcon,struct nvkm_falcon_qmgr ** pqmgr)70 nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
71 struct nvkm_falcon_qmgr **pqmgr)
72 {
73 struct nvkm_falcon_qmgr *qmgr;
74 int i;
75
76 if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
77 return -ENOMEM;
78
79 qmgr->falcon = falcon;
80 mutex_init(&qmgr->seq.mutex);
81 for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
82 qmgr->seq.id[i].id = i;
83 init_completion(&qmgr->seq.id[i].done);
84 }
85
86 return 0;
87 }
88