Lines Matching refs:ctrlr
(Cross-reference hits for the ctrlr symbol in the Linux kernel's drivers/soc/qcom/rpmh.c. Each hit gives the source line number, the code on that line, the enclosing function, and whether ctrlr is bound there as an argument or a local variable.)

41 #define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)  argument
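
The ctrlr_to_drv() macro above is the kernel's container_of() idiom: struct rpmh_ctrlr is embedded as the client member of struct rsc_drv, so a pointer to the member can be walked back to the enclosing driver. A minimal userspace sketch of the idiom follows; the struct fields here are simplified stand-ins, and only the embedding relationship mirrors the driver.

#include <stddef.h>
#include <stdio.h>

/* Userspace restatement of the kernel's container_of(): subtract the
 * member's offset from the member pointer to recover the parent struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rpmh_ctrlr { int dirty; };               /* simplified stand-in */
struct rsc_drv {
        const char *name;                       /* simplified stand-in */
        struct rpmh_ctrlr client;               /* embedded, as in the driver */
};

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

int main(void)
{
        struct rsc_drv drv = { .name = "apps_rsc" };
        struct rpmh_ctrlr *ctrlr = &drv.client;

        /* Recover the rsc_drv that embeds this controller. */
        printf("%s\n", ctrlr_to_drv(ctrlr)->name);
        return 0;
}

Every ctrlr_to_drv() hit below uses this to reach the rsc_drv that owns the controller before calling into the rpmh-rsc layer.
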
97 static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr) in __find_req() argument
101 list_for_each_entry(p, &ctrlr->cache, list) { in __find_req()
111 static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, in cache_rpm_request() argument
119 spin_lock_irqsave(&ctrlr->cache_lock, flags); in cache_rpm_request()
120 req = __find_req(ctrlr, cmd->addr); in cache_rpm_request()
132 list_add_tail(&req->list, &ctrlr->cache); in cache_rpm_request()
148 ctrlr->dirty |= (req->sleep_val != old_sleep_val || in cache_rpm_request()
154 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in cache_rpm_request()
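
cache_rpm_request() is a find-or-insert over the controller's request cache done under cache_lock, and ctrlr->dirty is raised only when a cached sleep or wake value actually changes; that is what later lets rpmh_flush() skip controllers whose TCSes already hold the latest data. A userspace sketch of the pattern, with a pthread mutex standing in for spin_lock_irqsave(), a plain singly linked list standing in for list_head (the kernel appends with list_add_tail()), and the kernel's UINT_MAX "value not set yet" sentinel omitted:

#include <pthread.h>
#include <stdlib.h>

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE, RPMH_ACTIVE_ONLY_STATE };

struct cache_req {
        unsigned int addr;
        unsigned int sleep_val, wake_val;
        struct cache_req *next;
};

struct rpmh_ctrlr {
        struct cache_req *cache;          /* kernel: list_head */
        pthread_mutex_t cache_lock;       /* kernel: spinlock, taken irqsave */
        int dirty;
};

/* Mirror of __find_req(): linear scan of the cache by address. */
static struct cache_req *find_req(struct rpmh_ctrlr *ctrlr, unsigned int addr)
{
        struct cache_req *p;

        for (p = ctrlr->cache; p; p = p->next)
                if (p->addr == addr)
                        return p;
        return NULL;
}

/* Find-or-insert under the lock; the controller becomes dirty only when a
 * cached value really changed. The kernel returns ERR_PTR(-ENOMEM) on
 * allocation failure; this sketch returns NULL. */
struct cache_req *cache_request(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
                                unsigned int addr, unsigned int data)
{
        unsigned int old_sleep, old_wake;
        struct cache_req *req;

        pthread_mutex_lock(&ctrlr->cache_lock);
        req = find_req(ctrlr, addr);
        if (!req) {
                req = calloc(1, sizeof(*req));
                if (!req)
                        goto unlock;
                req->addr = addr;
                req->next = ctrlr->cache;     /* prepend; the kernel appends */
                ctrlr->cache = req;
        }

        old_sleep = req->sleep_val;
        old_wake = req->wake_val;
        if (state == RPMH_SLEEP_STATE)
                req->sleep_val = data;
        else    /* WAKE and ACTIVE both update the wake value */
                req->wake_val = data;

        ctrlr->dirty |= (req->sleep_val != old_sleep ||
                         req->wake_val != old_wake);
unlock:
        pthread_mutex_unlock(&ctrlr->cache_lock);
        return req;
}
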
173 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); in __rpmh_write() local
180 req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]); in __rpmh_write()
187 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); in __rpmh_write()
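
__rpmh_write() caches every command in the message first and touches the hardware only for RPMH_ACTIVE_ONLY_STATE; sleep and wake data stays cached until rpmh_flush(). A sketch of that dispatch, where cache_cmd() and send_now() are hypothetical stubs standing in for cache_rpm_request() and rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), msg):

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE, RPMH_ACTIVE_ONLY_STATE };
struct tcs_cmd { unsigned int addr, data; };
struct rpmh_ctrlr { int dirty; };               /* simplified */

/* Tiny stubs standing in for cache_rpm_request() and
 * rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), msg). */
static int cache_cmd(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
                     const struct tcs_cmd *cmd)
{
        (void)state; (void)cmd;
        ctrlr->dirty = 1;
        return 0;
}

static int send_now(struct rpmh_ctrlr *ctrlr, const struct tcs_cmd *cmds, int n)
{
        (void)ctrlr; (void)cmds; (void)n;
        return 0;
}

/* Sketch of __rpmh_write(): cache every command first; only an
 * active-state message is pushed to the hardware immediately, while
 * sleep/wake data waits in the cache for rpmh_flush(). */
int write_msg(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
              const struct tcs_cmd *cmds, int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = cache_cmd(ctrlr, state, &cmds[i]);
                if (ret)
                        return ret;
        }

        if (state == RPMH_ACTIVE_ONLY_STATE)
                return send_now(ctrlr, cmds, n);

        return 0;
}
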
275 static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) in cache_batch() argument
279 spin_lock_irqsave(&ctrlr->cache_lock, flags); in cache_batch()
280 list_add_tail(&req->list, &ctrlr->batch_cache); in cache_batch()
281 ctrlr->dirty = true; in cache_batch()
282 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in cache_batch()
285 static int flush_batch(struct rpmh_ctrlr *ctrlr) in flush_batch() argument
293 list_for_each_entry(req, &ctrlr->batch_cache, list) { in flush_batch()
296 ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), in flush_batch()
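
The batch path mirrors the single-command path. cache_batch() links a whole batch into ctrlr->batch_cache under cache_lock and unconditionally marks the controller dirty; flush_batch() later replays every cached batch into the sleep/wake TCSes, stopping at the first failure. A sketch of both, under the same userspace substitutions as above (mutex for spinlock, singly linked list for list_head, a stub for rpmh_rsc_write_ctrl_data()):

#include <pthread.h>

/* Simplified batch entry; the kernel's batch_cache_req carries the rpmh
 * messages and is linked through a list_head. */
struct batch_cache_req {
        struct batch_cache_req *next;
};

struct rpmh_ctrlr {
        struct batch_cache_req *batch_cache;
        pthread_mutex_t cache_lock;     /* spin_lock_irqsave() in the kernel */
        int dirty;
};

/* Stub standing in for rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), msg). */
static int write_ctrl_data(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
        (void)ctrlr; (void)req;
        return 0;
}

/* cache_batch(): link the batch in under the lock and unconditionally
 * mark the controller dirty, so the next rpmh_flush() reprograms the
 * sleep/wake TCSes. */
void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
        pthread_mutex_lock(&ctrlr->cache_lock);
        req->next = ctrlr->batch_cache; /* the kernel appends with list_add_tail() */
        ctrlr->batch_cache = req;
        ctrlr->dirty = 1;
        pthread_mutex_unlock(&ctrlr->cache_lock);
}

/* flush_batch(): replay every cached batch into the sleep/wake TCSes,
 * stopping at the first failure; rpmh_flush() calls this with
 * cache_lock already held. */
int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req;
        int ret;

        for (req = ctrlr->batch_cache; req; req = req->next) {
                ret = write_ctrl_data(ctrlr, req);
                if (ret)
                        return ret;
        }
        return 0;
}

rpmh_write_batch() is the producer for this path: as the hits at lines 361 and 370 show, sleep and wake batches are handed to cache_batch(), while active-state batches bypass the cache and go straight out via rpmh_rsc_send_data().
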
329 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); in rpmh_write_batch() local
361 cache_batch(ctrlr, req); in rpmh_write_batch()
370 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg); in rpmh_write_batch()
407 static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state, in send_single() argument
418 return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg); in send_single()
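
send_single() packs one addr/data pair into a stack-allocated single-command message and writes it into a sleep or wake TCS. Sketched below, with write_tcs() a hypothetical stub for rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg):

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE };
struct tcs_cmd { unsigned int addr, data; };
struct rpmh_ctrlr { int dirty; };       /* simplified */

/* Stub standing in for rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &msg). */
static int write_tcs(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
                     const struct tcs_cmd *cmds, int n)
{
        (void)ctrlr; (void)state; (void)cmds; (void)n;
        return 0;
}

/* Sketch of send_single(): wrap one addr/data pair in a stack-allocated
 * message and write it into the sleep or wake TCS. */
int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
                unsigned int addr, unsigned int data)
{
        struct tcs_cmd cmd = { .addr = addr, .data = data };

        return write_tcs(ctrlr, state, &cmd, 1);
}
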
430 int rpmh_flush(struct rpmh_ctrlr *ctrlr) in rpmh_flush() argument
442 if (!spin_trylock(&ctrlr->cache_lock)) in rpmh_flush()
445 if (!ctrlr->dirty) { in rpmh_flush()
451 rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); in rpmh_flush()
454 ret = flush_batch(ctrlr); in rpmh_flush()
458 list_for_each_entry(p, &ctrlr->cache, list) { in rpmh_flush()
464 ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, in rpmh_flush()
468 ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, in rpmh_flush()
474 ctrlr->dirty = false; in rpmh_flush()
477 rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr)); in rpmh_flush()
479 spin_unlock(&ctrlr->cache_lock); in rpmh_flush()
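
rpmh_flush() ties the pieces together, and its ordering is visible in the hits above: trylock (it runs on the CPU power-down path, so spinning or sleeping is not an option and contention becomes -EBUSY), bail out early if nothing is dirty, invalidate the TCSes, replay the batches, write one sleep and one wake entry per cached address, clear dirty, and program the next wakeup whether or not a flush happened. A sketch with stub helpers standing in for the rpmh_rsc_* calls; the kernel's is_req_valid() check, which skips entries whose sleep or wake value was never set, is omitted:

#include <errno.h>
#include <pthread.h>

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE };

struct cache_req {
        unsigned int addr, sleep_val, wake_val;
        struct cache_req *next;
};

struct rpmh_ctrlr {
        struct cache_req *cache;
        pthread_mutex_t cache_lock;
        int dirty;
};

/* Stubs standing in for rpmh_rsc_invalidate(), flush_batch(),
 * send_single() and rpmh_rsc_write_next_wakeup(). */
static void invalidate_tcs(struct rpmh_ctrlr *c) { (void)c; }
static int flush_batch(struct rpmh_ctrlr *c) { (void)c; return 0; }
static int send_single(struct rpmh_ctrlr *c, enum rpmh_state s,
                       unsigned int addr, unsigned int data)
{ (void)c; (void)s; (void)addr; (void)data; return 0; }
static void arm_next_wakeup(struct rpmh_ctrlr *c) { (void)c; }

/* Sketch of rpmh_flush(). It must not spin on the power-down path:
 * trylock, and return -EBUSY so the caller can abort low-power entry. */
int flush(struct rpmh_ctrlr *ctrlr)
{
        struct cache_req *p;
        int ret = 0;

        if (pthread_mutex_trylock(&ctrlr->cache_lock))
                return -EBUSY;

        if (!ctrlr->dirty)              /* TCSes already hold the latest data */
                goto next_wakeup;

        invalidate_tcs(ctrlr);          /* clear stale TCS contents first */

        ret = flush_batch(ctrlr);       /* cached batches go in before singles */
        if (ret)
                goto exit;

        /* One sleep write and one wake write per cached address. */
        for (p = ctrlr->cache; p; p = p->next) {
                ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
                if (ret)
                        goto exit;
                ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, p->wake_val);
                if (ret)
                        goto exit;
        }

        ctrlr->dirty = 0;

next_wakeup:
        arm_next_wakeup(ctrlr);         /* program the next wakeup, dirty or not */
exit:
        pthread_mutex_unlock(&ctrlr->cache_lock);
        return ret;
}
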
492 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); in rpmh_invalidate() local
496 spin_lock_irqsave(&ctrlr->cache_lock, flags); in rpmh_invalidate()
497 list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) in rpmh_invalidate()
499 INIT_LIST_HEAD(&ctrlr->batch_cache); in rpmh_invalidate()
500 ctrlr->dirty = true; in rpmh_invalidate()
501 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in rpmh_invalidate()
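
rpmh_invalidate() empties the batch cache: under cache_lock it frees every batch_cache_req (the kernel walks with list_for_each_entry_safe() precisely because entries are freed mid-walk), resets the list head with INIT_LIST_HEAD(), and marks the controller dirty so the next flush rebuilds the TCSes. A userspace sketch:

#include <pthread.h>
#include <stdlib.h>

struct batch_cache_req {
        struct batch_cache_req *next;
};

struct rpmh_ctrlr {
        struct batch_cache_req *batch_cache;
        pthread_mutex_t cache_lock;
        int dirty;
};

/* Sketch of rpmh_invalidate(): free every cached batch under the lock,
 * reset the list, and force the next flush to reprogram the TCSes. */
void invalidate(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req, *tmp;

        pthread_mutex_lock(&ctrlr->cache_lock);
        for (req = ctrlr->batch_cache; req; req = tmp) {
                tmp = req->next;        /* save the successor before freeing */
                free(req);
        }
        ctrlr->batch_cache = NULL;      /* kernel: INIT_LIST_HEAD() */
        ctrlr->dirty = 1;
        pthread_mutex_unlock(&ctrlr->cache_lock);
}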