/linux/drivers/mmc/core/

queue.c
      34  if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))  in mmc_cqe_check_busy()
     143  mmc_get_card(mq->card, &mq->ctx);  in mmc_mq_recovery_handler()
     161  mmc_put_card(mq->card, &mq->ctx);  in mmc_mq_recovery_handler()
     251  if (mq->recovery_needed || mq->busy) {  in mmc_mq_queue_rq()
     283  mq->busy = true;  in mmc_mq_queue_rq()
     327  mq->busy = false;  in mmc_mq_queue_rq()
     384  disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);  in mmc_alloc_disk()
     428  mq->card = card;  in mmc_init_queue()
     432  memset(&mq->tag_set, 0, sizeof(mq->tag_set));  in mmc_init_queue()
     447  mq->tag_set.driver_data = mq;  in mmc_init_queue()
        [all …]

block.c
     306  mq = &md->queue;  in power_ro_lock_store()
     729  mq = &md->queue;  in mmc_blk_ioctl_cmd()
     802  mq = &md->queue;  in mmc_blk_ioctl_multi_cmd()
    1595  mmc_put_card(mq->card, &mq->ctx);  in mmc_blk_cqe_complete_rq()
    1674  mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);  in mmc_blk_hsq_issue_rw_rq()
    2207  mmc_put_card(mq->card, &mq->ctx);  in mmc_blk_mq_dec_in_flight()
    2266  mmc_blk_mq_poll_completion(mq, mq->complete_req);  in mmc_blk_mq_complete_prev_req()
    2271  mmc_blk_mq_post_req(mq, mq->complete_req, true);  in mmc_blk_mq_complete_prev_req()
    2321  queue_work(mq->card->complete_wq, &mq->complete_work);  in mmc_blk_mq_req_done()
    2373  wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));  in mmc_blk_rw_wait()
        [all …]

queue.h
      97  struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
     105  void mmc_cqe_check_busy(struct mmc_queue *mq);
     108  enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
     110  static inline int mmc_tot_in_flight(struct mmc_queue *mq)  in mmc_tot_in_flight() argument
     112  return mq->in_flight[MMC_ISSUE_SYNC] +  in mmc_tot_in_flight()
     113  mq->in_flight[MMC_ISSUE_DCMD] +  in mmc_tot_in_flight()
     114  mq->in_flight[MMC_ISSUE_ASYNC];  in mmc_tot_in_flight()
     117  static inline int mmc_cqe_qcnt(struct mmc_queue *mq)  in mmc_cqe_qcnt() argument
     119  return mq->in_flight[MMC_ISSUE_DCMD] +  in mmc_cqe_qcnt()
     120  mq->in_flight[MMC_ISSUE_ASYNC];  in mmc_cqe_qcnt()

block.h
       8  void mmc_blk_cqe_recovery(struct mmc_queue *mq);
      12  enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
      14  void mmc_blk_mq_recovery(struct mmc_queue *mq);

/linux/drivers/md/

dm-cache-policy-smq.c
     961  q_del(e->dirty ? &mq->dirty : &mq->clean, e);  in del_queue()
    1191  e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);  in queue_writeback()
    1371  kfree(mq);  in smq_destroy()
    1740  if (!mq)  in __smq_create()
    1750  mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);  in __smq_create()
    1768  init_allocator(&mq->cache_alloc, &mq->es,  in __smq_create()
    1792  q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);  in __smq_create()
    1797  q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);  in __smq_create()
    1798  q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);  in __smq_create()
    1806  if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))  in __smq_create()
        [all …]

/linux/drivers/scsi/arm/

msgqueue.c
      29  return mq;  in mqe_alloc()
      40  if (mq) {  in mqe_free()
      42  msgq->free = mq;  in mqe_free()
      85  for (mq = msgq->qe; mq; mq = mq->next)  in msgqueue_msglength()
     102  for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--);  in msgqueue_getmsg()
     104  return mq ? &mq->msg : NULL;  in msgqueue_getmsg()
     120  if (mq) {  in msgqueue_addmsg()
     130  mq->msg.fifo = 0;  in msgqueue_addmsg()
     131  mq->next = NULL;  in msgqueue_addmsg()
     137  *mqp = mq;  in msgqueue_addmsg()
        [all …]

/linux/drivers/sh/maple/

maple.c
     143  mq = mdev->mq;  in maple_release_device()
     145  kfree(mq);  in maple_release_device()
     190  mq = kzalloc(sizeof(*mq), GFP_KERNEL);  in maple_allocq()
     191  if (!mq)  in maple_allocq()
     195  mq->dev = mdev;  in maple_allocq()
     197  if (!mq->recvbuf)  in maple_allocq()
     199  mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);  in maple_allocq()
     201  return mq;  in maple_allocq()
     204  kfree(mq);  in maple_allocq()
     227  if (!mdev->mq) {  in maple_alloc_dev()
        [all …]

/linux/drivers/misc/sgi-gru/

grukservices.c
     551  mq->start = &mq->data;  in gru_create_message_queue()
     552  mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;  in gru_create_message_queue()
     553  mq->next = &mq->data;  in gru_create_message_queue()
     554  mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;  in gru_create_message_queue()
     559  mqd->mq = mq;  in gru_create_message_queue()
     837  struct message_queue *mq = mqd->mq;  in gru_free_message() local
     847  pnext = mq->next;  in gru_free_message()
     852  } else if (pnext < mq->start2 && next >= mq->start2) {  in gru_free_message()
     858  mq->next = next;  in gru_free_message()
     869  struct message_queue *mq = mqd->mq;  in gru_get_next_message() local
        [all …]

/linux/block/

Makefile
       9  blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
      10  blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
      24  obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
      30  obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
      31  obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
      34  obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o

/linux/drivers/scsi/elx/efct/

efct_hw_queues.c
      52  if (!mq) {  in efct_hw_init_queues()
     247  struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);  in efct_hw_new_mq() local
     249  if (!mq)  in efct_hw_new_mq()
     257  mq->queue = &hw->mq[mq->instance];  in efct_hw_new_mq()
     262  kfree(mq);  in efct_hw_new_mq()
     266  hw->hw_mq[mq->instance] = mq;  in efct_hw_new_mq()
     270  mq->queue->id, mq->entry_count);  in efct_hw_new_mq()
     271  return mq;  in efct_hw_new_mq()
     435  if (!mq)  in efct_hw_del_mq()
     439  mq->cq->eq->hw->hw_mq[mq->instance] = NULL;  in efct_hw_del_mq()
        [all …]

/linux/drivers/misc/sgi-xp/

xpc_uv.c
      94  mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,  in xpc_get_gru_mq_irq_uv()
      96  if (mq->irq < 0)  in xpc_get_gru_mq_irq_uv()
      97  return mq->irq;  in xpc_get_gru_mq_irq_uv()
      99  mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);  in xpc_get_gru_mq_irq_uv()
     116  mq->order, &mq->mmr_offset);  in xpc_gru_mq_watchlist_alloc_uv()
     151  if (mq == NULL) {  in xpc_create_gru_mq_uv()
     197  mq->irq, -ret);  in xpc_create_gru_mq_uv()
     204  ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,  in xpc_create_gru_mq_uv()
     220  return mq;  in xpc_create_gru_mq_uv()
     234  kfree(mq);  in xpc_create_gru_mq_uv()
        [all …]

/linux/fs/bcachefs/

quota.c
     222  struct bch_memquota *mq,  in bch2_quota_check_limit() argument
     291  if (!mq[i])  in bch2_quota_acct()
     305  mq[i]->c[counter].v += v;  in bch2_quota_acct()
     384  struct bch_memquota *mq;  in __bch2_quota_set() local
     399  if (!mq) {  in __bch2_quota_set()
     781  struct bch_memquota *mq;  in bch2_get_quota() local
     787  if (mq)  in bch2_get_quota()
     788  __bch2_quota_get(qdq, mq);  in bch2_get_quota()
     801  struct bch_memquota *mq;  in bch2_get_next_quota() local
     807  if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {  in bch2_get_next_quota()
        [all …]

/linux/Documentation/admin-guide/device-mapper/

cache-policies.rst
      29  multiqueue (mq)
      48  with the multiqueue (mq) policy.
      50  The smq policy (vs mq) offers the promise of less memory utilization,
      54  Users may switch from "mq" to "smq" simply by appropriately reloading a
      56  mq policy's hints to be dropped. Also, performance of the cache may
      63  The mq policy used a lot of memory; 88 bytes per cache block on a 64
      78  mq placed entries in different levels of the multiqueue structures
      91  The mq policy maintained a hit count for each cache block. For a
     105  Testing smq shows substantially better performance than mq.
     129  /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"

/linux/drivers/block/

rnull.rs
      15  block::mq::{
      61  fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {  in queue_rq()
      62  mq::Request::end_ok(rq)  in queue_rq()

/linux/include/linux/

maple.h
      69  struct mapleq *mq;  member
      70  void (*callback) (struct mapleq * mq);
      89  void (*callback) (struct mapleq * mq),

/linux/drivers/mtd/maps/

vmu-flash.c
      89  static void vmu_blockread(struct mapleq *mq)  in vmu_blockread() argument
      94  mdev = mq->dev;  in vmu_blockread()
     101  memcpy(card->blockread, mq->recvbuf->buf + 12,  in vmu_blockread()
     191  list_del_init(&(mdev->mq->list));  in maple_vmu_read_block()
     192  kfree(mdev->mq->sendbuf);  in maple_vmu_read_block()
     193  mdev->mq->sendbuf = NULL;  in maple_vmu_read_block()
     283  kfree(mdev->mq->sendbuf);  in maple_vmu_write_block()
     284  mdev->mq->sendbuf = NULL;  in maple_vmu_write_block()
     285  list_del_init(&(mdev->mq->list));  in maple_vmu_write_block()
     511  mdev = mq->dev;  in vmu_queryblocks()
        [all …]

/linux/drivers/net/ethernet/netronome/nfp/abm/

qdisc.c
     304  struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);  in nfp_abm_qdisc_clear_mq() local
     307  if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)  in nfp_abm_qdisc_clear_mq()
     309  for (i = 0; i < mq->num_children; i++)  in nfp_abm_qdisc_clear_mq()
     310  if (mq->children[i] == qdisc) {  in nfp_abm_qdisc_clear_mq()
     311  mq->children[i] = NULL;  in nfp_abm_qdisc_clear_mq()
     791  memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));  in nfp_abm_mq_stats()
     792  memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));  in nfp_abm_mq_stats()
     803  nfp_abm_stats_propagate(&qdisc->mq.stats,  in nfp_abm_mq_stats()
     805  nfp_abm_stats_propagate(&qdisc->mq.prev_stats,  in nfp_abm_mq_stats()
     810  nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,  in nfp_abm_mq_stats()

/linux/Documentation/block/

switching-sched.rst
      17  the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
      32  [mq-deadline] kyber bfq none
      35  [none] mq-deadline kyber bfq

blk-mq.rst
       4  Multi-Queue Block IO Queueing Mechanism (blk-mq)
      36  to different CPUs) wanted to perform block IO. Instead of this, the blk-mq API
      45  for instance), blk-mq takes action: it will store and manage IO requests to
      49  blk-mq has two groups of queues: software staging queues and hardware dispatch
      59  resources to accept more requests, blk-mq will place requests on a temporary
     142  … Block IO: Introducing Multi-queue SSD Access on Multi-core Systems <http://kernel.dk/blk-mq.pdf>`_
     151  .. kernel-doc:: include/linux/blk-mq.h
     153  .. kernel-doc:: block/blk-mq.c

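The blk-mq.rst hits above describe blk-mq's split between software staging
queues and hardware dispatch queues, with queue_rq() as the hook a driver
implements to accept requests. As a rough illustration only, here is a minimal,
hypothetical C skeleton of that API; the sketch_* names are invented, and the
mmc queue.c and rnull.rs hits above show the same pattern in real drivers.

    /* Hypothetical minimal blk-mq skeleton -- a sketch, not code taken from
     * any of the files listed here. */
    #include <linux/blk-mq.h>
    #include <linux/numa.h>
    #include <linux/string.h>

    /* Called for each request handed down from a software staging queue. */
    static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            blk_mq_start_request(rq);
            /* A real driver would submit the request to hardware here and
             * complete it later from its completion path. */
            blk_mq_end_request(rq, BLK_STS_OK);
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops sketch_mq_ops = {
            .queue_rq = sketch_queue_rq,
    };

    /* Describe the hardware dispatch queues to the block layer. */
    static int sketch_init_tag_set(struct blk_mq_tag_set *set, void *drv)
    {
            memset(set, 0, sizeof(*set));
            set->ops = &sketch_mq_ops;
            set->nr_hw_queues = 1;          /* one hardware dispatch queue */
            set->queue_depth = 64;
            set->numa_node = NUMA_NO_NODE;
            set->driver_data = drv;
            return blk_mq_alloc_tag_set(set);
    }

After a successful blk_mq_alloc_tag_set(), a driver attaches a gendisk, as in
the blk_mq_alloc_disk() call visible in the mmc queue.c hit at line 384.
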
/linux/arch/arm/boot/dts/allwinner/

sun8i-t113s-mangopi-mq-r-t113.dts
       9  #include "sunxi-d1s-t113-mangopi-mq-r.dtsi"
      13  compatible = "widora,mangopi-mq-r-t113", "allwinner,sun8i-t113s";

/linux/Documentation/devicetree/bindings/riscv/

sunxi.yaml
      59  - const: widora,mangopi-mq
      64  - const: widora,mangopi-mq-pro
      69  - const: widora,mangopi-mq-r-f133

/linux/arch/riscv/boot/dts/allwinner/

Makefile
       9  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-mangopi-mq-pro.dtb
      11  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1s-mangopi-mq.dtb

/linux/drivers/input/mouse/

maplemouse.c
      27  static void dc_mouse_callback(struct mapleq *mq)  in dc_mouse_callback() argument
      30  struct maple_device *mapledev = mq->dev;  in dc_mouse_callback()
      33  unsigned char *res = mq->recvbuf->buf;  in dc_mouse_callback()

/linux/Documentation/devicetree/bindings/powerpc/4xx/

ppc440spe-adma.txt
      82  - compatible : "ibm,mq-440spe";
      87  MQ0: mq {
      88  compatible = "ibm,mq-440spe";

/linux/drivers/net/wireless/intel/iwlwifi/dvm/

main.c
    2031  int mq = priv->queue_to_mac80211[queue];  in iwl_stop_sw_queue() local
    2039  queue, mq);  in iwl_stop_sw_queue()
    2043  set_bit(mq, &priv->transport_queue_stop);  in iwl_stop_sw_queue()
    2044  ieee80211_stop_queue(priv->hw, mq);  in iwl_stop_sw_queue()
    2050  int mq = priv->queue_to_mac80211[queue];  in iwl_wake_sw_queue() local
    2058  queue, mq);  in iwl_wake_sw_queue()
    2062  clear_bit(mq, &priv->transport_queue_stop);  in iwl_wake_sw_queue()
    2065  ieee80211_wake_queue(priv->hw, mq);  in iwl_wake_sw_queue()
    2070  int mq;  in iwlagn_lift_passive_no_rx() local
    2075  for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {  in iwlagn_lift_passive_no_rx()
        [all …]