Lines matching refs:ent (struct mlx5_cache_ent, the per-order entry of the mlx5 MR cache)

129 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
147 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback() local
148 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
154 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
155 ent->pending--; in create_mkey_callback()
157 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
169 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
170 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
171 ent->available_mrs++; in create_mkey_callback()
172 ent->total_mrs++; in create_mkey_callback()
174 queue_adjust_cache_locked(ent); in create_mkey_callback()
175 ent->pending--; in create_mkey_callback()
176 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
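
The create_mkey_callback() lines above are the completion side of asynchronous mkey creation: on success the new MR is appended to the entry's free list, available_mrs and total_mrs are bumped, the watermark check reruns, and the pending count taken by add_keys() is released; on error only pending is released. Below is a minimal userspace sketch of that bookkeeping — cache_ent, mr_node and on_create_done are illustrative names, not the driver's, and the real code does all of this under spin_lock_irqsave(&ent->lock, flags).

#include <stdio.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
        struct mr_node *head;           /* free MRs ready to hand out */
        unsigned int available_mrs;     /* MRs sitting on the free list */
        unsigned int total_mrs;         /* free + in-use MRs owned by this entry */
        unsigned int pending;           /* async creations still in flight */
};

/* Completion handler for one asynchronous mkey creation. */
static void on_create_done(struct cache_ent *ent, struct mr_node *mr, int status)
{
        /* the driver holds ent->lock (spin_lock_irqsave) around this block */
        if (status) {
                ent->pending--;         /* failed: only drop the in-flight count */
                return;
        }
        /* success: park the MR on the free list (kernel: list_add_tail) */
        mr->next = ent->head;
        ent->head = mr;
        ent->available_mrs++;
        ent->total_mrs++;
        /* queue_adjust_cache_locked(ent) re-checks the watermarks here */
        ent->pending--;
}

int main(void)
{
        struct cache_ent ent = { .pending = 1 };
        struct mr_node mr = { 0 };

        on_create_done(&ent, &mr, 0);
        printf("available=%u total=%u pending=%u\n",
               ent.available_mrs, ent.total_mrs, ent.pending);
        return 0;
}
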
179 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) in alloc_cache_mr() argument
186 mr->cache_ent = ent; in alloc_cache_mr()
188 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
191 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
192 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
194 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
195 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
200 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
215 mr = alloc_cache_mr(ent, mkc); in add_keys()
220 spin_lock_irq(&ent->lock); in add_keys()
221 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
223 spin_unlock_irq(&ent->lock); in add_keys()
227 ent->pending++; in add_keys()
228 spin_unlock_irq(&ent->lock); in add_keys()
229 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
230 &ent->dev->async_ctx, in, inlen, in add_keys()
234 spin_lock_irq(&ent->lock); in add_keys()
235 ent->pending--; in add_keys()
236 spin_unlock_irq(&ent->lock); in add_keys()
237 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
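
add_keys() is the producer side: before each asynchronous mkey creation it checks the in-flight counter against MAX_PENDING_REG_MR under the entry lock, backs off with -EAGAIN when the cap is hit, and rolls pending back if the command itself fails synchronously. A sketch of that throttle follows; add_keys_model and issue_async_create are made-up names, and the cap value of 8 is assumed purely for illustration.

#include <errno.h>
#include <stdio.h>

#define MAX_PENDING_REG_MR 8    /* illustrative cap; the driver defines its own value */

struct cache_ent { unsigned int pending; };

/* Stand-in for the asynchronous mkey-create command; always "succeeds" here. */
static int issue_async_create(struct cache_ent *ent) { (void)ent; return 0; }

/* Queue num asynchronous creations, throttled by the in-flight counter. */
static int add_keys_model(struct cache_ent *ent, unsigned int num)
{
        for (unsigned int i = 0; i < num; i++) {
                /* lock ent->lock */
                if (ent->pending >= MAX_PENDING_REG_MR) {
                        /* unlock */
                        return -EAGAIN; /* too many creations already in flight */
                }
                ent->pending++;
                /* unlock, then issue the command outside the lock */
                int err = issue_async_create(ent);
                if (err) {
                        /* lock; roll the in-flight count back on failure; unlock */
                        ent->pending--;
                        return err;
                }
        }
        return 0;
}

int main(void)
{
        struct cache_ent ent = { .pending = 0 };
        int err = add_keys_model(&ent, 10);

        printf("err=%d pending=%u\n", err, ent.pending);  /* stops at the cap with -EAGAIN */
        return 0;
}
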
248 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) in create_cache_mr() argument
261 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
267 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); in create_cache_mr()
273 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
274 spin_lock_irq(&ent->lock); in create_cache_mr()
275 ent->total_mrs++; in create_cache_mr()
276 spin_unlock_irq(&ent->lock); in create_cache_mr()
286 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
290 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
291 if (list_empty(&ent->head)) in remove_cache_mr_locked()
293 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
295 ent->available_mrs--; in remove_cache_mr_locked()
296 ent->total_mrs--; in remove_cache_mr_locked()
297 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
298 mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); in remove_cache_mr_locked()
300 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
303 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
308 lockdep_assert_held(&ent->lock); in resize_available_mrs()
312 target = ent->limit * 2; in resize_available_mrs()
313 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
315 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
316 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
318 spin_unlock_irq(&ent->lock); in resize_available_mrs()
319 err = add_keys(ent, todo); in resize_available_mrs()
322 spin_lock_irq(&ent->lock); in resize_available_mrs()
329 remove_cache_mr_locked(ent); in resize_available_mrs()
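
resize_available_mrs() drives available_mrs + pending toward a target, adding keys when below it and destroying cached MRs when above it; judging from the two callers shown further down (limit_write() passes target 0 with the flag set, size_write() passes an explicit target with it clear), a zero target with the flag set means "aim for 2 * limit". Below is a sketch of that loop — resize_model, limit_updated, add_keys_model and remove_one_model are illustrative names, and the driver drops ent->lock around the blocking calls and briefly sleeps on -EAGAIN before retrying.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_ent {
        unsigned int available_mrs;
        unsigned int pending;
        unsigned int limit;
};

/* Stand-ins for add_keys()/remove_cache_mr_locked(); here they only move counters. */
static int add_keys_model(struct cache_ent *ent, unsigned int n)
{
        ent->available_mrs += n;
        return 0;
}

static void remove_one_model(struct cache_ent *ent)
{
        ent->available_mrs--;
}

/* Drive available + pending toward target; limit_updated means "aim for 2 * limit". */
static int resize_model(struct cache_ent *ent, unsigned int target, bool limit_updated)
{
        for (;;) {
                if (limit_updated)
                        target = ent->limit * 2;
                if (target == ent->available_mrs + ent->pending)
                        return 0;
                if (target > ent->available_mrs + ent->pending) {
                        unsigned int todo = target - (ent->available_mrs + ent->pending);
                        /* the driver drops ent->lock around add_keys() */
                        int err = add_keys_model(ent, todo);

                        if (err && err != -EAGAIN)
                                return err;
                        /* on -EAGAIN the driver sleeps briefly and retries */
                } else {
                        remove_one_model(ent);  /* above target: destroy one cached MR */
                }
        }
}

int main(void)
{
        struct cache_ent ent = { .available_mrs = 10, .limit = 3 };

        resize_model(&ent, 0, true);                    /* shrink toward 2 * limit */
        printf("available=%u\n", ent.available_mrs);    /* prints 6 */
        return 0;
}
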
337 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
350 spin_lock_irq(&ent->lock); in size_write()
351 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
355 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
356 if (target < ent->limit || target > ent->limit*2) { in size_write()
360 err = resize_available_mrs(ent, target, false); in size_write()
363 spin_unlock_irq(&ent->lock); in size_write()
368 spin_unlock_irq(&ent->lock); in size_write()
375 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
379 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
396 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
408 spin_lock_irq(&ent->lock); in limit_write()
409 ent->limit = var; in limit_write()
410 err = resize_available_mrs(ent, 0, true); in limit_write()
411 spin_unlock_irq(&ent->lock); in limit_write()
420 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
424 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
443 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding() local
446 spin_lock_irq(&ent->lock); in someone_adding()
447 ret = ent->available_mrs < ent->limit; in someone_adding()
448 spin_unlock_irq(&ent->lock); in someone_adding()
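
someone_adding() scans every cache entry and reports whether any of them is still below its low watermark, which the shrink path later uses to postpone garbage collection. A small sketch, with NUM_CACHE_ENTRIES and someone_adding_model standing in for the driver's real array size and helper:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CACHE_ENTRIES 16    /* illustrative count, not the driver's constant */

struct cache_ent { unsigned int available_mrs; unsigned int limit; };
struct mr_cache  { struct cache_ent ent[NUM_CACHE_ENTRIES]; };

/* True if any entry is still below its low watermark, i.e. a refill is in progress. */
static bool someone_adding_model(struct mr_cache *cache)
{
        for (unsigned int i = 0; i < NUM_CACHE_ENTRIES; i++) {
                /* the driver samples each counter under that entry's lock */
                if (cache->ent[i].available_mrs < cache->ent[i].limit)
                        return true;
        }
        return false;
}

int main(void)
{
        struct mr_cache cache = { 0 };

        cache.ent[3].limit = 8;         /* entry 3 wants at least 8 cached MRs ... */
        cache.ent[3].available_mrs = 2; /* ... but has only 2, so a refill is pending */
        printf("someone_adding=%d\n", someone_adding_model(&cache));
        return 0;
}
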
460 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
462 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
464 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
466 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
467 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
468 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
469 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
470 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
475 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
476 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
477 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
478 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
480 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
481 if (ent->pending) in queue_adjust_cache_locked()
482 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
485 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
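
queue_adjust_cache_locked() is the watermark state machine the other paths call whenever a counter changes: dropping below limit starts a fill that keeps going until available_mrs reaches 2 * limit, and overshooting 2 * limit schedules a shrink, delayed while creations are still pending. The sketch below returns the decision instead of queueing work; queue_adjust_model, cache_action and the fill_delay parameter are illustrative, where the driver reads dev->fill_delay and calls queue_work()/queue_delayed_work() directly.

#include <stdbool.h>
#include <stdio.h>

struct cache_ent {
        unsigned int available_mrs;
        unsigned int pending;
        unsigned int limit;             /* low watermark; 2 * limit is the high watermark */
        bool fill_to_high_water;
        bool disabled;
};

enum cache_action { DO_NOTHING, QUEUE_WORK, QUEUE_DELAYED_WORK };

/* Decide what the background worker should do after the counters changed. */
static enum cache_action queue_adjust_model(struct cache_ent *ent, bool fill_delay)
{
        if (ent->disabled || fill_delay)
                return DO_NOTHING;
        if (ent->available_mrs < ent->limit) {
                /* dropped below the low watermark: start filling toward 2 * limit */
                ent->fill_to_high_water = true;
                return QUEUE_WORK;
        }
        if (ent->fill_to_high_water &&
            ent->available_mrs + ent->pending < 2 * ent->limit)
                return QUEUE_WORK;      /* keep filling until the high watermark */
        if (ent->available_mrs == 2 * ent->limit) {
                ent->fill_to_high_water = false;        /* exactly at the top: stop */
                return DO_NOTHING;
        }
        if (ent->available_mrs > 2 * ent->limit) {
                /* overshot: shrink, but let in-flight creations settle first */
                ent->fill_to_high_water = false;
                return ent->pending ? QUEUE_DELAYED_WORK : QUEUE_WORK;
        }
        return DO_NOTHING;
}

int main(void)
{
        struct cache_ent ent = { .available_mrs = 3, .limit = 8 };

        printf("%d\n", queue_adjust_model(&ent, false)); /* below limit: QUEUE_WORK */
        ent.available_mrs = 20;
        printf("%d\n", queue_adjust_model(&ent, false)); /* above 2*limit: QUEUE_WORK */
        return 0;
}
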
489 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
491 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
495 spin_lock_irq(&ent->lock); in __cache_work_func()
496 if (ent->disabled) in __cache_work_func()
499 if (ent->fill_to_high_water && in __cache_work_func()
500 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
502 spin_unlock_irq(&ent->lock); in __cache_work_func()
503 err = add_keys(ent, 1); in __cache_work_func()
504 spin_lock_irq(&ent->lock); in __cache_work_func()
505 if (ent->disabled) in __cache_work_func()
517 ent->order, err); in __cache_work_func()
518 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
522 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
537 spin_unlock_irq(&ent->lock); in __cache_work_func()
541 spin_lock_irq(&ent->lock); in __cache_work_func()
542 if (ent->disabled) in __cache_work_func()
545 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
546 remove_cache_mr_locked(ent); in __cache_work_func()
547 queue_adjust_cache_locked(ent); in __cache_work_func()
550 spin_unlock_irq(&ent->lock); in __cache_work_func()
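
In __cache_work_func(), the fill branch adds one key at a time while below the high watermark and re-arms itself via delayed work on hard errors, while the shrink branch (available_mrs > 2 * limit) first samples a few "is the system busy?" conditions outside the lock; if any holds, the next garbage-collection pass is pushed out by 300 * HZ, though the current pass still appears to trim one cached MR before queue_adjust_cache_locked() reruns. The sketch below models only that delay decision; shrink_inputs and shrink_needs_delay are illustrative names for need_resched(), someone_adding() and the cache->last_add age check.

#include <stdbool.h>
#include <stdio.h>

/* Conditions the worker samples (outside the lock) before shrinking a full entry. */
struct shrink_inputs {
        bool cpu_has_work;      /* kernel: need_resched() */
        bool someone_adding;    /* some entry is still below its limit */
        bool added_recently;    /* cache->last_add is less than 300 s old */
};

/*
 * Decide whether the next garbage-collection pass should be postponed by
 * 300 * HZ; the pass that made the decision still trims one cached MR.
 */
static bool shrink_needs_delay(const struct shrink_inputs *in)
{
        return in->cpu_has_work || in->someone_adding || in->added_recently;
}

int main(void)
{
        struct shrink_inputs busy = { .added_recently = true };
        struct shrink_inputs idle = { 0 };

        printf("busy: delay=%d\n", shrink_needs_delay(&busy));
        printf("idle: delay=%d\n", shrink_needs_delay(&idle));
        return 0;
}
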
555 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
557 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
558 __cache_work_func(ent); in delayed_cache_work_func()
563 struct mlx5_cache_ent *ent; in cache_work_func() local
565 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
566 __cache_work_func(ent); in cache_work_func()
574 struct mlx5_cache_ent *ent; in mlx5_mr_cache_alloc() local
578 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
585 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
586 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
587 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
588 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
589 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
593 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
595 ent->available_mrs--; in mlx5_mr_cache_alloc()
596 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
597 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
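
mlx5_mr_cache_alloc() hands out an MR from the requested entry: if the free list is empty it drops the lock and falls back to the synchronous create_cache_mr(), otherwise it unlinks the first MR, decrements available_mrs and reruns the watermark check so the worker can refill. A sketch with illustrative names (cache_alloc_model, create_sync, mr_node), again with the locking reduced to comments:

#include <stdio.h>
#include <stdlib.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
        struct mr_node *head;
        unsigned int available_mrs;
        unsigned int total_mrs;
};

/* Stand-in for the synchronous create_cache_mr() fallback. */
static struct mr_node *create_sync(struct cache_ent *ent)
{
        struct mr_node *mr = calloc(1, sizeof(*mr));

        if (mr)
                ent->total_mrs++;       /* counted, but never put on the free list */
        return mr;
}

/* Hand out one MR: prefer the free list, otherwise create one synchronously. */
static struct mr_node *cache_alloc_model(struct cache_ent *ent)
{
        /* lock ent->lock */
        if (!ent->head) {
                /* unlock: the cache is empty, fall back to a blocking create */
                return create_sync(ent);
        }
        struct mr_node *mr = ent->head; /* kernel: list_first_entry() + list_del() */

        ent->head = mr->next;
        ent->available_mrs--;
        /* queue_adjust_cache_locked(ent) may kick the refill worker here */
        /* unlock */
        return mr;
}

int main(void)
{
        struct cache_ent ent = { 0 };
        struct mr_node *mr = cache_alloc_model(&ent);   /* empty cache: slow path */

        printf("got=%p available=%u total=%u\n",
               (void *)mr, ent.available_mrs, ent.total_mrs);
        free(mr);
        return 0;
}
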
609 struct mlx5_cache_ent *ent = req_ent; in get_cache_mr() local
611 spin_lock_irq(&ent->lock); in get_cache_mr()
612 if (!list_empty(&ent->head)) { in get_cache_mr()
613 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in get_cache_mr()
615 ent->available_mrs--; in get_cache_mr()
616 queue_adjust_cache_locked(ent); in get_cache_mr()
617 spin_unlock_irq(&ent->lock); in get_cache_mr()
621 queue_adjust_cache_locked(ent); in get_cache_mr()
622 spin_unlock_irq(&ent->lock); in get_cache_mr()
629 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free() local
631 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
632 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
633 ent->available_mrs++; in mlx5_mr_cache_free()
634 queue_adjust_cache_locked(ent); in mlx5_mr_cache_free()
635 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
641 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
646 cancel_delayed_work(&ent->dwork); in clean_keys()
648 spin_lock_irq(&ent->lock); in clean_keys()
649 if (list_empty(&ent->head)) { in clean_keys()
650 spin_unlock_irq(&ent->lock); in clean_keys()
653 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
655 ent->available_mrs--; in clean_keys()
656 ent->total_mrs--; in clean_keys()
657 spin_unlock_irq(&ent->lock); in clean_keys()
679 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
689 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
690 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
691 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
692 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
693 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
694 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
695 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
709 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
722 ent = &cache->ent[i]; in mlx5_mr_cache_init()
723 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
724 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
725 ent->order = i + 2; in mlx5_mr_cache_init()
726 ent->dev = dev; in mlx5_mr_cache_init()
727 ent->limit = 0; in mlx5_mr_cache_init()
729 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
730 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
733 mlx5_odp_init_mr_cache_entry(ent); in mlx5_mr_cache_init()
737 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
740 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
741 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
743 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
747 ent->limit = dev->mdev->profile.mr_cache[i].limit; in mlx5_mr_cache_init()
749 ent->limit = 0; in mlx5_mr_cache_init()
750 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
751 queue_adjust_cache_locked(ent); in mlx5_mr_cache_init()
752 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
768 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup() local
770 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
771 ent->disabled = true; in mlx5_mr_cache_cleanup()
772 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
773 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
774 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
902 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
903 return &cache->ent[0]; in mr_cache_ent_from_order()
904 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
907 return &cache->ent[order]; in mr_cache_ent_from_order()
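
mr_cache_ent_from_order() maps an MR's size order (roughly log2 of the number of pages it covers) to a cache bucket: orders smaller than the first entry's order share bucket 0, and larger ones index the array after subtracting that base order; the intervening source lines (not matched by this listing) presumably bound the rebased index, and the sketch adds a simple check in that spirit. NUM_CACHE_ENTRIES and FIRST_ENTRY_ORDER are illustrative constants, with FIRST_ENTRY_ORDER mirroring ent->order = i + 2 from mlx5_mr_cache_init().

#include <stdio.h>

#define NUM_CACHE_ENTRIES 16    /* illustrative count, not the driver's constant */
#define FIRST_ENTRY_ORDER 2     /* mirrors ent->order = i + 2 in mlx5_mr_cache_init() */

struct cache_ent { unsigned int order; };
struct mr_cache  { struct cache_ent ent[NUM_CACHE_ENTRIES]; };

/* Map an MR's size order to its cache bucket. */
static struct cache_ent *ent_from_order_model(struct mr_cache *cache, unsigned int order)
{
        if (order < cache->ent[0].order)
                return &cache->ent[0];  /* tiny requests share the smallest bucket */
        order -= cache->ent[0].order;   /* rebase so index 0 is the first order */
        if (order >= NUM_CACHE_ENTRIES)
                return NULL;            /* too large to be served from the cache */
        return &cache->ent[order];
}

int main(void)
{
        struct mr_cache cache;

        for (unsigned int i = 0; i < NUM_CACHE_ENTRIES; i++)
                cache.ent[i].order = i + FIRST_ENTRY_ORDER;

        printf("order 1 -> bucket order %u\n", ent_from_order_model(&cache, 1)->order);
        printf("order 9 -> bucket order %u\n", ent_from_order_model(&cache, 9)->order);
        return 0;
}
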
937 struct mlx5_cache_ent *ent; in alloc_cacheable_mr() local
948 ent = mr_cache_ent_from_order( in alloc_cacheable_mr()
954 if (!ent || ent->limit == 0 || in alloc_cacheable_mr()
962 mr = get_cache_mr(ent); in alloc_cacheable_mr()
964 mr = create_cache_mr(ent); in alloc_cacheable_mr()