/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"

static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
	"When nonzero, set the maximum number of sectors per I/O request");

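/* sysfs "state" attribute: "up" or "down", optionally followed by ",kickme" or ",closewait" */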
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return sysfs_emit(page, "%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}
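/* sysfs "mac" attribute: MAC address of the first target, or "none" */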
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%pm\n", t->addr);
}
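/*
 * sysfs "netif" attribute: comma-separated, deduplicated list of the local
 * network interfaces currently associated with this device's targets.
 */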
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return sysfs_emit(page, "none\n");
	for (p = page; nd < ne; nd++)
		p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
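/* sysfs "payload" attribute: the device's current maximum I/O payload size (d->maxbcnt) */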
static ssize_t aoedisk_show_payload(struct device *dev,
				    struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return sysfs_emit(page, "%lu\n", d->maxbcnt);
}

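/*
 * debugfs show routine: dump round-trip-time estimates, skb pool depth,
 * and per-target frame/interface statistics under the device lock.
 */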
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
{
	struct aoedev *d;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *ife;
	unsigned long flags;
	char c;

	d = s->private;
	seq_printf(s, "rttavg: %d rttdev: %d\n",
		d->rttavg >> RTTSCALE,
		d->rttdev >> RTTDSCALE);
	seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
	seq_printf(s, "kicked: %ld\n", d->kicked);
	seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
	seq_printf(s, "ref: %ld\n", d->ref);

	spin_lock_irqsave(&d->lock, flags);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		c = '\t';
		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
		seq_printf(s, "ffree: %p\n",
			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
			(*t)->maxout, (*t)->nframes);
		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
		ifp = (*t)->ifs;
		ife = ifp + ARRAY_SIZE((*t)->ifs);
		/* check bounds before dereferencing ifp */
		for (; ifp < ife && ifp->nd; ifp++) {
			seq_printf(s, "%c%s", c, ifp->nd->name);
			c = ',';
		}
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

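/* debugfs open: hand the aoedev stored in i_private to the seq_file machinery */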
static int aoe_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, aoedisk_debugfs_show, inode->i_private);
}

static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = 0444 },
	.show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload.attr,
	NULL,
};

static const struct attribute_group aoe_attr_group = {
	.attrs = aoe_attrs,
};

static const struct attribute_group *aoe_attr_groups[] = {
	&aoe_attr_group,
	NULL,
};

static const struct file_operations aoe_debugfs_fops = {
	.open = aoe_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

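/*
 * Create the per-device debugfs file, named after the disk without its
 * "etherd/" prefix (e.g. "e1.0"), under the "aoe" debugfs directory.
 */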
static void
aoedisk_add_debugfs(struct aoedev *d)
{
	char *p;

	if (aoe_debugfs_dir == NULL)
		return;
	p = strchr(d->gd->disk_name, '/');
	if (p == NULL)
		p = d->gd->disk_name;
	else
		p++;
	BUG_ON(*p == '\0');
	d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
					 &aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
{
	debugfs_remove(d->debugfs);
	d->debugfs = NULL;
}

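/*
 * Open the block device: count the opener while the device is up and not
 * being torn down; otherwise fail with -ENODEV.
 */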
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	if (!virt_addr_valid(d)) {
		pr_crit("aoe: invalid device pointer in %s\n",
			__func__);
		WARN_ON(1);
		return -ENODEV;
	}
	if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
		return -ENODEV;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

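/* Release the block device: drop the open count; on last close, issue an AoE config query via aoecmd_cfg(). */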
static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return;
	}
	spin_unlock_irqrestore(&d->lock, flags);
}

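/*
 * blk-mq ->queue_rq handler: fail fast if the device is down, otherwise
 * queue the request on the per-device list and call aoecmd_work() to
 * start servicing it.
 */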
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct aoedev *d = hctx->queue->queuedata;

	spin_lock_irq(&d->lock);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irq(&d->lock);
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	aoecmd_work(d);
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}

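/* Report the cached disk geometry (cylinders/heads/sectors) */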
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

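/*
 * Block device ioctl: HDIO_GET_IDENTITY copies out the cached identify
 * data (d->ident); anything else is rejected with -ENOTTY.
 */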
static int
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
{
	struct aoedev *d;

	if (!arg)
		return -EINVAL;

	d = bdev->bd_disk->private_data;
	if ((d->flags & DEVFL_UP) == 0) {
		pr_err("aoe: disk not up\n");
		return -ENODEV;
	}

	if (cmd == HDIO_GET_IDENTITY) {
		if (!copy_to_user((void __user *) arg, &d->ident,
			sizeof(d->ident)))
			return 0;
		return -EFAULT;
	}

	/* udev calls scsi_id, which uses SG_IO, resulting in noise */
	if (cmd != SG_IO)
		pr_info("aoe: unknown ioctl 0x%x\n", cmd);

	return -ENOTTY;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.ioctl = aoeblk_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops aoeblk_mq_ops = {
	.queue_rq = aoeblk_queue_rq,
};

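/*
 * Allocate and register the gendisk for a newly discovered device: buffer
 * mempool, blk-mq tag set, queue limits, then device_add_disk().  On
 * failure the device's work item is rescheduled.
 */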
/* blk_mq_alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	mempool_t *mp;
	struct blk_mq_tag_set *set;
	ulong flags;
	int late = 0;
	int err;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_GDALLOC
	&& !(d->flags & DEVFL_TKILL)
	&& !(d->flags & DEVFL_GD_NOW))
		d->flags |= DEVFL_GD_NOW;
	else
		late = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	if (late)
		return;

	mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
		buf_pool_cache);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	set = &d->tag_set;
	set->ops = &aoeblk_mq_ops;
	set->cmd_size = sizeof(struct aoe_req);
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	err = blk_mq_alloc_tag_set(set);
	if (err) {
		pr_err("aoe: cannot allocate tag set for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_mempool;
	}

	gd = blk_mq_alloc_disk(set, d);
	if (IS_ERR(gd)) {
		pr_err("aoe: cannot allocate block queue for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_tagset;
	}

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	WARN_ON(!(d->flags & DEVFL_GDALLOC));
	WARN_ON(d->flags & DEVFL_TKILL);
	WARN_ON(d->gd);
	WARN_ON(d->flags & DEVFL_UP);
	blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
	blk_queue_io_opt(gd->queue, SZ_2M);
	d->bufpool = mp;
	d->blkq = gd->queue;
	d->gd = gd;
	if (aoe_maxsectors)
		blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor;
	gd->minors = AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	err = device_add_disk(NULL, gd, aoe_attr_groups);
	if (err)
		goto out_disk_cleanup;
	aoedisk_add_debugfs(d);

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	d->flags &= ~DEVFL_GD_NOW;
	spin_unlock_irqrestore(&d->lock, flags);
	return;

out_disk_cleanup:
	blk_cleanup_disk(gd);
err_tagset:
	blk_mq_free_tag_set(set);
err_mempool:
	mempool_destroy(mp);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GD_NOW;
	schedule_work(&d->work);
	spin_unlock_irqrestore(&d->lock, flags);
}

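/* module exit: remove the debugfs tree and destroy the buf slab cache */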
void
aoeblk_exit(void)
{
	debugfs_remove_recursive(aoe_debugfs_dir);
	aoe_debugfs_dir = NULL;
	kmem_cache_destroy(buf_pool_cache);
}

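/* module init: create the slab cache for struct buf and the "aoe" debugfs directory */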
int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;
	aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
	return 0;
}