// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
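
/*
 * Each registered device gets two character device minors: 2*index for the
 * read-write /dev/mtdX node and 2*index + 1 for the read-only /dev/mtdXro
 * node created in add_mtd_device() below.
 */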

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
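
/*
 * For example, MTD_DEVICE_ATTR_RO(type) expands to
 * static DEVICE_ATTR(type, 0444, mtd_type_show, NULL), which defines the
 * read-only sysfs attribute dev_attr_type backed by mtd_type_show() below.
 */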

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated with the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated with the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return mtd->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check that first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_find_property(mtd_get_of_node(mtd), "linux,rootfs", NULL)) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or a negative error code on failure: -ENODEV
 *	if the requested device does not appear to be present in the list,
 *	or -EBUSY if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;
	struct device_node *mtd_of_node;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		mtd_of_node = mtd_get_of_node(mtd);
		debugfs_remove_recursive(mtd->dbg.dfs_dir);

		/* Try to remove the NVMEM provider */
		nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		/* Clear dev so mtd can be safely re-registered later if desired */
		memset(&mtd->dev, 0, sizeof(mtd->dev));

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_of_node);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
	config.id = NVMEM_DEVID_NONE;
	config.owner = THIS_MODULE;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);
	kfree(config.name);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				return PTR_ERR(nvmem);
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	return err;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function tries to fall back to the information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	ret = mtd_otp_nvmem_add(mtd);

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
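
/*
 * A minimal usage sketch (hypothetical driver code, with made-up partition
 * names and sizes) showing the common pattern this helper wraps:
 *
 *	static const struct mtd_partition parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = SZ_1M },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = mtd_device_parse_register(mtd, NULL, NULL, parts, ARRAY_SIZE(parts));
 *
 * Partition parsers (e.g. the "cmdlinepart" or OF parsers) take precedence;
 * parts[] is only used as a fallback when parsing finds nothing.
 */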

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
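
/*
 * Minimal notifier sketch (hypothetical callback names): the 'add' hook runs
 * immediately for every already-registered device, then again for each new
 * arrival.
 *
 *	static void my_mtd_add(struct mtd_info *mtd) { ... }
 *	static void my_mtd_remove(struct mtd_info *mtd) { ... }
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add = my_mtd_add,
 *		.remove = my_mtd_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 */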

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any. Given an address and num == -1, search the device table
 *	for a device with that address and return if it's still present. Given
 *	both, return the num'th driver only if its address matches. Return
 *	error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns MTD device description structure in case of
 *	success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
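
/*
 * Lookup-by-name sketch (hypothetical device name); the reference taken by
 * get_mtd_device_nm() must be dropped with put_mtd_device() when done:
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("spi0.0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */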

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
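
/*
 * Erase usage sketch (assuming 'block' is a valid eraseblock index within
 * the device): only addr and len need to be filled in; fail_addr is set by
 * mtd_erase() itself.
 *
 *	struct erase_info ei = { };
 *	int ret;
 *
 *	ei.addr = (loff_t)block * mtd->erasesize;
 *	ei.len = mtd->erasesize;
 *	ret = mtd_erase(mtd, &ei);
 */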

/*
 * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set
 * to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
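
/*
 * Read usage sketch: -EUCLEAN reports data that was read back correctly but
 * needed bit corrections at or above mtd->bitflip_threshold, so callers
 * typically treat it as "success, but consider scrubbing the block":
 *
 *	size_t retlen;
 *	int ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
 *
 *	if (ret < 0 && ret != -EUCLEAN)
 *		return ret;	// hard error
 */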

/*
 * In blackbox flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it's known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer, this
 * function can break locks and delay to ensure the write succeeds (but not
 * sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
1716  
1717  /**
1718   * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1719   * @mtd: MTD device structure
1720   * @section: ECC section. Depending on the layout you may have all the ECC
1721   *	     bytes stored in a single contiguous section, or one section
1722   *	     per ECC chunk (and sometime several sections for a single ECC
1723   *	     ECC chunk)
1724   * @oobecc: OOB region struct filled with the appropriate ECC position
1725   *	    information
1726   *
1727   * This function returns ECC section information in the OOB area. If you want
1728   * to get all the ECC bytes information, then you should call
1729   * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1730   *
1731   * Returns zero on success, a negative error code otherwise.
1732   */
mtd_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobecc)1733  int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1734  		      struct mtd_oob_region *oobecc)
1735  {
1736  	struct mtd_info *master = mtd_get_master(mtd);
1737  
1738  	memset(oobecc, 0, sizeof(*oobecc));
1739  
1740  	if (!master || section < 0)
1741  		return -EINVAL;
1742  
1743  	if (!master->ooblayout || !master->ooblayout->ecc)
1744  		return -ENOTSUPP;
1745  
1746  	return master->ooblayout->ecc(master, section, oobecc);
1747  }
1748  EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
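
/*
 * Illustrative sketch (editor's addition): walking every ECC section with
 * the iterate-until--ERANGE pattern described in the kernel-doc above, and
 * summing the region lengths (this mirrors what
 * mtd_ooblayout_count_eccbytes() does). The same loop shape works for
 * mtd_ooblayout_free(). Hypothetical example code.
 */
static int __maybe_unused example_sum_ecc_bytes(struct mtd_info *mtd)
{
	struct mtd_oob_region oobecc;
	int section = 0, total = 0, ret;

	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &oobecc)))
		total += oobecc.length;

	return ret == -ERANGE ? total : ret;	/* -ERANGE ends the walk */
}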

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or some other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns the position of free bytes in the OOB area. If you
 * want information about all the free bytes, call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, if you want to know where the 4th ECC byte is
 * stored, you would use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				int *sectionp, struct mtd_oob_region *oobregion,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust the region info so that it starts at the requested 'byte'
	 * rather than at the beginning of the section containing it.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
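
/*
 * Illustrative sketch (editor's addition): locating the 4th ECC byte, as in
 * the kernel-doc example above, and reporting where it lives within the OOB
 * area. Purely hypothetical example code.
 */
static int __maybe_unused example_locate_fourth_ecc_byte(struct mtd_info *mtd)
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &oobregion);
	if (ret)
		return ret;

	pr_info("ECC byte 3: section %d, OOB offset %u, %u byte(s) left\n",
		section, oobregion.offset, oobregion.length);
	return 0;
}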

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				const u8 *oobbuf, int start, int nbytes,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				u8 *oobbuf, int start, int nbytes,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
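
/*
 * Illustrative sketch (editor's addition): after a raw OOB read, splitting
 * the raw buffer into its ECC bytes and its free "data" bytes with the two
 * accessors above. Buffer sizes and names are example assumptions.
 */
static int __maybe_unused example_split_raw_oob(struct mtd_info *mtd,
						const u8 *raw_oob,
						u8 *ecc, int num_ecc,
						u8 *data, int num_data)
{
	int ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, raw_oob, 0, num_ecc);
	if (ret)
		return ret;

	return mtd_ooblayout_get_databytes(mtd, data, raw_oob, 0, num_data);
}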

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable, but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of OTP space and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
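
/*
 * Illustrative sketch (editor's addition): a hypothetical caller probing
 * the user OTP area and then reading the first reported region. The fixed
 * array of 8 regions is an example assumption; a real caller would size it
 * from the device.
 */
static int __maybe_unused example_dump_user_otp(struct mtd_info *mtd,
						u_char *buf)
{
	struct otp_info info[8];
	size_t retlen;
	int ret;

	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
	if (ret)
		return ret;
	if (retlen < sizeof(*info))
		return -ENODATA;	/* no user OTP regions reported */

	/* Read the first region; start/length come from the device */
	return mtd_read_user_prot_reg(mtd, info[0].start, info[0].length,
				      &retlen, buf);
}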

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_erase_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
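
/*
 * Illustrative sketch (editor's addition): temporarily unlocking an erase
 * block before modifying it, then restoring the lock. Ignoring -EOPNOTSUPP
 * keeps the helper usable on chips without locking support. Hypothetical
 * example code; fn() stands in for a caller-supplied modification.
 */
static int __maybe_unused example_with_block_unlocked(struct mtd_info *mtd,
						      loff_t ofs,
						      int (*fn)(struct mtd_info *, loff_t))
{
	uint64_t len = mtd->erasesize;
	int ret;

	ret = mtd_unlock(mtd, ofs, len);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = fn(mtd, ofs);

	/* Best-effort relock; do not clobber fn()'s return value */
	mtd_lock(mtd, ofs, len);

	return ret;
}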

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
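
/*
 * Illustrative sketch (editor's addition): counting the bad eraseblocks of
 * a device with mtd_block_isbad(), the usual skip-bad-block scan shape.
 * Hypothetical example code.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0, ret;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}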

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
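
/*
 * Illustrative sketch (editor's addition): writing a header and a payload
 * in one mtd_writev() call via two kvecs. Assumes <linux/uio.h> is pulled
 * in for struct kvec. Hypothetical example code.
 */
static int __maybe_unused example_write_two_buffers(struct mtd_info *mtd,
						    loff_t to,
						    void *hdr, size_t hdrlen,
						    void *payload, size_t paylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,     .iov_len = hdrlen },
		{ .iov_base = payload, .iov_len = paylen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, 2, to, &retlen);
	if (!ret && retlen != hdrlen + paylen)
		ret = -EIO;	/* short write */

	return ret;
}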

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
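
/*
 * Illustrative sketch (editor's addition): asking for a whole-eraseblock
 * buffer but accepting whatever smaller, writesize-aligned size the
 * allocator could provide. Hypothetical example code.
 */
static int __maybe_unused example_alloc_erase_buffer(struct mtd_info *mtd)
{
	size_t size = mtd->erasesize;	/* ideal size; may be reduced */
	void *buf;

	buf = mtd_kmalloc_up_to(mtd, &size);
	if (!buf)
		return -ENOMEM;

	pr_info("got %zu of %u requested bytes\n", size, mtd->erasesize);
	kfree(buf);
	return 0;
}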

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We append a '-0' suffix to the name to keep the same name format
	 * as before. Since this is called only once, the name is unique.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
			    &mtd_expert_analysis_mode);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");