Lines Matching defs:child

50 struct mtd_info *child;
56 child = kzalloc(sizeof(*child), GFP_KERNEL);
58 if (!name || !child) {
62 kfree(child);
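
These matches appear to come from allocate_partition() in the kernel's drivers/mtd/mtdpart.c. Lines 50-62 are its allocation prologue: the child mtd_info and a duplicated name string are allocated together, and both are freed if either allocation fails. A minimal userspace sketch of that paired-failure pattern, with part_stub and alloc_part as illustrative stand-ins (the kernel returns ERR_PTR(-ENOMEM) rather than NULL):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct part_stub {          /* stand-in for struct mtd_info */
        char *name;
    };

    static struct part_stub *alloc_part(const char *part_name)
    {
        /* mirrors the kzalloc()/kstrdup() pairing at lines 56-62 */
        struct part_stub *child = calloc(1, sizeof(*child));
        char *name = part_name ? strdup(part_name) : NULL;

        if (!name || !child) {
            free(name);   /* free(NULL) is a no-op, so no extra checks */
            free(child);
            return NULL;  /* kernel code returns ERR_PTR(-ENOMEM) here */
        }
        child->name = name;
        return child;
    }

    int main(void)
    {
        struct part_stub *p = alloc_part("boot");

        if (p)
            printf("allocated partition \"%s\"\n", p->name);
        return 0;
    }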
67 child->type = parent->type;
68 child->part.flags = parent->flags & ~part->mask_flags;
69 child->part.flags |= part->add_flags;
70 child->flags = child->part.flags;
71 child->part.size = part->size;
72 child->writesize = parent->writesize;
73 child->writebufsize = parent->writebufsize;
74 child->oobsize = parent->oobsize;
75 child->oobavail = parent->oobavail;
76 child->subpage_sft = parent->subpage_sft;
78 child->name = name;
79 child->owner = parent->owner;
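
Lines 67-79 copy geometry and identity from the parent; the interesting part is the flag computation at lines 68-70, where per-partition mask_flags strips capabilities and add_flags grants extras. A sketch of just that bit arithmetic (MTD_WRITEABLE is the real value from mtd-abi.h; 0x800 is an arbitrary extra bit for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define MTD_WRITEABLE 0x400   /* from the kernel's mtd-abi.h */

    int main(void)
    {
        uint32_t parent_flags = MTD_WRITEABLE | 0x800;
        uint32_t mask_flags = MTD_WRITEABLE;  /* e.g. force read-only */
        uint32_t add_flags = 0;

        /* lines 68-69: inherit parent flags, drop masked ones, add extras */
        uint32_t child_flags = (parent_flags & ~mask_flags) | add_flags;

        printf("child flags: %#x (writeable: %s)\n", child_flags,
               (child_flags & MTD_WRITEABLE) ? "yes" : "no");
        return 0;
    }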
89 child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
91 child->dev.of_node = part->of_node;
92 child->parent = parent;
93 child->part.offset = part->offset;
94 INIT_LIST_HEAD(&child->partitions);
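
Line 89 is a truncated ternary; its second half (line 90) contains no "child" and so does not appear in the listing, but the full statement picks the sysfs parent: under CONFIG_MTD_PARTITIONED_MASTER, or when the parent is itself a partition, the child hangs under the parent's device, otherwise under the parent's own parent. A toy sketch of that choice, with dev and pick_dev_parent as illustrative names:

    #include <stdio.h>

    struct dev { const char *name; struct dev *parent; };

    static struct dev *pick_dev_parent(int partitioned_master,
                                       int parent_is_partition,
                                       struct dev *parent_dev)
    {
        return (partitioned_master || parent_is_partition)
                   ? parent_dev            /* &parent->dev in the kernel */
                   : parent_dev->parent;   /* parent->dev.parent */
    }

    int main(void)
    {
        struct dev controller = { "controller", NULL };
        struct dev master = { "mtd0", &controller };

        printf("%s\n", pick_dev_parent(0, 0, &master)->name);  /* controller */
        printf("%s\n", pick_dev_parent(1, 0, &master)->name);  /* mtd0 */
        return 0;
    }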
96 if (child->part.offset == MTDPART_OFS_APPEND)
97 child->part.offset = cur_offset;
98 if (child->part.offset == MTDPART_OFS_NXTBLK) {
100 child->part.offset = cur_offset;
103 child->part.offset += wr_alignment - remainder;
107 child->part.offset);
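
Lines 96-107 resolve the magic offsets: MTDPART_OFS_APPEND places the partition right after the previous one, and MTDPART_OFS_NXTBLK additionally rounds up to the next wr_alignment (erase-block) boundary. The kernel uses do_div() because this is 64-bit division on 32-bit hosts; plain % works in a userspace sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* round cur_offset up to the next wr_alignment boundary (lines 98-107) */
    static uint64_t round_up_to_block(uint64_t cur_offset, uint32_t wr_alignment)
    {
        uint32_t remainder = cur_offset % wr_alignment;

        if (remainder)
            cur_offset += wr_alignment - remainder;  /* line 103 */
        return cur_offset;
    }

    int main(void)
    {
        /* e.g. 128 KiB erase blocks, previous partition ended at 0x21000 */
        printf("0x%llx\n",
               (unsigned long long)round_up_to_block(0x21000, 0x20000));
        return 0;  /* prints 0x40000 */
    }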
110 if (child->part.offset == MTDPART_OFS_RETAIN) {
111 child->part.offset = cur_offset;
112 if (parent_size - child->part.offset >= child->part.size) {
113 child->part.size = parent_size - child->part.offset -
114 child->part.size;
117 part->name, parent_size - child->part.offset,
118 child->part.size);
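
Lines 110-118 handle MTDPART_OFS_RETAIN, where part.size initially holds the number of bytes to leave free after the partition; the partition is resized to take everything else, or disabled if the parent is too small. A sketch of that arithmetic (apply_retain is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    static int apply_retain(uint64_t parent_size, uint64_t offset,
                            uint64_t *size /* in: retain, out: part size */)
    {
        if (parent_size - offset >= *size) {       /* line 112 */
            *size = parent_size - offset - *size;  /* lines 113-114 */
            return 0;
        }
        return -1;  /* kernel prints "doesn't have enough space"
                       and disables the partition */
    }

    int main(void)
    {
        uint64_t size = 0x10000;  /* retain 64 KiB at the end */

        if (!apply_retain(0x100000, 0x80000, &size))
            printf("partition size: 0x%llx\n", (unsigned long long)size);
        return 0;  /* 0x100000 - 0x80000 - 0x10000 = 0x70000 */
    }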
123 if (child->part.size == MTDPART_SIZ_FULL)
124 child->part.size = parent_size - child->part.offset;
127 child->part.offset, child->part.offset + child->part.size,
128 child->name);
131 if (child->part.offset >= parent_size) {
133 child->part.offset = 0;
134 child->part.size = 0;
137 child->erasesize = parent->erasesize;
142 if (child->part.offset + child->part.size > parent->size) {
143 child->part.size = parent_size - child->part.offset;
145 part->name, parent->name, child->part.size);
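
Lines 123-124 let MTDPART_SIZ_FULL (which is 0) mean "everything to the end of the parent", and the sanity checks at lines 131-145 then fix up impossible layouts: a partition starting past the end of the parent is registered with zero size so partition numbering is preserved, and one that merely overruns the end is truncated. A combined sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define SIZ_FULL 0   /* MTDPART_SIZ_FULL is 0 in the kernel headers */

    struct span { uint64_t offset, size; };

    static void fit_partition(struct span *p, uint64_t parent_size)
    {
        if (p->size == SIZ_FULL)                    /* line 123 */
            p->size = parent_size - p->offset;      /* line 124 */

        if (p->offset >= parent_size) {             /* line 131: out of reach */
            p->offset = 0;                          /* line 133 */
            p->size = 0;                            /* line 134 */
        } else if (p->offset + p->size > parent_size) {
            p->size = parent_size - p->offset;      /* lines 142-143 */
        }
    }

    int main(void)
    {
        struct span p = { 0xC0000, 0x80000 };       /* overruns a 1 MiB parent */

        fit_partition(&p, 0x100000);
        printf("0x%llx+0x%llx\n", (unsigned long long)p.offset,
               (unsigned long long)p.size);         /* size truncated to 0x40000 */
        return 0;
    }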
151 u64 end = child->part.offset + child->part.size;
156 for (i = 0; i < max && regions[i].offset <= child->part.offset;
165 if (child->erasesize < regions[i].erasesize)
166 child->erasesize = regions[i].erasesize;
168 BUG_ON(child->erasesize == 0);
171 child->erasesize = master->erasesize;
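
Lines 151-171 pick the child's erasesize. On chips with several erase regions, the region table is scanned for every region overlapping the partition and the biggest erasesize wins (the first loop deliberately overshoots by one, hence the i--); on uniform chips the master's erasesize is inherited. A userspace sketch of the scan:

    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t offset; uint32_t erasesize; };

    static uint32_t pick_erasesize(const struct region *regions, int max,
                                   uint64_t part_offset, uint64_t part_size)
    {
        uint64_t end = part_offset + part_size;  /* line 151 */
        uint32_t erasesize = 0;
        int i;

        /* line 156: skip regions wholly before the partition ... */
        for (i = 0; i < max && regions[i].offset <= part_offset; i++)
            ;
        if (i > 0)
            i--;  /* ... the loop stops one region too far */

        /* take the largest erasesize of every overlapping region */
        for (; i < max && regions[i].offset < end; i++)
            if (erasesize < regions[i].erasesize)   /* lines 165-166 */
                erasesize = regions[i].erasesize;

        return erasesize;  /* the kernel BUG_()s on 0 (line 168) */
    }

    int main(void)
    {
        const struct region r[] = {
            { 0x00000, 0x10000 },  /* 64 KiB blocks */
            { 0x80000, 0x20000 },  /* 128 KiB blocks */
        };

        printf("%#x\n", pick_erasesize(r, 2, 0x70000, 0x40000));
        return 0;  /* partition spans both regions -> 0x20000 */
    }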
179 if (!(child->flags & MTD_NO_ERASE))
180 wr_alignment = child->erasesize;
182 tmp = mtd_get_master_ofs(child, 0);
184 if ((child->flags & MTD_WRITEABLE) && remainder) {
188 child->flags &= ~MTD_WRITEABLE;
193 tmp = mtd_get_master_ofs(child, 0) + child->part.size;
195 if ((child->flags & MTD_WRITEABLE) && remainder) {
196 child->flags &= ~MTD_WRITEABLE;
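
Lines 179-196 enforce erase-block alignment: a writeable partition whose start or end offset within the master device is not wr_alignment-aligned is forced read-only, because a partial erase block could never be erased through this partition. A sketch of the two checks (again % in place of the kernel's do_div()):

    #include <stdint.h>
    #include <stdio.h>

    #define MTD_WRITEABLE 0x400

    static uint32_t check_alignment(uint64_t master_ofs, uint64_t size,
                                    uint32_t wr_alignment, uint32_t flags)
    {
        if ((flags & MTD_WRITEABLE) && (master_ofs % wr_alignment))
            flags &= ~MTD_WRITEABLE;            /* line 188: bad start */
        if ((flags & MTD_WRITEABLE) && ((master_ofs + size) % wr_alignment))
            flags &= ~MTD_WRITEABLE;            /* line 196: bad end */
        return flags;
    }

    int main(void)
    {
        uint32_t f = check_alignment(0x20800, 0x20000, 0x20000, MTD_WRITEABLE);

        printf("writeable: %s\n", (f & MTD_WRITEABLE) ? "yes" : "no");
        return 0;  /* "no": 0x20800 starts mid-block */
    }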
201 child->size = child->part.size;
202 child->ecc_step_size = parent->ecc_step_size;
203 child->ecc_strength = parent->ecc_strength;
204 child->bitflip_threshold = parent->bitflip_threshold;
209 while (offs < child->part.size) {
210 if (mtd_block_isreserved(child, offs))
211 child->ecc_stats.bbtblocks++;
212 else if (mtd_block_isbad(child, offs))
213 child->ecc_stats.badblocks++;
214 offs += child->erasesize;
219 return child;
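
Lines 209-219 close allocate_partition() with a bad-block census: the partition is walked one erase block at a time, counting reserved (bad-block-table) and bad blocks into ecc_stats. A sketch where is_reserved() and is_bad() are hypothetical stand-ins for the kernel's mtd_block_isreserved()/mtd_block_isbad():

    #include <stdint.h>
    #include <stdio.h>

    struct ecc_stats { unsigned bbtblocks, badblocks; };

    static int is_reserved(uint64_t offs) { return offs == 0; }       /* fake BBT block */
    static int is_bad(uint64_t offs)      { return offs == 0x40000; } /* fake bad block */

    static void count_bad_blocks(struct ecc_stats *st, uint64_t part_size,
                                 uint32_t erasesize)
    {
        uint64_t offs = 0;

        while (offs < part_size) {           /* line 209 */
            if (is_reserved(offs))
                st->bbtblocks++;             /* line 211 */
            else if (is_bad(offs))
                st->badblocks++;             /* line 213 */
            offs += erasesize;               /* line 214 */
        }
    }

    int main(void)
    {
        struct ecc_stats st = { 0, 0 };

        count_bad_blocks(&st, 0x80000, 0x20000);
        printf("bbt=%u bad=%u\n", st.bbtblocks, st.badblocks);  /* 1 1 */
        return 0;
    }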
252 struct mtd_info *child;
271 child = allocate_partition(parent, &part, -1, offset);
272 if (IS_ERR(child))
273 return PTR_ERR(child);
276 list_add_tail(&child->part.node, &parent->partitions);
279 ret = add_mtd_device(child);
283 mtd_add_partition_attrs(child);
289 list_del(&child->part.node);
292 free_partition(child);
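
Lines 252-292 are from mtd_add_partition(): the child is linked into the parent's partition list before registration, so a failed add_mtd_device() must unlink and free it again. A toy sketch of that link/register/unwind ordering, using a tiny circular doubly-linked list in the style of the kernel's list_head (the stub names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct list { struct list *prev, *next; };

    static void list_add_tail_stub(struct list *n, struct list *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del_stub(struct list *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    int main(void)
    {
        struct list partitions = { &partitions, &partitions };
        struct list *child = malloc(sizeof(*child));
        int ret = -5;  /* pretend add_mtd_device() returned -EIO */

        list_add_tail_stub(child, &partitions);    /* line 276 */
        if (ret) {                                 /* line 279 */
            list_del_stub(child);                  /* line 289 */
            free(child);                           /* line 292: free_partition() */
        }
        printf("list empty again: %d\n", partitions.next == &partitions);
        return 0;
    }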
307 struct mtd_info *child, *next;
310 list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
311 err = __mtd_del_partition(child);
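
Lines 307-311 show __mtd_del_partition() recursing: a child's own sub-partitions are deleted before the child itself, so arbitrarily nested partition trees tear down bottom-up. A sketch of that recursion with an explicit sibling list (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct part {
        const char *name;
        struct part *sub;    /* head of this partition's sub-partitions */
        struct part *next;   /* next sibling */
    };

    static void del_partition_tree(struct part *p)
    {
        struct part *child, *next;

        for (child = p->sub; child; child = next) {
            next = child->next;         /* saved first, as in the _safe loop */
            del_partition_tree(child);  /* line 311: recurse before freeing p */
        }
        printf("deleted %s\n", p->name);
        free(p);
    }

    int main(void)
    {
        struct part *sub = calloc(1, sizeof(*sub));
        struct part *top = calloc(1, sizeof(*top));

        sub->name = "kernel";
        top->name = "firmware";
        top->sub = sub;
        del_partition_tree(top);  /* prints "kernel" then "firmware" */
        return 0;
    }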
332 struct mtd_info *child, *next;
335 list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
336 if (mtd_has_partitions(child))
337 __del_mtd_partitions(child);
339 pr_info("Deleting %s MTD partition\n", child->name);
340 list_del_init(&child->part.node);
341 ret = del_mtd_device(child);
344 child->name, ret);
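
Lines 332-344 (from __del_mtd_partitions()) use list_for_each_entry_safe() so entries can be unlinked mid-walk, and a failed del_mtd_device() is reported and remembered without aborting the loop, so one busy partition does not strand the rest. A sketch of that continue-on-error policy (fail_on is a toy stand-in for a device refusing deletion):

    #include <stdio.h>

    static int del_device(const char *name, const char *fail_on)
    {
        return (name == fail_on) ? -16 /* -EBUSY */ : 0;
    }

    int main(void)
    {
        const char *parts[] = { "boot", "env", "data" };
        int err = 0;

        for (int i = 0; i < 3; i++) {
            int ret = del_device(parts[i], parts[1]);  /* "env" is busy */
            if (ret < 0) {
                printf("Error when deleting partition \"%s\" (%d)\n",
                       parts[i], ret);
                err = ret;
                continue;          /* keep deleting the others (line ~344) */
            }
            printf("Deleting %s MTD partition\n", parts[i]);  /* line 339 */
        }
        return err ? 1 : 0;
    }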
369 struct mtd_info *child, *master = mtd_get_master(mtd);
373 list_for_each_entry(child, &mtd->partitions, part.node) {
374 if (child->index == partno) {
375 ret = __mtd_del_partition(child);
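
Lines 369-375 are mtd_del_partition()'s lookup: scan the parent's partition list for a matching index and delete only that child, returning -EINVAL when nothing matches. A sketch with a toy array in place of the kernel linked list:

    #include <stdio.h>

    struct part { int index; const char *name; int deleted; };

    static int del_partition(struct part *parts, int n, int partno)
    {
        for (int i = 0; i < n; i++) {
            if (parts[i].index == partno) {   /* line 374 */
                parts[i].deleted = 1;         /* __mtd_del_partition() */
                return 0;
            }
        }
        return -22;  /* -EINVAL when no index matches */
    }

    int main(void)
    {
        struct part parts[] = { { 1, "boot", 0 }, { 2, "data", 0 } };

        printf("%d\n", del_partition(parts, 2, 2));  /* 0: "data" deleted */
        return 0;
    }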
387 * and registers the child MTD objects which are bound to the parent according
398 struct mtd_info *child, *master = mtd_get_master(parent);
406 child = allocate_partition(parent, parts + i, i, cur_offset);
407 if (IS_ERR(child)) {
408 ret = PTR_ERR(child);
413 list_add_tail(&child->part.node, &parent->partitions);
416 ret = add_mtd_device(child);
419 list_del(&child->part.node);
422 free_partition(child);
426 mtd_add_partition_attrs(child);
429 ret = parse_mtd_partitions(child, parts[i].types, NULL);
435 cur_offset = child->part.offset + child->part.size;
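
Lines 398-435 are the add_mtd_partitions() loop, which registers each table entry in turn; line 435 is the key bookkeeping, advancing cur_offset past each child so MTDPART_OFS_APPEND entries stack end to end. A sketch of that accumulation (OFS_APPEND is a stand-in for MTDPART_OFS_APPEND, which is (-1)):

    #include <stdint.h>
    #include <stdio.h>

    #define OFS_APPEND ((uint64_t)-1)  /* stand-in for MTDPART_OFS_APPEND */

    struct part_def { uint64_t offset, size; };

    int main(void)
    {
        struct part_def parts[] = {
            { 0,          0x20000 },   /* explicit offset 0 */
            { OFS_APPEND, 0x40000 },   /* follows the first */
            { OFS_APPEND, 0x20000 },   /* follows the second */
        };
        uint64_t cur_offset = 0;

        for (int i = 0; i < 3; i++) {
            uint64_t off = (parts[i].offset == OFS_APPEND) ? cur_offset
                                                           : parts[i].offset;
            printf("part%d: 0x%06llx-0x%06llx\n", i,
                   (unsigned long long)off,
                   (unsigned long long)(off + parts[i].size));
            cur_offset = off + parts[i].size;      /* line 435 */
        }
        return 0;
    }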
585 struct device_node *child;
608 for_each_child_of_node(np, child)
609 if (of_device_is_compatible(child, "nvmem-cells"))
610 of_node_set_flag(child, OF_POPULATED);
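
The final matches (lines 585-610, in the device-tree partition parsing path) walk the direct children of a partitions node and flag every "nvmem-cells"-compatible child OF_POPULATED, so the OF core will not create a separate device for it. A toy sketch of that walk, with dt_node standing in for struct device_node:

    #include <stdio.h>
    #include <string.h>

    struct dt_node {
        const char *compatible;
        int populated;
        struct dt_node *sibling;
    };

    int main(void)
    {
        struct dt_node cells = { "nvmem-cells", 0, NULL };
        struct dt_node other = { "fixed-partitions", 0, &cells };
        struct dt_node *child;

        for (child = &other; child; child = child->sibling)       /* line 608 */
            if (!strcmp(child->compatible, "nvmem-cells"))        /* line 609 */
                child->populated = 1;                             /* line 610 */

        printf("cells populated: %d\n", cells.populated);  /* 1 */
        return 0;
    }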