/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define	DM_MSG_PREFIX	"region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  dm_rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
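/*
 * Illustrative usage sketch (not taken from any in-tree target; the
 * callback names and MAX_RECOVERY are hypothetical): a mirror-style
 * target creates one region hash per mapped device and hands it the
 * dirty log plus callbacks that defer work to its own worker thread:
 *
 *	rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 *				   wakeup_all_recovery_waiters,
 *				   ti->begin, MAX_RECOVERY, log,
 *				   region_size, nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 */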
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	/*
	 * If there was a barrier failure no regions can be marked clean.
	 */
	int barrier_failure;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bio writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
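/*
 * Note: region_size must be a power-of-two number of sectors for the
 * shift-based conversions below to be exact; rh->region_shift is then
 * log2(region_size).  dm_rh_bio_to_region() subtracts target_begin
 * first, so region numbers are relative to the start of the target
 * rather than to the whole device.
 */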
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
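	/*
	 * nr_buckets is now the largest power of two strictly below
	 * max_buckets, with a floor of 64 buckets for small devices,
	 * i.e. roughly one bucket per 64-128 regions.
	 */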

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->barrier_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	if (rh->region_pool)
		mempool_destroy(rh->region_pool);

	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

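/*
 * Multiplicative hash: multiply the region number by a large odd
 * constant, discard the low RH_HASH_SHIFT bits of the product and mask
 * the result into the power-of-two bucket range set up in
 * dm_region_hash_create().
 */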
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

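/*
 * __rh_find() must be called with hash_lock held for read.  If the
 * region is not yet in the hash the read lock is dropped so that
 * __rh_alloc() can take it in write mode and insert a new entry, then
 * re-acquired; callers therefore cannot assume the lock was held
 * continuously across the call.
 */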
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) is treated
	 * as DM_RH_NOSYNC.
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio_empty_barrier(bio)) {
		rh->barrier_failure = 1;
		return;
	}

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In any case, the region should not be connected to any list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

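/*
 * Illustrative call site (names are hypothetical, not from dm-raid1):
 * a target's worker thread would typically run
 *
 *	dm_rh_update_states(ms->rh, errors_handled);
 *	dm_rh_recovery_prepare(ms->rh);
 *	while ((reg = dm_rh_recovery_start(ms->rh)))
 *		recover_region(ms, reg);
 *
 * and then dispatch its queued reads and writes.
 */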
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

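/*
 * rh_inc()/dm_rh_dec() bracket every write to a region: the pending
 * count is raised before the bio is issued and dropped on completion.
 * The first writer takes a clean region off the clean list, moves it
 * to DM_RH_DIRTY and marks it in the dirty log; dm_rh_dec() later
 * returns it to the clean list (or hands a recovering region to the
 * quiesced list) once no writes remain outstanding.
 */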
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio_empty_barrier(bio))
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept
		 * off the clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->barrier_failure)) {
			/*
			 * If a write barrier failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

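/*
 * Pump __rh_recovery_prepare() for as long as a slot on the
 * recovery_count semaphore can be taken, so at most max_recovery
 * regions are being recovered at any time.  recovery_in_flight is
 * raised once per prepared region, plus one guard reference held
 * across the loop and dropped at the end of the function.
 */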
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region, or NULL if none is available.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);  /* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

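/*
 * dm_rh_stop_recovery() claims all max_recovery semaphore slots,
 * waiting for in-progress recoveries to complete and preventing new
 * ones from being prepared; dm_rh_start_recovery() releases the slots
 * again and kicks the worker.
 */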
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");