Searched refs:zone (Results 1 - 25 of 213) sorted by relevance

/linux-master/drivers/md/dm-vdo/
logical-zone.c
6 #include "logical-zone.h"
21 #include "physical-zone.h"
47 * initialize_zone() - Initialize a logical zone.
48 * @zones: The logical_zones to which this zone belongs.
55 struct logical_zone *zone = &zones->zones[zone_number]; local
58 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
63 zone->next = &zones->zones[zone_number + 1];
65 vdo_initialize_completion(&zone->completion, vdo,
67 zone->zones = zones;
68 zone
91 zone_count_t zone; local
143 assert_on_zone_thread(struct logical_zone *zone, const char *what) argument
153 check_for_drain_complete(struct logical_zone *zone) argument
203 struct logical_zone *zone = &(((struct logical_zones *) context)->zones[zone_number]); local
225 update_oldest_active_generation(struct logical_zone *zone) argument
246 vdo_increment_logical_zone_flush_generation(struct logical_zone *zone, sequence_number_t expected_generation) argument
267 struct logical_zone *zone = data_vio->logical.zone; local
287 struct logical_zone *zone = as_logical_zone(completion); local
302 struct logical_zone *zone = as_logical_zone(completion); local
327 struct logical_zone *zone = data_vio->logical.zone; local
346 vdo_get_next_allocation_zone(struct logical_zone *zone) argument
364 vdo_dump_logical_zone(const struct logical_zone *zone) argument
[all...]
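
A minimal sketch of the zone-chaining idiom visible in initialize_zone() above: each zone in a flat array points at its successor so callers can hand work to the next zone in turn. All names here (my_zone, my_zones, ZONE_COUNT) are illustrative, not VDO's.

    #include <stddef.h>

    #define ZONE_COUNT 4

    struct my_zone {
        struct my_zone *next;   /* successor in the array, NULL for the last */
        unsigned int number;
    };

    struct my_zones {
        struct my_zone zones[ZONE_COUNT];
    };

    static void initialize_zones(struct my_zones *zones)
    {
        for (unsigned int i = 0; i < ZONE_COUNT; i++) {
            zones->zones[i].number = i;
            /* Mirrors "zone->next = &zones->zones[zone_number + 1]". */
            zones->zones[i].next =
                (i + 1 < ZONE_COUNT) ? &zones->zones[i + 1] : NULL;
        }
    }
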
/linux-master/arch/x86/mm/
highmem_32.c
10 struct zone *zone; local
14 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
18 for_each_zone(zone) {
21 if (!is_highmem(zone))
24 zone_start_pfn = zone->zone_start_pfn;
25 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
27 nid = zone_to_nid(zone);
29 zone->name, nid, zone_start_pfn, zone_end_pfn);
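
A sketch of the zone walk shown above, built only from helpers that appear in these results (for_each_zone(), is_highmem(), zone_to_nid()); kernel context assumed, so this is not a standalone program.

    static void print_highmem_zones(void)
    {
        struct zone *zone;

        for_each_zone(zone) {
            unsigned long start_pfn, end_pfn;

            if (!is_highmem(zone))
                continue;       /* only report highmem zones */

            start_pfn = zone->zone_start_pfn;
            end_pfn = start_pfn + zone->spanned_pages;  /* exclusive end */
            printk(KERN_INFO "%s: node %d, pfns %lu-%lu\n",
                   zone->name, zone_to_nid(zone), start_pfn, end_pfn);
        }
    }
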
/linux-master/include/net/netfilter/
nf_conntrack_zones.h
12 return &ct->zone;
19 nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags) argument
21 zone->id = id;
22 zone->flags = flags;
23 zone->dir = dir;
25 return zone;
36 if (tmpl->zone.flags & NF_CT_FLAG_MARK)
37 return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
43 const struct nf_conntrack_zone *zone)
46 ct->zone
42 nf_ct_zone_add(struct nf_conn *ct, const struct nf_conntrack_zone *zone) argument
50 nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone, enum ip_conntrack_dir dir) argument
56 nf_ct_zone_id(const struct nf_conntrack_zone *zone, enum ip_conntrack_dir dir) argument
[all...]
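
A hedged usage sketch for nf_ct_zone_init() as declared above: it fills a caller-owned nf_conntrack_zone and returns the same pointer. The zone id (42) is an arbitrary example, and NF_CT_DEFAULT_ZONE_DIR is assumed to be the usual both-directions default.

    static const struct nf_conntrack_zone *
    example_zone(struct nf_conntrack_zone *tmp)
    {
        /* id = 42, default direction, no flags */
        return nf_ct_zone_init(tmp, 42, NF_CT_DEFAULT_ZONE_DIR, 0);
    }
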
/linux-master/include/linux/
page-isolation.h
6 static inline bool has_isolate_pageblock(struct zone *zone) argument
8 return zone->nr_isolate_pageblock;
19 static inline bool has_isolate_pageblock(struct zone *zone) argument
38 bool move_freepages_block_isolate(struct zone *zone, struct page *page,
compaction.h
38 * The full zone was compacted scanned but wasn't successful to compact
43 * direct compaction has scanned part of the zone but wasn't successful
85 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
86 extern int fragmentation_index(struct zone *zone, unsigned int order);
92 extern bool compaction_suitable(struct zone *zone, int order,
95 extern void compaction_defer_reset(struct zone *zone, in
110 compaction_suitable(struct zone *zone, int order, int highest_zoneidx) argument
[all...]
memory_hotplug.h
11 struct zone;
70 /* Types for control the zone type of onlined and offlined memory */
145 * Note: any attempt to resize a zone should has pgdat_resize_lock()
146 * zone_span_writelock() both held. This ensure the size of a zone
149 static inline unsigned zone_span_seqbegin(struct zone *zone) argument
151 return read_seqbegin(&zone->span_seqlock);
153 static inline int zone_span_seqretry(struct zone *zone, unsigned iv) argument
155 return read_seqretry(&zone
157 zone_span_writelock(struct zone *zone) argument
161 zone_span_writeunlock(struct zone *zone) argument
165 zone_seqlock_init(struct zone *zone) argument
254 zone_span_seqbegin(struct zone *zone) argument
258 zone_span_seqretry(struct zone *zone, unsigned iv) argument
262 zone_span_writelock(struct zone *zone) argument
263 zone_span_writeunlock(struct zone *zone) argument
264 zone_seqlock_init(struct zone *zone) argument
339 offline_pages(unsigned long start_pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group) argument
[all...]
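
The seqcount helpers above imply the classic read-retry pattern: sample the zone span, then loop if a concurrent resize (holding zone_span_writelock()) raced with the read. A sketch, kernel context assumed:

    static unsigned long read_zone_span(struct zone *zone)
    {
        unsigned seq;
        unsigned long span;

        do {
            seq = zone_span_seqbegin(zone);
            span = zone->spanned_pages;     /* snapshot under the seqlock */
        } while (zone_span_seqretry(zone, seq));

        return span;
    }
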
mmzone.h
129 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
685 * zone lock contention and keep cache-hot pages reusing.
774 * faulted, they come from the right zone right away. However, it is
778 * to a different zone. When migration fails - pinning fails.
799 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
802 * memory to the MOVABLE zone, the vmemmap pages are also placed in
803 * such zone. Such pages cannot be really moved around as they are
825 struct zone { struct
828 /* zone watermarks, access with *_wmark_pages(zone) macro
1020 zone_managed_pages(struct zone *zone) argument
1025 zone_cma_pages(struct zone *zone) argument
1034 zone_end_pfn(const struct zone *zone) argument
1039 zone_spans_pfn(const struct zone *zone, unsigned long pfn) argument
1044 zone_is_initialized(struct zone *zone) argument
1049 zone_is_empty(struct zone *zone) argument
1169 zone_intersects(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages) argument
1208 struct zone *zone; /* Pointer to actual zone */ member in struct:zoneref
1479 zone_is_zone_device(struct zone *zone) argument
1484 zone_is_zone_device(struct zone *zone) argument
1496 managed_zone(struct zone *zone) argument
1502 populated_zone(struct zone *zone) argument
1508 zone_to_nid(struct zone *zone) argument
1513 zone_set_nid(struct zone *zone, int nid) argument
1518 zone_to_nid(struct zone *zone) argument
1523 zone_set_nid(struct zone *zone, int nid) argument
1545 is_highmem(struct zone *zone) argument
[all...]
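
A minimal sketch of the span helpers indexed above: zone_end_pfn() is the exclusive end of the zone (zone_start_pfn + spanned_pages), so zone_spans_pfn() amounts to a half-open range test. Kernel context assumed.

    static bool pfn_in_zone_example(struct zone *zone, unsigned long pfn)
    {
        unsigned long end = zone_end_pfn(zone);     /* one past the last pfn */

        /* Equivalent to zone_spans_pfn(zone, pfn). */
        return zone->zone_start_pfn <= pfn && pfn < end;
    }
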
/linux-master/drivers/block/null_blk/
zoned.c
25 struct nullb_zone *zone)
28 spin_lock_init(&zone->spinlock);
30 mutex_init(&zone->mutex);
34 struct nullb_zone *zone)
37 spin_lock_irq(&zone->spinlock);
39 mutex_lock(&zone->mutex);
43 struct nullb_zone *zone)
46 spin_unlock_irq(&zone->spinlock);
48 mutex_unlock(&zone->mutex);
55 struct nullb_zone *zone; local
24 null_init_zone_lock(struct nullb_device *dev, struct nullb_zone *zone) argument
33 null_lock_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
42 null_unlock_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
185 struct nullb_zone *zone; local
230 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; local
246 struct nullb_zone *zone; local
316 null_check_zone_resources(struct nullb_device *dev, struct nullb_zone *zone) argument
341 struct nullb_zone *zone = &dev->zones[zno]; local
426 null_open_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
483 null_close_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
530 null_finish_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
581 null_reset_zone(struct nullb_device *dev, struct nullb_zone *zone) argument
625 struct nullb_zone *zone; local
686 struct nullb_zone *zone; local
716 null_set_zone_cond(struct nullb_device *dev, struct nullb_zone *zone, enum blk_zone_cond cond) argument
[all...]
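
A sketch of the dual-lock pattern visible above: one helper initializes either a per-zone spinlock or a mutex, and lock/unlock pick the matching primitive. The selector flag (use_spinlock) is a stand-in for whatever condition null_blk actually tests; kernel context assumed.

    struct zone_lock_example {
        bool use_spinlock;      /* assumption: selects the lock flavor */
        spinlock_t spinlock;
        struct mutex mutex;
    };

    static void example_lock_zone(struct zone_lock_example *z)
    {
        if (z->use_spinlock)
            spin_lock_irq(&z->spinlock);    /* fast path, no sleeping */
        else
            mutex_lock(&z->mutex);          /* may sleep */
    }
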
/linux-master/tools/power/cpupower/lib/
powercap.h
38 int powercap_walk_zones(struct powercap_zone *zone,
39 int (*f)(struct powercap_zone *zone));
46 int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val);
47 int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val);
48 int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val);
49 int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val);
50 int powercap_zone_get_enabled(struct powercap_zone *zone, int *mode);
51 int powercap_zone_set_enabled(struct powercap_zone *zone, int mode);
powercap.c
116 static int sysfs_powercap_get64_val(struct powercap_zone *zone, argument
124 strcat(file, zone->sys_name);
138 int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val) argument
140 return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);
143 int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val) argument
145 return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);
148 int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val) argument
150 return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);
153 int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val) argument
155 return sysfs_powercap_get64_val(zone, GET_POWER_U
158 powercap_zone_get_enabled(struct powercap_zone *zone, int *mode) argument
173 powercap_zone_set_enabled(struct powercap_zone *zone, int mode) argument
180 powercap_read_zone(struct powercap_zone *zone) argument
282 powercap_walk_zones(struct powercap_zone *zone, int (*f)(struct powercap_zone *zone)) argument
[all...]
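
A hedged usage sketch for the cpupower powercap helpers declared above: walk the zone tree and print each zone's energy counter. It assumes the callback contract is "return 0 to keep walking", which matches the walk-and-apply shape of powercap_walk_zones(); the zone->name field is taken from its use in powercap-info.c below.

    #include <stdio.h>
    #include <stdint.h>
    #include "powercap.h"   /* cpupower's libpowercap header */

    static int print_energy(struct powercap_zone *zone)
    {
        uint64_t uj;

        if (powercap_get_energy_uj(zone, &uj) == 0)
            printf("%s: %llu uJ\n", zone->name, (unsigned long long)uj);
        return 0;   /* assumption: nonzero would stop the walk */
    }

    /* Caller, given a root zone: powercap_walk_zones(root, print_energy); */
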
/linux-master/fs/pstore/
zone.c
26 * struct psz_buffer - header of zone to flush to storage
31 * @data: zone data.
66 * @off: zone offset of storage
67 * @type: front-end type for this zone
68 * @name: front-end name for this zone
69 * @buffer: pointer to data buffer managed by this zone
72 * @should_recover: whether this zone should recover from storage
75 * zone structure in memory.
90 * struct psz_context - all about running state of pstore/zone
93 * @ppsz: pmsg storage zone
160 buffer_datalen(struct pstore_zone *zone) argument
165 buffer_start(struct pstore_zone *zone) argument
175 psz_zone_read_buffer(struct pstore_zone *zone, char *buf, size_t len, unsigned long off) argument
187 psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf, size_t len, unsigned long off) argument
199 psz_zone_write(struct pstore_zone *zone, enum psz_flush_mode flush_mode, const char *buf, size_t len, unsigned long off) argument
262 psz_flush_dirty_zone(struct pstore_zone *zone) argument
284 struct pstore_zone *zone; local
335 struct pstore_zone *zone = NULL; local
374 struct pstore_zone *zone; local
481 psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone) argument
573 struct pstore_zone *zone; local
643 psz_old_ok(struct pstore_zone *zone) argument
650 psz_ok(struct pstore_zone *zone) argument
657 psz_kmsg_erase(struct psz_context *cxt, struct pstore_zone *zone, struct pstore_record *record) argument
680 psz_record_erase(struct psz_context *cxt, struct pstore_zone *zone) argument
720 psz_write_kmsg_hdr(struct pstore_zone *zone, struct pstore_record *record) argument
749 struct pstore_zone *zone; local
823 psz_record_write(struct pstore_zone *zone, struct pstore_record *record) argument
908 struct pstore_zone *zone = NULL; local
941 psz_kmsg_read_hdr(struct pstore_zone *zone, struct pstore_record *record) argument
958 psz_kmsg_read(struct pstore_zone *zone, struct pstore_record *record) argument
1001 psz_ftrace_read(struct pstore_zone *zone, struct pstore_record *record) argument
1032 psz_record_read(struct pstore_zone *zone, struct pstore_record *record) argument
1063 struct pstore_zone *zone; local
1115 struct pstore_zone *zone = *pszone; local
1156 struct pstore_zone *zone; local
1200 struct pstore_zone **zones, *zone; local
[all...]
/linux-master/virt/kvm/
coalesced_mmio.h
22 struct kvm_coalesced_mmio_zone zone; member in struct:kvm_coalesced_mmio_dev
28 struct kvm_coalesced_mmio_zone *zone);
30 struct kvm_coalesced_mmio_zone *zone);
coalesced_mmio.c
30 * (zone->addr, zone->size)
36 if (addr < dev->zone.addr)
38 if (addr + len > dev->zone.addr + dev->zone.size)
89 ring->coalesced_mmio[insert].pio = dev->zone.pio;
138 struct kvm_coalesced_mmio_zone *zone)
143 if (zone->pio != 1 && zone->pio != 0)
153 dev->zone
137 kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) argument
173 kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) argument
[all...]
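
A standalone sketch of the range check quoted above from coalesced_mmio.c: an access [addr, addr + len) is coalesced only if it lies entirely inside [zone.addr, zone.addr + zone.size).

    #include <stdint.h>

    static int in_zone(uint64_t addr, uint32_t len,
                       uint64_t zone_addr, uint64_t zone_size)
    {
        if (addr < zone_addr)
            return 0;                       /* starts before the zone */
        if (addr + len > zone_addr + zone_size)
            return 0;                       /* runs past the zone end */
        return 1;
    }
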
/linux-master/mm/
page_alloc.c
82 * shuffle the whole zone).
91 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
277 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
290 static bool try_to_accept_memory(struct zone *zone, unsigned int order);
316 _deferred_grow_zone(struct zone *zone, unsigned int order) argument
318 return deferred_grow_zone(zone, order);
426 static int page_outside_zone_boundaries(struct zone *zone, struc argument
451 bad_range(struct zone *zone, struct page *page) argument
461 bad_range(struct zone *zone, struct page *page) argument
575 task_capc(struct zone *zone) argument
613 task_capc(struct zone *zone) argument
626 account_freepages(struct zone *zone, int nr_pages, int migratetype) argument
639 __add_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype, bool tail) argument
661 move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int old_mt, int new_mt) argument
677 __del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype) argument
694 del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype) argument
757 __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype, fpi_t fpi_flags) argument
1136 free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp, int pindex) argument
1189 free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, fpi_t fpi_flags) argument
1206 struct zone *zone = page_zone(page); local
1276 __pageblock_pfn_to_page(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone) argument
1318 expand(struct zone *zone, struct page *page, int low, int high, int migratetype) argument
1498 __rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype) argument
1536 __rmqueue_cma_fallback(struct zone *zone, unsigned int order) argument
1542 __rmqueue_cma_fallback(struct zone *zone, unsigned int order) argument
1550 __move_freepages_block(struct zone *zone, unsigned long start_pfn, int old_mt, int new_mt) argument
1585 prep_move_freepages_block(struct zone *zone, struct page *page, unsigned long *start_pfn, int *num_free, int *num_movable) argument
1635 move_freepages_block(struct zone *zone, struct page *page, int old_mt, int new_mt) argument
1672 split_large_buddy(struct zone *zone, struct page *page, unsigned long pfn, int order) argument
1711 move_freepages_block_isolate(struct zone *zone, struct page *page, int migratetype) argument
1798 boost_watermark(struct zone *zone) argument
1844 steal_suitable_fallback(struct zone *zone, struct page *page, int current_order, int order, int start_type, unsigned int alloc_flags, bool whole_block) argument
1961 reserve_highatomic_pageblock(struct page *page, struct zone *zone) argument
2011 struct zone *zone; local
2094 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags) argument
2175 __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags) argument
2212 rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, unsigned int alloc_flags) argument
2247 decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) argument
2283 drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) argument
2300 drain_pages_zone(unsigned int cpu, struct zone *zone) argument
2320 struct zone *zone; local
2330 drain_local_pages(struct zone *zone) argument
2350 __drain_all_pages(struct zone *zone, bool force_all_cpus) argument
2423 drain_all_pages(struct zone *zone) argument
2453 nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, int batch, bool free_high) argument
2501 free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, struct page *page, int migratetype, unsigned int order) argument
2556 struct zone *zone; local
2634 struct zone *zone = folio_zone(folio); local
2721 struct zone *zone = page_zone(page); local
2771 struct zone *zone = page_zone(page); local
2808 rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, unsigned int order, unsigned int alloc_flags, int migratetype) argument
2846 nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) argument
2898 __rmqueue_pcplist(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags, struct per_cpu_pages *pcp, struct list_head *list) argument
2929 rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags) argument
2976 rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype) argument
3193 zone_allows_reclaim(struct zone *local_zone, struct zone *zone) argument
3199 zone_allows_reclaim(struct zone *local_zone, struct zone *zone) argument
3214 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) argument
3265 struct zone *zone; local
3639 struct zone *zone = page_zone(page); local
3739 struct zone *zone; local
3909 struct zone *zone; local
4030 struct zone *zone; local
4479 struct zone *zone; local
5011 struct zone *zone; local
5043 zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) argument
5056 struct zone *zone; local
5432 zone_batchsize(struct zone *zone) argument
5481 zone_highsize(struct zone *zone, int batch, int cpu_online, int high_fraction) argument
5578 __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, unsigned long high_max, unsigned long batch) argument
5594 zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) argument
5626 setup_zone_pageset(struct zone *zone) argument
5651 zone_pcp_update(struct zone *zone, int cpu_online) argument
5658 zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) argument
5682 struct zone *zone; local
5695 struct zone *zone; local
5720 zone_pcp_init(struct zone *zone) argument
5787 struct zone *zone; local
5818 struct zone *zone; local
5851 struct zone *zone = pgdat->node_zones + i; local
5888 struct zone *zone = &pgdat->node_zones[i]; local
5915 struct zone *zone; local
5983 struct zone *zone; local
6096 struct zone *zone; local
6124 struct zone *zone; local
6182 struct zone *zone; local
6544 zone_spans_last_pfn(const struct zone *zone, unsigned long start_pfn, unsigned long nr_pages) argument
6578 struct zone *zone; local
6633 zone_pcp_disable(struct zone *zone) argument
6640 zone_pcp_enable(struct zone *zone) argument
6647 zone_pcp_reset(struct zone *zone) argument
6675 struct zone *zone; local
6735 add_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype, bool tail) argument
6747 break_down_buddy_pages(struct zone *zone, struct page *page, struct page *target, int low, int high, int migratetype) argument
6778 struct zone *zone = page_zone(page); local
6814 struct zone *zone = page_zone(page); local
6841 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; local
6886 try_to_accept_memory_one(struct zone *zone) argument
6920 try_to_accept_memory(struct zone *zone, unsigned int order) argument
6948 struct zone *zone = page_zone(page); local
6979 try_to_accept_memory(struct zone *zone, unsigned int order) argument
[all...]
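
The buddy-allocator routines indexed above (__free_one_page(), expand(), __rmqueue_smallest()) pair free blocks by flipping the order bit of the pfn. A minimal sketch of that pairing rule; it mirrors the kernel's __find_buddy_pfn(), though the helper name here is illustrative:

    static unsigned long buddy_pfn_example(unsigned long pfn, unsigned int order)
    {
        /* Blocks of size 2^order that differ only in bit `order` are buddies. */
        return pfn ^ (1UL << order);
    }
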
show_mem.c
26 static inline void show_node(struct zone *zone) argument
29 printk("Node %d ", zone_to_nid(zone));
38 struct zone *zone; local
40 for_each_zone(zone)
41 wmark_low += low_wmark_pages(zone);
104 struct zone *zone = &pgdat->node_zones[zone_type]; local
106 if (is_highmem(zone)) {
190 struct zone *zone; local
403 struct zone *zone; local
[all...]
vmstat.c
37 /* zero numa counters within a zone */
38 static void zero_zone_numa_counters(struct zone *zone) argument
43 atomic_long_set(&zone->vm_numa_event[item], 0);
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
54 struct zone *zone; local
56 for_each_populated_zone(zone)
57 zero_zone_numa_counters(zone);
158 * Manage combined zone base
169 fold_vm_zone_numa_events(struct zone *zone) argument
189 struct zone *zone; local
198 calculate_pressure_threshold(struct zone *zone) argument
222 calculate_normal_threshold(struct zone *zone) argument
275 struct zone *zone; local
320 struct zone *zone; local
342 __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long delta) argument
432 __inc_zone_state(struct zone *zone, enum zone_stat_item item) argument
488 __dec_zone_state(struct zone *zone, enum zone_stat_item item) argument
557 mod_zone_state(struct zone *zone, enum zone_stat_item item, long delta, int overstep_mode) argument
596 mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long delta) argument
692 mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long delta) argument
706 struct zone *zone; local
811 struct zone *zone; local
899 struct zone *zone; local
955 drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats) argument
1052 fill_contig_page_info(struct zone *zone, unsigned int suitable_order, struct contig_page_info *info) argument
1119 extfrag_for_order(struct zone *zone, unsigned int order) argument
1133 fragmentation_index(struct zone *zone, unsigned int order) argument
1458 struct zone *zone; local
1476 frag_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1501 pagetypeinfo_showfree_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1558 pagetypeinfo_showblockcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1666 is_zone_first_populated(pg_data_t *pgdat, struct zone *zone) argument
1680 zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1970 struct zone *zone; local
2177 unusable_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
2227 extfrag_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
[all...]
mmzone.c
30 struct zone *next_zone(struct zone *zone) argument
32 pg_data_t *pgdat = zone->zone_pgdat;
34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
35 zone++;
39 zone = pgdat->node_zones;
41 zone = NULL;
43 return zone;
55 /* Returns the next zone a
[all...]
shuffle.h
20 extern void __shuffle_zone(struct zone *z);
21 static inline void __meminit shuffle_zone(struct zone *z)
44 static inline void shuffle_zone(struct zone *z)
debug_page_alloc.c
35 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order) argument
47 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order) argument
page_isolation.c
21 * consequently belong to a single zone.
37 struct zone *zone = page_zone(page); local
68 * If the zone is movable and we have ruled out all reserved
72 if (zone_idx(zone) == ZONE_MOVABLE)
150 struct zone *zone = page_zone(page); local
155 spin_lock_irqsave(&zone->lock, flags);
163 spin_unlock_irqrestore(&zone->lock, flags);
181 if (!move_freepages_block_isolate(zone, pag
204 struct zone *zone; local
312 struct zone *zone; local
625 struct zone *zone; local
[all...]
/linux-master/fs/adfs/
map.c
14 * zone which contains a bitstream made up of variable sized fragments.
30 * large or fragmented files. The first map zone a fragment starts in
32 * from any zone on the disk.
63 * return the map bit offset of the fragment frag_id in the zone dm.
109 * Scan the free space map, for this zone, calculating the total
133 * exist in this zone.
159 static int scan_map(struct adfs_sb_info *asb, unsigned int zone, argument
166 dm = asb->s_map + zone;
167 zone = asb->s_map_size;
168 dm_end = asb->s_map + zone;
202 unsigned int zone; local
220 unsigned int zone, mapoff; local
312 unsigned int zone, zone_size; local
337 unsigned int zone; local
350 unsigned int zone; local
[all...]
/linux-master/include/uapi/linux/tc_act/
tc_connmark.h
10 __u16 zone; member in struct:tc_connmark
/linux-master/drivers/md/dm-vdo/indexer/
sparse-cache.h
19 * Safe concurrent access to the cache by the zone threads is controlled by the triage queue and
20 * the barrier requests it issues to the zone queues. The set of cached chapters does not and must
21 * not change between the carefully coordinated calls to uds_update_sparse_cache() from the zone
22 * threads. Outside of updates, every zone will get the same result when calling
23 * uds_sparse_cache_contains() as every other zone.
38 int __must_check uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter);
42 int __must_check uds_search_sparse_cache(struct index_zone *zone,
/linux-master/tools/power/cpupower/utils/
powercap-info.c
26 static int powercap_print_one_zone(struct powercap_zone *zone) argument
31 for (i = 0; i < zone->tree_depth && i < POWERCAP_MAX_TREE_DEPTH; i++)
34 printf("%sZone: %s", pr_prefix, zone->name);
35 ret = powercap_zone_get_enabled(zone, &mode);
40 if (zone->has_power_uw)
44 if (zone->has_energy_uj)
/linux-master/drivers/md/
dm-zoned-metadata.c
33 * blocks indicating zone block validity.
39 * the first conventional zone found on disk.
87 * and give the zone ID (dzone_id) mapping the chunk on disk.
88 * This zone may be sequential or random. If it is a sequential
89 * zone, a second zone (bzone_id) used as a write buffer may
90 * also be specified. This second zone will always be a randomly
91 * writeable zone.
137 struct dm_zone *zone; member in struct:dmz_sb
221 static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone) argument
229 dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) argument
236 dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) argument
311 struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL); local
1359 struct dm_zone *zone; local
1430 struct dm_zone *zone; local
1457 struct dm_zone *zone = xa_load(&zmd->zones, idx); local
1562 struct dm_zone *zone = data; local
1581 dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1617 dmz_handle_seq_write_err(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1643 dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1850 __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1872 dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1902 dmz_lock_zone_reclaim(struct dm_zone *zone) argument
1914 dmz_unlock_zone_reclaim(struct dm_zone *zone) argument
1927 dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone) argument
1945 struct dm_zone *zone, *maxw_z = NULL; local
2003 struct dm_zone *zone; local
2021 struct dm_zone *zone = NULL; local
2212 struct dm_zone *zone; local
2284 dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
2331 dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone) argument
2403 dmz_get_bitmap(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) argument
2479 dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) argument
2560 dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) argument
2608 dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) argument
2634 dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks, int set) argument
2677 dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) argument
2697 dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t *chunk_block) argument
2746 dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) argument
2873 struct dm_zone *zone; local
3014 struct dm_zone *zone; local
[all...]
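
A sketch of the chunk-mapping entry the comments above describe: each chunk records the data zone mapping it (dzone_id) and, optionally, a randomly writeable buffer zone (bzone_id). Field names follow the comment; the exact layout and the "unmapped" sentinel are assumptions.

    #include <stdint.h>

    #define ZONE_ID_UNMAPPED UINT32_MAX     /* assumption: marks an unused slot */

    struct chunk_map_entry {
        uint32_t dzone_id;  /* data zone: sequential or random */
        uint32_t bzone_id;  /* write-buffer zone, always randomly writeable */
    };
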

Completed in 695 milliseconds
