Lines Matching defs:vif
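(These matches all appear to come from the xen-netback hash implementation, drivers/net/xen-netback/hash.c in the Linux tree: the backend half of the Xen network hashing protocol, i.e. an RCU-protected hash cache, a Toeplitz hash over a frontend-supplied key, and the control-ring handlers a frontend uses to configure the algorithm, flags, key and queue mapping. A short note follows each group of matches.)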

35 static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
50 spin_lock_irqsave(&vif->hash.cache.lock, flags);
54 list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
55 lockdep_is_held(&vif->hash.cache.lock)) {
65 new->seq = atomic_inc_return(&vif->hash.cache.seq);
66 list_add_rcu(&new->link, &vif->hash.cache.list);
68 if (++vif->hash.cache.count > xenvif_hash_cache_size) {
70 vif->hash.cache.count--;
75 spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
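
The matches at 35-75 are from xenvif_add_hash(), which inserts a freshly computed hash value into the per-vif cache. Stitching the visible lines together with my reading of the upstream source (a sketch, not a verbatim quote; the duplicate-entry check the real function also performs is omitted):

    spin_lock_irqsave(&vif->hash.cache.lock, flags);

    /* Writer-side walk: the lockdep_is_held() argument tells
     * list_for_each_entry_rcu() this traversal is protected by the
     * cache spinlock rather than rcu_read_lock(). */
    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                            lockdep_is_held(&vif->hash.cache.lock)) {
            /* remember the least recently touched entry */
            if (!oldest || entry->seq < oldest->seq)
                    oldest = entry;
    }

    new->seq = atomic_inc_return(&vif->hash.cache.seq);
    list_add_rcu(&new->link, &vif->hash.cache.list);

    if (++vif->hash.cache.count > xenvif_hash_cache_size) {
            list_del_rcu(&oldest->link);
            vif->hash.cache.count--;
            kfree_rcu(oldest, rcu);
    }

    spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

The seq counter is the whole LRU policy: every insert (and, below, every lookup hit) stamps an entry with a higher sequence number, and eviction takes the lowest stamp.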
81 static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
86 val = xen_netif_toeplitz_hash(vif->hash.key,
87 sizeof(vif->hash.key),
91 xenvif_add_hash(vif, data, len, val);
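
Lines 81-91 are xenvif_new_hash(): it computes a Toeplitz hash of `data` under the per-vif key (xen_netif_toeplitz_hash() comes from the shared Xen netif ABI header) and caches the result via xenvif_add_hash(). For reference, a self-contained sketch of the standard RSS Toeplitz definition, with illustrative names, not the kernel's exact helper: for every set bit of the input it XORs in the 32 key bits starting at that bit position.

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t toeplitz_hash(const uint8_t *key, size_t keylen,
                                  const uint8_t *data, size_t len)
    {
            uint32_t hash = 0, window = 0;
            size_t i, bit = 0;

            /* preload the window with key bits 0..31 (zero-padded) */
            for (i = 0; i < 4; i++)
                    window = (window << 8) | (i < keylen ? key[i] : 0);

            for (i = 0; i < len; i++) {
                    int b;

                    for (b = 7; b >= 0; b--) {
                            if (data[i] & (1u << b))
                                    hash ^= window;
                            /* slide the window one key bit along */
                            bit++;
                            window <<= 1;
                            if ((bit + 31) / 8 < keylen)
                                    window |= (key[(bit + 31) / 8] >>
                                               (7 - (bit + 31) % 8)) & 1;
                    }
            }
            return hash;
    }

A full-strength hash needs keylen >= len + 4; shorter keys are treated as zero-padded here, which matches how the kernel's key buffer is zero-padded (see the set-key handler below).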
96 static void xenvif_flush_hash(struct xenvif *vif)
104 spin_lock_irqsave(&vif->hash.cache.lock, flags);
106 list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
107 lockdep_is_held(&vif->hash.cache.lock)) {
109 vif->hash.cache.count--;
113 spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
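
Lines 96-113: xenvif_flush_hash() empties the cache under the same spinlock, again using the lockdep-annotated RCU traversal because it is a writer. Per the upstream source, entries are unlinked with list_del_rcu() and freed with kfree_rcu(), so a concurrent lockless reader in xenvif_find_hash() can never touch freed memory. Roughly:

    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                            lockdep_is_held(&vif->hash.cache.lock)) {
            list_del_rcu(&entry->link);
            vif->hash.cache.count--;
            kfree_rcu(entry, rcu);  /* defer free past RCU readers */
    }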
116 static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
127 return xenvif_new_hash(vif, data, len);
133 list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
137 entry->seq = atomic_inc_return(&vif->hash.cache.seq);
146 val = xenvif_new_hash(vif, data, len);
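
Lines 116-146 are xenvif_find_hash(), the read side. Line 127 appears to be the fallback taken when the cache is disabled (upstream, xenvif_hash_cache_size is a module parameter and may be zero). Line 133 walks the list with plain list_for_each_entry_rcu(), no lockdep argument this time, because the walk runs under rcu_read_lock() rather than the spinlock; line 137 re-stamps a hit's seq so it becomes most recently used. A condensed sketch of the lookup, assuming tag/len/val entry fields as upstream has them:

    rcu_read_lock();
    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
            if (entry->len == len &&
                memcmp(entry->tag, data, len) == 0) {
                    val = entry->val;
                    /* touch: shields the entry from LRU eviction */
                    entry->seq = atomic_inc_return(&vif->hash.cache.seq);
                    found = true;
                    break;
            }
    }
    rcu_read_unlock();

    if (!found)
            val = xenvif_new_hash(vif, data, len);  /* line 146 */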
151 void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
156 u32 flags = vif->hash.flags;
200 hash = xenvif_find_hash(vif, data, sizeof(data));
208 hash = xenvif_find_hash(vif, data, sizeof(data));
224 hash = xenvif_find_hash(vif, data, sizeof(data));
232 hash = xenvif_find_hash(vif, data, sizeof(data));
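
Lines 151-232: xenvif_set_skb_hash() snapshots the enabled hash types (line 156) and picks one of four flow tuples; the four xenvif_find_hash() calls at 200, 208, 224 and 232 correspond to IPv4+TCP, IPv4-only, IPv6+TCP and IPv6-only. Upstream dissects the skb with skb_flow_dissect_flow_keys() and packs the relevant fields into a stack buffer, which is why every call hashes sizeof(data) bytes. A sketch of the IPv4+TCP case, assuming the usual struct flow_keys layout:

    u8 data[12];    /* saddr(4) | daddr(4) | sport(2) | dport(2) */

    memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
    memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
    memcpy(&data[8], &flow.ports.src, 2);
    memcpy(&data[10], &flow.ports.dst, 2);

    hash = xenvif_find_hash(vif, data, sizeof(data));
    type = PKT_HASH_TYPE_L4;        /* ports were mixed in */

The address-only cases hash 8 (IPv4) or 32 (IPv6) bytes and report PKT_HASH_TYPE_L3; the function ends by installing the result with skb_set_hash(skb, hash, type).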
246 u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
257 vif->hash.alg = alg;
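
Lines 246-257: xenvif_set_hash_alg() is the first of the control-ring handlers; all of the u32-returning setters below report XEN_NETIF_CTRL_STATUS_* codes back to the frontend. Upstream accepts only NONE and TOEPLITZ:

    switch (alg) {
    case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
    case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
            break;
    default:
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
    }

    vif->hash.alg = alg;    /* line 257 */
    return XEN_NETIF_CTRL_STATUS_SUCCESS;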
262 u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
264 if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
275 u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
283 if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
286 vif->hash.flags = flags;
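
Lines 262-286: both flag handlers bail out while the algorithm is still NONE (lines 264 and 283), upstream with XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED. The getter reports the supported XEN_NETIF_CTRL_HASH_TYPE_* bits; the setter validates the requested mask before storing it at line 286, roughly:

    /* reject any bit outside the four supported hash types */
    if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
                  XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                  XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
                  XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    vif->hash.flags = flags;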
291 u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
293 u8 *key = vif->hash.key;
296 .source.domid = vif->domid,
318 xenvif_flush_hash(vif);
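
Lines 291-318: xenvif_set_hash_key() pulls the key out of the guest with a single grant copy; .source.domid = vif->domid at line 296 names the frontend domain as the owner of the grant reference. After a successful copy the cache is flushed (line 318) because every cached value was computed with the old key. The copy descriptor, roughly as upstream builds it:

    struct gnttab_copy copy_op = {
            .source.u.ref = gref,           /* frontend's grant ref */
            .source.domid = vif->domid,
            .dest.u.gmfn = virt_to_gfn(key),
            .dest.domid = DOMID_SELF,
            .dest.offset = xen_offset_in_page(key),
            .len = len,
            .flags = GNTCOPY_source_gref
    };

    gnttab_batch_copy(&copy_op, 1);
    if (copy_op.status != GNTST_okay)
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

Upstream also zero-fills the tail of the key buffer when len is shorter than the maximum, so the Toeplitz window above always reads defined bytes.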
323 u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
328 vif->hash.size = size;
329 memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
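
Lines 323-329: xenvif_set_hash_mapping_size() records the new table size and zeroes the currently selected mapping buffer, so a frontend that shrinks and re-grows the table never sees stale queue numbers. With the bound check upstream performs first:

    if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    vif->hash.size = size;
    memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
           sizeof(u32) * size);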
335 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
338 u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
342 .source.domid = vif->domid,
348 if ((off + len < off) || (off + len > vif->hash.size) ||
364 memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
365 vif->hash.size * sizeof(*mapping));
376 if (mapping[off++] >= vif->num_queues)
379 vif->hash.mapping_sel = !vif->hash.mapping_sel;
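
Lines 335-379 show the double-buffered mapping update. Line 338 writes into the inactive table (!vif->hash.mapping_sel); line 348 rejects both integer wrap-around of off + len and writes past the advertised size; lines 364-365 seed the inactive table from the live one so slots outside the updated range keep their values; line 376 checks that every new entry names an existing queue; only then does line 379 flip mapping_sel, so readers atomically switch from one complete table to the other. Condensed, with the grant copy of the new entries elided (the third condition below is the unmatched continuation of line 348, per my reading of upstream):

    u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];

    if ((off + len < off) || (off + len > vif->hash.size) ||
        len > XEN_PAGE_SIZE / sizeof(*mapping))
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    /* start from the live table so unwritten slots are preserved */
    memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
           vif->hash.size * sizeof(*mapping));

    /* ... grant-copy len new entries from the frontend into
     * mapping[off .. off + len) ... */

    while (len-- != 0)
            if (mapping[off++] >= vif->num_queues)
                    return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    /* publish: readers see either the old or the new table, whole */
    vif->hash.mapping_sel = !vif->hash.mapping_sel;
    return XEN_NETIF_CTRL_STATUS_SUCCESS;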
385 void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
389 switch (vif->hash.alg) {
401 if (vif->hash.flags) {
404 if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
406 if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
408 if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
410 if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
426 seq_printf(m, "%02x ", vif->hash.key[i]);
431 if (vif->hash.size != 0) {
432 const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
436 for (i = 0; i < vif->hash.size; ) {
440 if (i + n >= vif->hash.size)
441 n = vif->hash.size - i;
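
Lines 385-441: xenvif_dump_hash_info() is the debugfs pretty-printer. The switch at 389 prints the algorithm name, the flag tests at 404-410 print one line per enabled hash type, line 426 hex-dumps the key, and 431-441 print the active mapping table in fixed-width rows, with 440-441 clamping the final, possibly partial row. The row loop, roughly (eight entries per row, per my reading of upstream):

    for (i = 0; i < vif->hash.size; ) {
            unsigned int n = 8;

            if (i + n >= vif->hash.size)
                    n = vif->hash.size - i;

            seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

            while (n--)
                    seq_printf(m, "%4u ", mapping[i++]);

            seq_puts(m, "\n");
    }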
454 void xenvif_init_hash(struct xenvif *vif)
459 BUG_ON(vif->hash.cache.count);
461 spin_lock_init(&vif->hash.cache.lock);
462 INIT_LIST_HEAD(&vif->hash.cache.list);
465 void xenvif_deinit_hash(struct xenvif *vif)
467 xenvif_flush_hash(vif);
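
Lines 454-467: setup and teardown. xenvif_init_hash() asserts the cache starts empty (the BUG_ON at 459) and initializes the spinlock and list head everything above depends on; xenvif_deinit_hash() only needs to flush (line 467), since xenvif_flush_hash() already unlinks and frees every entry. Upstream, both init and flush return early when the hash_cache_size module parameter is zero, so the lock is never touched while disabled:

    void xenvif_init_hash(struct xenvif *vif)
    {
            if (xenvif_hash_cache_size == 0)
                    return;         /* cache disabled; nothing to set up */

            BUG_ON(vif->hash.cache.count);

            spin_lock_init(&vif->hash.cache.lock);
            INIT_LIST_HEAD(&vif->hash.cache.list);
    }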