Lines matching refs:ce (each entry below is the source line number in the DFS referral cache code, followed by the matching line)

108 static inline bool cache_entry_expired(const struct cache_entry *ce)
113 return timespec64_compare(&ts, &ce->etime) >= 0;
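Lines 108-113 are the expiry test: an entry is stale once the current time reaches ce->etime. A minimal user-space sketch of the same predicate (the in-kernel version compares timespec64 values with timespec64_compare(); plain struct timespec and clock_gettime() stand in here):

    #include <stdbool.h>
    #include <time.h>

    struct cache_entry {
            struct timespec etime;   /* absolute expiry time */
    };

    /* Same ordering contract as the kernel's timespec64_compare():
     * negative, zero, or positive as a is before, equal to, or after b. */
    static int timespec_cmp(const struct timespec *a, const struct timespec *b)
    {
            if (a->tv_sec != b->tv_sec)
                    return a->tv_sec < b->tv_sec ? -1 : 1;
            if (a->tv_nsec != b->tv_nsec)
                    return a->tv_nsec < b->tv_nsec ? -1 : 1;
            return 0;
    }

    /* Expired once "now" is at or past the entry's expiry time. */
    static bool cache_entry_expired(const struct cache_entry *ce)
    {
            struct timespec now;

            clock_gettime(CLOCK_REALTIME, &now);
            return timespec_cmp(&now, &ce->etime) >= 0;
    }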
116 static inline void free_tgts(struct cache_entry *ce)
120 list_for_each_entry_safe(t, n, &ce->tlist, list) {
127 static inline void flush_cache_ent(struct cache_entry *ce)
129 hlist_del_init(&ce->hlist);
130 kfree(ce->path);
131 free_tgts(ce);
133 kmem_cache_free(cache_slab, ce);
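Lines 116-133 tear an entry down: free_tgts() walks ce->tlist with the _safe list iterator because every node is freed mid-walk, and flush_cache_ent() then unhashes the entry and releases its path and slab object. A sketch of the same pattern over a plain singly linked list (the struct names are illustrative, not from the source):

    #include <stdlib.h>

    struct tgt {
            char *name;
            struct tgt *next;
    };

    struct entry {
            char *path;
            struct tgt *tlist;
    };

    /* Grab the successor before freeing the current node -- the reason
     * the kernel walk needs list_for_each_entry_safe(). */
    static void free_tgts(struct entry *e)
    {
            struct tgt *t = e->tlist, *next;

            while (t) {
                    next = t->next;
                    free(t->name);
                    free(t);
                    t = next;
            }
            e->tlist = NULL;
    }

    /* Same release order as flush_cache_ent(): path, targets, then the
     * entry itself (unhashing from the table is omitted here). */
    static void flush_entry(struct entry *e)
    {
            free(e->path);
            free_tgts(e);
            free(e);
    }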
143 struct cache_entry *ce;
145 hlist_for_each_entry_safe(ce, n, l, hlist) {
146 if (!hlist_unhashed(&ce->hlist))
147 flush_cache_ent(ce);
158 struct cache_entry *ce;
167 hlist_for_each_entry(ce, l, hlist) {
168 if (hlist_unhashed(&ce->hlist))
173 ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
174 ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
175 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
176 ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
178 list_for_each_entry(t, &ce->tlist, list) {
181 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
226 static inline void dump_tgts(const struct cache_entry *ce)
231 list_for_each_entry(t, &ce->tlist, list) {
233 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
237 static inline void dump_ce(const struct cache_entry *ce)
240 ce->path,
241 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
242 ce->etime.tv_nsec,
243 ce->hdr_flags, ce->ref_flags,
244 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
245 ce->path_consumed,
246 cache_entry_expired(ce) ? "yes" : "no");
247 dump_tgts(ce);
340 static inline char *get_tgt_name(const struct cache_entry *ce)
342 struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
383 struct cache_entry *ce, const char *tgthint)
388 ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
389 ce->etime = get_expire_time(ce->ttl);
390 ce->srvtype = refs[0].server_type;
391 ce->hdr_flags = refs[0].flags;
392 ce->ref_flags = refs[0].ref_flag;
393 ce->path_consumed = refs[0].path_consumed;
400 free_tgts(ce);
404 list_add(&t->list, &ce->tlist);
407 list_add_tail(&t->list, &ce->tlist);
409 ce->numtgts++;
412 target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
414 WRITE_ONCE(ce->tgthint, target);
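Lines 383-414 (copy_ref_data) clamp the referral TTL to a floor, copy the header fields, rebuild the target list, and finally publish a target hint with WRITE_ONCE() so the lockless READ_ONCE() readers above always see a whole pointer. A sketch of the clamp-and-publish steps, using C11 atomics as a user-space stand-in for READ_ONCE/WRITE_ONCE; the 120-second floor mirrors the kernel's CACHE_MIN_TTL but is illustrative here, and later sketches reuse these struct entry/struct tgt shapes:

    #include <stdatomic.h>

    #define CACHE_MIN_TTL 120       /* illustrative floor, in seconds */

    struct tgt {
            char *name;
            struct tgt *next;
    };

    struct entry {
            int ttl;
            struct tgt *tlist;                  /* target list */
            _Atomic(struct tgt *) tgthint;      /* read locklessly elsewhere */
    };

    /* max_t(int, refs[0].ttl, CACHE_MIN_TTL) in the kernel. */
    static inline int clamp_ttl(int ref_ttl)
    {
            return ref_ttl > CACHE_MIN_TTL ? ref_ttl : CACHE_MIN_TTL;
    }

    /* WRITE_ONCE() pairs with the readers' READ_ONCE() so a torn
     * pointer store is never observed; relaxed atomics model that. */
    static void publish_hint(struct entry *e, struct tgt *first)
    {
            atomic_store_explicit(&e->tgthint, first, memory_order_relaxed);
    }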
422 struct cache_entry *ce;
425 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
426 if (!ce)
429 ce->path = refs[0].path_name;
432 INIT_HLIST_NODE(&ce->hlist);
433 INIT_LIST_HEAD(&ce->tlist);
435 rc = copy_ref_data(refs, numrefs, ce, NULL);
437 kfree(ce->path);
438 kmem_cache_free(cache_slab, ce);
439 ce = ERR_PTR(rc);
441 return ce;
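Lines 422-441 (alloc_cache_entry) return either a valid entry or ERR_PTR(rc): the kernel packs a small negative errno into the pointer value so one return slot carries both outcomes, and callers test it with IS_ERR()/PTR_ERR() as the lookup paths below do. A minimal sketch of that encoding (errno magnitudes are assumed to stay below 4096, as in the kernel):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* A negative errno lands in the last page of the address space,
     * which no valid allocation ever occupies. */
    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static inline bool IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }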
447 struct cache_entry *ce;
455 hlist_for_each_entry(ce, l, hlist) {
456 if (hlist_unhashed(&ce->hlist))
458 if (!to_del || timespec64_compare(&ce->etime,
460 to_del = ce;
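Lines 447-460 pick an eviction victim: every bucket is scanned and the entry with the earliest etime wins, a plain running minimum. A sketch reusing timespec_cmp() from the expiry sketch above (the open-chained bucket layout is illustrative):

    #include <time.h>

    struct node {
            struct timespec etime;
            struct node *next;
    };

    /* Running minimum over all buckets: keep whichever entry would
     * expire first; that is the one worth evicting. */
    static struct node *oldest_entry(struct node *buckets[], int nbuckets)
    {
            struct node *to_del = NULL;

            for (int i = 0; i < nbuckets; i++)
                    for (struct node *ce = buckets[i]; ce; ce = ce->next)
                            if (!to_del ||
                                timespec_cmp(&ce->etime, &to_del->etime) < 0)
                                    to_del = ce;
            return to_del;
    }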
479 struct cache_entry *ce;
494 ce = alloc_cache_entry(refs, numrefs);
495 if (IS_ERR(ce))
496 return ce;
498 ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
501 hlist_add_head(&ce->hlist, &cache_htable[hash]);
502 dump_ce(ce);
506 return ce;
537 struct cache_entry *ce;
539 hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
540 if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
541 dump_ce(ce);
542 return ce;
559 struct cache_entry *ce;
595 ce = __lookup_cache_entry(path, hash, len);
596 if (!IS_ERR(ce))
597 return ce;
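Lines 537-597 are the two-level lookup: lookup_cache_entry() hashes the path, then __lookup_cache_entry() walks that single bucket comparing stored paths. A sketch of the bucket walk; the kernel compares DFS paths through dfs_path_equal(), for which strncasecmp stands in here, and ERR_PTR/-ENOENT come from the sketch above:

    #include <errno.h>
    #include <string.h>
    #include <strings.h>

    struct centry {
            char *path;
            struct centry *next;
    };

    /* Walk the single bucket the hash selects; first path match wins,
     * otherwise report -ENOENT through the pointer. */
    static struct centry *lookup(struct centry *buckets[], unsigned int hash,
                                 const char *path, size_t len)
    {
            for (struct centry *ce = buckets[hash]; ce; ce = ce->next)
                    if (strlen(ce->path) == len &&
                        !strncasecmp(ce->path, path, len))
                            return ce;
            return ERR_PTR(-ENOENT);
    }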
620 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
629 target = READ_ONCE(ce->tgthint);
636 free_tgts(ce);
637 ce->numtgts = 0;
639 rc = copy_ref_data(refs, numrefs, ce, th);
689 struct cache_entry *ce;
697 ce = lookup_cache_entry(path);
698 if (!IS_ERR(ce)) {
699 if (!force_refresh && !cache_entry_expired(ce))
700 return ce;
701 } else if (PTR_ERR(ce) != -ENOENT) {
703 return ce;
721 ce = ERR_PTR(rc);
729 ce = lookup_cache_entry(path);
730 if (!IS_ERR(ce)) {
731 if (force_refresh || cache_entry_expired(ce)) {
732 rc = update_cache_entry_locked(ce, refs, numrefs);
734 ce = ERR_PTR(rc);
736 } else if (PTR_ERR(ce) == -ENOENT) {
737 ce = add_cache_entry_locked(refs, numrefs);
740 if (IS_ERR(ce)) {
748 return ce;
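Lines 689-748 (cache_refresh_path) are the central control flow: a lookup serves fresh hits; otherwise the referral is fetched from the server with no locks held, and the lookup is repeated under the write lock before updating or inserting, because another task may have raced in meanwhile. A control-flow sketch with the helpers left as declared stubs; their names and signatures are hypothetical, and only the branching is taken from the listing (IS_ERR and friends from the sketch above):

    #include <errno.h>
    #include <stdbool.h>

    struct entry;

    /* Hypothetical stand-ins for the kernel primitives. */
    struct entry *lookup_entry(const char *path);
    struct entry *update_entry(struct entry *ce, const void *refs, int n);
    struct entry *add_entry(const void *refs, int n);
    int fetch_referral(const char *path, void **refs, int *n);
    bool entry_expired(const struct entry *ce);

    static struct entry *refresh_path(const char *path, bool force)
    {
            struct entry *ce = lookup_entry(path);
            void *refs;
            int n, rc;

            /* Fast path: a live, unexpired entry is returned as-is. */
            if (!IS_ERR(ce) && !force && !entry_expired(ce))
                    return ce;
            if (IS_ERR(ce) && PTR_ERR(ce) != -ENOENT)
                    return ce;

            rc = fetch_referral(path, &refs, &n);   /* no locks held */
            if (rc)
                    return ERR_PTR(rc);

            /* Re-check under the write lock (elided): the entry may have
             * been refreshed or inserted while the referral was in flight. */
            ce = lookup_entry(path);
            if (!IS_ERR(ce))
                    return (force || entry_expired(ce))
                            ? update_entry(ce, refs, n) : ce;
            if (PTR_ERR(ce) == -ENOENT)
                    return add_entry(refs, n);
            return ce;
    }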
756 static int setup_referral(const char *path, struct cache_entry *ce,
775 ref->path_consumed = ce->path_consumed;
776 ref->ttl = ce->ttl;
777 ref->server_type = ce->srvtype;
778 ref->ref_flag = ce->ref_flags;
779 ref->flags = ce->hdr_flags;
790 static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
800 list_for_each_entry(t, &ce->tlist, list) {
815 if (READ_ONCE(ce->tgthint) == t)
821 tl->tl_numtgts = ce->numtgts;
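Lines 790-821 (get_targets) duplicate ce->tlist into a caller-owned list so the cache lock can be dropped before the caller walks the targets; line 815 puts the hinted target at the head so it is tried first. A sketch of the copy-out, reusing the struct entry/struct tgt shapes and atomic hint from the copy_ref_data sketch:

    #include <stdlib.h>
    #include <string.h>

    /* Copy the targets into a private chain; the hinted one is
     * prepended so callers try it first. */
    static struct tgt *copy_targets(struct entry *e)
    {
            struct tgt *hint = atomic_load_explicit(&e->tgthint,
                                                    memory_order_relaxed);
            struct tgt *head = NULL, *tail = NULL;

            for (struct tgt *t = e->tlist; t; t = t->next) {
                    struct tgt *it = calloc(1, sizeof(*it));

                    if (!it || !(it->name = strdup(t->name))) {
                            free(it);
                            return NULL;    /* freeing the partial list elided */
                    }
                    if (t == hint) {        /* hint goes to the head */
                            it->next = head;
                            head = it;
                            if (!tail)
                                    tail = it;
                    } else {
                            if (tail)
                                    tail->next = it;
                            else
                                    head = it;
                            tail = it;
                    }
            }
            return head;
    }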
862 struct cache_entry *ce;
868 ce = cache_refresh_path(xid, ses, npath, false);
869 if (IS_ERR(ce)) {
870 rc = PTR_ERR(ce);
875 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
879 rc = get_targets(ce, tgt_list);
908 struct cache_entry *ce;
914 ce = lookup_cache_entry(path);
915 if (IS_ERR(ce)) {
916 rc = PTR_ERR(ce);
921 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
925 rc = get_targets(ce, tgt_list);
949 struct cache_entry *ce;
958 ce = lookup_cache_entry(path);
959 if (IS_ERR(ce))
962 t = READ_ONCE(ce->tgthint);
967 list_for_each_entry(t, &ce->tlist, list) {
969 WRITE_ONCE(ce->tgthint, t);
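Lines 949-969 move the target hint after a successful connection: if the hint already names that target nothing is done, otherwise the list is searched and the hint republished. A sketch over the illustrative types above (the kernel compares target names with strcasecmp, as here):

    #include <strings.h>

    /* Point the hint at the target we just connected to, so the next
     * lookup tries that server first. */
    static void update_hint(struct entry *e, const char *name)
    {
            struct tgt *t = atomic_load_explicit(&e->tgthint,
                                                 memory_order_relaxed);

            if (t && !strcasecmp(t->name, name))
                    return;                 /* hint is already right */

            for (t = e->tlist; t; t = t->next)
                    if (!strcasecmp(t->name, name)) {
                            atomic_store_explicit(&e->tgthint, t,
                                                  memory_order_relaxed);
                            break;
                    }
    }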
994 struct cache_entry *ce;
1003 ce = lookup_cache_entry(path);
1004 if (IS_ERR(ce)) {
1005 rc = PTR_ERR(ce);
1011 rc = setup_referral(path, ce, ref, it->it_name);
1182 struct cache_entry *ce;
1200 ce = lookup_cache_entry(path);
1201 needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
1202 if (!IS_ERR(ce)) {
1203 rc = get_targets(ce, &old_tl);
1220 ce = cache_refresh_path(xid, ses, path, true);
1221 if (!IS_ERR(ce)) {
1222 rc = get_targets(ce, &new_tl);
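Lines 1182-1222 close the loop: the refresher snapshots the target list into old_tl, forces a refresh through cache_refresh_path(), snapshots again into new_tl, and a difference between the two signals that the referral changed and the connection should be re-evaluated. The comparison itself reduces to walking two name lists in lockstep, as this sketch assumes (the in-kernel check is more involved):

    #include <stdbool.h>
    #include <strings.h>

    /* Lockstep walk: any mismatched name, or differing lengths,
     * means the referral's target set changed. */
    static bool tgt_lists_differ(const struct tgt *a, const struct tgt *b)
    {
            for (; a && b; a = a->next, b = b->next)
                    if (strcasecmp(a->name, b->name))
                            return true;
            return a != b;      /* one list ran out before the other */
    }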