/*-
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/i386/include/counter.h 344230 2019-02-17 10:01:42Z kib $
 */

/*
 * i386 machine-dependent part of the counter(9) per-CPU 64-bit counters.
 *
 * On CPUs that implement cmpxchg8b (CPUID_CX8 set in cpu_feature) the
 * 64-bit per-CPU slots are updated and read atomically without disabling
 * preemption.  On CPUs lacking cmpxchg8b, atomicity is provided by
 * critical sections instead (see the comment in
 * counter_u64_fetch_inline(): such machines are not SMP).
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * Bracket a sequence of counter_u64_add_protected() calls.  A critical
 * section is needed only on CPUs without cmpxchg8b; otherwise these are
 * no-ops.
 */
#define	counter_enter()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_enter();			\
} while (0)

#define	counter_exit()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_exit();			\
} while (0)

/*
 * Base of the per-CPU regions.  counter_64_inc_8b() subtracts this from
 * the counter pointer to obtain an offset that is then applied relative
 * to %fs, which addresses the current CPU's per-CPU area.
 */
extern struct pcpu __pcpu[MAXCPU];

/*
 * Atomically add 'inc' to the current CPU's 64-bit slot of counter 'p'.
 *
 * %esi carries the counter's offset from the per-CPU region base, so
 * %fs:(%esi) always names the slot of the CPU the thread is currently
 * running on.  The cmpxchg8b loop re-reads and retries until the full
 * 64-bit read-modify-write commits unchanged, so the update is safe
 * against preemption (and the compare revalidates the slot contents on
 * every iteration).
 */
static inline void
counter_64_inc_8b(uint64_t *p, int64_t inc)
{

	__asm __volatile(
	"movl	%%fs:(%%esi),%%eax\n\t"
	"movl	%%fs:4(%%esi),%%edx\n"
"1:\n\t"
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"addl	(%%edi),%%ebx\n\t"
	"adcl	4(%%edi),%%ecx\n\t"
	"cmpxchg8b	%%fs:(%%esi)\n\t"
	"jnz	1b"
	:
	: "S" ((char *)p - (char *)&__pcpu[0]), "D" (&inc)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

/*
 * Implementation details below are exposed only to the counter(9)
 * implementation file, which defines IN_SUBR_COUNTER_C before including
 * this header.
 */
#ifdef IN_SUBR_COUNTER_C
/* Shared argument for the fetch rendezvous: running sum and counter base. */
struct counter_u64_fetch_cx8_arg {
	uint64_t res;
	uint64_t *p;
};

/*
 * Atomically read one CPU's 64-bit counter slot on a 32-bit CPU.
 *
 * cmpxchg8b loads the old memory value into %edx:%eax when the compare
 * fails; when it succeeds, %ebx:%ecx (copied from the arbitrary incoming
 * %eax/%edx) rewrites the slot with the very value it already held.
 * Either way %edx:%eax ends up holding an atomic snapshot of the slot,
 * which is reassembled into a uint64_t.
 */
static uint64_t
counter_u64_read_one_8b(uint64_t *p)
{
	uint32_t res_lo, res_high;

	__asm __volatile(
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"cmpxchg8b	(%2)"
	: "=a" (res_lo), "=d" (res_high)
	: "SD" (p)
	: "cc", "ebx", "ecx");
	return (res_lo + ((uint64_t)res_high << 32));
}

/*
 * smp_rendezvous() callback: read this CPU's slot of the counter and
 * fold it into the shared running sum.
 */
static void
counter_u64_fetch_cx8_one(void *arg1)
{
	struct counter_u64_fetch_cx8_arg *arg;
	uint64_t val;

	arg = arg1;
	val = counter_u64_read_one_8b((uint64_t *)((char *)arg->p +
	    sizeof(struct pcpu) * PCPU_GET(cpuid)));
	atomic_add_64(&arg->res, val);
}

/*
 * Return the sum of all per-CPU slots of counter 'p'.
 *
 * With cmpxchg8b available, a rendezvous runs
 * counter_u64_fetch_cx8_one() on every CPU so that each slot is read
 * atomically on its owning CPU.  Without it, the slots are summed
 * directly under a critical section (see comment below).
 */
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	struct counter_u64_fetch_cx8_arg arg;
	uint64_t res;
	int i;

	res = 0;
	if ((cpu_feature & CPUID_CX8) == 0) {
		/*
		 * The machines without cmpxchg8b are not SMP.
		 * Disabling the preemption provides atomicity of the
		 * counter reading, since update is done in the
		 * critical section as well.
		 */
		critical_enter();
		CPU_FOREACH(i) {
			res += *(uint64_t *)((char *)p +
			    sizeof(struct pcpu) * i);
		}
		critical_exit();
	} else {
		arg.p = p;
		arg.res = 0;
		smp_rendezvous(NULL, counter_u64_fetch_cx8_one, NULL, &arg);
		res = arg.res;
	}
	return (res);
}

/*
 * Atomically replace one CPU's 64-bit slot with zero (%ebx:%ecx),
 * retrying the cmpxchg8b until it succeeds against concurrent updates.
 */
static inline void
counter_u64_zero_one_8b(uint64_t *p)
{

	__asm __volatile(
	"movl	(%0),%%eax\n\t"
	"movl	4(%0),%%edx\n"
	"xorl	%%ebx,%%ebx\n\t"
	"xorl	%%ecx,%%ecx\n\t"
"1:\n\t"
	"cmpxchg8b	(%0)\n\t"
	"jnz	1b"
	:
	: "SD" (p)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

/*
 * smp_rendezvous() callback: zero the current CPU's slot of the counter
 * whose base pointer is passed as 'arg'.
 */
static void
counter_u64_zero_one_cpu(void *arg)
{
	uint64_t *p;

	p = (uint64_t *)((char *)arg + sizeof(struct pcpu) * PCPU_GET(cpuid));
	counter_u64_zero_one_8b(p);
}

/*
 * Zero every per-CPU slot of counter 'c', either directly under a
 * critical section (no cmpxchg8b, hence no SMP) or via a rendezvous
 * that zeroes each slot atomically on its owning CPU.
 */
static inline void
counter_u64_zero_inline(counter_u64_t c)
{
	int i;

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		CPU_FOREACH(i)
			*(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
		critical_exit();
	} else {
		smp_rendezvous(smp_no_rendezvous_barrier,
		    counter_u64_zero_one_cpu, smp_no_rendezvous_barrier, c);
	}
}
#endif

/*
 * Add 'inc' to counter 'c'.  The caller must be inside a
 * counter_enter()/counter_exit() pair; on CPUs without cmpxchg8b this
 * is asserted (under INVARIANTS) to be a critical section.
 */
#define	counter_u64_add_protected(c, inc)	do {	\
	if ((cpu_feature & CPUID_CX8) == 0) {		\
		CRITICAL_ASSERT(curthread);		\
		*(uint64_t *)zpcpu_get(c) += (inc);	\
	} else						\
		counter_64_inc_8b((c), (inc));		\
} while (0)

/*
 * Add 'inc' to counter 'c'; self-contained variant that enters a
 * critical section itself when cmpxchg8b is unavailable.
 */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		*(uint64_t *)zpcpu_get(c) += inc;
		critical_exit();
	} else {
		counter_64_inc_8b(c, inc);
	}
}

#endif	/* ! __MACHINE_COUNTER_H__ */