/**
 * \file
 * \brief Performance counter example: sample instruction pointers on
 *        performance counter overflow.
 */

/*
 * Copyright (c) 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

/* NOTE: the header names of the original #include directives were lost;
 * the set below is reconstructed from the identifiers used in this file. */
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/lmp_endpoints.h>
#include <barrelfish_kpi/perfmon.h>

#define max(x,y) ((x) > (y) ? (x) : (y))

// Buffer for storing the sampled results (instruction pointer + task name)
struct perfmon_data_buffer {
    uint64_t ip;
    char name[PERFMON_DISP_NAME_LEN];
};
#define PF_DATA_MAX 10000

// Name of the domain to measure (cropped to length 8)
#define PF_DOMAIN "monitor"

static struct perfmon_data_buffer pf_data[PF_DATA_MAX];
static uint64_t pf_data_ptr = 0;

// Endpoint for the overflow interrupt handler
static struct capref epcap;
static struct lmp_endpoint *ep;

static uint64_t overflow_ctr;
static uint64_t max_outstanding;

/**
 * \brief Performance counter overflow handler
 *
 * Whenever the performance counter overflows, the current instruction pointer
 * along with the name of the running task is sent as payload to this endpoint.
 * The data is buffered, so this handler may retrieve several messages from the
 * buffer each time it is scheduled.
 */
static void overflow_handler(void *args)
{
    errval_t err;
    uint64_t this_outstanding = 0;

    // Retrieve the payload
    do {
        struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
        err = lmp_endpoint_recv(ep, &msg.buf, NULL);
        if (err_is_ok(err)) {
            this_outstanding++;

            struct perfmon_overflow_data data;
            memcpy(&data, &msg.words, sizeof(data));

            char res[PERFMON_DISP_NAME_LEN + 1];
            memset(res, '\0', PERFMON_DISP_NAME_LEN + 1);
            strncpy(res, data.name, PERFMON_DISP_NAME_LEN);

            overflow_ctr++;

            // Future work:
            //  - get the domain id from the dispatcher
            //  - have spawnd translate the id to a name (see: ps in the shell)

            // Store if coming from the sk_server task ..
            // (The body of this branch was lost; the store below is
            //  reconstructed from the pf_data declarations above.)
            if (pf_data_ptr < PF_DATA_MAX) {
                pf_data[pf_data_ptr].ip = data.ip;
                strncpy(pf_data[pf_data_ptr].name, res, PERFMON_DISP_NAME_LEN);
                pf_data_ptr++;
            }
        }
    } while (err_is_ok(err));

    max_outstanding = max(max_outstanding, this_outstanding);

    // Re-register for the next overflow notification
    // (reconstructed; lmp_endpoint_register() arms the endpoint for one event)
    err = lmp_endpoint_register(ep, get_default_waitset(),
                                MKCLOSURE(overflow_handler, args));
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_endpoint_register");
    }
}

int main(int argc, char *argv[])
{
    errval_t err;

    // Create the endpoint the kernel signals on counter overflow and register
    // the handler for it. (This setup code was lost; endpoint_create() and the
    // buffer size used here are assumptions.)
    err = endpoint_create(DEFAULT_LMP_BUF_WORDS * 100, &epcap, &ep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "endpoint_create");
    }

    err = lmp_endpoint_register(ep, get_default_waitset(),
                                MKCLOSURE(overflow_handler, NULL));
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_endpoint_register");
    }

    // Activate performance monitoring; each overflow delivers one message to
    // the endpoint created above.
    err = invoke_perfmon_activate(cap_perfmon,
                                  0x76,   // Event to monitor
                                  ~0x0,   // UMASK
                                  false,  // Kernel
                                  0,      // Counter ID
                                  250000, // number of events to cause overflow
                                  get_cap_addr(epcap));
    // SAMPLE events to monitor for recent AMD machines:
    //   0x7e  L2 cache misses
    //   0x76  CPU clocks not halted
    // SAMPLE events to monitor for Intel machines:
    //   0x3C  Unhalted core cycles (umask = 0)
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "invoke_perfmon_activate");
    }

    // Wait for overflows
    struct waitset *ws = get_default_waitset();
    while (1) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Performance counter overflow handler died");
            break;
        }
    }

    // should never terminate
    assert(false);
}
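
/*
 * The code that consumed the collected samples was lost from this copy of the
 * file. The function below is only a sketch of how the buffered samples and
 * the overflow statistics could be printed; it is not called anywhere, and its
 * name and output format are assumptions, not part of the original program.
 */
void pf_dump_samples(void)
{
    printf("perfmon: %llu overflows, max %llu outstanding per wakeup\n",
           (unsigned long long)overflow_ctr,
           (unsigned long long)max_outstanding);

    // Print one line per sample: instruction pointer and task name
    for (uint64_t i = 0; i < pf_data_ptr; i++) {
        printf("0x%llx %.*s\n",
               (unsigned long long)pf_data[i].ip,
               (int)PERFMON_DISP_NAME_LEN, pf_data[i].name);
    }
}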