/* PAPI 5.0.1.0 */
00001 /****************************/ 00002 /* THIS IS OPEN SOURCE CODE */ 00003 /****************************/ 00004 00023 #include <stdio.h> 00024 #include <string.h> 00025 #include <stdlib.h> 00026 #include <stdint.h> 00027 00028 #include <unistd.h> 00029 #include <dlfcn.h> 00030 00031 /* Headers required by PAPI */ 00032 #include "papi.h" 00033 #include "papi_internal.h" 00034 #include "papi_vector.h" 00035 #include "papi_memory.h" 00036 00037 #define VMWARE_MAX_COUNTERS 256 00038 00039 #define VMWARE_CPU_LIMIT_MHZ 0 00040 #define VMWARE_CPU_RESERVATION_MHZ 1 00041 #define VMWARE_CPU_SHARES 2 00042 #define VMWARE_CPU_STOLEN_MS 3 00043 #define VMWARE_CPU_USED_MS 4 00044 #define VMWARE_ELAPSED_MS 5 00045 00046 #define VMWARE_MEM_ACTIVE_MB 6 00047 #define VMWARE_MEM_BALLOONED_MB 7 00048 #define VMWARE_MEM_LIMIT_MB 8 00049 #define VMWARE_MEM_MAPPED_MB 9 00050 #define VMWARE_MEM_OVERHEAD_MB 10 00051 #define VMWARE_MEM_RESERVATION_MB 11 00052 #define VMWARE_MEM_SHARED_MB 12 00053 #define VMWARE_MEM_SHARES 13 00054 #define VMWARE_MEM_SWAPPED_MB 14 00055 #define VMWARE_MEM_TARGET_SIZE_MB 15 00056 #define VMWARE_MEM_USED_MB 16 00057 00058 #define VMWARE_HOST_CPU_MHZ 17 00059 00060 /* The following 3 require VMWARE_PSEUDO_PERFORMANCE env_var to be set. 
*/ 00061 00062 #define VMWARE_HOST_TSC 18 00063 #define VMWARE_ELAPSED_TIME 19 00064 #define VMWARE_ELAPSED_APPARENT 20 00065 00066 00068 struct _vmware_register { 00069 unsigned int selector; 00072 }; 00073 00075 struct _vmware_native_event_entry { 00076 char name[PAPI_MAX_STR_LEN]; 00077 char description[PAPI_HUGE_STR_LEN]; 00078 char units[PAPI_MIN_STR_LEN]; 00079 int which_counter; 00080 int report_difference; 00081 }; 00082 00083 struct _vmware_reg_alloc { 00084 struct _vmware_register ra_bits; 00085 }; 00086 00087 00088 inline uint64_t rdpmc(int c) 00089 { 00090 uint32_t low, high; 00091 __asm__ __volatile__("rdpmc" : "=a" (low), "=d" (high) : "c" (c)); 00092 return (uint64_t)high << 32 | (uint64_t)low; 00093 } 00094 00095 00096 00097 #ifdef VMGUESTLIB 00098 /* Headers required by VMware */ 00099 #include "vmGuestLib.h" 00100 00101 /* Functions to dynamically load from the GuestLib library. */ 00102 char const * (*GuestLib_GetErrorText)(VMGuestLibError); 00103 VMGuestLibError (*GuestLib_OpenHandle)(VMGuestLibHandle*); 00104 VMGuestLibError (*GuestLib_CloseHandle)(VMGuestLibHandle); 00105 VMGuestLibError (*GuestLib_UpdateInfo)(VMGuestLibHandle handle); 00106 VMGuestLibError (*GuestLib_GetSessionId)(VMGuestLibHandle handle, VMSessionId *id); 00107 VMGuestLibError (*GuestLib_GetCpuReservationMHz)(VMGuestLibHandle handle, uint32 *cpuReservationMHz); 00108 VMGuestLibError (*GuestLib_GetCpuLimitMHz)(VMGuestLibHandle handle, uint32 *cpuLimitMHz); 00109 VMGuestLibError (*GuestLib_GetCpuShares)(VMGuestLibHandle handle, uint32 *cpuShares); 00110 VMGuestLibError (*GuestLib_GetCpuUsedMs)(VMGuestLibHandle handle, uint64 *cpuUsedMs); 00111 VMGuestLibError (*GuestLib_GetHostProcessorSpeed)(VMGuestLibHandle handle, uint32 *mhz); 00112 VMGuestLibError (*GuestLib_GetMemReservationMB)(VMGuestLibHandle handle, uint32 *memReservationMB); 00113 VMGuestLibError (*GuestLib_GetMemLimitMB)(VMGuestLibHandle handle, uint32 *memLimitMB); 00114 VMGuestLibError 
(*GuestLib_GetMemShares)(VMGuestLibHandle handle, uint32 *memShares); 00115 VMGuestLibError (*GuestLib_GetMemMappedMB)(VMGuestLibHandle handle, uint32 *memMappedMB); 00116 VMGuestLibError (*GuestLib_GetMemActiveMB)(VMGuestLibHandle handle, uint32 *memActiveMB); 00117 VMGuestLibError (*GuestLib_GetMemOverheadMB)(VMGuestLibHandle handle, uint32 *memOverheadMB); 00118 VMGuestLibError (*GuestLib_GetMemBalloonedMB)(VMGuestLibHandle handle, uint32 *memBalloonedMB); 00119 VMGuestLibError (*GuestLib_GetMemSwappedMB)(VMGuestLibHandle handle, uint32 *memSwappedMB); 00120 VMGuestLibError (*GuestLib_GetMemSharedMB)(VMGuestLibHandle handle, uint32 *memSharedMB); 00121 VMGuestLibError (*GuestLib_GetMemSharedSavedMB)(VMGuestLibHandle handle, uint32 *memSharedSavedMB); 00122 VMGuestLibError (*GuestLib_GetMemUsedMB)(VMGuestLibHandle handle, uint32 *memUsedMB); 00123 VMGuestLibError (*GuestLib_GetElapsedMs)(VMGuestLibHandle handle, uint64 *elapsedMs); 00124 VMGuestLibError (*GuestLib_GetResourcePoolPath)(VMGuestLibHandle handle, size_t *bufferSize, char *pathBuffer); 00125 VMGuestLibError (*GuestLib_GetCpuStolenMs)(VMGuestLibHandle handle, uint64 *cpuStolenMs); 00126 VMGuestLibError (*GuestLib_GetMemTargetSizeMB)(VMGuestLibHandle handle, uint64 *memTargetSizeMB); 00127 VMGuestLibError (*GuestLib_GetHostNumCpuCores)(VMGuestLibHandle handle, uint32 *hostNumCpuCores); 00128 VMGuestLibError (*GuestLib_GetHostCpuUsedMs)(VMGuestLibHandle handle, uint64 *hostCpuUsedMs); 00129 VMGuestLibError (*GuestLib_GetHostMemSwappedMB)(VMGuestLibHandle handle, uint64 *hostMemSwappedMB); 00130 VMGuestLibError (*GuestLib_GetHostMemSharedMB)(VMGuestLibHandle handle, uint64 *hostMemSharedMB); 00131 VMGuestLibError (*GuestLib_GetHostMemUsedMB)(VMGuestLibHandle handle, uint64 *hostMemUsedMB); 00132 VMGuestLibError (*GuestLib_GetHostMemPhysMB)(VMGuestLibHandle handle, uint64 *hostMemPhysMB); 00133 VMGuestLibError (*GuestLib_GetHostMemPhysFreeMB)(VMGuestLibHandle handle, uint64 *hostMemPhysFreeMB); 00134 
VMGuestLibError (*GuestLib_GetHostMemKernOvhdMB)(VMGuestLibHandle handle, uint64 *hostMemKernOvhdMB); 00135 VMGuestLibError (*GuestLib_GetHostMemMappedMB)(VMGuestLibHandle handle, uint64 *hostMemMappedMB); 00136 VMGuestLibError (*GuestLib_GetHostMemUnmappedMB)(VMGuestLibHandle handle, uint64 *hostMemUnmappedMB); 00137 00138 00139 static void *dlHandle = NULL; 00140 00141 00142 /* 00143 * Macro to load a single GuestLib function from the shared library. 00144 */ 00145 00146 #define LOAD_ONE_FUNC(funcname) \ 00147 do { \ 00148 funcname = dlsym(dlHandle, "VM" #funcname); \ 00149 if ((dlErrStr = dlerror()) != NULL) { \ 00150 fprintf(stderr, "Failed to load \'%s\': \'%s\'\n", \ 00151 #funcname, dlErrStr); \ 00152 return FALSE; \ 00153 } \ 00154 } while (0) 00155 00156 #endif 00157 00159 struct _vmware_control_state { 00160 long long value[VMWARE_MAX_COUNTERS]; 00161 int which_counter[VMWARE_MAX_COUNTERS]; 00162 int num_events; 00163 }; 00164 00166 struct _vmware_context { 00167 long long values[VMWARE_MAX_COUNTERS]; 00168 long long start_values[VMWARE_MAX_COUNTERS]; 00169 #ifdef VMGUESTLIB 00170 VMGuestLibHandle glHandle; 00171 #endif 00172 }; 00173 00174 00175 00176 00177 00178 00179 /* 00180 *----------------------------------------------------------------------------- 00181 * 00182 * LoadFunctions -- 00183 * 00184 * Load the functions from the shared library. 00185 * 00186 * Results: 00187 * TRUE on success 00188 * FALSE on failure 00189 * 00190 * Side effects: 00191 * None 00192 * 00193 * Credit: VMware 00194 *----------------------------------------------------------------------------- 00195 */ 00196 00197 static int 00198 LoadFunctions(void) 00199 { 00200 00201 #ifdef VMGUESTLIB 00202 /* 00203 * First, try to load the shared library. 
00204 */ 00205 00206 char const *dlErrStr; 00207 char filename[BUFSIZ]; 00208 00209 sprintf(filename,"%s","libvmGuestLib.so"); 00210 dlHandle = dlopen(filename, RTLD_NOW); 00211 if (!dlHandle) { 00212 dlErrStr = dlerror(); 00213 fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 00214 dlErrStr); 00215 00216 sprintf(filename,"%s/lib/lib64/libvmGuestLib.so",VMWARE_INCDIR); 00217 dlHandle = dlopen(filename, RTLD_NOW); 00218 if (!dlHandle) { 00219 dlErrStr = dlerror(); 00220 fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 00221 dlErrStr); 00222 00223 sprintf(filename,"%s/lib/lib32/libvmGuestLib.so",VMWARE_INCDIR); 00224 dlHandle = dlopen(filename, RTLD_NOW); 00225 if (!dlHandle) { 00226 dlErrStr = dlerror(); 00227 fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 00228 dlErrStr); 00229 return PAPI_ECMP; 00230 } 00231 } 00232 } 00233 00234 /* Load all the individual library functions. */ 00235 LOAD_ONE_FUNC(GuestLib_GetErrorText); 00236 LOAD_ONE_FUNC(GuestLib_OpenHandle); 00237 LOAD_ONE_FUNC(GuestLib_CloseHandle); 00238 LOAD_ONE_FUNC(GuestLib_UpdateInfo); 00239 LOAD_ONE_FUNC(GuestLib_GetSessionId); 00240 LOAD_ONE_FUNC(GuestLib_GetCpuReservationMHz); 00241 LOAD_ONE_FUNC(GuestLib_GetCpuLimitMHz); 00242 LOAD_ONE_FUNC(GuestLib_GetCpuShares); 00243 LOAD_ONE_FUNC(GuestLib_GetCpuUsedMs); 00244 LOAD_ONE_FUNC(GuestLib_GetHostProcessorSpeed); 00245 LOAD_ONE_FUNC(GuestLib_GetMemReservationMB); 00246 LOAD_ONE_FUNC(GuestLib_GetMemLimitMB); 00247 LOAD_ONE_FUNC(GuestLib_GetMemShares); 00248 LOAD_ONE_FUNC(GuestLib_GetMemMappedMB); 00249 LOAD_ONE_FUNC(GuestLib_GetMemActiveMB); 00250 LOAD_ONE_FUNC(GuestLib_GetMemOverheadMB); 00251 LOAD_ONE_FUNC(GuestLib_GetMemBalloonedMB); 00252 LOAD_ONE_FUNC(GuestLib_GetMemSwappedMB); 00253 LOAD_ONE_FUNC(GuestLib_GetMemSharedMB); 00254 LOAD_ONE_FUNC(GuestLib_GetMemSharedSavedMB); 00255 LOAD_ONE_FUNC(GuestLib_GetMemUsedMB); 00256 LOAD_ONE_FUNC(GuestLib_GetElapsedMs); 00257 LOAD_ONE_FUNC(GuestLib_GetResourcePoolPath); 00258 
LOAD_ONE_FUNC(GuestLib_GetCpuStolenMs); 00259 LOAD_ONE_FUNC(GuestLib_GetMemTargetSizeMB); 00260 LOAD_ONE_FUNC(GuestLib_GetHostNumCpuCores); 00261 LOAD_ONE_FUNC(GuestLib_GetHostCpuUsedMs); 00262 LOAD_ONE_FUNC(GuestLib_GetHostMemSwappedMB); 00263 LOAD_ONE_FUNC(GuestLib_GetHostMemSharedMB); 00264 LOAD_ONE_FUNC(GuestLib_GetHostMemUsedMB); 00265 LOAD_ONE_FUNC(GuestLib_GetHostMemPhysMB); 00266 LOAD_ONE_FUNC(GuestLib_GetHostMemPhysFreeMB); 00267 LOAD_ONE_FUNC(GuestLib_GetHostMemKernOvhdMB); 00268 LOAD_ONE_FUNC(GuestLib_GetHostMemMappedMB); 00269 LOAD_ONE_FUNC(GuestLib_GetHostMemUnmappedMB); 00270 #endif 00271 return PAPI_OK; 00272 } 00273 00274 00275 00276 /* Begin PAPI definitions */ 00277 papi_vector_t _vmware_vector; 00278 00280 static struct _vmware_native_event_entry *_vmware_native_table; 00282 static int num_events = 0; 00283 static int use_pseudo=0; 00284 static int use_guestlib=0; 00285 00286 /************************************************************************/ 00287 /* Below is the actual "hardware implementation" of our VMWARE counters */ 00288 /************************************************************************/ 00289 00293 static long long 00294 _vmware_hardware_read( struct _vmware_context *context, int starting) 00295 { 00296 00297 int i; 00298 00299 if (use_pseudo) { 00300 context->values[VMWARE_HOST_TSC]=rdpmc(0x10000); 00301 context->values[VMWARE_ELAPSED_TIME]=rdpmc(0x10001); 00302 context->values[VMWARE_ELAPSED_APPARENT]=rdpmc(0x10002); 00303 } 00304 00305 00306 #ifdef VMGUESTLIB 00307 static VMSessionId sessionId = 0; 00308 VMSessionId tmpSession; 00309 uint32_t temp32; 00310 uint64_t temp64; 00311 VMGuestLibError glError; 00312 00313 if (use_guestlib) { 00314 00315 glError = GuestLib_UpdateInfo(context->glHandle); 00316 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00317 fprintf(stderr,"UpdateInfo failed: %s\n", 00318 GuestLib_GetErrorText(glError)); 00319 return PAPI_ECMP; 00320 } 00321 00322 /* Retrieve and check the session ID */ 00323 
glError = GuestLib_GetSessionId(context->glHandle, &tmpSession); 00324 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00325 fprintf(stderr, "Failed to get session ID: %s\n", 00326 GuestLib_GetErrorText(glError)); 00327 return PAPI_ECMP; 00328 } 00329 00330 if (tmpSession == 0) { 00331 fprintf(stderr, "Error: Got zero sessionId from GuestLib\n"); 00332 return PAPI_ECMP; 00333 } 00334 00335 if (sessionId == 0) { 00336 sessionId = tmpSession; 00337 } else if (tmpSession != sessionId) { 00338 sessionId = tmpSession; 00339 } 00340 00341 glError = GuestLib_GetCpuLimitMHz(context->glHandle,&temp32); 00342 context->values[VMWARE_CPU_LIMIT_MHZ]=temp32; 00343 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00344 fprintf(stderr,"Failed to get CPU limit: %s\n", 00345 GuestLib_GetErrorText(glError)); 00346 return PAPI_ECMP; 00347 } 00348 00349 glError = GuestLib_GetCpuReservationMHz(context->glHandle,&temp32); 00350 context->values[VMWARE_CPU_RESERVATION_MHZ]=temp32; 00351 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00352 fprintf(stderr,"Failed to get CPU reservation: %s\n", 00353 GuestLib_GetErrorText(glError)); 00354 return PAPI_ECMP; 00355 } 00356 00357 glError = GuestLib_GetCpuShares(context->glHandle,&temp32); 00358 context->values[VMWARE_CPU_SHARES]=temp32; 00359 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00360 fprintf(stderr,"Failed to get cpu shares: %s\n", 00361 GuestLib_GetErrorText(glError)); 00362 return PAPI_ECMP; 00363 } 00364 00365 glError = GuestLib_GetCpuStolenMs(context->glHandle,&temp64); 00366 context->values[VMWARE_CPU_STOLEN_MS]=temp64; 00367 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00368 if (glError == VMGUESTLIB_ERROR_UNSUPPORTED_VERSION) { 00369 context->values[VMWARE_CPU_STOLEN_MS]=0; 00370 fprintf(stderr, "Skipping CPU stolen, not supported...\n"); 00371 } else { 00372 fprintf(stderr, "Failed to get CPU stolen: %s\n", 00373 GuestLib_GetErrorText(glError)); 00374 return PAPI_ECMP; 00375 } 00376 } 00377 00378 glError = 
GuestLib_GetCpuUsedMs(context->glHandle,&temp64); 00379 context->values[VMWARE_CPU_USED_MS]=temp64; 00380 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00381 fprintf(stderr, "Failed to get used ms: %s\n", 00382 GuestLib_GetErrorText(glError)); 00383 return PAPI_ECMP; 00384 } 00385 00386 glError = GuestLib_GetElapsedMs(context->glHandle, &temp64); 00387 context->values[VMWARE_ELAPSED_MS]=temp64; 00388 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00389 fprintf(stderr, "Failed to get elapsed ms: %s\n", 00390 GuestLib_GetErrorText(glError)); 00391 return PAPI_ECMP; 00392 } 00393 00394 glError = GuestLib_GetMemActiveMB(context->glHandle, &temp32); 00395 context->values[VMWARE_MEM_ACTIVE_MB]=temp32; 00396 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00397 fprintf(stderr, "Failed to get active mem: %s\n", 00398 GuestLib_GetErrorText(glError)); 00399 return PAPI_ECMP; 00400 } 00401 00402 glError = GuestLib_GetMemBalloonedMB(context->glHandle, &temp32); 00403 context->values[VMWARE_MEM_BALLOONED_MB]=temp32; 00404 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00405 fprintf(stderr, "Failed to get ballooned mem: %s\n", 00406 GuestLib_GetErrorText(glError)); 00407 return PAPI_ECMP; 00408 } 00409 00410 glError = GuestLib_GetMemLimitMB(context->glHandle, &temp32); 00411 context->values[VMWARE_MEM_LIMIT_MB]=temp32; 00412 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00413 fprintf(stderr,"Failed to get mem limit: %s\n", 00414 GuestLib_GetErrorText(glError)); 00415 return PAPI_ECMP; 00416 } 00417 00418 glError = GuestLib_GetMemMappedMB(context->glHandle, &temp32); 00419 context->values[VMWARE_MEM_MAPPED_MB]=temp32; 00420 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00421 fprintf(stderr, "Failed to get mapped mem: %s\n", 00422 GuestLib_GetErrorText(glError)); 00423 return PAPI_ECMP; 00424 } 00425 00426 glError = GuestLib_GetMemOverheadMB(context->glHandle, &temp32); 00427 context->values[VMWARE_MEM_OVERHEAD_MB]=temp32; 00428 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00429 fprintf(stderr, "Failed 
to get overhead mem: %s\n", 00430 GuestLib_GetErrorText(glError)); 00431 return PAPI_ECMP; 00432 } 00433 00434 glError = GuestLib_GetMemReservationMB(context->glHandle, &temp32); 00435 context->values[VMWARE_MEM_RESERVATION_MB]=temp32; 00436 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00437 fprintf(stderr, "Failed to get mem reservation: %s\n", 00438 GuestLib_GetErrorText(glError)); 00439 return PAPI_ECMP; 00440 } 00441 00442 glError = GuestLib_GetMemSharedMB(context->glHandle, &temp32); 00443 context->values[VMWARE_MEM_SHARED_MB]=temp32; 00444 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00445 fprintf(stderr, "Failed to get swapped mem: %s\n", 00446 GuestLib_GetErrorText(glError)); 00447 return PAPI_ECMP; 00448 } 00449 00450 glError = GuestLib_GetMemShares(context->glHandle, &temp32); 00451 context->values[VMWARE_MEM_SHARES]=temp32; 00452 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00453 if (glError == VMGUESTLIB_ERROR_NOT_AVAILABLE) { 00454 context->values[VMWARE_MEM_SHARES]=0; 00455 fprintf(stderr, "Skipping mem shares, not supported...\n"); 00456 } else { 00457 fprintf(stderr, "Failed to get mem shares: %s\n", 00458 GuestLib_GetErrorText(glError)); 00459 return PAPI_ECMP; 00460 } 00461 } 00462 00463 glError = GuestLib_GetMemSwappedMB(context->glHandle, &temp32); 00464 context->values[VMWARE_MEM_SWAPPED_MB]=temp32; 00465 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00466 fprintf(stderr, "Failed to get swapped mem: %s\n", 00467 GuestLib_GetErrorText(glError)); 00468 return PAPI_ECMP; 00469 } 00470 00471 glError = GuestLib_GetMemTargetSizeMB(context->glHandle, &temp64); 00472 context->values[VMWARE_MEM_TARGET_SIZE_MB]=temp64; 00473 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00474 if (glError == VMGUESTLIB_ERROR_UNSUPPORTED_VERSION) { 00475 context->values[VMWARE_MEM_TARGET_SIZE_MB]=0; 00476 fprintf(stderr, "Skipping target mem size, not supported...\n"); 00477 } else { 00478 fprintf(stderr, "Failed to get target mem size: %s\n", 00479 GuestLib_GetErrorText(glError)); 
00480 return PAPI_ECMP; 00481 } 00482 } 00483 00484 glError = GuestLib_GetMemUsedMB(context->glHandle, &temp32); 00485 context->values[VMWARE_MEM_USED_MB]=temp32; 00486 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00487 fprintf(stderr, "Failed to get swapped mem: %s\n", 00488 GuestLib_GetErrorText(glError)); 00489 return PAPI_ECMP; 00490 } 00491 00492 glError = GuestLib_GetHostProcessorSpeed(context->glHandle, &temp32); 00493 context->values[VMWARE_HOST_CPU_MHZ]=temp32; 00494 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00495 fprintf(stderr, "Failed to get host proc speed: %s\n", 00496 GuestLib_GetErrorText(glError)); 00497 return PAPI_ECMP; 00498 } 00499 } 00500 00501 #endif 00502 00503 if (starting) { 00504 00505 for(i=0;i<VMWARE_MAX_COUNTERS;i++) { 00506 context->start_values[i]=context->values[i]; 00507 } 00508 00509 } 00510 00511 return PAPI_OK; 00512 } 00513 00514 /********************************************************************/ 00515 /* Below are the functions required by the PAPI component interface */ 00516 /********************************************************************/ 00517 00519 int 00520 _vmware_init_thread( hwd_context_t *ctx ) 00521 { 00522 (void) ctx; 00523 00524 00525 #ifdef VMGUESTLIB 00526 00527 struct _vmware_context *context; 00528 VMGuestLibError glError; 00529 00530 context=(struct _vmware_context *)ctx; 00531 00532 if (use_guestlib) { 00533 glError = GuestLib_OpenHandle(&(context->glHandle)); 00534 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00535 fprintf(stderr,"OpenHandle failed: %s\n", 00536 GuestLib_GetErrorText(glError)); 00537 return PAPI_ECMP; 00538 } 00539 } 00540 00541 #endif 00542 00543 return PAPI_OK; 00544 } 00545 00546 00551 int 00552 _vmware_init_component( int cidx ) 00553 { 00554 00555 (void) cidx; 00556 00557 int result; 00558 00559 SUBDBG( "_vmware_init_component..." ); 00560 00561 /* Initialize and try to load the VMware library */ 00562 /* Try to load the library. 
*/ 00563 result=LoadFunctions(); 00564 00565 if (result!=PAPI_OK) { 00566 strncpy(_vmware_vector.cmp_info.disabled_reason, 00567 "GuestLibTest: Failed to load shared library", 00568 PAPI_MAX_STR_LEN); 00569 return PAPI_ECMP; 00570 } 00571 00572 /* we know in advance how many events we want */ 00573 /* for actual hardware this might have to be determined dynamically */ 00574 00575 /* Allocate memory for the our event table */ 00576 _vmware_native_table = ( struct _vmware_native_event_entry * ) 00577 calloc( VMWARE_MAX_COUNTERS, sizeof ( struct _vmware_native_event_entry )); 00578 if ( _vmware_native_table == NULL ) { 00579 return PAPI_ENOMEM; 00580 } 00581 00582 00583 #ifdef VMGUESTLIB 00584 00585 /* Detect if GuestLib works */ 00586 { 00587 00588 VMGuestLibError glError; 00589 VMGuestLibHandle glHandle; 00590 00591 use_guestlib=0; 00592 00593 /* try to open */ 00594 glError = GuestLib_OpenHandle(&glHandle); 00595 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00596 fprintf(stderr,"OpenHandle failed: %s\n", 00597 GuestLib_GetErrorText(glError)); 00598 } 00599 else { 00600 /* open worked, try to update */ 00601 glError = GuestLib_UpdateInfo(glHandle); 00602 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 00603 fprintf(stderr,"UpdateInfo failed: %s\n", 00604 GuestLib_GetErrorText(glError)); 00605 } 00606 else { 00607 /* update worked, things work! 
*/ 00608 use_guestlib=1; 00609 } 00610 /* shut things down */ 00611 glError = GuestLib_CloseHandle(glHandle); 00612 } 00613 00614 } 00615 00616 00617 00618 if (use_guestlib) { 00619 00620 /* fill in the event table parameters */ 00621 strcpy( _vmware_native_table[num_events].name, 00622 "CPU_LIMIT" ); 00623 strncpy( _vmware_native_table[num_events].description, 00624 "Retrieves the upper limit of processor use in MHz " 00625 "available to the virtual machine.", 00626 PAPI_HUGE_STR_LEN); 00627 strcpy( _vmware_native_table[num_events].units,"MHz"); 00628 _vmware_native_table[num_events].which_counter= 00629 VMWARE_CPU_LIMIT_MHZ; 00630 _vmware_native_table[num_events].report_difference=0; 00631 num_events++; 00632 00633 strcpy( _vmware_native_table[num_events].name, 00634 "CPU_RESERVATION" ); 00635 strncpy( _vmware_native_table[num_events].description, 00636 "Retrieves the minimum processing power in MHz " 00637 "reserved for the virtual machine.", 00638 PAPI_HUGE_STR_LEN); 00639 strcpy( _vmware_native_table[num_events].units,"MHz"); 00640 _vmware_native_table[num_events].which_counter= 00641 VMWARE_CPU_RESERVATION_MHZ; 00642 _vmware_native_table[num_events].report_difference=0; 00643 num_events++; 00644 00645 strcpy( _vmware_native_table[num_events].name, 00646 "CPU_SHARES" ); 00647 strncpy( _vmware_native_table[num_events].description, 00648 "Retrieves the number of CPU shares allocated " 00649 "to the virtual machine.", 00650 PAPI_HUGE_STR_LEN); 00651 strcpy( _vmware_native_table[num_events].units,"shares"); 00652 _vmware_native_table[num_events].which_counter= 00653 VMWARE_CPU_SHARES; 00654 _vmware_native_table[num_events].report_difference=0; 00655 num_events++; 00656 00657 strcpy( _vmware_native_table[num_events].name, 00658 "CPU_STOLEN" ); 00659 strncpy( _vmware_native_table[num_events].description, 00660 "Retrieves the number of milliseconds that the " 00661 "virtual machine was in a ready state (able to " 00662 "transition to a run state), but was not 
scheduled to run.", 00663 PAPI_HUGE_STR_LEN); 00664 strcpy( _vmware_native_table[num_events].units,"ms"); 00665 _vmware_native_table[num_events].which_counter= 00666 VMWARE_CPU_STOLEN_MS; 00667 _vmware_native_table[num_events].report_difference=0; 00668 num_events++; 00669 00670 strcpy( _vmware_native_table[num_events].name, 00671 "CPU_USED" ); 00672 strncpy( _vmware_native_table[num_events].description, 00673 "Retrieves the number of milliseconds during which " 00674 "the virtual machine has used the CPU. This value " 00675 "includes the time used by the guest operating system " 00676 "and the time used by virtualization code for tasks for " 00677 "this virtual machine. You can combine this value with " 00678 "the elapsed time (VMWARE_ELAPSED) to estimate the " 00679 "effective virtual machine CPU speed. This value is a " 00680 "subset of elapsedMs.", 00681 PAPI_HUGE_STR_LEN ); 00682 strcpy( _vmware_native_table[num_events].units,"ms"); 00683 _vmware_native_table[num_events].which_counter= 00684 VMWARE_CPU_USED_MS; 00685 _vmware_native_table[num_events].report_difference=1; 00686 num_events++; 00687 00688 strcpy( _vmware_native_table[num_events].name, 00689 "ELAPSED" ); 00690 strncpy( _vmware_native_table[num_events].description, 00691 "Retrieves the number of milliseconds that have passed " 00692 "in the virtual machine since it last started running on " 00693 "the server. The count of elapsed time restarts each time " 00694 "the virtual machine is powered on, resumed, or migrated " 00695 "using VMotion. This value counts milliseconds, regardless " 00696 "of whether the virtual machine is using processing power " 00697 "during that time. You can combine this value with the CPU " 00698 "time used by the virtual machine (VMWARE_CPU_USED) to " 00699 "estimate the effective virtual machine xCPU speed. 
" 00700 "cpuUsedMS is a subset of this value.", 00701 PAPI_HUGE_STR_LEN ); 00702 strcpy( _vmware_native_table[num_events].units,"ms"); 00703 _vmware_native_table[num_events].which_counter= 00704 VMWARE_ELAPSED_MS; 00705 _vmware_native_table[num_events].report_difference=1; 00706 num_events++; 00707 00708 strcpy( _vmware_native_table[num_events].name, 00709 "MEM_ACTIVE" ); 00710 strncpy( _vmware_native_table[num_events].description, 00711 "Retrieves the amount of memory the virtual machine is " 00712 "actively using in MB - Its estimated working set size.", 00713 PAPI_HUGE_STR_LEN ); 00714 strcpy( _vmware_native_table[num_events].units,"MB"); 00715 _vmware_native_table[num_events].which_counter= 00716 VMWARE_MEM_ACTIVE_MB; 00717 _vmware_native_table[num_events].report_difference=0; 00718 num_events++; 00719 00720 strcpy( _vmware_native_table[num_events].name, 00721 "MEM_BALLOONED" ); 00722 strncpy( _vmware_native_table[num_events].description, 00723 "Retrieves the amount of memory that has been reclaimed " 00724 "from this virtual machine by the vSphere memory balloon " 00725 "driver (also referred to as the 'vmemctl' driver) in MB.", 00726 PAPI_HUGE_STR_LEN ); 00727 strcpy( _vmware_native_table[num_events].units,"MB"); 00728 _vmware_native_table[num_events].which_counter= 00729 VMWARE_MEM_BALLOONED_MB; 00730 _vmware_native_table[num_events].report_difference=0; 00731 num_events++; 00732 00733 strcpy( _vmware_native_table[num_events].name, 00734 "MEM_LIMIT" ); 00735 strncpy( _vmware_native_table[num_events].description, 00736 "Retrieves the upper limit of memory that is available " 00737 "to the virtual machine in MB.", 00738 PAPI_HUGE_STR_LEN ); 00739 strcpy( _vmware_native_table[num_events].units,"MB"); 00740 _vmware_native_table[num_events].which_counter= 00741 VMWARE_MEM_LIMIT_MB; 00742 _vmware_native_table[num_events].report_difference=0; 00743 num_events++; 00744 00745 strcpy( _vmware_native_table[num_events].name, 00746 "MEM_MAPPED" ); 00747 strncpy( 
_vmware_native_table[num_events].description, 00748 "Retrieves the amount of memory that is allocated to " 00749 "the virtual machine in MB. Memory that is ballooned, " 00750 "swapped, or has never been accessed is excluded.", 00751 PAPI_HUGE_STR_LEN ); 00752 strcpy( _vmware_native_table[num_events].units,"MB"); 00753 _vmware_native_table[num_events].which_counter= 00754 VMWARE_MEM_MAPPED_MB; 00755 _vmware_native_table[num_events].report_difference=0; 00756 num_events++; 00757 00758 strcpy( _vmware_native_table[num_events].name, 00759 "MEM_OVERHEAD" ); 00760 strncpy( _vmware_native_table[num_events].description, 00761 "Retrieves the amount of 'overhead' memory associated " 00762 "with this virtual machine that is currently consumed " 00763 "on the host system in MB. Overhead memory is additional " 00764 "memory that is reserved for data structures required by " 00765 "the virtualization layer.", 00766 PAPI_HUGE_STR_LEN ); 00767 strcpy( _vmware_native_table[num_events].units,"MB"); 00768 _vmware_native_table[num_events].which_counter= 00769 VMWARE_MEM_OVERHEAD_MB; 00770 _vmware_native_table[num_events].report_difference=0; 00771 num_events++; 00772 00773 strcpy( _vmware_native_table[num_events].name, 00774 "MEM_RESERVATION" ); 00775 strncpy( _vmware_native_table[num_events].description, 00776 "Retrieves the minimum amount of memory that is " 00777 "reserved for the virtual machine in MB.", 00778 PAPI_HUGE_STR_LEN ); 00779 strcpy( _vmware_native_table[num_events].units,"MB"); 00780 _vmware_native_table[num_events].which_counter= 00781 VMWARE_MEM_RESERVATION_MB; 00782 _vmware_native_table[num_events].report_difference=0; 00783 num_events++; 00784 00785 strcpy( _vmware_native_table[num_events].name, 00786 "MEM_SHARED" ); 00787 strncpy( _vmware_native_table[num_events].description, 00788 "Retrieves the amount of physical memory associated " 00789 "with this virtual machine that is copy-on-write (COW) " 00790 "shared on the host in MB.", 00791 PAPI_HUGE_STR_LEN ); 00792 
strcpy( _vmware_native_table[num_events].units,"MB"); 00793 _vmware_native_table[num_events].which_counter= 00794 VMWARE_MEM_SHARED_MB; 00795 _vmware_native_table[num_events].report_difference=0; 00796 num_events++; 00797 00798 strcpy( _vmware_native_table[num_events].name, 00799 "MEM_SHARES" ); 00800 strncpy( _vmware_native_table[num_events].description, 00801 "Retrieves the number of memory shares allocated to " 00802 "the virtual machine.", 00803 PAPI_HUGE_STR_LEN ); 00804 strcpy( _vmware_native_table[num_events].units,"shares"); 00805 _vmware_native_table[num_events].which_counter= 00806 VMWARE_MEM_SHARES; 00807 _vmware_native_table[num_events].report_difference=0; 00808 num_events++; 00809 00810 strcpy( _vmware_native_table[num_events].name, 00811 "MEM_SWAPPED" ); 00812 strncpy( _vmware_native_table[num_events].description, 00813 "Retrieves the amount of memory that has been reclaimed " 00814 "from this virtual machine by transparently swapping " 00815 "guest memory to disk in MB.", 00816 PAPI_HUGE_STR_LEN ); 00817 strcpy( _vmware_native_table[num_events].units,"MB"); 00818 _vmware_native_table[num_events].which_counter= 00819 VMWARE_MEM_SWAPPED_MB; 00820 _vmware_native_table[num_events].report_difference=0; 00821 num_events++; 00822 00823 strcpy( _vmware_native_table[num_events].name, 00824 "MEM_TARGET_SIZE" ); 00825 strncpy( _vmware_native_table[num_events].description, 00826 "Retrieves the size of the target memory allocation " 00827 "for this virtual machine in MB.", 00828 PAPI_HUGE_STR_LEN ); 00829 strcpy( _vmware_native_table[num_events].units,"MB"); 00830 _vmware_native_table[num_events].which_counter= 00831 VMWARE_MEM_TARGET_SIZE_MB; 00832 _vmware_native_table[num_events].report_difference=0; 00833 num_events++; 00834 00835 strcpy( _vmware_native_table[num_events].name, 00836 "MEM_USED" ); 00837 strncpy( _vmware_native_table[num_events].description, 00838 "Retrieves the estimated amount of physical host memory " 00839 "currently consumed for this 
virtual machine's " 00840 "physical memory.", 00841 PAPI_HUGE_STR_LEN ); 00842 strcpy( _vmware_native_table[num_events].units,"MB"); 00843 _vmware_native_table[num_events].which_counter= 00844 VMWARE_MEM_USED_MB; 00845 _vmware_native_table[num_events].report_difference=0; 00846 num_events++; 00847 00848 strcpy( _vmware_native_table[num_events].name, 00849 "HOST_CPU" ); 00850 strncpy( _vmware_native_table[num_events].description, 00851 "Retrieves the speed of the ESX system's physical " 00852 "CPU in MHz.", 00853 PAPI_HUGE_STR_LEN ); 00854 strcpy( _vmware_native_table[num_events].units,"MHz"); 00855 _vmware_native_table[num_events].which_counter= 00856 VMWARE_HOST_CPU_MHZ; 00857 _vmware_native_table[num_events].report_difference=0; 00858 num_events++; 00859 } 00860 00861 #endif 00862 00863 /* For VMWare Pseudo Performance Counters */ 00864 if ( getenv( "PAPI_VMWARE_PSEUDOPERFORMANCE" ) ) { 00865 00866 use_pseudo=1; 00867 00868 strcpy( _vmware_native_table[num_events].name, 00869 "HOST_TSC" ); 00870 strncpy( _vmware_native_table[num_events].description, 00871 "Physical host TSC", 00872 PAPI_HUGE_STR_LEN ); 00873 strcpy( _vmware_native_table[num_events].units,"cycles"); 00874 _vmware_native_table[num_events].which_counter= 00875 VMWARE_HOST_TSC; 00876 _vmware_native_table[num_events].report_difference=1; 00877 num_events++; 00878 00879 strcpy( _vmware_native_table[num_events].name, 00880 "ELAPSED_TIME" ); 00881 strncpy( _vmware_native_table[num_events].description, 00882 "Elapsed real time in ns.", 00883 PAPI_HUGE_STR_LEN ); 00884 strcpy( _vmware_native_table[num_events].units,"ns"); 00885 _vmware_native_table[num_events].which_counter= 00886 VMWARE_ELAPSED_TIME; 00887 _vmware_native_table[num_events].report_difference=1; 00888 num_events++; 00889 00890 strcpy( _vmware_native_table[num_events].name, 00891 "ELAPSED_APPARENT" ); 00892 strncpy( _vmware_native_table[num_events].description, 00893 "Elapsed apparent time in ns.", 00894 PAPI_HUGE_STR_LEN ); 00895 strcpy( 
_vmware_native_table[num_events].units,"ns"); 00896 _vmware_native_table[num_events].which_counter= 00897 VMWARE_ELAPSED_APPARENT; 00898 _vmware_native_table[num_events].report_difference=1; 00899 num_events++; 00900 } 00901 00902 if (num_events==0) { 00903 strncpy(_vmware_vector.cmp_info.disabled_reason, 00904 "VMware SDK not installed, and PAPI_VMWARE_PSEUDOPERFORMANCE not set", 00905 PAPI_MAX_STR_LEN); 00906 return PAPI_ECMP; 00907 } 00908 00909 _vmware_vector.cmp_info.num_native_events = num_events; 00910 00911 return PAPI_OK; 00912 } 00913 00915 int 00916 _vmware_init_control_state( hwd_control_state_t *ctl ) 00917 { 00918 (void) ctl; 00919 00920 return PAPI_OK; 00921 } 00922 00927 int 00928 _vmware_ntv_enum_events( unsigned int *EventCode, int modifier ) 00929 { 00930 00931 switch ( modifier ) { 00932 /* return EventCode of first event */ 00933 case PAPI_ENUM_FIRST: 00934 if (num_events==0) return PAPI_ENOEVNT; 00935 *EventCode = 0; 00936 return PAPI_OK; 00937 break; 00938 /* return EventCode of passed-in Event */ 00939 case PAPI_ENUM_EVENTS:{ 00940 int index = *EventCode; 00941 00942 if ( index < num_events - 1 ) { 00943 *EventCode = *EventCode + 1; 00944 return PAPI_OK; 00945 } else { 00946 return PAPI_ENOEVNT; 00947 } 00948 break; 00949 } 00950 default: 00951 return PAPI_EINVAL; 00952 } 00953 return PAPI_EINVAL; 00954 } 00955 00956 int 00957 _vmware_ntv_code_to_info(unsigned int EventCode, PAPI_event_info_t *info) 00958 { 00959 00960 int index = EventCode; 00961 00962 if ( ( index < 0) || (index >= num_events )) return PAPI_ENOEVNT; 00963 00964 strncpy( info->symbol, _vmware_native_table[index].name, 00965 sizeof(info->symbol)); 00966 00967 strncpy( info->long_descr, _vmware_native_table[index].description, 00968 sizeof(info->symbol)); 00969 00970 strncpy( info->units, _vmware_native_table[index].units, 00971 sizeof(info->units)); 00972 00973 return PAPI_OK; 00974 } 00975 00976 00982 int 00983 _vmware_ntv_code_to_name( unsigned int EventCode, char *name, 
int len ) 00984 { 00985 int index = EventCode; 00986 00987 if ( index >= 0 && index < num_events ) { 00988 strncpy( name, _vmware_native_table[index].name, len ); 00989 } 00990 return PAPI_OK; 00991 } 00992 00998 int 00999 _vmware_ntv_code_to_descr( unsigned int EventCode, char *name, int len ) 01000 { 01001 int index = EventCode; 01002 01003 if ( index >= 0 && index < num_events ) { 01004 strncpy( name, _vmware_native_table[index].description, len ); 01005 } 01006 return PAPI_OK; 01007 } 01008 01010 int 01011 _vmware_update_control_state( hwd_control_state_t *ctl, 01012 NativeInfo_t *native, 01013 int count, 01014 hwd_context_t *ctx ) 01015 { 01016 (void) ctx; 01017 01018 struct _vmware_control_state *control; 01019 01020 int i, index; 01021 01022 control=(struct _vmware_control_state *)ctl; 01023 01024 for ( i = 0; i < count; i++ ) { 01025 index = native[i].ni_event; 01026 control->which_counter[i]=_vmware_native_table[index].which_counter; 01027 native[i].ni_position = i; 01028 } 01029 control->num_events=count; 01030 01031 return PAPI_OK; 01032 } 01033 01035 int 01036 _vmware_start( hwd_context_t *ctx, hwd_control_state_t *ctl ) 01037 { 01038 struct _vmware_context *context; 01039 (void) ctl; 01040 01041 context=(struct _vmware_context *)ctx; 01042 01043 _vmware_hardware_read( context, 1 ); 01044 01045 return PAPI_OK; 01046 } 01047 01049 int 01050 _vmware_stop( hwd_context_t *ctx, hwd_control_state_t *ctl ) 01051 { 01052 01053 struct _vmware_context *context; 01054 (void) ctl; 01055 01056 context=(struct _vmware_context *)ctx; 01057 01058 _vmware_hardware_read( context, 0 ); 01059 01060 return PAPI_OK; 01061 } 01062 01064 int 01065 _vmware_read( hwd_context_t *ctx, 01066 hwd_control_state_t *ctl, 01067 long_long **events, int flags ) 01068 { 01069 01070 struct _vmware_context *context; 01071 struct _vmware_control_state *control; 01072 01073 (void) flags; 01074 int i; 01075 01076 context=(struct _vmware_context *)ctx; 01077 control=(struct _vmware_control_state 
*)ctl; 01078 01079 _vmware_hardware_read( context, 0 ); 01080 01081 for (i=0; i<control->num_events; i++) { 01082 01083 if (_vmware_native_table[ 01084 _vmware_native_table[control->which_counter[i]].which_counter]. 01085 report_difference) { 01086 control->value[i]=context->values[control->which_counter[i]]- 01087 context->start_values[control->which_counter[i]]; 01088 } else { 01089 control->value[i]=context->values[control->which_counter[i]]; 01090 } 01091 // printf("%d %d %lld-%lld=%lld\n",i,control->which_counter[i], 01092 // context->values[control->which_counter[i]], 01093 // context->start_values[control->which_counter[i]], 01094 // control->value[i]); 01095 01096 } 01097 01098 *events = control->value; 01099 01100 return PAPI_OK; 01101 } 01102 01104 /* otherwise, the updated state is written to ESI->hw_start */ 01105 int 01106 _vmware_write( hwd_context_t * ctx, hwd_control_state_t * ctrl, long_long events[] ) 01107 { 01108 (void) ctx; 01109 (void) ctrl; 01110 (void) events; 01111 SUBDBG( "_vmware_write... %p %p", ctx, ctrl ); 01112 /* FIXME... this should actually carry out the write, though */ 01113 /* this is non-trivial as which counter being written has to be */ 01114 /* determined somehow. 
*/ 01115 return PAPI_OK; 01116 } 01117 01119 int 01120 _vmware_reset( hwd_context_t *ctx, hwd_control_state_t *ctl ) 01121 { 01122 (void) ctx; 01123 (void) ctl; 01124 01125 return PAPI_OK; 01126 } 01127 01129 int 01130 _vmware_shutdown_thread( hwd_context_t *ctx ) 01131 { 01132 (void) ctx; 01133 01134 #ifdef VMGUESTLIB 01135 VMGuestLibError glError; 01136 struct _vmware_context *context; 01137 01138 context=(struct _vmware_context *)ctx; 01139 01140 if (use_guestlib) { 01141 glError = GuestLib_CloseHandle(context->glHandle); 01142 if (glError != VMGUESTLIB_ERROR_SUCCESS) { 01143 fprintf(stderr, "Failed to CloseHandle: %s\n", 01144 GuestLib_GetErrorText(glError)); 01145 return PAPI_ECMP; 01146 } 01147 } 01148 #endif 01149 01150 return PAPI_OK; 01151 } 01152 01154 int 01155 _vmware_shutdown_component( void ) 01156 { 01157 01158 #ifdef VMGUESTLIB 01159 if (dlclose(dlHandle)) { 01160 fprintf(stderr, "dlclose failed\n"); 01161 return EXIT_FAILURE; 01162 } 01163 #endif 01164 01165 return PAPI_OK; 01166 } 01167 01168 01174 int 01175 _vmware_ctl( hwd_context_t *ctx, int code, _papi_int_option_t *option ) 01176 { 01177 01178 (void) ctx; 01179 (void) code; 01180 (void) option; 01181 01182 SUBDBG( "_vmware_ctl..." ); 01183 01184 return PAPI_OK; 01185 } 01186 01196 int 01197 _vmware_set_domain( hwd_control_state_t *ctl, int domain ) 01198 { 01199 (void) ctl; 01200 01201 int found = 0; 01202 SUBDBG( "_vmware_set_domain..." 
); 01203 if ( PAPI_DOM_USER & domain ) { 01204 SUBDBG( " PAPI_DOM_USER " ); 01205 found = 1; 01206 } 01207 if ( PAPI_DOM_KERNEL & domain ) { 01208 SUBDBG( " PAPI_DOM_KERNEL " ); 01209 found = 1; 01210 } 01211 if ( PAPI_DOM_OTHER & domain ) { 01212 SUBDBG( " PAPI_DOM_OTHER " ); 01213 found = 1; 01214 } 01215 if ( PAPI_DOM_ALL & domain ) { 01216 SUBDBG( " PAPI_DOM_ALL " ); 01217 found = 1; 01218 } 01219 if ( !found ) { 01220 return ( PAPI_EINVAL ); 01221 } 01222 return PAPI_OK; 01223 } 01224 01226 papi_vector_t _vmware_vector = { 01227 .cmp_info = { 01228 /* default component information (unspecified values are initialized to 0) */ 01229 .name = "vmware", 01230 .short_name = "vmware", 01231 .description = "Provide support for VMware vmguest and pseudo counters", 01232 .version = "5.0", 01233 .num_mpx_cntrs = VMWARE_MAX_COUNTERS, 01234 .num_cntrs = VMWARE_MAX_COUNTERS, 01235 .default_domain = PAPI_DOM_USER, 01236 .available_domains = PAPI_DOM_USER, 01237 .default_granularity = PAPI_GRN_THR, 01238 .available_granularities = PAPI_GRN_THR, 01239 .hardware_intr_sig = PAPI_INT_SIGNAL, 01240 01241 /* component specific cmp_info initializations */ 01242 .fast_real_timer = 0, 01243 .fast_virtual_timer = 0, 01244 .attach = 0, 01245 .attach_must_ptrace = 0, 01246 }, 01247 /* sizes of framework-opaque component-private structures */ 01248 .size = { 01249 .context = sizeof ( struct _vmware_context ), 01250 .control_state = sizeof ( struct _vmware_control_state ), 01251 .reg_value = sizeof ( struct _vmware_register ), 01252 .reg_alloc = sizeof ( struct _vmware_reg_alloc ), 01253 } 01254 , 01255 /* function pointers in this component */ 01256 .init_thread = _vmware_init, 01257 .init_component = _vmware_init_component, 01258 .init_control_state = _vmware_init_control_state, 01259 .start = _vmware_start, 01260 .stop = _vmware_stop, 01261 .read = _vmware_read, 01262 .write = _vmware_write, 01263 .shutdown_thread = _vmware_shutdown_thread, 01264 .shutdown_component = 
_vmware_shutdown_component, 01265 .ctl = _vmware_ctl, 01266 01267 .update_control_state = _vmware_update_control_state, 01268 .set_domain = _vmware_set_domain, 01269 .reset = _vmware_reset, 01270 01271 .ntv_enum_events = _vmware_ntv_enum_events, 01272 .ntv_code_to_name = _vmware_ntv_code_to_name, 01273 .ntv_code_to_descr = _vmware_ntv_code_to_descr, 01274 .ntv_code_to_info = _vmware_ntv_code_to_info, 01275 01276 }; 01277