PAPI  5.3.0.0
vmware.c
Go to the documentation of this file.
00001 /****************************/
00002 /* THIS IS OPEN SOURCE CODE */
00003 /****************************/
00004 
00023 #include <stdio.h>
00024 #include <string.h>
00025 #include <stdlib.h>
00026 #include <stdint.h>
00027 
00028 #include <unistd.h>
00029 #include <dlfcn.h>
00030 
00031 /* Headers required by PAPI */
00032 #include "papi.h"
00033 #include "papi_internal.h"
00034 #include "papi_vector.h"
00035 #include "papi_memory.h"
00036 
00037 #define VMWARE_MAX_COUNTERS 256
00038 
00039 #define VMWARE_CPU_LIMIT_MHZ            0
00040 #define VMWARE_CPU_RESERVATION_MHZ      1
00041 #define VMWARE_CPU_SHARES               2
00042 #define VMWARE_CPU_STOLEN_MS            3
00043 #define VMWARE_CPU_USED_MS              4
00044 #define VMWARE_ELAPSED_MS               5
00045 
00046 #define VMWARE_MEM_ACTIVE_MB            6
00047 #define VMWARE_MEM_BALLOONED_MB         7
00048 #define VMWARE_MEM_LIMIT_MB             8
00049 #define VMWARE_MEM_MAPPED_MB            9
00050 #define VMWARE_MEM_OVERHEAD_MB          10
00051 #define VMWARE_MEM_RESERVATION_MB       11
00052 #define VMWARE_MEM_SHARED_MB            12
00053 #define VMWARE_MEM_SHARES               13
00054 #define VMWARE_MEM_SWAPPED_MB           14
00055 #define VMWARE_MEM_TARGET_SIZE_MB       15
00056 #define VMWARE_MEM_USED_MB              16
00057 
00058 #define VMWARE_HOST_CPU_MHZ             17
00059 
00060 /* The following 3 require VMWARE_PSEUDO_PERFORMANCE env_var to be set. */
00061 
00062 #define VMWARE_HOST_TSC         18
00063 #define VMWARE_ELAPSED_TIME             19
00064 #define VMWARE_ELAPSED_APPARENT         20
00065 
00066 /* Begin PAPI definitions */
/* The component's function-pointer vector, exported to the PAPI framework */
papi_vector_t _vmware_vector;


/* Weak reference used in LoadFunctions() to detect static linking of libc: */
/* non-NULL there means dlopen() of the GuestLib would not work, so the     */
/* component disables itself.                                               */
void (*_dl_non_dynamic_init)(void) __attribute__((weak));
00071 
/** Per-event register bits (PAPI component template).
 *  NOTE(review): selector is not referenced anywhere in this chunk —
 *  presumably kept to satisfy the component template; confirm before use. */
struct _vmware_register {
    unsigned int selector;
};
00078 
/** Description of one native event exposed by this component. */
struct _vmware_native_event_entry {
    char name[PAPI_MAX_STR_LEN];         /* event name, e.g. "CPU_LIMIT"      */
    char description[PAPI_HUGE_STR_LEN]; /* human-readable event description  */
        char units[PAPI_MIN_STR_LEN];    /* units string, e.g. "MHz", "MB"    */
        int which_counter;               /* VMWARE_* index into the values[]  */
                                         /* array read by the component       */
        int report_difference;           /* nonzero for cumulative counters   */
                                         /* (e.g. CPU_USED, ELAPSED) that are */
                                         /* reported relative to start time   */
};
00087 
/** Register-allocation bookkeeping (PAPI component template);
 *  wraps _vmware_register, otherwise unused in this chunk. */
struct _vmware_reg_alloc {
    struct _vmware_register ra_bits;
};
00091 
00092 
/**
 * Read an x86 performance counter with the RDPMC instruction.
 *
 * Used for VMware's pseudo-performance counters (selectors 0x10000-0x10002,
 * see _vmware_hardware_read).  Requires an x86/x86_64 guest; executing RDPMC
 * from user space faults unless the platform permits it.
 *
 * Declared `static inline`: the original plain `inline` has C99 inline
 * linkage, which leaves an undefined reference if the compiler chooses not
 * to inline a call and no external definition is provided elsewhere.
 *
 * @param c  counter selector loaded into ECX
 * @return   the 64-bit counter value (EDX:EAX)
 */
static inline uint64_t rdpmc(int c)
{
  uint32_t low, high;
  __asm__ __volatile__("rdpmc" : "=a" (low), "=d" (high) : "c" (c));
  return ((uint64_t)high << 32) | (uint64_t)low;
}
00099 
00100 
00101 
#ifdef VMGUESTLIB
/* Headers required by VMware */
#include "vmGuestLib.h"

/* Function pointers for the GuestLib entry points, resolved at runtime with */
/* dlsym() in LoadFunctions(); this avoids a hard link-time dependency on    */
/* libvmGuestLib.so, which only exists inside a VMware guest.                */
char const * (*GuestLib_GetErrorText)(VMGuestLibError);
VMGuestLibError (*GuestLib_OpenHandle)(VMGuestLibHandle*);
VMGuestLibError (*GuestLib_CloseHandle)(VMGuestLibHandle);
VMGuestLibError (*GuestLib_UpdateInfo)(VMGuestLibHandle handle);
VMGuestLibError (*GuestLib_GetSessionId)(VMGuestLibHandle handle, VMSessionId *id);
VMGuestLibError (*GuestLib_GetCpuReservationMHz)(VMGuestLibHandle handle, uint32 *cpuReservationMHz);
VMGuestLibError (*GuestLib_GetCpuLimitMHz)(VMGuestLibHandle handle, uint32 *cpuLimitMHz);
VMGuestLibError (*GuestLib_GetCpuShares)(VMGuestLibHandle handle, uint32 *cpuShares);
VMGuestLibError (*GuestLib_GetCpuUsedMs)(VMGuestLibHandle handle, uint64 *cpuUsedMs);
VMGuestLibError (*GuestLib_GetHostProcessorSpeed)(VMGuestLibHandle handle, uint32 *mhz);
VMGuestLibError (*GuestLib_GetMemReservationMB)(VMGuestLibHandle handle, uint32 *memReservationMB);
VMGuestLibError (*GuestLib_GetMemLimitMB)(VMGuestLibHandle handle, uint32 *memLimitMB);
VMGuestLibError (*GuestLib_GetMemShares)(VMGuestLibHandle handle, uint32 *memShares);
VMGuestLibError (*GuestLib_GetMemMappedMB)(VMGuestLibHandle handle, uint32 *memMappedMB);
VMGuestLibError (*GuestLib_GetMemActiveMB)(VMGuestLibHandle handle, uint32 *memActiveMB);
VMGuestLibError (*GuestLib_GetMemOverheadMB)(VMGuestLibHandle handle, uint32 *memOverheadMB);
VMGuestLibError (*GuestLib_GetMemBalloonedMB)(VMGuestLibHandle handle, uint32 *memBalloonedMB);
VMGuestLibError (*GuestLib_GetMemSwappedMB)(VMGuestLibHandle handle, uint32 *memSwappedMB);
VMGuestLibError (*GuestLib_GetMemSharedMB)(VMGuestLibHandle handle, uint32 *memSharedMB);
VMGuestLibError (*GuestLib_GetMemSharedSavedMB)(VMGuestLibHandle handle, uint32 *memSharedSavedMB);
VMGuestLibError (*GuestLib_GetMemUsedMB)(VMGuestLibHandle handle, uint32 *memUsedMB);
VMGuestLibError (*GuestLib_GetElapsedMs)(VMGuestLibHandle handle, uint64 *elapsedMs);
VMGuestLibError (*GuestLib_GetResourcePoolPath)(VMGuestLibHandle handle, size_t *bufferSize, char *pathBuffer);
VMGuestLibError (*GuestLib_GetCpuStolenMs)(VMGuestLibHandle handle, uint64 *cpuStolenMs);
VMGuestLibError (*GuestLib_GetMemTargetSizeMB)(VMGuestLibHandle handle, uint64 *memTargetSizeMB);
VMGuestLibError (*GuestLib_GetHostNumCpuCores)(VMGuestLibHandle handle, uint32 *hostNumCpuCores);
VMGuestLibError (*GuestLib_GetHostCpuUsedMs)(VMGuestLibHandle handle, uint64 *hostCpuUsedMs);
VMGuestLibError (*GuestLib_GetHostMemSwappedMB)(VMGuestLibHandle handle, uint64 *hostMemSwappedMB);
VMGuestLibError (*GuestLib_GetHostMemSharedMB)(VMGuestLibHandle handle, uint64 *hostMemSharedMB);
VMGuestLibError (*GuestLib_GetHostMemUsedMB)(VMGuestLibHandle handle, uint64 *hostMemUsedMB);
VMGuestLibError (*GuestLib_GetHostMemPhysMB)(VMGuestLibHandle handle, uint64 *hostMemPhysMB);
VMGuestLibError (*GuestLib_GetHostMemPhysFreeMB)(VMGuestLibHandle handle, uint64 *hostMemPhysFreeMB);
VMGuestLibError (*GuestLib_GetHostMemKernOvhdMB)(VMGuestLibHandle handle, uint64 *hostMemKernOvhdMB);
VMGuestLibError (*GuestLib_GetHostMemMappedMB)(VMGuestLibHandle handle, uint64 *hostMemMappedMB);
VMGuestLibError (*GuestLib_GetHostMemUnmappedMB)(VMGuestLibHandle handle, uint64 *hostMemUnmappedMB);


/* Handle returned by dlopen() of libvmGuestLib.so */
static void *dlHandle = NULL;


/*
 * Macro to load a single GuestLib function from the shared library.
 * The exported symbols carry a "VM" prefix (e.g. VMGuestLib_OpenHandle).
 *
 * dlerror() is called first to clear any stale error, as required by the
 * POSIX dlsym() error-checking protocol.
 *
 * On failure we return PAPI_ECMP.  The original returned FALSE, but
 * FALSE == 0 == PAPI_OK, so a failed symbol lookup made LoadFunctions()
 * report success to its caller.
 */

#define LOAD_ONE_FUNC(funcname)                                 \
do {                                                            \
(void) dlerror();                                               \
funcname = dlsym(dlHandle, "VM" #funcname);                     \
if ((dlErrStr = dlerror()) != NULL) {                           \
fprintf(stderr, "Failed to load \'%s\': \'%s\'\n",              \
#funcname, dlErrStr);                                           \
return PAPI_ECMP;                                               \
}                                                               \
} while (0)

#endif
00162 
/** Per-eventset state: which counters are selected and their latest values. */
struct _vmware_control_state {
   long long value[VMWARE_MAX_COUNTERS];    /* latest value per added event    */
   int which_counter[VMWARE_MAX_COUNTERS];  /* eventset slot -> VMWARE_* index */
   int num_events;                          /* number of events in the set     */
};
00169 
/** Per-thread context: raw counter snapshot plus the baseline taken at
 *  start time (used by events that report a difference). */
struct _vmware_context {
  long long values[VMWARE_MAX_COUNTERS];        /* most recent raw readings  */
  long long start_values[VMWARE_MAX_COUNTERS];  /* readings at start time    */
#ifdef VMGUESTLIB
  VMGuestLibHandle glHandle;                    /* open GuestLib session     */
#endif
};
00178 
00179 
00180 
00181 
00182 
00183 
00184 /*
00185  *-----------------------------------------------------------------------------
00186  *
00187  * LoadFunctions --
00188  *
00189  *      Load the functions from the shared library.
00190  *
00191  * Results:
00192  *      TRUE on success
00193  *      FALSE on failure
00194  *
00195  * Side effects:
00196  *      None
00197  *
00198  * Credit: VMware
00199  *-----------------------------------------------------------------------------
00200  */
00201 
00202 static int
00203 LoadFunctions(void)
00204 {
00205 
00206 #ifdef VMGUESTLIB
00207     /*
00208      * First, try to load the shared library.
00209      */
00210 
00211     /* Attempt to guess if we were statically linked to libc, if so bail */
00212     if ( _dl_non_dynamic_init != NULL ) {
00213         strncpy(_vmware_vector.cmp_info.disabled_reason, "The VMware component does not support statically linking of libc.", PAPI_MAX_STR_LEN);
00214         return PAPI_ENOSUPP;
00215     }
00216 
00217     char const *dlErrStr;
00218     char filename[BUFSIZ];
00219 
00220     sprintf(filename,"%s","libvmGuestLib.so");
00221     dlHandle = dlopen(filename, RTLD_NOW);
00222     if (!dlHandle) {
00223        dlErrStr = dlerror();
00224        fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 
00225            dlErrStr);
00226 
00227        sprintf(filename,"%s/lib/lib64/libvmGuestLib.so",VMWARE_INCDIR);
00228        dlHandle = dlopen(filename, RTLD_NOW);
00229        if (!dlHandle) {
00230           dlErrStr = dlerror();
00231           fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 
00232            dlErrStr);
00233 
00234           sprintf(filename,"%s/lib/lib32/libvmGuestLib.so",VMWARE_INCDIR);
00235           dlHandle = dlopen(filename, RTLD_NOW);
00236           if (!dlHandle) {
00237              dlErrStr = dlerror();
00238              fprintf(stderr, "dlopen of %s failed: \'%s\'\n", filename, 
00239               dlErrStr);
00240          return PAPI_ECMP;
00241           }
00242        }
00243     }
00244 
00245     /* Load all the individual library functions. */
00246     LOAD_ONE_FUNC(GuestLib_GetErrorText);
00247     LOAD_ONE_FUNC(GuestLib_OpenHandle);
00248     LOAD_ONE_FUNC(GuestLib_CloseHandle);
00249     LOAD_ONE_FUNC(GuestLib_UpdateInfo);
00250     LOAD_ONE_FUNC(GuestLib_GetSessionId);
00251     LOAD_ONE_FUNC(GuestLib_GetCpuReservationMHz);
00252     LOAD_ONE_FUNC(GuestLib_GetCpuLimitMHz);
00253     LOAD_ONE_FUNC(GuestLib_GetCpuShares);
00254     LOAD_ONE_FUNC(GuestLib_GetCpuUsedMs);
00255     LOAD_ONE_FUNC(GuestLib_GetHostProcessorSpeed);
00256     LOAD_ONE_FUNC(GuestLib_GetMemReservationMB);
00257     LOAD_ONE_FUNC(GuestLib_GetMemLimitMB);
00258     LOAD_ONE_FUNC(GuestLib_GetMemShares);
00259     LOAD_ONE_FUNC(GuestLib_GetMemMappedMB);
00260     LOAD_ONE_FUNC(GuestLib_GetMemActiveMB);
00261     LOAD_ONE_FUNC(GuestLib_GetMemOverheadMB);
00262     LOAD_ONE_FUNC(GuestLib_GetMemBalloonedMB);
00263     LOAD_ONE_FUNC(GuestLib_GetMemSwappedMB);
00264     LOAD_ONE_FUNC(GuestLib_GetMemSharedMB);
00265     LOAD_ONE_FUNC(GuestLib_GetMemSharedSavedMB);
00266     LOAD_ONE_FUNC(GuestLib_GetMemUsedMB);
00267     LOAD_ONE_FUNC(GuestLib_GetElapsedMs);
00268     LOAD_ONE_FUNC(GuestLib_GetResourcePoolPath);
00269     LOAD_ONE_FUNC(GuestLib_GetCpuStolenMs);
00270     LOAD_ONE_FUNC(GuestLib_GetMemTargetSizeMB);
00271     LOAD_ONE_FUNC(GuestLib_GetHostNumCpuCores);
00272     LOAD_ONE_FUNC(GuestLib_GetHostCpuUsedMs);
00273     LOAD_ONE_FUNC(GuestLib_GetHostMemSwappedMB);
00274     LOAD_ONE_FUNC(GuestLib_GetHostMemSharedMB);
00275     LOAD_ONE_FUNC(GuestLib_GetHostMemUsedMB);
00276     LOAD_ONE_FUNC(GuestLib_GetHostMemPhysMB);
00277     LOAD_ONE_FUNC(GuestLib_GetHostMemPhysFreeMB);
00278     LOAD_ONE_FUNC(GuestLib_GetHostMemKernOvhdMB);
00279     LOAD_ONE_FUNC(GuestLib_GetHostMemMappedMB);
00280     LOAD_ONE_FUNC(GuestLib_GetHostMemUnmappedMB);
00281 #endif
00282     return PAPI_OK;
00283 }
00284 
00285 
00286 
/* Table of native events this component exposes; allocated and filled in */
/* by _vmware_init_component().                                           */
static struct _vmware_native_event_entry *_vmware_native_table;
/* Number of entries populated in _vmware_native_table */
static int num_events = 0;
/* Nonzero when the rdpmc pseudo-performance counters are enabled          */
/* (requires the VMWARE_PSEUDO_PERFORMANCE env var, per the comment above) */
static int use_pseudo=0;
/* Nonzero when GuestLib loaded and a test open/update succeeded */
static int use_guestlib=0;
00293 
00294 /************************************************************************/
00295 /* Below is the actual "hardware implementation" of our VMWARE counters */
00296 /************************************************************************/
00297 
00301 static long long
00302 _vmware_hardware_read( struct _vmware_context *context, int starting)
00303 {
00304 
00305   int i;
00306 
00307     if (use_pseudo) {
00308            context->values[VMWARE_HOST_TSC]=rdpmc(0x10000);
00309            context->values[VMWARE_ELAPSED_TIME]=rdpmc(0x10001);
00310            context->values[VMWARE_ELAPSED_APPARENT]=rdpmc(0x10002);
00311     }
00312 
00313 
00314 #ifdef VMGUESTLIB
00315     static VMSessionId sessionId = 0;
00316     VMSessionId tmpSession;
00317     uint32_t temp32;
00318     uint64_t temp64;
00319     VMGuestLibError glError;
00320 
00321     if (use_guestlib) {
00322 
00323     glError = GuestLib_UpdateInfo(context->glHandle);
00324     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00325        fprintf(stderr,"UpdateInfo failed: %s\n", 
00326            GuestLib_GetErrorText(glError));
00327            return PAPI_ECMP;
00328     }
00329 
00330     /* Retrieve and check the session ID */
00331     glError = GuestLib_GetSessionId(context->glHandle, &tmpSession);
00332     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00333        fprintf(stderr, "Failed to get session ID: %s\n", 
00334            GuestLib_GetErrorText(glError));
00335        return PAPI_ECMP;
00336     }
00337 
00338     if (tmpSession == 0) {
00339        fprintf(stderr, "Error: Got zero sessionId from GuestLib\n");
00340        return PAPI_ECMP;
00341     }
00342 
00343     if (sessionId == 0) {
00344        sessionId = tmpSession;
00345     } else if (tmpSession != sessionId) {
00346        sessionId = tmpSession;
00347     }
00348 
00349     glError = GuestLib_GetCpuLimitMHz(context->glHandle,&temp32);
00350     context->values[VMWARE_CPU_LIMIT_MHZ]=temp32;
00351     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00352        fprintf(stderr,"Failed to get CPU limit: %s\n", 
00353            GuestLib_GetErrorText(glError));
00354        return PAPI_ECMP;
00355     }
00356 
00357     glError = GuestLib_GetCpuReservationMHz(context->glHandle,&temp32); 
00358     context->values[VMWARE_CPU_RESERVATION_MHZ]=temp32;
00359         if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00360        fprintf(stderr,"Failed to get CPU reservation: %s\n", 
00361            GuestLib_GetErrorText(glError));
00362        return PAPI_ECMP;
00363     }
00364     
00365     glError = GuestLib_GetCpuShares(context->glHandle,&temp32);
00366     context->values[VMWARE_CPU_SHARES]=temp32;
00367     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00368        fprintf(stderr,"Failed to get cpu shares: %s\n", 
00369            GuestLib_GetErrorText(glError));
00370        return PAPI_ECMP;
00371     }
00372 
00373     glError = GuestLib_GetCpuStolenMs(context->glHandle,&temp64);
00374     context->values[VMWARE_CPU_STOLEN_MS]=temp64;
00375     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00376        if (glError == VMGUESTLIB_ERROR_UNSUPPORTED_VERSION) {
00377           context->values[VMWARE_CPU_STOLEN_MS]=0;
00378           fprintf(stderr, "Skipping CPU stolen, not supported...\n");
00379        } else {
00380           fprintf(stderr, "Failed to get CPU stolen: %s\n", 
00381               GuestLib_GetErrorText(glError));
00382           return PAPI_ECMP;
00383        }
00384     }
00385 
00386     glError = GuestLib_GetCpuUsedMs(context->glHandle,&temp64);
00387     context->values[VMWARE_CPU_USED_MS]=temp64;
00388     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00389        fprintf(stderr, "Failed to get used ms: %s\n", 
00390            GuestLib_GetErrorText(glError));
00391        return PAPI_ECMP;
00392     }
00393     
00394     glError = GuestLib_GetElapsedMs(context->glHandle, &temp64);
00395     context->values[VMWARE_ELAPSED_MS]=temp64;
00396     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00397        fprintf(stderr, "Failed to get elapsed ms: %s\n",
00398            GuestLib_GetErrorText(glError));
00399        return PAPI_ECMP;
00400     }
00401 
00402     glError = GuestLib_GetMemActiveMB(context->glHandle, &temp32);
00403     context->values[VMWARE_MEM_ACTIVE_MB]=temp32;
00404     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00405        fprintf(stderr, "Failed to get active mem: %s\n", 
00406            GuestLib_GetErrorText(glError));
00407        return PAPI_ECMP;
00408     }
00409     
00410     glError = GuestLib_GetMemBalloonedMB(context->glHandle, &temp32);
00411     context->values[VMWARE_MEM_BALLOONED_MB]=temp32;
00412     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00413        fprintf(stderr, "Failed to get ballooned mem: %s\n", 
00414            GuestLib_GetErrorText(glError));
00415        return PAPI_ECMP;
00416     }
00417     
00418     glError = GuestLib_GetMemLimitMB(context->glHandle, &temp32);
00419     context->values[VMWARE_MEM_LIMIT_MB]=temp32;
00420     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00421        fprintf(stderr,"Failed to get mem limit: %s\n", 
00422            GuestLib_GetErrorText(glError));
00423        return PAPI_ECMP;
00424     }
00425 
00426         glError = GuestLib_GetMemMappedMB(context->glHandle, &temp32);
00427     context->values[VMWARE_MEM_MAPPED_MB]=temp32;
00428     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00429        fprintf(stderr, "Failed to get mapped mem: %s\n", 
00430            GuestLib_GetErrorText(glError));
00431        return PAPI_ECMP;
00432     }
00433 
00434     glError = GuestLib_GetMemOverheadMB(context->glHandle, &temp32);
00435     context->values[VMWARE_MEM_OVERHEAD_MB]=temp32;
00436     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00437        fprintf(stderr, "Failed to get overhead mem: %s\n", 
00438            GuestLib_GetErrorText(glError));
00439        return PAPI_ECMP;
00440     }
00441 
00442     glError = GuestLib_GetMemReservationMB(context->glHandle, &temp32);
00443     context->values[VMWARE_MEM_RESERVATION_MB]=temp32;
00444     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00445        fprintf(stderr, "Failed to get mem reservation: %s\n", 
00446            GuestLib_GetErrorText(glError));
00447        return PAPI_ECMP;
00448     }
00449 
00450         glError = GuestLib_GetMemSharedMB(context->glHandle, &temp32);
00451     context->values[VMWARE_MEM_SHARED_MB]=temp32;
00452     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00453        fprintf(stderr, "Failed to get swapped mem: %s\n", 
00454            GuestLib_GetErrorText(glError));
00455        return PAPI_ECMP;
00456     }
00457 
00458     glError = GuestLib_GetMemShares(context->glHandle, &temp32);
00459     context->values[VMWARE_MEM_SHARES]=temp32;
00460     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00461        if (glError == VMGUESTLIB_ERROR_NOT_AVAILABLE) {
00462           context->values[VMWARE_MEM_SHARES]=0;
00463           fprintf(stderr, "Skipping mem shares, not supported...\n");
00464        } else {
00465           fprintf(stderr, "Failed to get mem shares: %s\n", 
00466               GuestLib_GetErrorText(glError));
00467           return PAPI_ECMP;
00468        }
00469     }
00470 
00471     glError = GuestLib_GetMemSwappedMB(context->glHandle, &temp32);
00472     context->values[VMWARE_MEM_SWAPPED_MB]=temp32;
00473     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00474        fprintf(stderr, "Failed to get swapped mem: %s\n",
00475            GuestLib_GetErrorText(glError));
00476        return PAPI_ECMP;
00477     }
00478     
00479     glError = GuestLib_GetMemTargetSizeMB(context->glHandle, &temp64);
00480     context->values[VMWARE_MEM_TARGET_SIZE_MB]=temp64;
00481         if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00482        if (glError == VMGUESTLIB_ERROR_UNSUPPORTED_VERSION) {
00483           context->values[VMWARE_MEM_TARGET_SIZE_MB]=0;
00484           fprintf(stderr, "Skipping target mem size, not supported...\n");
00485        } else {
00486           fprintf(stderr, "Failed to get target mem size: %s\n", 
00487               GuestLib_GetErrorText(glError));
00488           return PAPI_ECMP;
00489        }
00490     }
00491 
00492         glError = GuestLib_GetMemUsedMB(context->glHandle, &temp32);
00493     context->values[VMWARE_MEM_USED_MB]=temp32;
00494     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00495        fprintf(stderr, "Failed to get swapped mem: %s\n",
00496            GuestLib_GetErrorText(glError));
00497        return PAPI_ECMP;
00498     }
00499 
00500         glError = GuestLib_GetHostProcessorSpeed(context->glHandle, &temp32); 
00501     context->values[VMWARE_HOST_CPU_MHZ]=temp32;
00502     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00503        fprintf(stderr, "Failed to get host proc speed: %s\n", 
00504            GuestLib_GetErrorText(glError));
00505        return PAPI_ECMP;
00506     }
00507     }
00508 
00509 #endif
00510 
00511     if (starting) {
00512 
00513       for(i=0;i<VMWARE_MAX_COUNTERS;i++) {
00514         context->start_values[i]=context->values[i];
00515       }
00516 
00517     }
00518 
00519     return PAPI_OK;
00520 }
00521 
00522 /********************************************************************/
00523 /* Below are the functions required by the PAPI component interface */
00524 /********************************************************************/
00525 
00527 int
00528 _vmware_init_thread( hwd_context_t *ctx )
00529 {
00530     (void) ctx;
00531 
00532 
00533 #ifdef VMGUESTLIB
00534 
00535     struct _vmware_context *context;
00536     VMGuestLibError glError;
00537 
00538     context=(struct _vmware_context *)ctx;
00539 
00540     if (use_guestlib) {
00541        glError = GuestLib_OpenHandle(&(context->glHandle));
00542        if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00543           fprintf(stderr,"OpenHandle failed: %s\n", 
00544            GuestLib_GetErrorText(glError));
00545           return PAPI_ECMP;
00546        }
00547     }
00548 
00549 #endif
00550 
00551     return PAPI_OK;
00552 }
00553 
00554 
00559 int
00560 _vmware_init_component( int cidx )
00561 {
00562 
00563   (void) cidx;
00564 
00565   int result;
00566 
00567     SUBDBG( "_vmware_init_component..." );
00568 
00569     /* Initialize and try to load the VMware library */
00570     /* Try to load the library. */
00571     result=LoadFunctions();
00572 
00573     if (result!=PAPI_OK) {
00574        strncpy(_vmware_vector.cmp_info.disabled_reason,
00575           "GuestLibTest: Failed to load shared library",
00576            PAPI_MAX_STR_LEN);
00577        return PAPI_ECMP;
00578     }
00579 
00580     /* we know in advance how many events we want                       */
00581     /* for actual hardware this might have to be determined dynamically */
00582 
00583     /* Allocate memory for the our event table */
00584     _vmware_native_table = ( struct _vmware_native_event_entry * )
00585       calloc( VMWARE_MAX_COUNTERS, sizeof ( struct _vmware_native_event_entry ));
00586     if ( _vmware_native_table == NULL ) {
00587        return PAPI_ENOMEM;
00588     }
00589 
00590 
00591 #ifdef VMGUESTLIB
00592 
00593     /* Detect if GuestLib works */
00594     {
00595 
00596         VMGuestLibError glError;
00597         VMGuestLibHandle glHandle;
00598 
00599     use_guestlib=0;
00600 
00601     /* try to open */
00602     glError = GuestLib_OpenHandle(&glHandle);
00603     if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00604        fprintf(stderr,"OpenHandle failed: %s\n", 
00605            GuestLib_GetErrorText(glError));
00606     }
00607     else {
00608        /* open worked, try to update */
00609        glError = GuestLib_UpdateInfo(glHandle);
00610        if (glError != VMGUESTLIB_ERROR_SUCCESS) {
00611           fprintf(stderr,"UpdateInfo failed: %s\n", 
00612               GuestLib_GetErrorText(glError));
00613        }
00614        else {
00615           /* update worked, things work! */
00616           use_guestlib=1;
00617        }
00618        /* shut things down */
00619        glError = GuestLib_CloseHandle(glHandle);
00620     }
00621 
00622         }
00623 
00624 
00625 
00626     if (use_guestlib) {
00627 
00628     /* fill in the event table parameters */
00629     strcpy( _vmware_native_table[num_events].name,
00630         "CPU_LIMIT" );
00631     strncpy( _vmware_native_table[num_events].description,
00632         "Retrieves the upper limit of processor use in MHz "
00633         "available to the virtual machine.",
00634         PAPI_HUGE_STR_LEN);
00635     strcpy( _vmware_native_table[num_events].units,"MHz");
00636     _vmware_native_table[num_events].which_counter=
00637             VMWARE_CPU_LIMIT_MHZ;
00638     _vmware_native_table[num_events].report_difference=0;
00639     num_events++;
00640 
00641     strcpy( _vmware_native_table[num_events].name,
00642         "CPU_RESERVATION" );
00643     strncpy( _vmware_native_table[num_events].description,
00644         "Retrieves the minimum processing power in MHz "
00645         "reserved for the virtual machine.",
00646         PAPI_HUGE_STR_LEN);
00647     strcpy( _vmware_native_table[num_events].units,"MHz");
00648     _vmware_native_table[num_events].which_counter=
00649             VMWARE_CPU_RESERVATION_MHZ;
00650     _vmware_native_table[num_events].report_difference=0;
00651     num_events++;
00652 
00653     strcpy( _vmware_native_table[num_events].name,
00654         "CPU_SHARES" );
00655     strncpy( _vmware_native_table[num_events].description,
00656         "Retrieves the number of CPU shares allocated "
00657         "to the virtual machine.",
00658         PAPI_HUGE_STR_LEN);
00659     strcpy( _vmware_native_table[num_events].units,"shares");
00660     _vmware_native_table[num_events].which_counter=
00661             VMWARE_CPU_SHARES;
00662     _vmware_native_table[num_events].report_difference=0;
00663     num_events++;
00664 
00665     strcpy( _vmware_native_table[num_events].name,
00666         "CPU_STOLEN" );
00667     strncpy( _vmware_native_table[num_events].description,
00668         "Retrieves the number of milliseconds that the "
00669         "virtual machine was in a ready state (able to "
00670         "transition to a run state), but was not scheduled to run.",
00671         PAPI_HUGE_STR_LEN);
00672     strcpy( _vmware_native_table[num_events].units,"ms");
00673     _vmware_native_table[num_events].which_counter=
00674             VMWARE_CPU_STOLEN_MS;
00675     _vmware_native_table[num_events].report_difference=0;
00676     num_events++;
00677 
00678     strcpy( _vmware_native_table[num_events].name,
00679         "CPU_USED" );
00680     strncpy( _vmware_native_table[num_events].description,
00681         "Retrieves the number of milliseconds during which "
00682         "the virtual machine has used the CPU. This value "
00683         "includes the time used by the guest operating system "
00684         "and the time used by virtualization code for tasks for "
00685         "this virtual machine. You can combine this value with "
00686         "the elapsed time (VMWARE_ELAPSED) to estimate the "
00687         "effective virtual machine CPU speed. This value is a "
00688         "subset of elapsedMs.",
00689         PAPI_HUGE_STR_LEN );
00690     strcpy( _vmware_native_table[num_events].units,"ms");
00691     _vmware_native_table[num_events].which_counter=
00692             VMWARE_CPU_USED_MS;
00693     _vmware_native_table[num_events].report_difference=1;
00694     num_events++;
00695 
00696     strcpy( _vmware_native_table[num_events].name,
00697         "ELAPSED" );
00698     strncpy( _vmware_native_table[num_events].description,
00699         "Retrieves the number of milliseconds that have passed "
00700         "in the virtual machine since it last started running on "
00701         "the server. The count of elapsed time restarts each time "
00702         "the virtual machine is powered on, resumed, or migrated "
00703         "using VMotion. This value counts milliseconds, regardless "
00704         "of whether the virtual machine is using processing power "
00705         "during that time. You can combine this value with the CPU "
00706         "time used by the virtual machine (VMWARE_CPU_USED) to "
00707         "estimate the effective virtual machine xCPU speed. "
00708         "cpuUsedMS is a subset of this value.",
00709         PAPI_HUGE_STR_LEN );
00710     strcpy( _vmware_native_table[num_events].units,"ms");
00711     _vmware_native_table[num_events].which_counter=
00712             VMWARE_ELAPSED_MS;
00713     _vmware_native_table[num_events].report_difference=1;
00714     num_events++;
00715 
00716     strcpy( _vmware_native_table[num_events].name,
00717         "MEM_ACTIVE" );
00718     strncpy( _vmware_native_table[num_events].description,
00719          "Retrieves the amount of memory the virtual machine is "
00720          "actively using in MB - Its estimated working set size.",
00721          PAPI_HUGE_STR_LEN );
00722     strcpy( _vmware_native_table[num_events].units,"MB");
00723     _vmware_native_table[num_events].which_counter=
00724                  VMWARE_MEM_ACTIVE_MB;
00725     _vmware_native_table[num_events].report_difference=0;
00726     num_events++;
00727 
00728     strcpy( _vmware_native_table[num_events].name,
00729         "MEM_BALLOONED" );
00730     strncpy( _vmware_native_table[num_events].description,
00731         "Retrieves the amount of memory that has been reclaimed "
00732         "from this virtual machine by the vSphere memory balloon "
00733         "driver (also referred to as the 'vmemctl' driver) in MB.",
00734         PAPI_HUGE_STR_LEN );
00735     strcpy( _vmware_native_table[num_events].units,"MB");
00736     _vmware_native_table[num_events].which_counter=
00737             VMWARE_MEM_BALLOONED_MB;
00738     _vmware_native_table[num_events].report_difference=0;
00739     num_events++;
00740 
00741     strcpy( _vmware_native_table[num_events].name,
00742         "MEM_LIMIT" );
00743     strncpy( _vmware_native_table[num_events].description,
00744         "Retrieves the upper limit of memory that is available "
00745         "to the virtual machine in MB.",
00746         PAPI_HUGE_STR_LEN );
00747     strcpy( _vmware_native_table[num_events].units,"MB");
00748     _vmware_native_table[num_events].which_counter=
00749             VMWARE_MEM_LIMIT_MB;
00750     _vmware_native_table[num_events].report_difference=0;
00751     num_events++;
00752 
00753     strcpy( _vmware_native_table[num_events].name,
00754         "MEM_MAPPED" );
00755     strncpy( _vmware_native_table[num_events].description,
00756         "Retrieves the amount of memory that is allocated to "
00757         "the virtual machine in MB. Memory that is ballooned, "
00758         "swapped, or has never been accessed is excluded.",
00759         PAPI_HUGE_STR_LEN );
00760     strcpy( _vmware_native_table[num_events].units,"MB");
00761     _vmware_native_table[num_events].which_counter=
00762             VMWARE_MEM_MAPPED_MB;
00763     _vmware_native_table[num_events].report_difference=0;
00764     num_events++;
00765 
00766     strcpy( _vmware_native_table[num_events].name,
00767         "MEM_OVERHEAD" );
00768     strncpy( _vmware_native_table[num_events].description,
00769         "Retrieves the amount of 'overhead' memory associated "
00770         "with this virtual machine that is currently consumed "
00771         "on the host system in MB. Overhead memory is additional "
00772         "memory that is reserved for data structures required by "
00773         "the virtualization layer.",
00774         PAPI_HUGE_STR_LEN );
00775     strcpy( _vmware_native_table[num_events].units,"MB");
00776     _vmware_native_table[num_events].which_counter=
00777             VMWARE_MEM_OVERHEAD_MB;
00778     _vmware_native_table[num_events].report_difference=0;
00779     num_events++;
00780 
00781     strcpy( _vmware_native_table[num_events].name,
00782         "MEM_RESERVATION" );
00783     strncpy( _vmware_native_table[num_events].description,
00784         "Retrieves the minimum amount of memory that is "
00785         "reserved for the virtual machine in MB.",
00786         PAPI_HUGE_STR_LEN );
00787     strcpy( _vmware_native_table[num_events].units,"MB");
00788     _vmware_native_table[num_events].which_counter=
00789             VMWARE_MEM_RESERVATION_MB;
00790     _vmware_native_table[num_events].report_difference=0;
00791     num_events++;
00792 
00793     strcpy( _vmware_native_table[num_events].name,
00794         "MEM_SHARED" );
00795     strncpy( _vmware_native_table[num_events].description,
00796         "Retrieves the amount of physical memory associated "
00797         "with this virtual machine that is copy-on-write (COW) "
00798         "shared on the host in MB.",
00799         PAPI_HUGE_STR_LEN );
00800     strcpy( _vmware_native_table[num_events].units,"MB");
00801     _vmware_native_table[num_events].which_counter=
00802             VMWARE_MEM_SHARED_MB;
00803     _vmware_native_table[num_events].report_difference=0;
00804     num_events++;
00805 
00806     strcpy( _vmware_native_table[num_events].name,
00807         "MEM_SHARES" );
00808     strncpy( _vmware_native_table[num_events].description,
00809         "Retrieves the number of memory shares allocated to "
00810         "the virtual machine.",
00811         PAPI_HUGE_STR_LEN );
00812     strcpy( _vmware_native_table[num_events].units,"shares");
00813     _vmware_native_table[num_events].which_counter=
00814             VMWARE_MEM_SHARES;
00815     _vmware_native_table[num_events].report_difference=0;
00816     num_events++;
00817 
00818     strcpy( _vmware_native_table[num_events].name,
00819         "MEM_SWAPPED" );
00820     strncpy( _vmware_native_table[num_events].description,
00821         "Retrieves the amount of memory that has been reclaimed "
00822         "from this virtual machine by transparently swapping "
00823         "guest memory to disk in MB.",
00824         PAPI_HUGE_STR_LEN );
00825     strcpy( _vmware_native_table[num_events].units,"MB");
00826     _vmware_native_table[num_events].which_counter=
00827             VMWARE_MEM_SWAPPED_MB;
00828     _vmware_native_table[num_events].report_difference=0;
00829     num_events++;
00830 
00831     strcpy( _vmware_native_table[num_events].name,
00832         "MEM_TARGET_SIZE" );
00833     strncpy( _vmware_native_table[num_events].description,
00834         "Retrieves the size of the target memory allocation "
00835         "for this virtual machine in MB.",
00836         PAPI_HUGE_STR_LEN );
00837     strcpy( _vmware_native_table[num_events].units,"MB");
00838     _vmware_native_table[num_events].which_counter=
00839             VMWARE_MEM_TARGET_SIZE_MB;
00840     _vmware_native_table[num_events].report_difference=0;
00841     num_events++;
00842 
00843     strcpy( _vmware_native_table[num_events].name,
00844         "MEM_USED" );
00845     strncpy( _vmware_native_table[num_events].description,
00846         "Retrieves the estimated amount of physical host memory "
00847         "currently consumed for this virtual machine's "
00848         "physical memory.",
00849         PAPI_HUGE_STR_LEN );
00850     strcpy( _vmware_native_table[num_events].units,"MB");
00851     _vmware_native_table[num_events].which_counter=
00852             VMWARE_MEM_USED_MB;
00853     _vmware_native_table[num_events].report_difference=0;
00854     num_events++;
00855 
00856     strcpy( _vmware_native_table[num_events].name,
00857         "HOST_CPU" );
00858     strncpy( _vmware_native_table[num_events].description,
00859         "Retrieves the speed of the ESX system's physical "
00860         "CPU in MHz.",
00861         PAPI_HUGE_STR_LEN );
00862     strcpy( _vmware_native_table[num_events].units,"MHz");
00863     _vmware_native_table[num_events].which_counter=
00864             VMWARE_HOST_CPU_MHZ;
00865     _vmware_native_table[num_events].report_difference=0;
00866     num_events++;
00867     }
00868 
00869 #endif
00870 
00871     /* For VMWare Pseudo Performance Counters */
00872     if ( getenv( "PAPI_VMWARE_PSEUDOPERFORMANCE" ) ) {
00873 
00874             use_pseudo=1;
00875 
00876         strcpy( _vmware_native_table[num_events].name,
00877             "HOST_TSC" );
00878         strncpy( _vmware_native_table[num_events].description,
00879             "Physical host TSC",
00880             PAPI_HUGE_STR_LEN );
00881         strcpy( _vmware_native_table[num_events].units,"cycles");
00882         _vmware_native_table[num_events].which_counter=
00883                 VMWARE_HOST_TSC;
00884             _vmware_native_table[num_events].report_difference=1;
00885         num_events++;
00886 
00887         strcpy( _vmware_native_table[num_events].name,
00888             "ELAPSED_TIME" );
00889         strncpy( _vmware_native_table[num_events].description,
00890             "Elapsed real time in ns.",
00891             PAPI_HUGE_STR_LEN );
00892             strcpy( _vmware_native_table[num_events].units,"ns");
00893         _vmware_native_table[num_events].which_counter=
00894                 VMWARE_ELAPSED_TIME;
00895             _vmware_native_table[num_events].report_difference=1;
00896         num_events++;
00897 
00898         strcpy( _vmware_native_table[num_events].name,
00899             "ELAPSED_APPARENT" );
00900         strncpy( _vmware_native_table[num_events].description,
00901             "Elapsed apparent time in ns.",
00902             PAPI_HUGE_STR_LEN );
00903             strcpy( _vmware_native_table[num_events].units,"ns");
00904         _vmware_native_table[num_events].which_counter=
00905                 VMWARE_ELAPSED_APPARENT;
00906             _vmware_native_table[num_events].report_difference=1;
00907         num_events++;
00908     }
00909 
00910     if (num_events==0) {
00911        strncpy(_vmware_vector.cmp_info.disabled_reason,
00912           "VMware SDK not installed, and PAPI_VMWARE_PSEUDOPERFORMANCE not set",
00913            PAPI_MAX_STR_LEN);
00914       return PAPI_ECMP;
00915     }
00916 
00917     _vmware_vector.cmp_info.num_native_events = num_events;
00918 
00919     return PAPI_OK;
00920 }
00921 
00923 int
00924 _vmware_init_control_state( hwd_control_state_t *ctl )
00925 {
00926   (void) ctl;
00927 
00928     return PAPI_OK;
00929 }
00930 
00935 int
00936 _vmware_ntv_enum_events( unsigned int *EventCode, int modifier )
00937 {
00938 
00939     switch ( modifier ) {
00940             /* return EventCode of first event */
00941         case PAPI_ENUM_FIRST:
00942              if (num_events==0) return PAPI_ENOEVNT;
00943              *EventCode = 0;
00944              return PAPI_OK;
00945              break;
00946             /* return EventCode of passed-in Event */
00947         case PAPI_ENUM_EVENTS:{
00948              int index = *EventCode;
00949 
00950              if ( index < num_events - 1 ) {
00951             *EventCode = *EventCode + 1;
00952             return PAPI_OK;
00953              } else {
00954             return PAPI_ENOEVNT;
00955              }
00956              break;
00957         }
00958         default:
00959              return PAPI_EINVAL;
00960     }
00961     return PAPI_EINVAL;
00962 }
00963 
00964 int
00965 _vmware_ntv_code_to_info(unsigned int EventCode, PAPI_event_info_t *info) 
00966 {
00967 
00968   int index = EventCode;
00969 
00970   if ( ( index < 0) || (index >= num_events )) return PAPI_ENOEVNT;
00971 
00972   strncpy( info->symbol, _vmware_native_table[index].name, 
00973            sizeof(info->symbol));
00974 
00975   strncpy( info->long_descr, _vmware_native_table[index].description, 
00976            sizeof(info->symbol));
00977 
00978   strncpy( info->units, _vmware_native_table[index].units, 
00979            sizeof(info->units));
00980 
00981   return PAPI_OK;
00982 }
00983 
00984 
00990 int
00991 _vmware_ntv_code_to_name( unsigned int EventCode, char *name, int len )
00992 {
00993     int index = EventCode;
00994 
00995     if ( index >= 0 && index < num_events ) {
00996        strncpy( name, _vmware_native_table[index].name, len );
00997     }
00998     return PAPI_OK;
00999 }
01000 
01006 int
01007 _vmware_ntv_code_to_descr( unsigned int EventCode, char *name, int len )
01008 {
01009   int index = EventCode;
01010 
01011     if ( index >= 0 && index < num_events ) {
01012        strncpy( name, _vmware_native_table[index].description, len );
01013     }
01014     return PAPI_OK;
01015 }
01016 
01018 int
01019 _vmware_update_control_state( hwd_control_state_t *ctl, 
01020                   NativeInfo_t *native, 
01021                   int count, 
01022                   hwd_context_t *ctx )
01023 {
01024     (void) ctx;
01025 
01026     struct _vmware_control_state *control;
01027 
01028     int i, index;
01029 
01030     control=(struct _vmware_control_state *)ctl;
01031 
01032     for ( i = 0; i < count; i++ ) {
01033         index = native[i].ni_event;
01034         control->which_counter[i]=_vmware_native_table[index].which_counter;
01035         native[i].ni_position = i;
01036     }
01037     control->num_events=count;
01038 
01039     return PAPI_OK;
01040 }
01041 
01043 int
01044 _vmware_start( hwd_context_t *ctx, hwd_control_state_t *ctl )
01045 {
01046     struct _vmware_context *context;
01047     (void) ctl;
01048 
01049     context=(struct _vmware_context *)ctx;
01050 
01051     _vmware_hardware_read( context, 1 );
01052 
01053     return PAPI_OK;
01054 }
01055 
01057 int
01058 _vmware_stop( hwd_context_t *ctx, hwd_control_state_t *ctl )
01059 {
01060 
01061     struct _vmware_context *context;
01062     (void) ctl;
01063 
01064     context=(struct _vmware_context *)ctx;
01065 
01066     _vmware_hardware_read( context, 0 );    
01067 
01068     return PAPI_OK;
01069 }
01070 
01072 int
01073 _vmware_read( hwd_context_t *ctx, 
01074           hwd_control_state_t *ctl,
01075           long_long **events, int flags )
01076 {
01077 
01078     struct _vmware_context *context;
01079     struct _vmware_control_state *control;
01080 
01081     (void) flags;
01082     int i;
01083 
01084     context=(struct _vmware_context *)ctx;
01085     control=(struct _vmware_control_state *)ctl;
01086 
01087     _vmware_hardware_read( context, 0 );
01088 
01089     for (i=0; i<control->num_events; i++) {
01090       
01091       if (_vmware_native_table[
01092               _vmware_native_table[control->which_counter[i]].which_counter].
01093              report_difference) {
01094          control->value[i]=context->values[control->which_counter[i]]-
01095                            context->start_values[control->which_counter[i]];
01096       } else {
01097          control->value[i]=context->values[control->which_counter[i]];
01098       }
01099       //      printf("%d %d %lld-%lld=%lld\n",i,control->which_counter[i],
01100       // context->values[control->which_counter[i]],
01101       //     context->start_values[control->which_counter[i]],
01102       //     control->value[i]);
01103 
01104     }
01105 
01106     *events = control->value;
01107 
01108     return PAPI_OK;
01109 }
01110 
01112 /*    otherwise, the updated state is written to ESI->hw_start      */
01113 int
01114 _vmware_write( hwd_context_t * ctx, hwd_control_state_t * ctrl, long_long events[] )
01115 {
01116     (void) ctx;
01117     (void) ctrl;
01118     (void) events;
01119     SUBDBG( "_vmware_write... %p %p", ctx, ctrl );
01120     /* FIXME... this should actually carry out the write, though     */
01121     /*  this is non-trivial as which counter being written has to be */
01122     /*  determined somehow.                                          */
01123     return PAPI_OK;
01124 }
01125 
01127 int
01128 _vmware_reset( hwd_context_t *ctx, hwd_control_state_t *ctl )
01129 {
01130     (void) ctx;
01131     (void) ctl;
01132 
01133     return PAPI_OK;
01134 }
01135 
01137 int
01138 _vmware_shutdown_thread( hwd_context_t *ctx )
01139 {
01140     (void) ctx;
01141 
01142 #ifdef VMGUESTLIB
01143         VMGuestLibError glError;
01144     struct _vmware_context *context;
01145 
01146     context=(struct _vmware_context *)ctx;
01147 
01148     if (use_guestlib) {
01149            glError = GuestLib_CloseHandle(context->glHandle);
01150            if (glError != VMGUESTLIB_ERROR_SUCCESS) {
01151                fprintf(stderr, "Failed to CloseHandle: %s\n", 
01152                GuestLib_GetErrorText(glError));
01153                return PAPI_ECMP;
01154        }
01155     }
01156 #endif
01157 
01158     return PAPI_OK;
01159 }
01160 
01162 int
01163 _vmware_shutdown_component( void )
01164 {
01165 
01166 #ifdef VMGUESTLIB
01167     if (dlclose(dlHandle)) {
01168         fprintf(stderr, "dlclose failed\n");
01169         return EXIT_FAILURE;
01170     }
01171 #endif
01172 
01173     return PAPI_OK;
01174 }
01175 
01176 
01182 int
01183 _vmware_ctl( hwd_context_t *ctx, int code, _papi_int_option_t *option )
01184 {
01185 
01186     (void) ctx;
01187     (void) code;
01188     (void) option;
01189 
01190     SUBDBG( "_vmware_ctl..." );
01191 
01192     return PAPI_OK;
01193 }
01194 
01204 int
01205 _vmware_set_domain( hwd_control_state_t *ctl, int domain )
01206 {
01207     (void) ctl;
01208 
01209     int found = 0;
01210     SUBDBG( "_vmware_set_domain..." );
01211     if ( PAPI_DOM_USER & domain ) {
01212         SUBDBG( " PAPI_DOM_USER " );
01213         found = 1;
01214     }
01215     if ( PAPI_DOM_KERNEL & domain ) {
01216         SUBDBG( " PAPI_DOM_KERNEL " );
01217         found = 1;
01218     }
01219     if ( PAPI_DOM_OTHER & domain ) {
01220         SUBDBG( " PAPI_DOM_OTHER " );
01221         found = 1;
01222     }
01223     if ( PAPI_DOM_ALL & domain ) {
01224         SUBDBG( " PAPI_DOM_ALL " );
01225         found = 1;
01226     }
01227     if ( !found ) {
01228         return ( PAPI_EINVAL );
01229     }
01230     return PAPI_OK;
01231 }
01232 
/** Vector table exported to the PAPI framework: component metadata,
 *  sizes of the framework-opaque private structures, and the
 *  function pointers implemented in this file. */
papi_vector_t _vmware_vector = {
    .cmp_info = {
        /* default component information (unspecified values are initialized to 0) */
        .name = "vmware",
        .short_name = "vmware",
        .description = "Provide support for VMware vmguest and pseudo counters",
        .version = "5.0",
        .num_mpx_cntrs = VMWARE_MAX_COUNTERS,
        .num_cntrs = VMWARE_MAX_COUNTERS,
        .default_domain = PAPI_DOM_USER,
        .available_domains = PAPI_DOM_USER,
        .default_granularity = PAPI_GRN_THR,
        .available_granularities = PAPI_GRN_THR,
        .hardware_intr_sig = PAPI_INT_SIGNAL,

        /* component specific cmp_info initializations */
        .fast_real_timer = 0,
        .fast_virtual_timer = 0,
        .attach = 0,
        .attach_must_ptrace = 0,
    },
    /* sizes of framework-opaque component-private structures */
    .size = {
        .context = sizeof ( struct _vmware_context ),
        .control_state = sizeof ( struct _vmware_control_state ),
        .reg_value = sizeof ( struct _vmware_register ),
        .reg_alloc = sizeof ( struct _vmware_reg_alloc ),
    }
    ,
    /* function pointers in this component */
    .init_thread =        _vmware_init,
    .init_component =     _vmware_init_component,
    .init_control_state = _vmware_init_control_state,
    .start =              _vmware_start,
    .stop =               _vmware_stop,
    .read =               _vmware_read,
    .write =              _vmware_write,
    .shutdown_thread =    _vmware_shutdown_thread,
    .shutdown_component = _vmware_shutdown_component,
    .ctl =                _vmware_ctl,

    .update_control_state = _vmware_update_control_state,
    .set_domain = _vmware_set_domain,
    .reset = _vmware_reset,

    /* native event enumeration / translation hooks */
    .ntv_enum_events = _vmware_ntv_enum_events,
    .ntv_code_to_name = _vmware_ntv_code_to_name,
    .ntv_code_to_descr = _vmware_ntv_code_to_descr,
    .ntv_code_to_info = _vmware_ntv_code_to_info,

};
01285 
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Defines