PAPI  5.3.0.0
perfctr-ppc64.c
Go to the documentation of this file.
00001 /****************************/
00002 /* THIS IS OPEN SOURCE CODE */
00003 /****************************/
00004 
00005 /*
00006 * File:    perfctr-ppc64.c
00007 * Author:  Maynard Johnson
00008 *          maynardj@us.ibm.com
00009 * Mods:    <your name here>
00010 *          <your email address>
00011 */
00012 
00013 /* PAPI stuff */
00014 #include "papi.h"
00015 #include "papi_internal.h"
00016 #include "papi_vector.h"
00017 #include SUBSTRATE
00018 
00019 #ifdef PERFCTR26
00020 #define PERFCTR_CPU_NAME   perfctr_info_cpu_name
00021 
00022 #define PERFCTR_CPU_NRCTRS perfctr_info_nrctrs
00023 #else
00024 #define PERFCTR_CPU_NAME perfctr_cpu_name
00025 #define PERFCTR_CPU_NRCTRS perfctr_cpu_nrctrs
00026 #endif
00027 
/*
 * Preset-to-native event search table for the PPC64 substrates.
 * Each row maps one PAPI preset event to up to MAX_COUNTERS native
 * events plus a derivation rule (0 = direct count, DERIVED_ADD = sum,
 * DERIVED_POSTFIX = the RPN expression in the trailing string).
 * The processor variant is chosen at compile time: the first branch
 * covers POWER5/POWER5+, the second covers the remaining targets with
 * PPC970-only rows guarded by _PPC970.  A zeroed row terminates the list.
 */
static hwi_search_t preset_name_map_PPC64[PAPI_MAX_PRESET_EVENTS] = {
#if defined(_POWER5) || defined(_POWER5p)
    {PAPI_L1_DCM, {DERIVED_ADD, {PNE_PM_LD_MISS_L1, PNE_PM_ST_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 1 data cache misses */
    {PAPI_L1_DCA, {DERIVED_ADD, {PNE_PM_LD_REF_L1, PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Level 1 data cache access */
    /* can't count level 1 data cache hits due to hardware limitations. */
    {PAPI_L1_LDM, {0, {PNE_PM_LD_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Level 1 load misses */
    {PAPI_L1_STM, {0, {PNE_PM_ST_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Level 1 store misses */
    {PAPI_L1_DCW, {0, {PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Level 1 D cache write */
    {PAPI_L1_DCR, {0, {PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Level 1 D cache read */
    /* can't count level 2 data cache reads due to hardware limitations. */
    /* can't count level 2 data cache hits due to hardware limitations. */
    {PAPI_L2_DCM, {0, {PNE_PM_DATA_FROM_L2MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 2 data cache misses */
    {PAPI_L2_LDM, {0, {PNE_PM_DATA_FROM_L2MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 2 cache read misses */
    {PAPI_L3_DCR, {0, {PNE_PM_DATA_FROM_L2MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 3 data cache reads */
    /* can't count level 3 data cache hits due to hardware limitations. */
    {PAPI_L3_DCM, {DERIVED_ADD, {PNE_PM_DATA_FROM_LMEM, PNE_PM_DATA_FROM_RMEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 3 data cache misses (reads & writes) */
    {PAPI_L3_LDM, {DERIVED_ADD, {PNE_PM_DATA_FROM_LMEM, PNE_PM_DATA_FROM_RMEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 3 data cache read misses */
    /* can't count level 1 instruction cache accesses due to hardware limitations. */
    {PAPI_L1_ICH, {0, {PNE_PM_INST_FROM_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 1 inst cache hits */
    /* can't count level 1 instruction cache misses due to hardware limitations. */
    /* can't count level 2 instruction cache accesses due to hardware limitations. */
    /* can't count level 2 instruction cache hits due to hardware limitations. */
    {PAPI_L2_ICM, {0, {PNE_PM_INST_FROM_L2MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 2 inst cache misses */
    {PAPI_L3_ICA, {0, {PNE_PM_INST_FROM_L2MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 3 inst cache accesses */
    /* can't count level 3 instruction cache hits due to hardware limitations. */
    /* NOTE(review): this row uses DATA_FROM_* natives for an instruction-cache
       preset — looks copy-pasted from PAPI_L3_DCM; confirm against the POWER5
       event list before relying on PAPI_L3_ICM here. */
    {PAPI_L3_ICM, {DERIVED_ADD, {PNE_PM_DATA_FROM_LMEM, PNE_PM_DATA_FROM_RMEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Level 3 instruction cache misses (reads & writes) */
    {PAPI_FMA_INS, {0, {PNE_PM_FPU_FMA, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*FMA instructions completed */
    {PAPI_TOT_IIS, {0, {PNE_PM_INST_DISP, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Total instructions issued */
    {PAPI_TOT_INS, {0, {PNE_PM_INST_CMPL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Total instructions executed */
    {PAPI_INT_INS, {0, {PNE_PM_FXU_FIN, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Integer instructions executed */
    {PAPI_FP_OPS, {DERIVED_ADD, {PNE_PM_FPU_1FLOP, PNE_PM_FPU_FMA, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Floating point instructions executed */
    {PAPI_FP_INS, {0, {PNE_PM_FPU_FIN, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Floating point instructions executed */
    {PAPI_TOT_CYC, {0, {PNE_PM_RUN_CYC, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Processor cycles gated by the run latch */
    {PAPI_FDV_INS, {0, {PNE_PM_FPU_FDIV, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*FD ins */
    {PAPI_FSQ_INS, {0, {PNE_PM_FPU_FSQRT, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*FSq ins */
    {PAPI_TLB_DM, {0, {PNE_PM_DTLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Data translation lookaside buffer misses */
    {PAPI_TLB_IM, {0, {PNE_PM_ITLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Instr translation lookaside buffer misses */
    {PAPI_TLB_TL, {DERIVED_ADD, {PNE_PM_DTLB_MISS, PNE_PM_ITLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Total translation lookaside buffer misses */
    {PAPI_HW_INT, {0, {PNE_PM_EXT_INT, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Hardware interrupts */
    {PAPI_STL_ICY, {0, {PNE_PM_0INST_FETCH, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Cycles with No Instruction Issue */
    {PAPI_LD_INS, {0, {PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Load instructions */
    {PAPI_SR_INS, {0, {PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Store instructions */
    {PAPI_LST_INS, {DERIVED_ADD, {PNE_PM_ST_REF_L1, PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Load and Store instructions */
    {PAPI_BR_INS, {0, {PNE_PM_BR_ISSUED, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /* Branch instructions */
    {PAPI_BR_MSP, {DERIVED_ADD, {PNE_PM_BR_MPRED_CR, PNE_PM_BR_MPRED_TA, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /* Branch mispredictions */
    {PAPI_FXU_IDL, {0, {PNE_PM_FXU_IDLE, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Cycles integer units are idle */
    {0, {0, {PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}   /* end of list */
#else
#ifdef _PPC970
    /* Rows available only on PPC970. */
    {PAPI_L2_DCM, {0, {PNE_PM_DATA_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Level 2 data cache misses */
    {PAPI_L2_DCR, {DERIVED_ADD, {PNE_PM_DATA_FROM_L2, PNE_PM_DATA_FROM_L25_MOD, PNE_PM_DATA_FROM_L25_SHR, PNE_PM_DATA_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /* Level 2 data cache read attempts */
    {PAPI_L2_DCH, {DERIVED_ADD, {PNE_PM_DATA_FROM_L2, PNE_PM_DATA_FROM_L25_MOD, PNE_PM_DATA_FROM_L25_SHR, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /* Level 2 data cache hits */
    {PAPI_L2_LDM, {0, {PNE_PM_DATA_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Level 2 data cache read misses */
    /* no PAPI_L1_ICA since PM_INST_FROM_L1 and PM_INST_FROM_L2 cannot be counted simultaneously. */
    {PAPI_L1_ICM, {DERIVED_ADD, {PNE_PM_INST_FROM_L2, PNE_PM_INST_FROM_L25_SHR, PNE_PM_INST_FROM_L25_MOD, PNE_PM_INST_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /* Level 1 inst cache misses */
    {PAPI_L2_ICA, {DERIVED_ADD, {PNE_PM_INST_FROM_L2, PNE_PM_INST_FROM_L25_SHR, PNE_PM_INST_FROM_L25_MOD, PNE_PM_INST_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /* Level 2 inst cache accesses */
    {PAPI_L2_ICH, {DERIVED_ADD, {PNE_PM_INST_FROM_L2, PNE_PM_INST_FROM_L25_SHR, PNE_PM_INST_FROM_L25_MOD, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /* Level 2 inst cache hits */
    {PAPI_L2_ICM, {0, {PNE_PM_INST_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Level 2 inst cache misses */
#endif
/* Presets shared by all non-POWER5 targets handled by this branch. */
    {PAPI_L1_DCM, {DERIVED_ADD, {PNE_PM_LD_MISS_L1, PNE_PM_ST_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Level 1 data cache misses */
    {PAPI_L1_DCA, {DERIVED_ADD, {PNE_PM_LD_REF_L1, PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 1 data cache access */
    {PAPI_FXU_IDL, {0, {PNE_PM_FXU_IDLE, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Cycles integer units are idle */
    {PAPI_L1_LDM, {0, {PNE_PM_LD_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 1 load misses */
    {PAPI_L1_STM, {0, {PNE_PM_ST_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Level 1 store misses */
    {PAPI_L1_DCW, {0, {PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Level 1 D cache write */
    {PAPI_L1_DCR, {0, {PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Level 1 D cache read */
    {PAPI_FMA_INS, {0, {PNE_PM_FPU_FMA, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*FMA instructions completed */
    {PAPI_TOT_IIS, {0, {PNE_PM_INST_DISP, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Total instructions issued */
    {PAPI_TOT_INS, {0, {PNE_PM_INST_CMPL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Total instructions executed */
    {PAPI_INT_INS, {0, {PNE_PM_FXU_FIN, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Integer instructions executed */
    /* RPN: (FPU0_FIN + FPU1_FIN + FMA) - STF, i.e. FMA counts twice and
       store-to-float traffic is excluded. */
    {PAPI_FP_OPS, {DERIVED_POSTFIX, {PNE_PM_FPU0_FIN, PNE_PM_FPU1_FIN, PNE_PM_FPU_FMA, PNE_PM_FPU_STF, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, "N0|N1|+|N2|+|N3|-|"}}, /*Floating point instructions executed */
    {PAPI_FP_INS, {0, {PNE_PM_FPU_FIN, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Floating point instructions executed */
    {PAPI_TOT_CYC, {0, {PNE_PM_CYC, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Total cycles */
    {PAPI_FDV_INS, {0, {PNE_PM_FPU_FDIV, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*FD ins */
    {PAPI_FSQ_INS, {0, {PNE_PM_FPU_FSQRT, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*FSq ins */
    {PAPI_TLB_DM, {0, {PNE_PM_DTLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Data translation lookaside buffer misses */
    {PAPI_TLB_IM, {0, {PNE_PM_ITLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Instr translation lookaside buffer misses */
    {PAPI_TLB_TL, {DERIVED_ADD, {PNE_PM_DTLB_MISS, PNE_PM_ITLB_MISS, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /*Total translation lookaside buffer misses */
    {PAPI_HW_INT, {0, {PNE_PM_EXT_INT, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Hardware interrupts */
    {PAPI_STL_ICY, {0, {PNE_PM_0INST_FETCH, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /*Cycles with No Instruction Issue */
    {PAPI_LD_INS, {0, {PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Load instructions */
    {PAPI_SR_INS, {0, {PNE_PM_ST_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /*Store instructions */
    {PAPI_LST_INS, {DERIVED_ADD, {PNE_PM_ST_REF_L1, PNE_PM_LD_REF_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}}, /*Load and Store instructions */
    {PAPI_BR_INS, {0, {PNE_PM_BR_ISSUED, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Branch instructions */
    {PAPI_BR_MSP, {DERIVED_ADD, {PNE_PM_BR_MPRED_CR, PNE_PM_BR_MPRED_TA, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},  /* Branch mispredictions */
    /* RPN: (LD_REF - LD_MISS) + (ST_REF - ST_MISS). */
    {PAPI_L1_DCH, {DERIVED_POSTFIX, {PNE_PM_LD_REF_L1, PNE_PM_LD_MISS_L1, PNE_PM_ST_REF_L1, PNE_PM_ST_MISS_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, "N0|N1|-|N2|+|N3|-|"}}, /* Level 1 data cache hits */
    /* no PAPI_L2_STM, PAPI_L2_DCW nor PAPI_L2_DCA since stores/writes to L2 aren't countable */
    {PAPI_L3_DCM, {0, {PNE_PM_DATA_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Level 3 data cache misses (reads & writes) */
    {PAPI_L3_LDM, {0, {PNE_PM_DATA_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Level 3 data cache read misses */
    {PAPI_L1_ICH, {0, {PNE_PM_INST_FROM_L1, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},    /* Level 1 inst cache hits */
    {PAPI_L3_ICM, {0, {PNE_PM_INST_FROM_MEM, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}},   /* Level 3 inst cache misses */
    {0, {0, {PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL, PAPI_NULL}, {0}}} /* end of list */
#endif
};
/* Points at the table above; consumed by _papi_hwi_setup_all_presets()
 * via setup_ppc64_presets(). */
hwi_search_t *preset_search_map;
00124 
#if defined(_POWER5) || defined(_POWER5p)
/* AND-masks that zero a PMC's event-select field in the MMCR registers.
 * POWER5 programs only PMC1-PMC4 this way; unused PMC5/PMC6 are handled
 * separately via the PMC5_PMC6_FREEZE bit (see clear_unused_pmcsel_bits). */
unsigned long long pmc_sel_mask[NUM_COUNTER_MASKS] = {
    PMC1_SEL_MASK,
    PMC2_SEL_MASK,
    PMC3_SEL_MASK,
    PMC4_SEL_MASK
};
#else
/* Non-POWER5 parts expose eight programmable PMCs; the extra PMC8a mask
 * (last slot) is applied to MMCRA rather than MMCR0/MMCR1. */
unsigned long long pmc_sel_mask[NUM_COUNTER_MASKS] = {
    PMC1_SEL_MASK,
    PMC2_SEL_MASK,
    PMC3_SEL_MASK,
    PMC4_SEL_MASK,
    PMC5_SEL_MASK,
    PMC6_SEL_MASK,
    PMC7_SEL_MASK,
    PMC8_SEL_MASK,
    PMC8a_SEL_MASK
};
#endif
00145 
00146 static void
00147 clear_unused_pmcsel_bits( hwd_control_state_t * cntrl )
00148 {
00149     struct perfctr_cpu_control *cpu_ctl = &cntrl->control.cpu_control;
00150     int i;
00151     int num_used_counters = cpu_ctl->nractrs + cpu_ctl->nrictrs;
00152     unsigned int used_counters = 0x0;
00153     for ( i = 0; i < num_used_counters; i++ ) {
00154         used_counters |= 1 << cpu_ctl->pmc_map[i];
00155     }
00156 #if defined(_POWER5) || defined(_POWER5p)
00157     int freeze_pmc5_pmc6 = 0;          /* for Power5 use only */
00158 #endif
00159 
00160     for ( i = 0; i < MAX_COUNTERS; i++ ) {
00161         unsigned int active_counter = ( ( 1 << i ) & used_counters );
00162         if ( !active_counter ) {
00163 #if defined(_POWER5) || defined(_POWER5p)
00164             if ( i > 3 )
00165                 freeze_pmc5_pmc6++;
00166             else
00167                 cpu_ctl->ppc64.mmcr1 &= pmc_sel_mask[i];
00168 #else
00169             if ( i < 2 ) {
00170                 cpu_ctl->ppc64.mmcr0 &= pmc_sel_mask[i];
00171             } else {
00172                 cpu_ctl->ppc64.mmcr1 &= pmc_sel_mask[i];
00173                 if ( i == ( MAX_COUNTERS - 1 ) )
00174                     cpu_ctl->ppc64.mmcra &= pmc_sel_mask[NUM_COUNTER_MASKS - 1];
00175             }
00176 #endif
00177         }
00178     }
00179 #if defined(_POWER5) || defined(_POWER5p)
00180     if ( freeze_pmc5_pmc6 == 2 )
00181         cpu_ctl->ppc64.mmcr0 |= PMC5_PMC6_FREEZE;
00182 #endif
00183 }
00184 static int
00185 set_domain( hwd_control_state_t * cntrl, unsigned int domain )
00186 {
00187     int did = 0;
00188 
00189     /* A bit setting of '0' indicates "count this context".
00190      * Start off by turning off counting for all contexts; 
00191      * then, selectively re-enable.
00192      */
00193     cntrl->control.cpu_control.ppc64.mmcr0 |=
00194         PERF_USER | PERF_KERNEL | PERF_HYPERVISOR;
00195     if ( domain & PAPI_DOM_USER ) {
00196         cntrl->control.cpu_control.ppc64.mmcr0 |= PERF_USER;
00197         cntrl->control.cpu_control.ppc64.mmcr0 ^= PERF_USER;
00198         did = 1;
00199     }
00200     if ( domain & PAPI_DOM_KERNEL ) {
00201         cntrl->control.cpu_control.ppc64.mmcr0 |= PERF_KERNEL;
00202         cntrl->control.cpu_control.ppc64.mmcr0 ^= PERF_KERNEL;
00203         did = 1;
00204     }
00205     if ( domain & PAPI_DOM_SUPERVISOR ) {
00206         cntrl->control.cpu_control.ppc64.mmcr0 |= PERF_HYPERVISOR;
00207         cntrl->control.cpu_control.ppc64.mmcr0 ^= PERF_HYPERVISOR;
00208         did = 1;
00209     }
00210 
00211     if ( did ) {
00212         return ( PAPI_OK );
00213     } else {
00214         return ( PAPI_EINVAL );
00215     }
00216 
00217 }
00218 
00219 
00220 //extern native_event_entry_t *native_table;
00221 //extern hwi_search_t _papi_hwd_preset_map[];
00222 extern papi_mdi_t _papi_hwi_system_info;
00223 
#ifdef DEBUG
/* Dump the perfctr CPU control block — counter counts, the MMCR register
 * images, the logical-to-physical PMC mapping, and any interrupt reset
 * values — to the substrate debug log.  Debug builds only. */
void
print_control( const struct perfctr_cpu_control *control )
{
    unsigned int i;

    SUBDBG( "Control used:\n" );
    SUBDBG( "tsc_on\t\t\t%u\n", control->tsc_on );
    SUBDBG( "nractrs\t\t\t%u\n", control->nractrs );
    SUBDBG( "nrictrs\t\t\t%u\n", control->nrictrs );
    SUBDBG( "mmcr0\t\t\t0x%X\n", control->ppc64.mmcr0 );
    SUBDBG( "mmcr1\t\t\t0x%llX\n",
            ( unsigned long long ) control->ppc64.mmcr1 );
    SUBDBG( "mmcra\t\t\t0x%X\n", control->ppc64.mmcra );

    for ( i = 0; i < ( control->nractrs + control->nrictrs ); ++i ) {
        SUBDBG( "pmc_map[%u]\t\t%u\n", i, control->pmc_map[i] );
        /* ireset is only meaningful for interrupting counters; skip zeros. */
        if ( control->ireset[i] ) {
            /* Fixed: 'i' is unsigned, so the specifier must be %u (was %d),
             * matching the pmc_map line above. */
            SUBDBG( "ireset[%u]\t%X\n", i, control->ireset[i] );
        }
    }

}
#endif
00248 
00249 
00250 /* Assign the global native and preset table pointers, find the native
00251    table's size in memory and then call the preset setup routine. */
00252 int
00253 setup_ppc64_presets( int cputype )
00254 {
00255     preset_search_map = preset_name_map_PPC64;
00256     return ( _papi_hwi_setup_all_presets( preset_search_map, NULL ) );
00257 }
00258 
00259 /*called when an EventSet is allocated */
00260 int
00261 _papi_hwd_init_control_state( hwd_control_state_t * ptr )
00262 {
00263     int i = 0;
00264     for ( i = 0; i < _papi_hwi_system_info.sub_info.num_cntrs; i++ ) {
00265         ptr->control.cpu_control.pmc_map[i] = i;
00266     }
00267     ptr->control.cpu_control.tsc_on = 1;
00268     set_domain( ptr, _papi_hwi_system_info.sub_info.default_domain );
00269     return ( PAPI_OK );
00270 }
00271 
00272 /* At init time, the higher level library should always allocate and 
00273    reserve EventSet zero. */
00274 
00275 
00276 /* Called once per process. */
00277 /* No longer needed if not implemented
00278 int _papi_hwd_shutdown_global(void) {
00279    return (PAPI_OK);
00280 } */
00281 
00282 
00283 /* this function recusively does Modified Bipartite Graph counter allocation 
00284      success  return 1
00285      fail     return 0
00286 */
00287 static int
00288 do_counter_allocation( ppc64_reg_alloc_t * event_list, int size )
00289 {
00290     int i, j, group = -1;
00291     unsigned int map[GROUP_INTS];
00292 
00293     for ( i = 0; i < GROUP_INTS; i++ ) {
00294         map[i] = event_list[0].ra_group[i];
00295     }
00296 
00297     for ( i = 1; i < size; i++ ) {
00298         for ( j = 0; j < GROUP_INTS; j++ )
00299             map[j] &= event_list[i].ra_group[j];
00300     }
00301 
00302     for ( i = 0; i < GROUP_INTS; i++ ) {
00303         if ( map[i] ) {
00304             group = ffs( map[i] ) - 1 + i * 32;
00305             break;
00306         }
00307     }
00308 
00309     if ( group < 0 )
00310         return group;        /* allocation fail */
00311     else {
00312         for ( i = 0; i < size; i++ ) {
00313             for ( j = 0; j < MAX_COUNTERS; j++ ) {
00314                 if ( event_list[i].ra_counter_cmd[j] >= 0
00315                      && event_list[i].ra_counter_cmd[j] ==
00316                      group_map[group].counter_cmd[j] )
00317                     event_list[i].ra_position = j;
00318             }
00319         }
00320         return group;
00321     }
00322 }
00323 
00324 
00325 /* Register allocation */
00326 int
00327 _papi_hwd_allocate_registers( EventSetInfo_t * ESI )
00328 {
00329     hwd_control_state_t *this_state = &ESI->machdep;
00330     int i, j, natNum, index;
00331     ppc64_reg_alloc_t event_list[MAX_COUNTERS];
00332     int group;
00333 
00334     /* not yet successfully mapped, but have enough slots for events */
00335 
00336     /* Initialize the local structure needed 
00337        for counter allocation and optimization. */
00338     natNum = ESI->NativeCount;
00339     for ( i = 0; i < natNum; i++ ) {
00340         event_list[i].ra_position = -1;
00341         for ( j = 0; j < MAX_COUNTERS; j++ ) {
00342             if ( ( index =
00343                    native_name_map[ESI->NativeInfoArray[i].
00344                                    ni_event & PAPI_NATIVE_AND_MASK].index ) <
00345                  0 )
00346                 return PAPI_ECNFLCT;
00347             event_list[i].ra_counter_cmd[j] =
00348                 native_table[index].resources.counter_cmd[j];
00349         }
00350         for ( j = 0; j < GROUP_INTS; j++ ) {
00351             if ( ( index =
00352                    native_name_map[ESI->NativeInfoArray[i].
00353                                    ni_event & PAPI_NATIVE_AND_MASK].index ) <
00354                  0 )
00355                 return PAPI_ECNFLCT;
00356             event_list[i].ra_group[j] = native_table[index].resources.group[j];
00357         }
00358     }
00359     if ( ( group = do_counter_allocation( event_list, natNum ) ) >= 0 ) {   /* successfully mapped */
00360         /* copy counter allocations info back into NativeInfoArray */
00361         this_state->group_id = group;
00362         for ( i = 0; i < natNum; i++ ) {
00363 //         ESI->NativeInfoArray[i].ni_position = event_list[i].ra_position;
00364             this_state->control.cpu_control.pmc_map[i] =
00365                 event_list[i].ra_position;
00366             ESI->NativeInfoArray[i].ni_position = i;
00367         }
00368         /* update the control structure based on the NativeInfoArray */
00369         SUBDBG( "Group ID: %d\n", group );
00370 
00371         return PAPI_OK;
00372     } else {
00373         return PAPI_ECNFLCT;
00374     }
00375 }
00376 
00377 /* This function clears the current contents of the control structure and 
00378    updates it with whatever resources are allocated for all the native events
00379    in the native info structure array. */
00380 int
00381 _papi_hwd_update_control_state( hwd_control_state_t * this_state,
00382                                 NativeInfo_t * native, int count,
00383                                 hwd_context_t * context )
00384 {
00385 
00386 
00387     this_state->control.cpu_control.nractrs =
00388         count - this_state->control.cpu_control.nrictrs;
00389     // save control state
00390     unsigned int save_mmcr0_ctlbits =
00391         PERF_CONTROL_MASK & this_state->control.cpu_control.ppc64.mmcr0;
00392 
00393     this_state->control.cpu_control.ppc64.mmcr0 =
00394         group_map[this_state->group_id].mmcr0 | save_mmcr0_ctlbits;
00395 
00396     unsigned long long mmcr1 =
00397         ( ( unsigned long long ) group_map[this_state->group_id].mmcr1U ) << 32;
00398     mmcr1 += group_map[this_state->group_id].mmcr1L;
00399     this_state->control.cpu_control.ppc64.mmcr1 = mmcr1;
00400 
00401     this_state->control.cpu_control.ppc64.mmcra =
00402         group_map[this_state->group_id].mmcra;
00403 
00404     clear_unused_pmcsel_bits( this_state );
00405     return PAPI_OK;
00406 }
00407 
00408 
00409 int
00410 _papi_hwd_start( hwd_context_t * ctx, hwd_control_state_t * state )
00411 {
00412     int error;
00413 /*   clear_unused_pmcsel_bits(this_state);   moved to update_control_state */
00414 #ifdef DEBUG
00415     print_control( &state->control.cpu_control );
00416 #endif
00417     if ( state->rvperfctr != NULL ) {
00418         if ( ( error =
00419                rvperfctr_control( state->rvperfctr, &state->control ) ) < 0 ) {
00420             SUBDBG( "rvperfctr_control returns: %d\n", error );
00421             PAPIERROR( RCNTRL_ERROR );
00422             return ( PAPI_ESYS );
00423         }
00424         return ( PAPI_OK );
00425     }
00426     if ( ( error = vperfctr_control( ctx->perfctr, &state->control ) ) < 0 ) {
00427         SUBDBG( "vperfctr_control returns: %d\n", error );
00428         PAPIERROR( VCNTRL_ERROR );
00429         return ( PAPI_ESYS );
00430     }
00431     return ( PAPI_OK );
00432 }
00433 
00434 int
00435 _papi_hwd_stop( hwd_context_t * ctx, hwd_control_state_t * state )
00436 {
00437     if ( state->rvperfctr != NULL ) {
00438         if ( rvperfctr_stop( ( struct rvperfctr * ) ctx->perfctr ) < 0 ) {
00439             PAPIERROR( RCNTRL_ERROR );
00440             return ( PAPI_ESYS );
00441         }
00442         return ( PAPI_OK );
00443     }
00444     if ( vperfctr_stop( ctx->perfctr ) < 0 ) {
00445         PAPIERROR( VCNTRL_ERROR );
00446         return ( PAPI_ESYS );
00447     }
00448     return ( PAPI_OK );
00449 }
00450 
/* Read the current counter values into *dp.
 * When the set is paused, the last saved state is fetched via
 * vperfctr_read_state(); otherwise the live counters are sampled
 * (through the attached rvperfctr handle when one exists).
 * *dp is pointed directly at the pmc array inside spc->state — the
 * caller must not free it, and it is overwritten by the next read. */
int
_papi_hwd_read( hwd_context_t * ctx, hwd_control_state_t * spc, long long **dp,
                int flags )
{
    if ( flags & PAPI_PAUSED ) {
        vperfctr_read_state( ctx->perfctr, &spc->state, NULL );
    } else {
        SUBDBG( "vperfctr_read_ctrs\n" );
        if ( spc->rvperfctr != NULL ) {
            /* counters attached to another process */
            rvperfctr_read_ctrs( spc->rvperfctr, &spc->state );
        } else {
            vperfctr_read_ctrs( ctx->perfctr, &spc->state );
        }
    }

    /* NOTE(review): assumes the pmc array elements are 64-bit so the
     * cast to long long * is layout-compatible — confirm against the
     * perfctr headers for this platform. */
    *dp = ( long long * ) spc->state.pmc;
#ifdef DEBUG
    {
        if ( ISLEVEL( DEBUG_SUBSTRATE ) ) {
            int i;
            /* Log every active counter (accumulating + interrupting). */
            for ( i = 0;
                  i <
                  spc->control.cpu_control.nractrs +
                  spc->control.cpu_control.nrictrs; i++ ) {
                SUBDBG( "raw val hardware index %d is %lld\n", i,
                        ( long long ) spc->state.pmc[i] );
            }
        }
    }
#endif
    return ( PAPI_OK );
}
00483 
00484 
00485 int
00486 _papi_hwd_reset( hwd_context_t * ctx, hwd_control_state_t * cntrl )
00487 {
00488     return ( _papi_hwd_start( ctx, cntrl ) );
00489 }
00490 
00491 
00492 /* This routine is for shutting down threads, including the
00493    master thread. */
00494 int
00495 _papi_hwd_shutdown( hwd_context_t * ctx )
00496 {
00497     int retval = vperfctr_unlink( ctx->perfctr );
00498     SUBDBG( "_papi_hwd_shutdown vperfctr_unlink(%p) = %d\n", ctx->perfctr,
00499             retval );
00500     vperfctr_close( ctx->perfctr );
00501     SUBDBG( "_papi_hwd_shutdown vperfctr_close(%p)\n", ctx->perfctr );
00502     memset( ctx, 0x0, sizeof ( hwd_context_t ) );
00503 
00504     if ( retval )
00505         return ( PAPI_ESYS );
00506     return ( PAPI_OK );
00507 }
00508 
00509 
00510 /* Perfctr requires that interrupting counters appear at the end of the pmc list
00511    In the case a user wants to interrupt on a counter in an evntset that is not
00512    among the last events, we need to move the perfctr virtual events around to
00513    make it last. This function swaps two perfctr events, and then adjust the
00514    position entries in both the NativeInfoArray and the EventInfoArray to keep
00515    everything consistent.
00516 */
00517 static void
00518 swap_events( EventSetInfo_t * ESI, struct hwd_pmc_control *contr, int cntr1,
00519              int cntr2 )
00520 {
00521     unsigned int ui;
00522     int si, i, j;
00523 
00524     for ( i = 0; i < ESI->NativeCount; i++ ) {
00525         if ( ESI->NativeInfoArray[i].ni_position == cntr1 )
00526             ESI->NativeInfoArray[i].ni_position = cntr2;
00527         else if ( ESI->NativeInfoArray[i].ni_position == cntr2 )
00528             ESI->NativeInfoArray[i].ni_position = cntr1;
00529     }
00530     for ( i = 0; i < ESI->NumberOfEvents; i++ ) {
00531         for ( j = 0; ESI->EventInfoArray[i].pos[j] >= 0; j++ ) {
00532             if ( ESI->EventInfoArray[i].pos[j] == cntr1 )
00533                 ESI->EventInfoArray[i].pos[j] = cntr2;
00534             else if ( ESI->EventInfoArray[i].pos[j] == cntr2 )
00535                 ESI->EventInfoArray[i].pos[j] = cntr1;
00536         }
00537     }
00538     ui = contr->cpu_control.pmc_map[cntr1];
00539     contr->cpu_control.pmc_map[cntr1] = contr->cpu_control.pmc_map[cntr2];
00540     contr->cpu_control.pmc_map[cntr2] = ui;
00541 
00542     si = contr->cpu_control.ireset[cntr1];
00543     contr->cpu_control.ireset[cntr1] = contr->cpu_control.ireset[cntr2];
00544     contr->cpu_control.ireset[cntr2] = si;
00545 }
00546 
00547 
/* Enable (threshold != 0) or disable (threshold == 0) hardware-interrupt
 * overflow on the event at EventIndex.
 * Enabling: installs the signal handler, seeds the counter's ireset so it
 * interrupts after 'threshold' counts, converts the counter from
 * accumulating (nractrs) to interrupting (nrictrs), sets PERF_INT_ENABLE
 * in MMCR0, and moves the counter to the end of the pmc list as perfctr
 * requires.  Disabling reverses all of that.
 * Returns PAPI_OK / PAPI_EINVAL, or the signal-setup routine's status. */
int
_papi_hwd_set_overflow( EventSetInfo_t * ESI, int EventIndex, int threshold )
{
    hwd_control_state_t *this_state = &ESI->machdep;
    struct hwd_pmc_control *contr = &this_state->control;
    int i, ncntrs, nricntrs = 0, nracntrs = 0, retval = 0;

    OVFDBG( "EventIndex=%d, threshold = %d\n", EventIndex, threshold );

    /* The correct event to overflow is EventIndex */
    ncntrs = _papi_hwi_system_info.sub_info.num_cntrs;
    i = ESI->EventInfoArray[EventIndex].pos[0];
    if ( i >= ncntrs ) {
        OVFDBG( "Selector id (%d) larger than ncntrs (%d)\n", i, ncntrs );
        return PAPI_EINVAL;
    }
    if ( threshold != 0 ) {  /* Set an overflow threshold */
        /* Derived events span several counters, so there is no single
         * counter to overflow on. */
        if ( ESI->EventInfoArray[EventIndex].derived ) {
            OVFDBG( "Can't overflow on a derived event.\n" );
            return PAPI_EINVAL;
        }

        if ( ( retval =
               _papi_hwi_start_signal( _papi_hwi_system_info.sub_info.
                                       hardware_intr_sig,
                                       NEED_CONTEXT ) ) != PAPI_OK )
            return ( retval );

        /* Start the counter 'threshold' counts below its overflow point
         * so the interrupt fires after exactly 'threshold' events. */
        contr->cpu_control.ireset[i] = PMC_OVFL - threshold;
        /* Reclassify the counter: one more interrupting, one fewer
         * accumulating. */
        nricntrs = ++contr->cpu_control.nrictrs;
        nracntrs = --contr->cpu_control.nractrs;
        contr->si_signo = _papi_hwi_system_info.sub_info.hardware_intr_sig;
        contr->cpu_control.ppc64.mmcr0 |= PERF_INT_ENABLE;

        /* move this event to the bottom part of the list if needed */
        if ( i < nracntrs )
            swap_events( ESI, contr, i, nracntrs );

        OVFDBG( "Modified event set\n" );
    } else {
        if ( contr->cpu_control.ppc64.mmcr0 & PERF_INT_ENABLE ) {
            contr->cpu_control.ireset[i] = 0;
            nricntrs = --contr->cpu_control.nrictrs;
            nracntrs = ++contr->cpu_control.nractrs;
            /* Only clear the global interrupt enable when no interrupting
             * counters remain. */
            if ( !nricntrs )
                contr->cpu_control.ppc64.mmcr0 &= ( ~PERF_INT_ENABLE );
        }
        /* move this event to the top part of the list if needed */
        /* NOTE(review): if PERF_INT_ENABLE was not set above, nracntrs is
         * still 0 here and this would call swap_events(..., -1) — confirm
         * that callers never disable overflow on a set that has none. */
        if ( i >= nracntrs )
            swap_events( ESI, contr, i, nracntrs - 1 );
        if ( !nricntrs )
            contr->si_signo = 0;

        OVFDBG( "Modified event set\n" );

        retval =
            _papi_hwi_stop_signal( _papi_hwi_system_info.sub_info.
                                   hardware_intr_sig );
    }
#ifdef DEBUG
    print_control( &contr->cpu_control );
#endif
    OVFDBG( "%s:%d: Hardware overflow is still experimental.\n", __FILE__,
            __LINE__ );
    OVFDBG( "End of call. Exit code: %d\n", retval );

    return ( retval );
}
00616 
00617 
00618 
00619 int
00620 _papi_hwd_set_profile( EventSetInfo_t * ESI, int EventIndex, int threshold )
00621 {
00622     /* This function is not used and shouldn't be called. */
00623     return PAPI_ECMP;
00624 }
00625 
00626 
00627 int
00628 _papi_hwd_stop_profiling( ThreadInfo_t * master, EventSetInfo_t * ESI )
00629 {
00630     ESI->profile.overflowcount = 0;
00631     return PAPI_OK;
00632 }
00633 
/*
 * Set the counting domain (user/kernel/supervisor) on a control state.
 * Thin wrapper delegating to the substrate-local set_domain() helper.
 */
int
_papi_hwd_set_domain( hwd_control_state_t * cntrl, int domain )
{
    return set_domain( cntrl, domain );
}
00639 
00640 /* Routines to support an opaque native event table */
00641 char *
00642 _papi_hwd_ntv_code_to_name( unsigned int EventCode )
00643 {
00644     if ( ( EventCode & PAPI_NATIVE_AND_MASK ) >=
00645          _papi_hwi_system_info.sub_info.num_native_events )
00646         return ( '\0' );     // return a null string for invalid events
00647     return ( native_name_map[EventCode & PAPI_NATIVE_AND_MASK].name );
00648 }
00649 
00650 int
00651 _papi_hwd_ntv_code_to_bits( unsigned int EventCode, hwd_register_t * bits )
00652 {
00653     if ( ( EventCode & PAPI_NATIVE_AND_MASK ) >=
00654          _papi_hwi_system_info.sub_info.num_native_events ) {
00655         return ( PAPI_ENOEVNT );
00656     }
00657 
00658     memcpy( bits,
00659             &native_table[native_name_map[EventCode & PAPI_NATIVE_AND_MASK].
00660                           index].resources, sizeof ( hwd_register_t ) );
00661     return ( PAPI_OK );
00662 }
00663 
/*
 * Store one event value/name pair into caller-supplied output buffers.
 *
 * val    - value to store through 'values'.
 * nam    - NUL-terminated source name.
 * names  - destination buffer for the name, 'len' bytes long.
 * values - destination for the value.
 * len    - capacity of 'names' in bytes.
 *
 * The copied name is always NUL-terminated (truncated if necessary).
 */
static void
copy_value( unsigned int val, char *nam, char *names, unsigned int *values,
            int len )
{
    *values = val;
    /* Guard len <= 0: the original unconditionally wrote names[len - 1],
       an out-of-bounds store for a zero or negative capacity. */
    if ( len > 0 ) {
        strncpy( names, nam, len );
        /* strncpy does not terminate when nam fills the buffer. */
        names[len - 1] = 0;
    }
}
00672 
00673 
00674 char *
00675 _papi_hwd_ntv_code_to_descr( unsigned int EventCode )
00676 {
00677     if ( ( EventCode & PAPI_NATIVE_AND_MASK ) >=
00678          _papi_hwi_system_info.sub_info.num_native_events ) {
00679         return "\0";
00680     }
00681     return ( native_table
00682              [native_name_map[EventCode & PAPI_NATIVE_AND_MASK].index].
00683              description );
00684 }
00685 
/*
 * Enumerate native events, and on PPC64 the counter groups each event
 * belongs to.
 *
 * PAPI_ENUM_EVENTS:      advance *EventCode to the next native event;
 *                        PAPI_ENOEVNT at the end of the table.
 * PAPI_PWR4_ENUM_GROUPS: step through the groups of the current event
 *                        (group id carried in bits 16-23 of the code)
 *                        before advancing to the next event.
 * Any other modifier:    PAPI_EINVAL.
 */
int
_papi_hwd_ntv_enum_events( unsigned int *EventCode, int modifier )
{
    if ( modifier == PAPI_ENUM_EVENTS ) {
        int index = *EventCode & PAPI_NATIVE_AND_MASK;
        if ( index + 1 == MAX_NATNAME_MAP_INDEX ) {
            return ( PAPI_ENOEVNT );
        } else {
            *EventCode = *EventCode + 1;
            return ( PAPI_OK );
        }
    } else if ( modifier == PAPI_PWR4_ENUM_GROUPS ) {
/* Use this modifier for all supported PPC64 processors. */
        /* group: last group reported for this event (0 = none yet). */
        unsigned int group = ( *EventCode & 0x00FF0000 ) >> 16;
        int index = *EventCode & 0x000001FF;
        int i;
        unsigned int tmpg;

        /* Clear the group field; it is refilled below if a group is found. */
        *EventCode = *EventCode & 0xFF00FFFF;
        for ( i = 0; i < GROUP_INTS; i++ ) {
            /* Bitmask word i covers group ids i*32+1 .. i*32+32. */
            tmpg = native_table[index].resources.group[i];
            if ( group != 0 ) {
                /* Strip group bits at or below the last-reported group so
                   only not-yet-enumerated groups remain (ffs is 1-based). */
                while ( ( ffs( tmpg ) + i * 32 ) <= group && tmpg != 0 )
                    tmpg = tmpg ^ ( 1 << ( ffs( tmpg ) - 1 ) );
            }
            if ( tmpg != 0 ) {
                /* Next group this event is a member of; encode it into
                   bits 16-23 of the event code. */
                group = ffs( tmpg ) + i * 32;
                *EventCode = *EventCode | ( group << 16 );
                return ( PAPI_OK );
            }
        }
        /* All groups exhausted: advance to the next event, if any. */
        if ( index + 1 == MAX_NATNAME_MAP_INDEX ) {
            return ( PAPI_ENOEVNT );
        }
        *EventCode = *EventCode + 1;
        return ( PAPI_OK );
    } else
        return ( PAPI_EINVAL );
}
00725 
00726 papi_svector_t _ppc64_vector_table[] = {
00727     {( void ( * )(  ) ) _papi_hwd_init_control_state,
00728      VEC_PAPI_HWD_INIT_CONTROL_STATE},
00729     {( void ( * )(  ) ) _papi_hwd_allocate_registers,
00730      VEC_PAPI_HWD_ALLOCATE_REGISTERS},
00731     {( void ( * )(  ) ) _papi_hwd_update_control_state,
00732      VEC_PAPI_HWD_UPDATE_CONTROL_STATE},
00733     {( void ( * )(  ) ) _papi_hwd_start, VEC_PAPI_HWD_START},
00734     {( void ( * )(  ) ) _papi_hwd_stop, VEC_PAPI_HWD_STOP},
00735     {( void ( * )(  ) ) _papi_hwd_read, VEC_PAPI_HWD_READ},
00736     {( void ( * )(  ) ) _papi_hwd_reset, VEC_PAPI_HWD_RESET},
00737     {( void ( * )(  ) ) _papi_hwd_shutdown, VEC_PAPI_HWD_SHUTDOWN},
00738     {( void ( * )(  ) ) _papi_hwd_set_overflow, VEC_PAPI_HWD_SET_OVERFLOW},
00739     {( void ( * )(  ) ) _papi_hwd_set_profile, VEC_PAPI_HWD_SET_PROFILE},
00740     {( void ( * )(  ) ) _papi_hwd_stop_profiling, VEC_PAPI_HWD_STOP_PROFILING},
00741     {( void ( * )(  ) ) _papi_hwd_set_domain, VEC_PAPI_HWD_SET_DOMAIN},
00742     {( void ( * )(  ) ) *_papi_hwd_ntv_code_to_name,
00743      VEC_PAPI_HWD_NTV_CODE_TO_NAME},
00744     {( void ( * )(  ) ) _papi_hwd_ntv_code_to_bits,
00745      VEC_PAPI_HWD_NTV_CODE_TO_BITS},
00746     {( void ( * )(  ) ) *_papi_hwd_ntv_code_to_descr,
00747      VEC_PAPI_HWD_NTV_CODE_TO_DESCR},
00748     {( void ( * )(  ) ) *_papi_hwd_ntv_enum_events,
00749      VEC_PAPI_HWD_NTV_ENUM_EVENTS},
00750     {NULL, VEC_PAPI_END}
00751 };
00752 
00753 int
00754 ppc64_setup_vector_table( papi_vectors_t * vtable )
00755 {
00756     int retval = PAPI_OK;
00757     retval = _papi_hwi_setup_vector_table( vtable, _ppc64_vector_table );
00758 }
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Defines