perfstat_partition_total Interface

The perfstat_partition_total interface returns a perfstat_partition_total_t structure, which is defined in the libperfstat.h file.

Selected fields from the perfstat_partition_total_t structure include:
Item Descriptor
purr_coalescing Number of PURR cycles consumed for coalescing data if the calling partition is authorized to see pool-wide statistics; otherwise set to zero
spurr_coalescing Number of SPURR cycles consumed for coalescing data if the calling partition is authorized to see pool-wide statistics; otherwise set to zero
type Partition type
online_cpus Number of virtual processors currently allocated to the partition
online_memory Amount of memory currently allocated to the partition
Note: Page coalescing is a transparent operation wherein the hypervisor detects duplicate pages, directs all user reads to a single copy, and reclaims duplicate physical memory pages.
For a complete list, see the perfstat_partition_total_t section in the libperfstat.h header file.

The following code shows examples of how to use the perfstat_partition_total function.

The following example demonstrates how to emulate the lparstat -i command:
#include <stdio.h>
#include <stdlib.h>
#include <libperfstat.h>

/*
 * Emulate "lparstat -i": fetch the partition-wide totals once via
 * perfstat_partition_total() and print the static configuration fields.
 *
 * Returns 0 on success; exits with -1 if the perfstat call fails.
 */
int main(int argc, char* argv[]) 
{

    perfstat_partition_total_t pinfo;
    int rc;

    /* desired_number == 1: we want exactly one perfstat_partition_total_t */
    rc = perfstat_partition_total(NULL, &pinfo, sizeof(perfstat_partition_total_t), 1);
    if (rc != 1) {
    perror("Error in perfstat_partition_total");
    exit(-1);
    }
    printf("Partition Name                 : %s\n", pinfo.name);
    printf("Partition Number               : %u\n", pinfo.lpar_id);
    printf("Type                           : %s\n", pinfo.type.b.shared_enabled ? "Shared" : "Dedicated");
    printf("Mode                           : %s\n", pinfo.type.b.donate_enabled ? "Donating" : 
                                                    pinfo.type.b.capped ? "Capped" : "Uncapped");
    /* entitled_proc_capacity is expressed in units of 1/100th of a processor */
    printf("Entitled Capacity              : %u\n", pinfo.entitled_proc_capacity);
    printf("Partition Group-ID             : %u\n", pinfo.group_id);
    printf("Shared Pool ID                 : %u\n", pinfo.pool_id);
    printf("Online Virtual CPUs            : %u\n", pinfo.online_cpus);
    printf("Maximum Virtual CPUs           : %u\n", pinfo.max_cpus);
    printf("Minimum Virtual CPUs           : %u\n", pinfo.min_cpus);
    printf("Online Memory                  : %llu MB\n", pinfo.online_memory);
    printf("Maximum Memory                 : %llu MB\n", pinfo.max_memory);
    printf("Minimum Memory                 : %llu MB\n", pinfo.min_memory);
    printf("Variable Capacity Weight       : %u\n", pinfo.var_proc_capacity_weight);
    printf("Minimum Capacity               : %u\n", pinfo.min_proc_capacity);
    printf("Maximum Capacity               : %u\n", pinfo.max_proc_capacity);
    printf("Capacity Increment             : %u\n", pinfo.proc_capacity_increment);
    printf("Maximum Physical CPUs in system: %u\n", pinfo.max_phys_cpus_sys);
    printf("Active Physical CPUs in system : %u\n", pinfo.online_phys_cpus_sys);
    printf("Active CPUs in Pool            : %u\n", pinfo.phys_cpus_pool);
    printf("Unallocated Capacity           : %u\n", pinfo.unalloc_proc_capacity);
    /* entitlement (in 1/100 cpu units) spread over the online virtual CPUs */
    printf("Physical CPU Percentage        : %4.2f%%\n",
           (double)pinfo.entitled_proc_capacity / (double)pinfo.online_cpus);
    printf("Unallocated Weight             : %u\n", pinfo.unalloc_var_proc_capacity_weight);

    return 0;  /* explicit success status (was missing) */
}
The program displays an output that is similar to the following example output:
Partition Name                 : perfdev10
Partition Number               : 23
Type                           : Shared
Mode                           : Capped
Entitled Capacity              : 100
Partition Group-ID             : 32791
Shared Pool ID                 : 0
Online Virtual CPUs            : 2
Maximum Virtual CPUs           : 4
Minimum Virtual CPUs           : 1
Online Memory                  : 4096 MB
Maximum Memory                 : 8192 MB
Minimum Memory                 : 2048 MB
Variable Capacity Weight       : 0
Minimum Capacity               : 100
Maximum Capacity               : 400
Capacity Increment             : 1
Maximum Physical CPUs in system: 64
Active Physical CPUs in system : 64
Active CPUs in Pool            : 59
Unallocated Capacity           : 0
Physical CPU Percentage        : 50.00%
Unallocated Weight             : 0
The following example demonstrates emulating the lparstat command in default mode:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libperfstat.h>
#include <sys/systemcfg.h>

/* Ratio of nominal to actual timebase frequency, used to scale timebase tics */
#define XINTFRAC	((double)(_system_configuration.Xint)/(double)(_system_configuration.Xfrac))
/* convert physical processor tics to seconds */
#define HTIC2SEC(x)	((double)x * XINTFRAC)/(double)1000000000.0

/* Default sampling interval (seconds) and iteration count, overridable via -i / -c */
#define INTERVAL_DEFAULT  	2
#define COUNT_DEFAULT  		10

/*simplelparstat.c file can be used in two modes:-
1) Auto Mode:It makes use of perfstat_cpu_util API to calculate utilization values,enable 'UTIL_AUTO' macro for execution in auto mode.
2) Manual Mode: Calculations are done in the current code.
*/

/*#define UTIL_AUTO     1*/

#ifdef  UTIL_AUTO
 /* Print modes: percentage, milliseconds, or cores; CPU modes: PURR or SPURR */
 #define UTIL_MS 1
 #define UTIL_PCT 0
 #define UTIL_CORE 2
 #define UTIL_PURR 0
 #define UTIL_SPURR 1
 void display_lpar_util_auto(int mode,int cpumode,int count,int interval);
#endif


/* Non-zero until the column header has been printed (first iteration only) */
static int disp_util_header = 1;
/* Snapshots from the previous iteration; deltas against these drive all rates */
static u_longlong_t last_time_base;
static u_longlong_t last_pcpu_user, last_pcpu_sys, last_pcpu_idle, last_pcpu_wait;
static u_longlong_t last_lcpu_user, last_lcpu_sys, last_lcpu_idle, last_lcpu_wait;
static u_longlong_t last_busy_donated, last_idle_donated;
static u_longlong_t last_busy_stolen, last_idle_stolen;
static u_longlong_t last_phint = 0, last_vcsw = 0, last_pit = 0;

/* support for remote node statistics collection in a cluster environment */
perfstat_id_node_t nodeid;
static char nodename[MAXHOSTNAMELEN] = "";
static int collect_remote_node_stats = 0;

void display_lpar_util(void);

/*
 * Emulate "lparstat" in default mode.
 *
 * Options:
 *   -i <seconds>   sampling interval (default INTERVAL_DEFAULT)
 *   -c <count>     number of iterations (default COUNT_DEFAULT)
 *   -n <nodename>  collect statistics from a remote cluster node
 *
 * Returns 0 on success, -1 on usage error; exits(-1) if cluster
 * statistics collection cannot be enabled.
 */
int main(int argc, char* argv[])
{
    int interval = INTERVAL_DEFAULT;
    int count = COUNT_DEFAULT;
    int i, rc;
    char *optlist = "i:c:n:";
    int mode=0,cpumode=0;

    /* Process the arguments */
    while ((i = getopt(argc, argv, optlist)) != EOF)
    {
        switch(i)
        {
            case 'i':               /* Interval */
                     interval = atoi(optarg);
                     if( interval <= 0 )
                         interval = INTERVAL_DEFAULT;
                     break;
            case 'c':               /* Number of iterations */
                     count = atoi(optarg);
                     if( count <= 0 )
                         count = COUNT_DEFAULT;
                     break;
            case 'n':               /* Node name in a cluster environment */
                     strncpy(nodename, optarg, MAXHOSTNAMELEN);
                     nodename[MAXHOSTNAMELEN-1] = '\0';  /* strncpy may not terminate */
                     collect_remote_node_stats = 1;
                     break;
            default:
                    /* Invalid arguments. Print the usage and terminate */
                    fprintf (stderr, "usage: %s [-i <interval in seconds> ] [-c <number of iterations> ] [-n <node name in the cluster> ]\n", argv[0]);
                    return(-1);
        }
    }
 
    if(collect_remote_node_stats)
    {   /* perfstat_config needs to be called to enable cluster statistics collection */
        rc = perfstat_config(PERFSTAT_ENABLE|PERFSTAT_CLUSTER_STATS, NULL);
        if (rc == -1)
        {
            perror("cluster statistics collection is not available");
            exit(-1);
        }
    }

    #ifdef UTIL_AUTO
      printf("Enter CPU mode.\n");
      printf(" 0 PURR \n 1 SPURR \n");
      scanf("%d",&cpumode);
      printf("Enter print mode.\n");
      printf(" 0 PERCENTAGE\n 1 MILLISECONDS\n 2 CORES \n");
      scanf("%d",&mode);

      /* BUGFIX: reject the input when EITHER value is out of range
       * (valid print modes are 0..2, valid cpu modes are 0..1).
       * The original "(mode>2) && (cpumode>1)" only rejected when
       * both were invalid at once. */
      if (mode < 0 || mode > 2 || cpumode < 0 || cpumode > 1)
      {
        printf("Error: Invalid Input\n");
        exit(0);
      }
      display_lpar_util_auto(mode,cpumode,count,interval);

    #else
    /* Iterate "count" times */
    while (count > 0)
    {
        display_lpar_util();
        sleep(interval);
        count--;
 
    } 
    #endif

    if(collect_remote_node_stats)
    {   /* Now disable cluster statistics by calling perfstat_config */
        perfstat_config(PERFSTAT_DISABLE|PERFSTAT_CLUSTER_STATS, NULL);
    }

    return(0);
}

/* Save the current values for the next iteration */
/*
 * Snapshot the current counters into the file-scope "last_*" variables
 * so the next iteration can compute per-interval deltas.
 */
void save_last_values(perfstat_cpu_total_t *cpustats, perfstat_partition_total_t *lparstats)
{
    /* physical processor (PURR) tics, per mode */
    last_pcpu_user = lparstats->puser;
    last_pcpu_sys  = lparstats->psys;
    last_pcpu_idle = lparstats->pidle;
    last_pcpu_wait = lparstats->pwait;

    /* logical processor clock tics, per mode */
    last_lcpu_user = cpustats->user;
    last_lcpu_sys  = cpustats->sys;
    last_lcpu_idle = cpustats->idle;
    last_lcpu_wait = cpustats->wait;

    /* donated / stolen PURR tics (donation-enabled dedicated partitions) */
    last_busy_donated = lparstats->busy_donated_purr;
    last_idle_donated = lparstats->idle_donated_purr;
    last_busy_stolen  = lparstats->busy_stolen_purr;
    last_idle_stolen  = lparstats->idle_stolen_purr;

    /* miscellaneous counters: virtual context switches, timebase,
     * phantom interrupts, shared-pool idle time */
    last_vcsw      = lparstats->vol_virt_cswitch + lparstats->invol_virt_cswitch;
    last_time_base = lparstats->timebase_last;
    last_phint     = lparstats->phantintrs;
    last_pit       = lparstats->pool_idle_time;
}

/* retrieve metrics using perfstat API */
/*
 * Fetch one sample of partition-wide and cpu-wide totals via the
 * perfstat API, either locally or — when -n was given — from the
 * named remote cluster node.  Exits on any perfstat failure.
 */
void collect_metrics (perfstat_cpu_total_t *cpustats, perfstat_partition_total_t *lparstats)
{
    if (!collect_remote_node_stats)
    {
        /* local node: plain perfstat calls with a NULL name */
        if (perfstat_partition_total(NULL, lparstats, sizeof(perfstat_partition_total_t), 1) <= 0) {
            perror("perfstat_partition_total");
            exit(-1);
        }
        if (perfstat_cpu_total(NULL, cpustats, sizeof(perfstat_cpu_total_t), 1) <= 0) {
            perror("perfstat_cpu_total");
            exit(-1);
        }
        return;
    }

    /* remote node: identify the target by node name */
    nodeid.spec = NODENAME;
    strncpy(nodeid.u.nodename, nodename, MAXHOSTNAMELEN);

    if (perfstat_partition_total_node(&nodeid, lparstats, sizeof(perfstat_partition_total_t), 1) <= 0) {
        perror("perfstat_partition_total_node");
        exit(-1);
    }
    if (perfstat_cpu_total_node(&nodeid, cpustats, sizeof(perfstat_cpu_total_t), 1) <= 0) {
        perror("perfstat_cpu_total_node");
        exit(-1);
    }
}

/* print header informations */
/*
 * Print the column headers once, choosing the column set by partition
 * type: shared (with or without pool utilization authority) or
 * dedicated (with extra columns when donation is enabled).
 */
void print_header(perfstat_partition_total_t *lparstats)
{
    if (lparstats->type.b.shared_enabled) { /* partition is a SPLPAR */
       if (lparstats->type.b.pool_util_authority) { /* partition has PUA access */
          printf("\n%5s %5s %6s %6s %5s %5s %5s %5s %4s %5s",
          "%user", "%sys", "%wait", "%idle", "physc", "%entc", "lbusy", "app", "vcsw", "phint");
         
          printf("\n%5s %5s %6s %6s %5s %5s %5s %5s %4s %5s",
          "-----", "----", "-----", "-----", "-----", "-----", "-----", "---", "----", "-----");
       } else {
          printf("\n%5s %5s %6s %6s %5s %5s %5s %4s %5s",
          "%user", "%sys", "%wait", "%idle", "physc", "%entc", "lbusy", "vcsw", "phint");
         
          printf("\n%5s %5s %6s %6s %5s %5s %5s %4s %5s",
          "-----", "----", "-----", "-----", "-----", "-----", "-----", "----", "-----");
       }
    } else { /* partition is a DLPAR */
       printf("\n%5s %5s %6s %6s", "%user", "%sys", "%wait", "%idle");
       printf("\n%5s %5s %6s %6s", "-----", "----", "-----", "-----");
       if (lparstats->type.b.donate_enabled) { /* if donation is enabled for this DLPAR */
         /* BUGFIX: header label was misspelled "%phsyc" */
         printf(" %6s %6s", "%physc", "%vcsw");
         printf(" %6s %6s", "------", "-----");
       }
    }
    fprintf(stdout,"\n");
}

/* Gather and display lpar utilization metrics */
void display_lpar_util(void)
{
    u_longlong_t delta_pcpu_user, delta_pcpu_sys, delta_pcpu_idle, delta_pcpu_wait;
    u_longlong_t delta_lcpu_user, delta_lcpu_sys, delta_lcpu_idle, delta_lcpu_wait;
    u_longlong_t delta_busy_stolen, delta_busy_donated, delta_idle_stolen, delta_idle_donated;
    u_longlong_t vcsw, lcputime, pcputime;
    u_longlong_t entitled_purr, unused_purr;
    u_longlong_t delta_purr, delta_time_base;
    double phys_proc_consumed, entitlement, percent_ent, delta_sec;
    perfstat_partition_total_t lparstats;
    perfstat_cpu_total_t cpustats;
    
    /* retrieve the metrics */
    collect_metrics (&cpustats, &lparstats);

    /* Print the header for utilization metrics (only once) */  
    if (disp_util_header) {
       print_header (&lparstats);

       disp_util_header = 0;

       /* first iteration, we only read the data, print the header and save the data */
       save_last_values(&cpustats, &lparstats);
       return;
    }

    /* calculate physcial processor tics during the last interval in user, system, idle and wait mode  */
    delta_pcpu_user  = lparstats.puser - last_pcpu_user; 
    delta_pcpu_sys   = lparstats.psys  - last_pcpu_sys;
    delta_pcpu_idle  = lparstats.pidle - last_pcpu_idle;
    delta_pcpu_wait  = lparstats.pwait - last_pcpu_wait;
   
    /* calculate total physcial processor tics during the last interval */ 
    delta_purr = pcputime = delta_pcpu_user + delta_pcpu_sys + delta_pcpu_idle + delta_pcpu_wait;

    /* calculate clock tics during the last interval in user, system, idle and wait mode */
    delta_lcpu_user  = cpustats.user - last_lcpu_user; 
    delta_lcpu_sys   = cpustats.sys  - last_lcpu_sys;
    delta_lcpu_idle  = cpustats.idle - last_lcpu_idle;
    delta_lcpu_wait  = cpustats.wait - last_lcpu_wait;
   
    /* calculate total clock tics during the last interval */ 
    lcputime = delta_lcpu_user + delta_lcpu_sys + delta_lcpu_idle + delta_lcpu_wait;

    /* calculate entitlement for this partition - entitled physical processors for this partition */
    entitlement = (double)lparstats.entitled_proc_capacity / 100.0 ;

    /* calculate delta time in terms of physical processor tics */
    delta_time_base = lparstats.timebase_last - last_time_base;
    
    if (lparstats.type.b.shared_enabled) { /* partition is a SPLPAR */
        /* calculate entitled physical processor tics for this partitions */
        entitled_purr = delta_time_base * entitlement;
        if (entitled_purr < delta_purr) { /* for uncapped SPLPAR */
            /* in case of uncapped SPLPAR, consider entitled physical processor tics or 
             * consumed physical processor tics, which ever is greater */ 
            entitled_purr = delta_purr;
        }
        /* calculate unused physical processor tics out of the entitled physical processor tics */
        unused_purr = entitled_purr - delta_purr;
       
        /* distributed unused physical processor tics amoung wait and idle proportionally to wait and idle in clock tics */
        delta_pcpu_wait += unused_purr * ((double)delta_lcpu_wait / (double)(delta_lcpu_wait + delta_lcpu_idle));
        delta_pcpu_idle += unused_purr * ((double)delta_lcpu_idle / (double)(delta_lcpu_wait + delta_lcpu_idle));
      
        /* far SPLPAR, consider the entitled physical processor tics as the actual delta physical processor tics */
        pcputime = entitled_purr;
    }
    else if (lparstats.type.b.donate_enabled) { /* if donation is enabled for this DLPAR */
        /* calculate busy stolen and idle stolen physical processor tics during the last interval */ 
        /* these physical processor tics are stolen from this partition by the hypervsior
         * which will be used by wanting partitions */  
        delta_busy_stolen = lparstats.busy_stolen_purr - last_busy_stolen;
        delta_idle_stolen = lparstats.idle_stolen_purr - last_idle_stolen; 

        /* calculate busy donated and idle donated physical processor tics during the last interval */
        /* these physical processor tics are voluntarily donated by this partition to the hypervsior
         * which will be used by wanting partitions */  
        delta_busy_donated = lparstats.busy_donated_purr - last_busy_donated;
        delta_idle_donated = lparstats.idle_donated_purr - last_idle_donated;

        /* add busy donated and busy stolen to the kernel bucket, as cpu
         * cycles were donated / stolen when this partition is busy */
        delta_pcpu_sys += delta_busy_donated;
        delta_pcpu_sys += delta_busy_stolen;

        /* distribute idle stolen to wait and idle proportionally to the logical wait and idle in clock tics, as
         * cpu cycles were stolen when this partition is idle or in wait */
        delta_pcpu_wait += delta_idle_stolen * 
                              ((double)delta_lcpu_wait / (double)(delta_lcpu_wait + delta_lcpu_idle));
        delta_pcpu_idle += delta_idle_stolen * 
                              ((double)delta_lcpu_idle / (double)(delta_lcpu_wait + delta_lcpu_idle));

        /* distribute idle donated to wait and idle proportionally to the logical wait and idle in clock tics, as
         * cpu cycles were donated when this partition is idle or in wait */
        delta_pcpu_wait += delta_idle_donated * 
                              ((double)delta_lcpu_wait / (double)(delta_lcpu_wait + delta_lcpu_idle));
        delta_pcpu_idle += delta_idle_donated * 
                              ((double)delta_lcpu_idle / (double)(delta_lcpu_wait + delta_lcpu_idle));
     
        /* add donated to the total physical processor tics for CPU usage calculation, as they were 
         * distributed to respective buckets accordingly */
        pcputime +=  (delta_idle_donated + delta_busy_donated);

        /* add stolen to the total physical processor tics for CPU usage calculation, as they were 
         * distributed to respective buckets accordingly */
        pcputime +=  (delta_idle_stolen + delta_busy_stolen);

    }

    /* Processor Utilization - Applies for both SPLPAR and DLPAR*/
    printf("%5.1f ", (double)delta_pcpu_user * 100.0 / (double)pcputime);
    printf("%5.1f ", (double)delta_pcpu_sys  * 100.0 / (double)pcputime);
    printf("%6.1f ", (double)delta_pcpu_wait * 100.0 / (double)pcputime);
    printf("%6.1f ", (double)delta_pcpu_idle * 100.0 / (double)pcputime);

    if (lparstats.type.b.shared_enabled) { /* print SPLPAR specific stats */  
        /* Physical Processor Consumed by this partition */  
        phys_proc_consumed = (double)delta_purr / (double)delta_time_base;
        printf("%5.2f ", (double)phys_proc_consumed); 

        /* Percentage of Entitlement Consumed - percentage of entitled physical processor tics consumed */
        percent_ent = (double)((phys_proc_consumed / entitlement) * 100);
        printf("%5.1f ", percent_ent);

        /* Logical Processor Utilization of this partition */
        printf("%5.1f ", (double)(delta_lcpu_user+delta_lcpu_sys) * 100.0 / (double)lcputime);

        if (lparstats.type.b.pool_util_authority) { 
        /* Available physical Processor units available in the shared pool (app) */ 
           printf("%5.2f ", (double)(lparstats.pool_idle_time - last_pit) / 
                 XINTFRAC*(double)delta_time_base);
        }

        /* Virtual CPU Context Switches per second */
        vcsw = lparstats.vol_virt_cswitch + lparstats.invol_virt_cswitch; 
	     delta_sec = HTIC2SEC(delta_time_base);
        printf("%4.0f ", (double)(vcsw - last_vcsw) / delta_sec);
        
        /* Phantom Interrupts per second */
        printf("%5.0f",(double)(lparstats.phantintrs - last_phint) / delta_sec);
    }
    else if (lparstats.type.b.donate_enabled) { /* print donation-enabled DLPAR specific stats */
        /* Physical Processor Consumed by this partition 
         * (excluding donated and stolen physical processor tics). */
        phys_proc_consumed = (double)delta_purr / (double)delta_time_base;
        printf("%5.2f ", (double)phys_proc_consumed); 

        /* Virtual CPU Context Switches per second */
        vcsw = lparstats.vol_virt_cswitch + lparstats.invol_virt_cswitch; 
	     delta_sec = HTIC2SEC(delta_time_base);
        printf("%5.0f ", (double)(vcsw - last_vcsw) / delta_sec);
    }
    printf("\n");

    save_last_values(&cpustats, &lparstats);
}


#ifdef UTIL_AUTO
void display_lpar_util_auto(int mode,int cpumode,int count,int interval)
{
    float user_core_purr,kern_core_purr,wait_core_purr,idle_core_purr;
    float user_core_spurr,kern_core_spurr,wait_core_spurr,idle_core_spurr,sum_core_spurr;
    u_longlong_t user_ms_purr,kern_ms_purr,wait_ms_purr,idle_ms_purr,sum_ms;
    u_longlong_t user_ms_spurr,kern_ms_spurr,wait_ms_spurr,idle_ms_spurr;
    perfstat_rawdata_t data;
    u_longlong_t delta_purr, delta_time_base;
    double phys_proc_consumed, entitlement, percent_ent, delta_sec;
    perfstat_partition_total_t lparstats;
    static perfstat_cpu_total_t oldt,newt;
    perfstat_cpu_util_t util;
    int rc;

    /* retrieve the metrics */

    /* Print the header for utilization metrics (only once) */
    if (disp_util_header) {
    if(mode==UTIL_PCT)
          printf("\n%5s %5s %6s %6s %5s  \n",
          "%user", "%sys", "%wait", "%idle", "physc");
    else if(mode==UTIL_MS)
          printf("\n%5s   %5s   %6s   %6s   %5s \n",
          "user(ms)", "sys(ms)", "wait(ms)", "idle(ms)", "physc");
    else if(mode==UTIL_CORE)
          printf("\n%5s  %5s  %6s  %6s  %5s  \n",
          "user", "sys", "wait", "idle", "physc");


       disp_util_header = 0;

       /* first iteration, we only read the data, print the header and save the data */
    }

  while(count)
  {
    collect_metrics (&oldt, &lparstats);
    sleep(interval);
    collect_metrics (&newt, &lparstats);

   data.type = UTIL_CPU_TOTAL;
   data.curstat = &newt; data.prevstat= &oldt;
   data.sizeof_data = sizeof(perfstat_cpu_total_t);
   data.cur_elems = 1;
   data.prev_elems = 1;
   rc = perfstat_cpu_util(&data, &util,sizeof(perfstat_cpu_util_t), 1);
   if(rc <= 0)
   {
     perror("Error in perfstat_cpu_util");
     exit(-1);
   }
   delta_time_base =  util.delta_time;



  switch(mode)
   {
   case  UTIL_PCT:
         printf(" %5.1f  %5.1f  %5.1f  %5.1f  %5.4f \n",util.user_pct,util.kern_pct,util.wait_pct,util.idle_pct,util.physical_consumed);
         break;

   case  UTIL_MS:
         user_ms_purr=((util.user_pct*delta_time_base)/100.0);
         kern_ms_purr=((util.kern_pct*delta_time_base)/100.0);
         wait_ms_purr=((util.wait_pct*delta_time_base)/100.0);
         idle_ms_purr=((util.idle_pct*delta_time_base)/100.0);

        if(cpumode==UTIL_PURR)
        {
            printf(" %llu    %llu    %llu    %llu   %5.4f\n",user_ms_purr,kern_ms_purr,wait_ms_purr,idle_ms_purr,util.physical_consumed);
        }
       else if(cpumode==UTIL_SPURR)
       {
            user_ms_spurr=(user_ms_purr*util.freq_pct)/100.0;
            kern_ms_spurr=(kern_ms_purr*util.freq_pct)/100.0;
            wait_ms_spurr=(wait_ms_purr*util.freq_pct)/100.0;
            sum_ms=user_ms_spurr+kern_ms_spurr+wait_ms_spurr;
            idle_ms_spurr=delta_time_base-sum_ms;

            printf(" %llu    %llu    %llu    %llu    %5.4f \n",user_ms_spurr,kern_ms_spurr,wait_ms_spurr,idle_ms_spurr,util.physical_consumed);

       }
            break;

   case  UTIL_CORE:

           user_core_purr=((util.user_pct*util.physical_consumed)/100.0);
           kern_core_purr=((util.kern_pct*util.physical_consumed)/100.0);
           wait_core_purr=((util.wait_pct*util.physical_consumed)/100.0);
           idle_core_purr=((util.idle_pct*util.physical_consumed)/100.0);

           user_core_spurr=((user_core_purr*util.freq_pct)/100.0);
           kern_core_spurr=((kern_core_purr*util.freq_pct)/100.0);
           wait_core_spurr=((wait_core_purr*util.freq_pct)/100.0);
           
           if(cpumode==UTIL_PURR)
           {
            printf("%5.4f   %5.4f   %5.4f   %5.4f   %5.4f\n",user_core_purr,kern_core_purr,wait_core_purr,idle_core_purr,util.physical_consumed);
           }
           else if(cpumode==UTIL_SPURR)
           {
           sum_core_spurr=user_core_spurr+kern_core_spurr+wait_core_spurr;
           idle_core_spurr=util.physical_consumed-sum_core_spurr;

            printf("%5.4f   %5.4f   %5.4f   %5.4f   %5.4f \n",user_core_spurr,kern_core_spurr,wait_core_spurr,idle_core_spurr,util.physical_consumed);
           }
           break;

           default:
           printf("In correct usage\n");
           return;

}
count--;
}
}
#endif
The program displays an output that is similar to the following example output:
%user  %sys  %wait  %idle physc %entc lbusy vcsw phint
-----  ----  -----  ----- ----- ----- ----- ---- -----
  0.1   0.4    0.0   99.5  0.01   1.2   0.2  278     0
  0.0   0.3    0.0   99.7  0.01   0.8   0.2  271     0
  0.0   0.2    0.0   99.8  0.01   0.5   0.1  180     0
  0.0   0.2    0.0   99.8  0.01   0.6   0.1  184     0
  0.0   0.2    0.0   99.7  0.01   0.6   0.1  181     0
  0.0   0.2    0.0   99.8  0.01   0.6   0.1  198     0
  0.0   0.2    0.0   99.8  0.01   0.7   0.2  189     0
  2.1   3.3    0.0   94.6  0.09   8.7   2.1  216     0
  0.0   0.2    0.0   99.8  0.01   0.7   0.1  265     0