PWD: /www/data-lst1/unixsoft/unixsoft/kaempfer/.public_html
Running in File Mode
Relative path: ./../../../.././../../bin/daxstat
Real path: /usr/bin/daxstat
#!/usr/bin/python3.7 -uEs

import solaris.no_site_packages

#
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
#

"""daxstat - Reports Database Analytics Accelerator (DAX) statistics"""

import argparse
import errno
import gettext
import locale
import sys
import time

from libsstore import SStore, SSException, ss_range
from libsstore.const import (SS_TIME_NOW, SS_SHOW_UNSTABLE,
                             SS_SHOW_UNBROWSABLE, SS_RET_WARNING,
                             SS_WARN_NOENT, SS_RET_SUCCESS, SS_RET_ERROR,
                             SS_RET_INVALID_COMMAND)
from sstore_util import (format_warnings, handle_daemon_exception,
                         pprint_warnings, setlocale, _)

gettext.install('daxstat')

MEGA = 1024 * 1024
MAXNUM = 10000
NANOSEC = 1000000000
NO_VAL = "no_val"
SSTORE = SStore()

ALL = _("Output is aggregated into a single 'all-cpu' or 'all-dax' or "
        "'all-queue' value.")
COUNT = _("Only print count reports.")
CPU = _("Display per-cpu statistics for specified cpus.")
DAX = _("Display per-dax statistics for specified dax units. This is "
        "the default option.")
INTERVAL = _("Report once each interval seconds.")
EXTENDED = _("Display extended dax statistics. This is used with the "
             "-d option.")
QUEUE = _("Display per-queue statistics for specified queues in "
          "specified dax units.")
TIMESTAMP = _("Specify u for a printed representation of the internal "
              "representation of time. Specify d for standard date format.")

USAGE = """
    {0} [[-T u | d] [-c processor_id] | [-d dax_id [-q queue_id]] |
        [-[x]d dax_id]] [interval [count]]
    {0} -a [[-T u | d] [-c] | [-d [-q]] | [-[x]d] [interval [count]]
""".format("/usr/bin/daxstat")


class CpuVars(object):
    """ CPU Variables """

    def __init__(self):
        self.tot_res = 0
        self.fails = 0
        self.submit_calls = 0
        self.submit_time = 0
        self.successes = 0

    # populate_vars - Populate CPU variables with latest queried data.
    # Input: res_data - Statistics obtained for the resource being queried
    # Returns: None
    #
    def populate_vars(self, res_data):
        """ Populate CPU variables with latest queried data. """
        for (ssid, _tstmp, valp) in res_data:
            if ssid.endswith('dax_submit_calls'):
                self.submit_calls = valp.value()
            elif ssid.endswith('dax_submit_time'):
                self.submit_time = valp.value()
            elif ssid.endswith('dax_submit_eok'):
                self.successes = valp.value()
            elif ssid.endswith('dax_submit_einval'):
                self.fails = valp.value()

    # display_all_cpu_data - Prints aggregated data for all cpus
    #
    # Display information for all CPUs:
    #   calls      number of dax_submit fast trap calls
    #   time       total kernel plus hypervisor time in nsecs spent
    #              submitting DAX commands
    #   success    number of ccb_submit hyper-calls that returned success
    #   fallbacks  number of ccb_submit hyper-calls that returned fail
    #
    def display_all_cpu_data(self, data):
        """ Displays all CPU DAX related data. """
        tot = data['tot']
        prev = data['prev']
        fms = data['fms']
        tot.submit_calls += self.submit_calls
        tot.submit_time += self.submit_time
        tot.successes += self.successes
        tot.fails += self.fails
        if tot.tot_res > 0:
            delta = calc_cpu_delta(tot, prev)
            print(fms.format('ALL', delta[0], delta[1], delta[2], delta[3]))

    # display_single_cpu_data - Prints data for a single cpu
    def display_single_cpu_data(self, cpu_id, data):
        """ Displays per-CPU DAX related data.
""" prev = data['prev'] fms = data['fms'] delta = calc_cpu_delta(self, prev) print(fms.format(cpu_id, delta[0], delta[1], delta[2], delta[3])) class DaxVars(object): """ DAX Unit Variables """ def __init__(self): self.tot_res = 0 self.dic = {'bytes_in': 0, 'bytes_out': 0, 'cmds': 0, 'crtime': 0, 'cycles': 0, 'data_cycles': 0, 'fails': 0, 'frequency': 0, 'lookup_cycles': 0, 'snaptime': 0} # populate_vars - Populate DAX variables with latest queried data. # Input: res_data - Statistics obtained for the resource being queried # Returns: None # def populate_vars(self, res_data): """ Populate DAX variables with latest queried data. """ for (ssid, _tstmp, valp) in res_data: if ssid.endswith('dax_commands'): self.dic['cmds'] = valp.value() elif ssid.endswith('dax_fail'): self.dic['fails'] = valp.value() elif ssid.endswith('dax_cycles'): self.dic['cycles'] = valp.value() elif ssid.endswith('dax_bytes_in'): self.dic['bytes_in'] = valp.value() elif ssid.endswith('dax_bytes_out'): self.dic['bytes_out'] = valp.value() elif ssid.endswith('dax_data_cycles'): self.dic['data_cycles'] = valp.value() elif ssid.endswith('dax_lookup_cycles'): self.dic['lookup_cycles'] = valp.value() elif ssid.endswith('dax_frequency'): self.dic['frequency'] = valp.value() elif ssid.endswith('crtime'): self.dic['crtime'] = valp.value() elif ssid.endswith('snaptime'): self.dic['snaptime'] = valp.value() # display_all_dax_data - Prints an aggregated data of all dax units # # Displays information for all DAX units: # commands Number of commands completed in DAX # fallbacks Number of commands completed and failed in DAX # input Total input processed by DAX in Mbytes or Gbytes per sec. # output Total output produced by DAX in Mbytes or Gbytes per sec. # %busy The percentage of DAX cycles processing commands. # # with -x option: # commands Number of commands completed in DAX # fallbacks Number of commands completed and failed in DAX # input Total input processed by DAX in Mbytes or Gbytes per sec. # output Total output produced by DAX in Mbytes or Gbytes per sec. # %input The percent utilization of DAX read capacity. # %output The percent utilization of DAX of write capacity. # %data The percent utilization of DAX capacity to process data. # %lookup The percent utilization of DAX capacity to perform lookups. # %busy The percentage of DAX cycles processing commands. # def display_all_dax_data(self, data, first_pass, extended): """ Displays all DAX data. """ tot = data['tot'] prev = data['prev'] tot.dic['cmds'] += self.dic['cmds'] tot.dic['fails'] += self.dic['fails'] tot.dic['cycles'] += self.dic['cycles'] tot.dic['bytes_in'] += self.dic['bytes_in'] tot.dic['bytes_out'] += self.dic['bytes_out'] tot.dic['data_cycles'] += self.dic['data_cycles'] tot.dic['lookup_cycles'] += self.dic['lookup_cycles'] tot.dic['snaptime'] = self.dic['snaptime'] # If the end has been reached. Find the aggregate and display. 
        if tot.tot_res > 0:
            if first_pass:
                prev.dic['snaptime'] = self.dic['crtime']
            tot.dic['frequency'] = self.dic['frequency']
            delta = calc_dax_delta(tot, prev)
            b_in = delta[2] / tot.tot_res
            b_out = delta[3] / tot.tot_res
            delta[4] /= tot.tot_res
            delta[5] /= tot.tot_res
            delta[6] /= tot.tot_res
            utils = calc_dax_util(b_in, b_out, delta)
            print_dax_data('ALL', utils, extended)

    # display_single_dax_data - Prints data for a single dax unit
    def display_single_dax_data(self, dax_id, data, first_pass, extended):
        """ Displays per-dax data """
        prev = data['prev']
        if first_pass:
            prev.dic['snaptime'] = self.dic['crtime']
        delta = calc_dax_delta(self, prev)
        utils = calc_dax_util(delta[2], delta[3], delta)
        print_dax_data(dax_id, utils, extended)


class QueueVars(object):
    """ Queue Variables """

    def __init__(self):
        self.tot_res = 0
        self.qry_cmds = 0

    # populate_vars - Populate Queue variables with latest queried data.
    # Input: res_data - Statistics obtained for the resource being queried
    # Returns: None
    #
    def populate_vars(self, res_data):
        """ Populate Queue variables with latest queried data. """
        for (ssid, _tstmp, valp) in res_data:
            if ssid.endswith('dax_qry_commands'):
                self.qry_cmds = valp.value()

    # display_all_queue_data - Prints aggregated data for all queues
    #
    # Displays information for all queues in the DAX unit:
    #   DAX       dax ID
    #   QUEUE     queue ID
    #   commands  number of commands completed
    #
    def display_all_queue_data(self, dax_id, data):
        """ Displays all queue data per DAX unit """
        tot = data['tot']
        prev = data['prev']
        fms = data['fms']
        tot.qry_cmds += self.qry_cmds
        if tot.tot_res > 0:
            qry_cmds = tot.qry_cmds - prev.qry_cmds
            prev.qry_cmds = tot.qry_cmds
            print(fms.format(dax_id, 'ALL', qry_cmds))

    # display_single_queue_data - Prints data for a single queue per dax unit
    def display_single_queue_data(self, ids, data):
        """ Displays per-queue data per DAX unit """
        prev = data['prev']
        fms = data['fms']
        qry_cmds = self.qry_cmds - prev.qry_cmds
        prev.qry_cmds = self.qry_cmds
        if len(ids) > 1:
            print(fms.format(ids[0], ids[1], qry_cmds))
        else:
            print(fms.format(" ", ids[0], qry_cmds))


# calc_cpu_delta - Calculate CPU data diffs
#
# Inputs:
#   tot - A data struct to keep track of aggregated data for the resource
#   prev - Previously obtained data for the resource
# Returns:
#   delta - submit_calls, submit_time, successes, fails
#
def calc_cpu_delta(tot, prev):
    """ Calculate CPU data diffs. """
    submit_calls = tot.submit_calls - prev.submit_calls
    submit_time = tot.submit_time - prev.submit_time
    successes = tot.successes - prev.successes
    fails = tot.fails - prev.fails
    prev.submit_calls = tot.submit_calls
    prev.submit_time = tot.submit_time
    prev.successes = tot.successes
    prev.fails = tot.fails
    delta = [submit_calls, submit_time, successes, fails]
    return delta


# calc_dax_delta - Calculate DAX data diffs
#
# Inputs:
#   tot - A data struct to keep track of aggregated data for the resource
#   prev - Previously obtained data for the resource
# Returns:
#   delta = cmds, fails, bytes_in, bytes_out, data_cycles, lookup_cycles,
#           cycles, snaptime, frequency
#
def calc_dax_delta(tot, prev):
    """ Calculate DAX data diffs.
""" cmds = tot.dic['cmds'] - prev.dic['cmds'] fails = tot.dic['fails'] - prev.dic['fails'] bytes_in = tot.dic['bytes_in'] - prev.dic['bytes_in'] bytes_out = tot.dic['bytes_out'] - prev.dic['bytes_out'] data_cycles = tot.dic['data_cycles'] - prev.dic['data_cycles'] lookup_cycles = tot.dic['lookup_cycles'] - prev.dic['lookup_cycles'] cycles = tot.dic['cycles'] - prev.dic['cycles'] snaptime = tot.dic['snaptime'] - prev.dic['snaptime'] frequency = tot.dic['frequency'] prev.dic['cmds'] = tot.dic['cmds'] prev.dic['fails'] = tot.dic['fails'] prev.dic['bytes_in'] = tot.dic['bytes_in'] prev.dic['bytes_out'] = tot.dic['bytes_out'] prev.dic['data_cycles'] = tot.dic['data_cycles'] prev.dic['lookup_cycles'] = tot.dic['lookup_cycles'] prev.dic['cycles'] = tot.dic['cycles'] prev.dic['snaptime'] = tot.dic['snaptime'] delta = [cmds, fails, bytes_in, bytes_out, data_cycles, lookup_cycles, cycles, snaptime, frequency] return delta # calc_dax_util - Calculate DAX utilization # # Inputs: # b_in - Total input processed by per DAX unit # b_out - Total output produced by per DAX unit # delta = cmds, fails, bytes_in, bytes_out, data_cycles, lookup_cycles, # cycles, snaptime, frequency # Returns: # utils - cmds, fails, b_in_tput, b_out_tput, b_in_util, b_out_util, # d_cyc_util, l_cyc_util, cyc_util # def calc_dax_util(b_in, b_out, delta): """ Calculate DAX utilization. """ cmds = delta[0] fails = delta[1] el_secs = delta[7] / NANOSEC freq = delta[8] if el_secs > 0: b_in_tput = delta[2] / MEGA / el_secs b_out_tput = delta[3] / MEGA / el_secs else: b_in_tput = 0 b_out_tput = 0 if (freq > 0) and (el_secs > 0): b_in_util = int((b_in / 16 * 100) / freq / el_secs) b_out_util = int((b_out / 16 * 100) / freq / el_secs) d_cyc_util = int((delta[4] / 4 * 100) / freq / el_secs) l_cyc_util = int((delta[5] / 4 * 100) / freq / el_secs) cyc_util = int((delta[6] / 4 * 100) / freq / el_secs) if b_in_util > 100: b_in_util = 100 if b_out_util > 100: b_out_util = 100 if d_cyc_util > 100: d_cyc_util = 100 if l_cyc_util > 100: l_cyc_util = 100 if cyc_util > 100: cyc_util = 100 else: b_in_util = 0 b_out_util = 0 d_cyc_util = 0 l_cyc_util = 0 cyc_util = 0 utils = [cmds, fails, b_in_tput, b_out_tput, b_in_util, b_out_util, d_cyc_util, l_cyc_util, cyc_util] return utils # print_dax_data - Print per-dax data # # Inputs: ids, fms, utils, extended # ids - cpu_id or 'ALL' # utils - DAX utilization # extended - A boolean tracking -x option # Returns: None # def print_dax_data(ids, utils, extended): """ Prints dax data. """ if utils[2] / 1024 < 1: input_suffix = 'M' else: utils[2] /= 1024 input_suffix = 'G' if utils[3] / 1024 < 1: output_suffix = 'M' else: utils[3] /= 1024 output_suffix = 'G' if extended: fms1 = "{0:>3} {1:>11} {2:>9} {3:>7.1f}{4:1} {5:>8.1f}{6:1} {7:>6} " fms1 = fms1 + "{8:>7} {9:>5} {10:>7} {11:>5}" print(fms1.format(ids, utils[0], utils[1], utils[2], input_suffix, utils[3], output_suffix, utils[4], utils[5], utils[6], utils[7], utils[8])) else: fms1 = "{0:>3} {1:>11} {2:>9} {3:>7.1f}{4:1} {5:>8.1f}{6:1} {7:>5}" print(fms1.format(ids, utils[0], utils[1], utils[2], input_suffix, utils[3], output_suffix, utils[8])) # pprint_stats - Prints the kstat2 data. # # Inputs: # ids - Resource ID # args - CLI arguments entered by the user # res_data - Statistics obtained for the resource being queried # data: # tot - A data struct to keep track of aggregated data for the resource. # prev - Previously obtained data for the resource. 
#     fms - Printing format string
#   first_pass - A boolean tracking first iteration
# Returns: None
#
def pprint_stats(ids, args, res_data, data, first_pass):
    """ Prints kstat2 data. """
    if args.cpu_id:
        cpu = CpuVars()
        cpu.populate_vars(res_data)
        if args.all:
            # If the user has requested aggregations of values with '-a'.
            cpu.display_all_cpu_data(data)
        else:
            cpu.display_single_cpu_data(ids[0], data)
    elif args.queue_id:
        queue = QueueVars()
        queue.populate_vars(res_data)
        if args.all:
            queue.display_all_queue_data(ids[0], data)
        else:
            queue.display_single_queue_data(ids, data)
    else:
        # Displaying DAX unit data is the default.
        dax = DaxVars()
        dax.populate_vars(res_data)
        if args.all:
            # If the user has requested aggregations of values with the -a
            # option.
            dax.display_all_dax_data(data, first_pass, args.extended)
        else:
            dax.display_single_dax_data(ids[0], data, first_pass,
                                        args.extended)


# capture_and_print - Performs data retrieval for printing.
#
# Inputs:
#   ids - Resource IDs
#   res_desc - A string describing the resource to be queried
#   args - CLI arguments entered by the user
#   data:
#     tot - A data struct to keep track of aggregated data for the resource
#     prev - Previously obtained data for the resource
#     fms - Printing format string
#   first_pass - A boolean tracking first iteration
# Returns: SS_RET_WARNING, SS_RET_ERROR, SS_RET_SUCCESS
#
def capture_and_print(ids, res_desc, args, data, first_pass):
    """ Perform data retrieval for printing. """
    warnings = []
    ret = SS_RET_SUCCESS
    # fatal_warn represents warning types: SS_WARN_INVALID,
    # SS_WARN_INTERNAL, SS_WARN_CONN_BROKEN, SS_WARN_UNAUTHORIZED
    fatal_warn = False
    # recoverable_warn represents warning type SS_WARN_NOENT
    recoverable_warn = False
    trange = ss_range()
    trange.set_times(SS_TIME_NOW, SS_TIME_NOW, 0)
    trange.set_flags(SS_SHOW_UNSTABLE | SS_SHOW_UNBROWSABLE)
    try:
        res_data = SSTORE.data_read(res_desc, trange)
        warnings = format_warnings(SSTORE)
    except SSException as err:
        handle_daemon_exception(err)

    if res_data:
        pprint_stats(ids, args, res_data, data, first_pass)
    else:
        print(_("No data returned to display."))
        ret = SS_RET_ERROR

    for ssid, desc, warn_code in warnings:
        recoverable_warn = True
        if warn_code == SS_WARN_NOENT:
            fatal_warn = True
            recoverable_warn = False
        print(_("Warning ({0}) - {1}".format(ssid, desc)))

    if not recoverable_warn and fatal_warn:
        pprint_warnings(warnings, False)

    return SS_RET_WARNING if warnings else ret


# stat_cpu_report - Obtains statistics for the CPUs being queried.
#
# Inputs:
#   args - CLI arguments entered by the user
#   cpu_ids - an array of CPU IDs
#   fms - Print format string
#   prev - A structure to hold previously obtained data for the resource
#   first_pass - A boolean tracking first iteration
# Returns: SS_RET_SUCCESS, SS_RET_ERROR
#
def stat_cpu_report(args, cpu_ids, fms, prev, first_pass):
    """ Obtains the appropriate statistics for the cpus being queried. """
    # For the -a option, count keeps track of reaching the end of the ids
    # list.
    if first_pass or (args.all is False):
        print(fms.format("CPU", _("calls"), _("time"), _("success"),
                         _("fallbacks")))
    count = 1
    # If one or more CPUs were specified, obtain and print data for them.
    ret = SS_RET_SUCCESS
    tot_cpu = CpuVars()
    for idx in range(0, len(cpu_ids)):
        cpu = cpu_ids[idx]
        if count == len(cpu_ids):
            tot_cpu.tot_res = count
        res_desc = "//:class.kstat//:res.cpu/dax/%d//:*" % cpu
        ids = [cpu]
        data = {'tot': tot_cpu, 'prev': prev[idx], 'fms': fms}
        ret = capture_and_print(ids, res_desc, args, data, first_pass)
        count += 1
        if (ret != SS_RET_SUCCESS) and (ret != SS_RET_WARNING):
            sys.exit(ret)
    return ret


# stat_queue_report - Obtains statistics for the queues being queried.
#
# Inputs:
#   args - CLI arguments entered by the user
#   ids - an array of dax and queue IDs
#   fms - Print format string
#   prev - A structure to hold previously obtained data for the resource
#   first_pass - A boolean tracking first iteration
# Returns: SS_RET_SUCCESS, SS_RET_ERROR
#
def stat_queue_report(args, ids, fms, prev, first_pass):
    """ Obtains the appropriate statistics for the queues being queried. """
    print(fms.format("DAX", _("QUEUE"), _("commands")))
    ret = SS_RET_SUCCESS
    all_queue_iter = 0
    dax_queue_ids = ids['queue_ids']
    dax_ids = ids['dax_ids']
    for idx in range(0, len(dax_ids)):
        cur_queue_iter = 1
        tot_que = QueueVars()
        dax = dax_ids[idx]
        queue_ids = dax_queue_ids[idx]
        for queue in queue_ids:
            if cur_queue_iter == len(queue_ids):
                tot_que.tot_res = cur_queue_iter
            res_desc = "//:class.kstat//:res.dax/%d/%d//:*" % (dax, queue)
            if (cur_queue_iter == 1) or args.all:
                # Only print the DAX ID on the first iteration.
                res_ids = [dax, queue]
            else:
                res_ids = [queue]
            data = {'tot': tot_que, 'prev': prev[all_queue_iter], 'fms': fms}
            ret = capture_and_print(res_ids, res_desc, args, data,
                                    first_pass)
            all_queue_iter += 1
            cur_queue_iter += 1
            if (ret != SS_RET_SUCCESS) and (ret != SS_RET_WARNING):
                sys.exit(ret)
    return ret


# stat_dax_report - Obtains statistics for the DAX units being queried.
#
# Inputs:
#   args - CLI arguments entered by the user
#   dax_ids - an array of DAX IDs.
#   fms - Print format string
#   prev - A structure to hold previously obtained data for the resource.
#   first_pass - A boolean tracking first iteration
# Returns: SS_RET_SUCCESS, SS_RET_ERROR
#
def stat_dax_report(args, dax_ids, fms, prev, first_pass):
    """ Obtains the appropriate statistics for the DAX units being queried. """
    # Print the header for each time interval.
    # For the -a option, count helps to keep track of reaching the end of
    # the list of ids.
    if first_pass or (args.all is False):
        if args.extended:
            print(fms.format("DAX", _("commands"), _("fallbacks"),
                             _("input"), _("output"), _("%input"),
                             _("%output"), _("%data"), _("%lookup"),
                             _("%busy")))
        else:
            print(fms.format("DAX", _("commands"), _("fallbacks"),
                             _("input"), _("output"), _("%busy"),
                             _(" "), _(" ")))
    count = 1
    ret = SS_RET_SUCCESS
    tot_dax = DaxVars()
    for idx in range(0, len(dax_ids)):
        dax = dax_ids[idx]
        if count == len(dax_ids):
            tot_dax.tot_res = count
        res_desc = "//:class.kstat//:res.dax/%d//:*" % dax
        ids = [dax]
        data = {'tot': tot_dax, 'prev': prev[idx], 'fms': fms}
        ret = capture_and_print(ids, res_desc, args, data, first_pass)
        count += 1
        if ret != SS_RET_SUCCESS:
            return ret
    return ret


# find_ids - Creates an array that includes all valid DAX, CPU, or Queue IDs
# within a given range.
#
# Inputs:
#   dax_res - A boolean. True if DAX IDs are being requested.
#   query - The sstore query to obtain resource data.
#   parser - The parser data created by argparse.
#   id_range - A string containing the range of IDs. E.g. "3-20"
# Returns: The array of IDs.
#
def find_ids(dax_res, query, parser, id_range):
    """ Create an array that includes all valid IDs within a given range.
""" # If a specific ID range has been passed in, find the IDs within that # range. Otherwise, all IDs for this resource will be found. if id_range: range_ids = id_range.partition('-') if not range_ids[0].isdigit() or (int(range_ids[0]) > MAXNUM): parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) if range_ids[2]: if not range_ids[2].isdigit() or (int(range_ids[2]) > MAXNUM): parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) query = query + "///:s.[%s" % range_ids[0] + ":%s]//:*" % \ range_ids[2] else: query = query + "/%s//:*" % range_ids[0] else: query = query + "/*" # sstore needs this flag set in order to provide the required data. trange = ss_range() trange.set_times(SS_TIME_NOW, SS_TIME_NOW, 0) trange.set_flags(SS_SHOW_UNSTABLE | SS_SHOW_UNBROWSABLE) ns_iter = SSTORE.namespace_list(query, trange) ns_tuple = list(ns_iter) cur_id = -1 ids = [] tokens = [] for (ssid, _tstmp, _valp) in ns_tuple: tokens = ssid.split('/') if dax_res: # For DAX IDs, need token 5. if len(tokens) > 5: cur_id = int(tokens[5]) else: # If finding CPU or Queue IDs, need token 6. if len(tokens) > 6: cur_id = int(tokens[6]) if (cur_id > -1) and cur_id not in ids: ids.append(cur_id) if len(ids) == 0: print(_("No data available to display.")) sys.exit(SS_RET_ERROR) ids.sort() return ids # parse_arguments - Parse user CLI input. # Input: None # Returns: # args - CLI arguments. # parser - Data specific to the parsed CLI options. # def parse_arguments(): """ Parse arguments to get necessary settings. """ parser = argparse.ArgumentParser(usage=USAGE) parser.add_argument("-a", dest="all", action="store_true", default=False, help=ALL) parser.add_argument("-c", dest="cpu_id", nargs='?', help=CPU, const=NO_VAL, metavar="cpu_id") parser.add_argument("-d", dest="dax_id", nargs='?', help=DAX, const=NO_VAL, metavar="dax_id") parser.add_argument("-x", dest="extended", action="store_true", default=False, help=EXTENDED) parser.add_argument("-q", dest="queue_id", nargs='?', help=QUEUE, const=NO_VAL, metavar="queue_id") parser.add_argument("-T", dest="timestamp", nargs=1, help=TIMESTAMP, metavar="u | d") parser.add_argument("interval", type=int, default=0, nargs='?', help=INTERVAL) parser.add_argument("count", type=int, default=0, nargs='?', help=COUNT) args = parser.parse_args() # Users have to choose a processor ID or DAX ID but not both. if args.dax_id and args.cpu_id: parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) # Users can't specify a Queue ID without also specifying a DAX ID. if args.queue_id and not args.dax_id: parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) # Extended statistics require -d DAX ID. if args.extended and not args.dax_id: parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) # Extended statistics can't be specified with Queue ID or CPU ID if args.extended and (args.cpu_id or args.queue_id): parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) return args, parser # derive_cpu_opts - Derive CPU related options from the CLI input. # # Input: # args - CLI arguments. # parser - Data specific to the parsed CLI options. # Returns: cpu_ids - An array containing the IDs of all CPUs being queried. # def derive_cpu_opts(args, parser): """ Derive CPU related options from the CLI input. """ cpu_ids = [] if args.cpu_id: # Display per-cpu statistics for specified cpus. if args.all: # Make a small adjustment for argparse. E.g.: For this command: # daxstat -ac 2 5 # argparse makes cpu_id == 2, interval == 5, and count == 0. # We need cpu_id == all cpus, interval == 2, and count == 5. 
            if str(args.cpu_id).isdigit() and str(args.interval).isdigit():
                args.count = int(args.interval)
                args.interval = int(args.cpu_id)
                specified_ids = None
            elif args.cpu_id == NO_VAL:
                specified_ids = None
            else:
                parser.print_help()
                sys.exit(SS_RET_INVALID_COMMAND)
        else:
            specified_ids = args.cpu_id
        query = "//:class.kstat//:res.cpu/dax"
        cpu_ids = find_ids(None, query, parser, specified_ids)
    return cpu_ids


# derive_dax_opts - Derive DAX related options from the CLI input.
#
# Input:
#   args - CLI arguments.
#   parser - Data specific to the parsed CLI options.
# Returns:
#   dax_ids - An array containing the IDs of all DAX units being queried.
#   dax_queue_ids - An array containing all Queue IDs per dax_id.
#
def derive_dax_opts(args, parser):
    """ Derive DAX related options from the CLI input. """
    dax_ids = []
    queue_ids = []
    dax_queue_ids = []
    dax_res = True
    if args.dax_id:
        # Display per-dax statistics for specified dax units.
        if args.all:
            if args.dax_id == 'q':
                # If the user entered 'daxstat -adq', argparse will get
                # confused and overlook the request for queue data. We fix
                # this so that it's clear that queue data was requested.
                args.queue_id = 'q'
            elif args.dax_id == 'x':
                args.extended = True
            elif str(args.dax_id).isdigit() and str(args.interval).isdigit():
                # Interval and count get adjusted as above.
                args.count = int(args.interval)
                args.interval = int(args.dax_id)
            elif args.dax_id != NO_VAL:
                parser.print_help()
                sys.exit(SS_RET_INVALID_COMMAND)
            specified_ids = None
        else:
            specified_ids = args.dax_id
        query = "//:class.kstat//:res.dax"
        dax_ids = find_ids(dax_res, query, parser, specified_ids)
        if args.queue_id:
            # Display per-queue statistics for specified DAX units.
            # A queue ID has to be specified with a DAX ID.
            if args.all:
                if (args.queue_id == NO_VAL) or (args.queue_id == 'q'):
                    specified_ids = None
                else:
                    parser.print_help()
                    sys.exit(SS_RET_INVALID_COMMAND)
            else:
                specified_ids = args.queue_id
            # Get queue_ids for a specified DAX ID.
            for dax in dax_ids:
                query = "//:class.kstat//:res.dax/" + str(dax)
                queue_ids = find_ids(None, query, parser, specified_ids)
                dax_queue_ids.append(queue_ids)
    else:
        # Default behavior: Print data for all DAX units.
        query = "//:class.kstat//:res.dax"
        dax_ids = find_ids(dax_res, query, parser, None)
    return dax_ids, dax_queue_ids


# allocate_space - Prints the appropriate heading and allocates enough
# space to keep track of previous values if users want > 1 iteration.
#
# Input:
#   cpu_ids - An array containing the IDs of all CPUs being queried.
#   dax_ids - An array containing the IDs of all DAX units being queried.
#   dax_queue_ids - An array containing the IDs of all queues being queried.
# Returns:
#   fms - The printing format string.
#   prev - An array for holding the previous round of data.
#
def allocate_space(cpu_ids, dax_ids, dax_queue_ids, extended):
    """ Allocate space to hold resource statistics.
""" prev = [] i = 0 if len(cpu_ids) > 0: fms = "{0:>4} {1:>12} {2:>13} {3:>10} {4:>10}" while i in range(0, len(cpu_ids)): prev.append(CpuVars()) i += 1 elif len(dax_queue_ids) > 0: fms = "{0:>3} {1:>8} {2:>12}" while i in range(0, len(dax_ids)): j = 0 queue_ids = dax_queue_ids[i] while j in range(0, len(queue_ids)): prev.append(QueueVars()) j += 1 i += 1 elif len(dax_ids) == 0: print(_("No data available to display.")) sys.exit(SS_RET_ERROR) else: # Center each column header and data if extended: fms = "{0:>3} {1:>11} {2:>9} {3:>8} {4:>9} {5:>6} " fms = fms + "{6:>7} {7:>5} {8:>7} {9:>5}" else: fms = "{0:>3} {1:>11} {2:>9} {3:>8} {4:>9} {5:>5}" while i in range(0, len(dax_ids)): prev.append(DaxVars()) i += 1 return fms, prev # process_opts - Process CLI command line options input by the user. # Inputs: None # Returns: sstore return codes. E.g.: SS_RET_SUCCESS, SS_RET_ERROR, # SS_RET_INVALID_COMMAND, SS_RET_WARNING # def process_opts(): """ Process CLI command line options input by the user. """ args, parser = parse_arguments() cpu_ids = derive_cpu_opts(args, parser) dax_ids, dax_queue_ids = derive_dax_opts(args, parser) # Ensure that 'interval' and 'count' are sane numbers. If neither # an interval or count have been specified, show output only one time. if (args.count < 0) or (args.count > MAXNUM) or (args.interval == 0): args.count = 1 if (args.interval <= 0) or (args.interval > MAXNUM): args.interval = 1 fms, prev = allocate_space(cpu_ids, dax_ids, dax_queue_ids, args.extended) # Loop indefinitely until cntl-c is entered (args.count == 0) and # (args.interval > 0) or loop the requested number of times # (i in range 0 to args.count). first_pass = True i = 0 while ((i in range(0, args.count)) or ((args.interval > 0) and (args.count == 0))): # The 'u' option shows the number of seconds since the epoch. The 'q' # option shows the standard date format. if args.timestamp: if args.timestamp[0] == 'u': print(int(time.time())) elif args.timestamp[0] == 'd': print(time.strftime("%a %b %d %H:%M:%S %Z %Y", time.localtime())) else: parser.print_help() sys.exit(SS_RET_INVALID_COMMAND) if first_pass is False: time.sleep(args.interval) if args.cpu_id: ret = stat_cpu_report(args, cpu_ids, fms, prev, first_pass) elif args.queue_id: ids = {'dax_ids': dax_ids, 'queue_ids': dax_queue_ids} ret = stat_queue_report(args, ids, fms, prev, first_pass) else: ret = stat_dax_report(args, dax_ids, fms, prev, first_pass) if (ret != SS_RET_SUCCESS) and (ret != SS_RET_WARNING): sys.exit(ret) i += 1 first_pass = False return SS_RET_SUCCESS # main - daxstat main function # Inputs: None # Returns: sstore return code # def main(): """ daxstat main function """ try: return process_opts() except SSException as err: handle_daemon_exception(err) return SS_RET_ERROR if __name__ == "__main__": setlocale(locale.LC_ALL, "", None) try: sys.exit(main()) except KeyboardInterrupt: sys.exit(SS_RET_ERROR) except IOError as err: if err.errno == errno.EPIPE: pass except SystemExit: raise