update
This commit is contained in:
135
bibliotheque/files/fusioninventory/etc-agent.cfg
Normal file
135
bibliotheque/files/fusioninventory/etc-agent.cfg
Normal file
@ -0,0 +1,135 @@
|
||||
################################################################################
|
||||
#
|
||||
# __ __ ______ __
|
||||
# / | / | / \ / |
|
||||
# _$$ |_ $$/ ______ _______ ______ /$$$$$$ | _____ ____ $$/ _______ ______
|
||||
# / $$ | / | / \ / |______ / \ $$ |_ $$/______ / \/ \ / |/ \ / \
|
||||
# $$$$$$/ $$ |/$$$$$$ |/$$$$$$$// |/$$$$$$ |$$ | / |$$$$$$ $$$$ |$$ |$$$$$$$ |/$$$$$$ |
|
||||
# $$ | __ $$ |$$ | $$ |$$ \$$$$$$/ $$ | $$ |$$$$/ $$$$$$/ $$ | $$ | $$ |$$ |$$ | $$ |$$ $$ |
|
||||
# $$ |/ |$$ |$$ |__$$ | $$$$$$ | $$ \__$$ |$$ | $$ | $$ | $$ |$$ |$$ | $$ |$$$$$$$$/
|
||||
# $$ $$/ $$ |$$ $$/ / $$/ $$ $$/ $$ | $$ | $$ | $$ |$$ |$$ | $$ |$$ |
|
||||
# $$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$/ $$/ $$/ $$/ $$/ $$/ $$/ $$$$$$$/
|
||||
# $$ |
|
||||
# $$ |
|
||||
# $$/
|
||||
#
|
||||
################################################################################
|
||||
# fusioninventory agent configuration
|
||||
|
||||
# all defined values match default
|
||||
# all commented values are examples
|
||||
|
||||
|
||||
#
|
||||
# Target definition options
|
||||
#
|
||||
|
||||
# send tasks results to an OCS server
|
||||
#server = http://server.domain.com/ocsinventory
|
||||
# send tasks results to a FusionInventory for GLPI server
|
||||
#server = http://server.domain.com/glpi/plugins/fusioninventory/
|
||||
server = http://glpi.EXEMPLE.LOCAL/plugins/fusioninventory/
|
||||
# write tasks results in a directory
|
||||
#local = /tmp
|
||||
|
||||
#
|
||||
# Task definition options
|
||||
#
|
||||
|
||||
# disable software deployment tasks
|
||||
#no-task = deploy
|
||||
#tasks = inventory,deploy,inventory
|
||||
|
||||
#
|
||||
# Target scheduling options
|
||||
#
|
||||
|
||||
# maximum delay before first target, in seconds
|
||||
delaytime = 3600
|
||||
# do not contact the target before next scheduled time
|
||||
lazy = 0
|
||||
|
||||
#
|
||||
# Inventory task specific options
|
||||
#
|
||||
|
||||
# do not list local printers
|
||||
# no-category = printer
|
||||
# allow to scan user home directories
|
||||
scan-homedirs = 0
|
||||
# allow to scan user profiles
|
||||
scan-profiles = 0
|
||||
# save the inventory as HTML
|
||||
html = 0
|
||||
# timeout for inventory modules execution
|
||||
backend-collect-timeout = 30
|
||||
# always send data to server
|
||||
force = 0
|
||||
# additional inventory content file
|
||||
additional-content =
|
||||
|
||||
#
|
||||
# Package deployment task specific options
|
||||
#
|
||||
|
||||
# do not use peer to peer to download files
|
||||
no-p2p = 0
|
||||
|
||||
#
|
||||
# Network options
|
||||
#
|
||||
|
||||
# proxy address
|
||||
proxy =
|
||||
# user name for server authentication
|
||||
user =
|
||||
# password for server authentication
|
||||
password =
|
||||
# CA certificates directory
|
||||
ca-cert-dir =
|
||||
# CA certificates file
|
||||
ca-cert-file =
|
||||
# do not check server SSL certificate
|
||||
no-ssl-check = 0
|
||||
# connection timeout, in seconds
|
||||
timeout = 180
|
||||
|
||||
#
|
||||
# Web interface options
|
||||
#
|
||||
|
||||
# disable embedded web server
|
||||
no-httpd = 0
|
||||
# network interface to listen to
|
||||
httpd-ip =
|
||||
# network port to listen to
|
||||
httpd-port = 62354
|
||||
# trust requests without authentication token
|
||||
httpd-trust =
|
||||
|
||||
#
|
||||
# Logging options
|
||||
#
|
||||
|
||||
# Logger backend, either Stderr, File or Syslog (Stderr)
|
||||
logger = stderr
|
||||
# log file
|
||||
#logfile = /var/log/fusioninventory.log
|
||||
# maximum log file size, in MB
|
||||
#logfile-maxsize = 0
|
||||
# Syslog facility
|
||||
logfacility = LOG_USER
|
||||
# Use color in the console
|
||||
color = 0
|
||||
|
||||
#
|
||||
# Execution mode options
|
||||
#
|
||||
|
||||
# add given tag to inventory results
|
||||
tag =
|
||||
# debug mode
|
||||
debug = 0
|
||||
|
||||
# time to wait to reload config (0 means no reload, it's default value)
|
||||
# conf-reload-interval = 0
|
34
bibliotheque/files/nagios/etc-centreon.cfg
Normal file
34
bibliotheque/files/nagios/etc-centreon.cfg
Normal file
@ -0,0 +1,34 @@
|
||||
################################################################################
|
||||
#
|
||||
# __ __ ______ __
|
||||
# / | / | / \ / |
|
||||
# _$$ |_ $$/ ______ _______ ______ /$$$$$$ | _____ ____ $$/ _______ ______
|
||||
# / $$ | / | / \ / |______ / \ $$ |_ $$/______ / \/ \ / |/ \ / \
|
||||
# $$$$$$/ $$ |/$$$$$$ |/$$$$$$$// |/$$$$$$ |$$ | / |$$$$$$ $$$$ |$$ |$$$$$$$ |/$$$$$$ |
|
||||
# $$ | __ $$ |$$ | $$ |$$ \$$$$$$/ $$ | $$ |$$$$/ $$$$$$/ $$ | $$ | $$ |$$ |$$ | $$ |$$ $$ |
|
||||
# $$ |/ |$$ |$$ |__$$ | $$$$$$ | $$ \__$$ |$$ | $$ | $$ | $$ |$$ |$$ | $$ |$$$$$$$$/
|
||||
# $$ $$/ $$ |$$ $$/ / $$/ $$ $$/ $$ | $$ | $$ | $$ |$$ |$$ | $$ |$$ |
|
||||
# $$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$/ $$/ $$/ $$/ $$/ $$/ $$/ $$$$$$$/
|
||||
# $$ |
|
||||
# $$ |
|
||||
# $$/
|
||||
#
|
||||
################################################################################
|
||||
# COMMAND DEFINITIONS
|
||||
# Syntax:
|
||||
# command[<command_name>]=<command_line>
|
||||
#
|
||||
command[users]=/usr/lib64/nagios/plugins/check_users -w 5 -c 10
|
||||
command[load]=/usr/lib64/nagios/plugins/check_load -w 15,10,5 -c 30,25,20
|
||||
command[check_load]=/usr/lib64/nagios/plugins/check_load -w 15,10,5 -c 30,25,20
|
||||
command[swap]=/usr/lib64/nagios/plugins/check_swap -w 20% -c 10%
|
||||
command[root_disk]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p / -m
|
||||
command[usr_disk]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p /usr -m
|
||||
command[var_disk]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p /var -m
|
||||
command[zombie_procs]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z
|
||||
command[total_procs]=/usr/lib64/nagios/plugins/check_procs -w 190 -c 200
|
||||
command[proc_named]=/usr/lib64/nagios/plugins/check_procs -w 1: -c 1:2 -C named
|
||||
command[proc_crond]=/usr/lib64/nagios/plugins/check_procs -w 1: -c 1:5 -C crond
|
||||
command[proc_syslogd]=/usr/lib64/nagios/plugins/check_procs -w 1: -c 1:2 -C syslog-ng
|
||||
command[proc_rsyslogd]=/usr/lib64/nagios/plugins/check_procs -w 1: -c 1:2 -C rsyslogd
|
||||
command[check_yum]=/usr/lib64/nagios/plugins/check_yum.py -t 60
|
380
bibliotheque/files/nrpe/etc-nrpe.cfg
Normal file
380
bibliotheque/files/nrpe/etc-nrpe.cfg
Normal file
@ -0,0 +1,380 @@
|
||||
################################################################################
|
||||
#
|
||||
# __ __ ______ __
|
||||
# / | / | / \ / |
|
||||
# _$$ |_ $$/ ______ _______ ______ /$$$$$$ | _____ ____ $$/ _______ ______
|
||||
# / $$ | / | / \ / |______ / \ $$ |_ $$/______ / \/ \ / |/ \ / \
|
||||
# $$$$$$/ $$ |/$$$$$$ |/$$$$$$$// |/$$$$$$ |$$ | / |$$$$$$ $$$$ |$$ |$$$$$$$ |/$$$$$$ |
|
||||
# $$ | __ $$ |$$ | $$ |$$ \$$$$$$/ $$ | $$ |$$$$/ $$$$$$/ $$ | $$ | $$ |$$ |$$ | $$ |$$ $$ |
|
||||
# $$ |/ |$$ |$$ |__$$ | $$$$$$ | $$ \__$$ |$$ | $$ | $$ | $$ |$$ |$$ | $$ |$$$$$$$$/
|
||||
# $$ $$/ $$ |$$ $$/ / $$/ $$ $$/ $$ | $$ | $$ | $$ |$$ |$$ | $$ |$$ |
|
||||
# $$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$/ $$/ $$/ $$/ $$/ $$/ $$/ $$$$$$$/
|
||||
# $$ |
|
||||
# $$ |
|
||||
# $$/
|
||||
#
|
||||
################################################################################
|
||||
|
||||
# LOG FACILITY
|
||||
# The syslog facility that should be used for logging purposes.
|
||||
|
||||
log_facility=daemon
|
||||
|
||||
|
||||
|
||||
# LOG FILE
|
||||
# If a log file is specified in this option, nrpe will write to
|
||||
# that file instead of using syslog.
|
||||
|
||||
#log_file=/var/run/nrpe.log
|
||||
|
||||
|
||||
|
||||
# DEBUGGING OPTION
|
||||
# This option determines whether or not debugging messages are logged to the
|
||||
# syslog facility.
|
||||
# Values: 0=debugging off, 1=debugging on
|
||||
|
||||
debug=0
|
||||
|
||||
|
||||
|
||||
# PID FILE
|
||||
# The name of the file in which the NRPE daemon should write its process ID
|
||||
# number. The file is only written if the NRPE daemon is started by the root
|
||||
# user and is running in standalone mode.
|
||||
|
||||
pid_file=/var/run/nrpe/nrpe.pid
|
||||
|
||||
|
||||
|
||||
# PORT NUMBER
|
||||
# Port number we should wait for connections on.
|
||||
# NOTE: This must be a non-privileged port (i.e. > 1024).
|
||||
# NOTE: This option is ignored if NRPE is running under either inetd
|
||||
# or xinetd or via systemd. [In systemd please use
|
||||
# systemctl edit nrpe.service
|
||||
# to set up the port.
|
||||
|
||||
server_port=5666
|
||||
|
||||
|
||||
|
||||
# SERVER ADDRESS
|
||||
# Address that nrpe should bind to in case there are more than one interface
|
||||
# and you do not want nrpe to bind on all interfaces.
|
||||
# NOTE: This option is ignored if NRPE is running under either inetd or xinetd
|
||||
# or with systemd. Please start by hand.
|
||||
|
||||
#server_address=127.0.0.1
|
||||
|
||||
|
||||
|
||||
# LISTEN QUEUE SIZE
|
||||
# Listen queue size (backlog) for serving incoming connections.
|
||||
# You may want to increase this value under high load.
|
||||
|
||||
#listen_queue_size=5
|
||||
|
||||
|
||||
|
||||
# NRPE USER
|
||||
# This determines the effective user that the NRPE daemon should run as.
|
||||
# You can either supply a username or a UID.
|
||||
#
|
||||
# NOTE: This option is ignored if NRPE is running under either inetd
|
||||
# or xinetd or via systemd. [In systemd please use
|
||||
# systemctl edit nrpe.service
|
||||
# to set up the group.
|
||||
|
||||
nrpe_user=nrpe
|
||||
|
||||
|
||||
|
||||
# NRPE GROUP
|
||||
# This determines the effective group that the NRPE daemon should run as.
|
||||
# You can either supply a group name or a GID.
|
||||
#
|
||||
# NOTE: This option is ignored if NRPE is running under either inetd
|
||||
# or xinetd or via systemd. [In systemd please use
|
||||
# systemctl edit nrpe.service
|
||||
# to set up the user.
|
||||
|
||||
nrpe_group=nrpe
|
||||
|
||||
|
||||
|
||||
# ALLOWED HOST ADDRESSES
|
||||
# This is an optional comma-delimited list of IP address or hostnames
|
||||
# that are allowed to talk to the NRPE daemon. Network addresses with a bit mask
|
||||
# (i.e. 192.168.1.0/24) are also supported. Hostname wildcards are not currently
|
||||
# supported.
|
||||
#
|
||||
# Note: The daemon only does rudimentary checking of the client's IP
|
||||
# address. I would highly recommend adding entries in your /etc/hosts.allow
|
||||
# file to allow only the specified host to connect to the port
|
||||
# you are running this daemon on.
|
||||
#
|
||||
# NOTE: This option is ignored if NRPE is running under either inetd
|
||||
# or xinetd or systemd
|
||||
|
||||
allowed_hosts=127.0.0.1,::1,10.59.1.11,centreon,centreon.EXEMPLE.LOCAL
|
||||
|
||||
|
||||
|
||||
# COMMAND ARGUMENT PROCESSING
|
||||
# This option determines whether or not the NRPE daemon will allow clients
|
||||
# to specify arguments to commands that are executed. This option only works
|
||||
# if the daemon was configured with the --enable-command-args configure script
|
||||
# option.
|
||||
#
|
||||
# *** ENABLING THIS OPTION IS A SECURITY RISK! ***
|
||||
# Read the SECURITY file for information on some of the security implications
|
||||
# of enabling this variable.
|
||||
#
|
||||
# Values: 0=do not allow arguments, 1=allow command arguments
|
||||
|
||||
dont_blame_nrpe=1
|
||||
|
||||
|
||||
|
||||
# BASH COMMAND SUBSTITUTION
|
||||
# This option determines whether or not the NRPE daemon will allow clients
|
||||
# to specify arguments that contain bash command substitutions of the form
|
||||
# $(...). This option only works if the daemon was configured with both
|
||||
# the --enable-command-args and --enable-bash-command-substitution configure
|
||||
# script options.
|
||||
#
|
||||
# *** ENABLING THIS OPTION IS A HIGH SECURITY RISK! ***
|
||||
# Read the SECURITY file for information on some of the security implications
|
||||
# of enabling this variable.
|
||||
#
|
||||
# Values: 0=do not allow bash command substitutions,
|
||||
# 1=allow bash command substitutions
|
||||
|
||||
allow_bash_command_substitution=0
|
||||
|
||||
|
||||
|
||||
# COMMAND PREFIX
|
||||
# This option allows you to prefix all commands with a user-defined string.
|
||||
# A space is automatically added between the specified prefix string and the
|
||||
# command line from the command definition.
|
||||
#
|
||||
# *** THIS EXAMPLE MAY POSE A POTENTIAL SECURITY RISK, SO USE WITH CAUTION! ***
|
||||
# Usage scenario:
|
||||
# Execute restricted commands using sudo. For this to work, you need to add
|
||||
# the nagios user to your /etc/sudoers. An example entry for allowing
|
||||
# execution of the plugins from might be:
|
||||
#
|
||||
# nagios ALL=(ALL) NOPASSWD: /usr/lib/nagios/plugins/
|
||||
#
|
||||
# This lets the nagios user run all commands in that directory (and only them)
|
||||
# without asking for a password. If you do this, make sure you don't give
|
||||
# random users write access to that directory or its contents!
|
||||
|
||||
# command_prefix=/usr/bin/sudo
|
||||
|
||||
|
||||
# MAX COMMANDS
|
||||
# This specifies how many children processes may be spawned at any one
|
||||
# time, essentially limiting the fork()s that occur.
|
||||
# Default (0) is set to unlimited
|
||||
# max_commands=0
|
||||
|
||||
|
||||
|
||||
# COMMAND TIMEOUT
|
||||
# This specifies the maximum number of seconds that the NRPE daemon will
|
||||
# allow plugins to finish executing before killing them off.
|
||||
|
||||
command_timeout=60
|
||||
|
||||
|
||||
|
||||
# CONNECTION TIMEOUT
|
||||
# This specifies the maximum number of seconds that the NRPE daemon will
|
||||
# wait for a connection to be established before exiting. This is sometimes
|
||||
# seen where a network problem stops the SSL being established even though
|
||||
# all network sessions are connected. This causes the nrpe daemons to
|
||||
# accumulate, eating system resources. Do not set this too low.
|
||||
|
||||
connection_timeout=300
|
||||
|
||||
|
||||
|
||||
# WEAK RANDOM SEED OPTION
|
||||
# This directive allows you to use SSL even if your system does not have
|
||||
# a /dev/random or /dev/urandom (on purpose or because the necessary patches
|
||||
# were not applied). The random number generator will be seeded from a file
|
||||
# which is either a file pointed to by the environment variable $RANDFILE
|
||||
# or $HOME/.rnd. If neither exists, the pseudo random number generator will
|
||||
# be initialized and a warning will be issued.
|
||||
# Values: 0=only seed from /dev/[u]random, 1=also seed from weak randomness
|
||||
|
||||
#allow_weak_random_seed=1
|
||||
|
||||
|
||||
|
||||
# SSL/TLS OPTIONS
|
||||
# These directives allow you to specify how to use SSL/TLS.
|
||||
|
||||
# SSL VERSION
|
||||
# This can be any of: SSLv2 (only use SSLv2), SSLv2+ (use any version),
|
||||
# SSLv3 (only use SSLv3), SSLv3+ (use SSLv3 or above), TLSv1 (only use
|
||||
# TLSv1), TLSv1+ (use TLSv1 or above), TLSv1.1 (only use TLSv1.1),
|
||||
# TLSv1.1+ (use TLSv1.1 or above), TLSv1.2 (only use TLSv1.2),
|
||||
# TLSv1.2+ (use TLSv1.2 or above)
|
||||
# If an "or above" version is used, the best will be negotiated. So if both
|
||||
# ends are able to do TLSv1.2 and you specify SSLv2, you will get TLSv1.2.
|
||||
# If you are using openssl 1.1.0 or above, the SSLv2 options are not available.
|
||||
|
||||
#ssl_version=SSLv2+
|
||||
|
||||
# SSL USE ADH
|
||||
# This is for backward compatibility and is DEPRECATED. Set to 1 to enable
|
||||
# ADH or 2 to require ADH. 1 is currently the default but will be changed
|
||||
# in a later version.
|
||||
|
||||
#ssl_use_adh=1
|
||||
|
||||
# SSL CIPHER LIST
|
||||
# This lists which ciphers can be used. For backward compatibility, this
|
||||
# defaults to 'ssl_cipher_list=ALL:!MD5:@STRENGTH' for < OpenSSL 1.1.0,
|
||||
# and 'ssl_cipher_list=ALL:!MD5:@STRENGTH:@SECLEVEL=0' for OpenSSL 1.1.0 and
|
||||
# greater.
|
||||
|
||||
#ssl_cipher_list=ALL:!MD5:@STRENGTH
|
||||
#ssl_cipher_list=ALL:!MD5:@STRENGTH:@SECLEVEL=0
|
||||
#ssl_cipher_list=ALL:!aNULL:!eNULL:!SSLv2:!LOW:!EXP:!RC4:!MD5:@STRENGTH
|
||||
|
||||
# SSL Certificate and Private Key Files
|
||||
|
||||
#ssl_cacert_file=/etc/ssl/servercerts/ca-cert.pem
|
||||
#ssl_cert_file=/etc/ssl/servercerts/nagios-cert.pem
|
||||
#ssl_privatekey_file=/etc/ssl/servercerts/nagios-key.pem
|
||||
|
||||
# SSL USE CLIENT CERTS
|
||||
# This options determines client certificate usage.
|
||||
# Values: 0 = Don't ask for or require client certificates (default)
|
||||
# 1 = Ask for client certificates
|
||||
# 2 = Require client certificates
|
||||
|
||||
#ssl_client_certs=0
|
||||
|
||||
# SSL LOGGING
|
||||
# This option determines which SSL messages are sent to syslog. OR values
|
||||
# together to specify multiple options.
|
||||
|
||||
# Values: 0x00 (0) = No additional logging (default)
|
||||
# 0x01 (1) = Log startup SSL/TLS parameters
|
||||
# 0x02 (2) = Log remote IP address
|
||||
# 0x04 (4) = Log SSL/TLS version of connections
|
||||
# 0x08 (8) = Log which cipher is being used for the connection
|
||||
# 0x10 (16) = Log if client has a certificate
|
||||
# 0x20 (32) = Log details of client's certificate if it has one
|
||||
# -1 or 0xff or 0x2f = All of the above
|
||||
|
||||
#ssl_logging=0x00
|
||||
|
||||
|
||||
|
||||
# NASTY METACHARACTERS
|
||||
# This option allows you to override the list of characters that cannot
|
||||
# be passed to the NRPE daemon.
|
||||
|
||||
# nasty_metachars="|`&><'\\[]{};\r\n"
|
||||
|
||||
|
||||
|
||||
# COMMAND DEFINITIONS
|
||||
# Command definitions that this daemon will run. Definitions
|
||||
# are in the following format:
|
||||
#
|
||||
# command[<command_name>]=<command_line>
|
||||
#
|
||||
# When the daemon receives a request to return the results of <command_name>
|
||||
# it will execute the command specified by the <command_line> argument.
|
||||
#
|
||||
# Unlike Nagios, the command line cannot contain macros - it must be
|
||||
# typed exactly as it should be executed.
|
||||
#
|
||||
# Note: Any plugins that are used in the command lines must reside
|
||||
# on the machine that this daemon is running on! The examples below
|
||||
# assume that you have plugins installed in a /usr/local/nagios/libexec
|
||||
# directory. Also note that you will have to modify the definitions below
|
||||
# to match the argument format the plugins expect. Remember, these are
|
||||
# examples only!
|
||||
|
||||
|
||||
# The following examples use hardcoded command arguments...
|
||||
# This is by far the most secure method of using NRPE
|
||||
|
||||
command[check_users]=/usr/lib64/nagios/plugins/check_users -w 5 -c 10
|
||||
command[check_load]=/usr/lib64/nagios/plugins/check_load -r -w .15,.10,.05 -c .30,.25,.20
|
||||
command[check_hda1]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p /dev/hda1
|
||||
command[check_zombie_procs]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z
|
||||
command[check_total_procs]=/usr/lib64/nagios/plugins/check_procs -w 150 -c 200
|
||||
|
||||
|
||||
# The following examples allow user-supplied arguments and can
|
||||
# only be used if the NRPE daemon was compiled with support for
|
||||
# command arguments *AND* the dont_blame_nrpe directive in this
|
||||
# config file is set to '1'. This poses a potential security risk, so
|
||||
# make sure you read the SECURITY file before doing this.
|
||||
|
||||
### MISC SYSTEM METRICS ###
|
||||
#command[check_users]=/usr/lib64/nagios/plugins/check_users $ARG1$
|
||||
#command[check_load]=/usr/lib64/nagios/plugins/check_load $ARG1$
|
||||
#command[check_disk]=/usr/lib64/nagios/plugins/check_disk $ARG1$
|
||||
#command[check_swap]=/usr/lib64/nagios/plugins/check_swap $ARG1$
|
||||
#command[check_cpu_stats]=/usr/lib64/nagios/plugins/check_cpu_stats.sh $ARG1$
|
||||
#command[check_mem]=/usr/lib64/nagios/plugins/custom_check_mem -n $ARG1$
|
||||
|
||||
### GENERIC SERVICES ###
|
||||
#command[check_init_service]=sudo /usr/lib64/nagios/plugins/check_init_service $ARG1$
|
||||
#command[check_services]=/usr/lib64/nagios/plugins/check_services -p $ARG1$
|
||||
|
||||
### SYSTEM UPDATES ###
|
||||
#command[check_yum]=/usr/lib64/nagios/plugins/check_yum
|
||||
#command[check_apt]=/usr/lib64/nagios/plugins/check_apt
|
||||
|
||||
### PROCESSES ###
|
||||
#command[check_all_procs]=/usr/lib64/nagios/plugins/custom_check_procs
|
||||
#command[check_procs]=/usr/lib64/nagios/plugins/check_procs $ARG1$
|
||||
|
||||
### OPEN FILES ###
|
||||
#command[check_open_files]=/usr/lib64/nagios/plugins/check_open_files.pl $ARG1$
|
||||
|
||||
### NETWORK CONNECTIONS ###
|
||||
#command[check_netstat]=/usr/lib64/nagios/plugins/check_netstat.pl -p $ARG1$ $ARG2$
|
||||
|
||||
### ASTERISK ###
|
||||
#command[check_asterisk]=/usr/lib64/nagios/plugins/check_asterisk.pl $ARG1$
|
||||
#command[check_sip]=/usr/lib64/nagios/plugins/check_sip $ARG1$
|
||||
#command[check_asterisk_sip_peers]=sudo /usr/lib64/nagios/plugins/check_asterisk_sip_peers.sh $ARG1$
|
||||
#command[check_asterisk_version]=/usr/lib64/nagios/plugins/nagisk.pl -c version
|
||||
#command[check_asterisk_peers]=/usr/lib64/nagios/plugins/nagisk.pl -c peers
|
||||
#command[check_asterisk_channels]=/usr/lib64/nagios/plugins/nagisk.pl -c channels
|
||||
#command[check_asterisk_zaptel]=/usr/lib64/nagios/plugins/nagisk.pl -c zaptel
|
||||
#command[check_asterisk_span]=/usr/lib64/nagios/plugins/nagisk.pl -c span -s 1
|
||||
|
||||
|
||||
|
||||
# INCLUDE CONFIG FILE
|
||||
# This directive allows you to include definitions from an external config file.
|
||||
|
||||
#include=<somefile.cfg>
|
||||
|
||||
|
||||
|
||||
# INCLUDE CONFIG DIRECTORY
|
||||
# This directive allows you to include definitions from config files (with a
|
||||
# .cfg extension) in one or more directories (with recursion).
|
||||
|
||||
#include_dir=<somedirectory>
|
||||
#include_dir=<someotherdirectory>
|
||||
|
||||
include_dir=/etc/nrpe.d/
|
||||
|
||||
|
327
bibliotheque/files/nsclient/etc-nsclient.ini
Normal file
327
bibliotheque/files/nsclient/etc-nsclient.ini
Normal file
@ -0,0 +1,327 @@
|
||||
################################################################################
|
||||
#
|
||||
# __ __ ______ __
|
||||
# / | / | / \ / |
|
||||
# _$$ |_ $$/ ______ _______ ______ /$$$$$$ | _____ ____ $$/ _______ ______
|
||||
# / $$ | / | / \ / |______ / \ $$ |_ $$/______ / \/ \ / |/ \ / \
|
||||
# $$$$$$/ $$ |/$$$$$$ |/$$$$$$$// |/$$$$$$ |$$ | / |$$$$$$ $$$$ |$$ |$$$$$$$ |/$$$$$$ |
|
||||
# $$ | __ $$ |$$ | $$ |$$ \$$$$$$/ $$ | $$ |$$$$/ $$$$$$/ $$ | $$ | $$ |$$ |$$ | $$ |$$ $$ |
|
||||
# $$ |/ |$$ |$$ |__$$ | $$$$$$ | $$ \__$$ |$$ | $$ | $$ | $$ |$$ |$$ | $$ |$$$$$$$$/
|
||||
# $$ $$/ $$ |$$ $$/ / $$/ $$ $$/ $$ | $$ | $$ | $$ |$$ |$$ | $$ |$$ |
|
||||
# $$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$/ $$/ $$/ $$/ $$/ $$/ $$/ $$$$$$$/
|
||||
# $$ |
|
||||
# $$ |
|
||||
# $$/
|
||||
#
|
||||
################################################################################
|
||||
|
||||
; Undocumented section
|
||||
[/modules]
|
||||
|
||||
; Undocumented key
|
||||
scripts = enabled
|
||||
|
||||
; CheckDisk - CheckDisk can check various file and disk related things. The current version has commands to check Size of hard drives and directories.
|
||||
CheckDisk = 1
|
||||
|
||||
; Event log Checker. - Check for errors and warnings in the event log. This is only supported through NRPE so if you plan to use only NSClient this wont help you at all.
|
||||
CheckEventLog = 1
|
||||
|
||||
; Check External Scripts - A simple wrapper to run external scripts and batch files.
|
||||
CheckExternalScripts = 1
|
||||
|
||||
; Helper function - Various helper function to extend other checks. This is also only supported through NRPE.
|
||||
CheckHelpers = 1
|
||||
|
||||
; Check NSCP - Checkes the state of the agent
|
||||
CheckNSCP = 1
|
||||
|
||||
; CheckSystem - Various system related checks, such as CPU load, process state, service state memory usage and PDH counters.
|
||||
CheckSystem = 1
|
||||
|
||||
; NRPE server - A simple server that listens for incoming NRPE connection and handles them.
|
||||
NRPEServer = 1
|
||||
|
||||
; NSClient server - A simple server that listens for incoming NSClient (check_nt) connection and handles them. Although NRPE is the preferred method NSClient is fully supported and can be used for simplicity or for compatibility.
|
||||
NSClientServer = 1
|
||||
|
||||
|
||||
; Undocumented section
|
||||
[/settings/default]
|
||||
|
||||
; ALLOWED CIPHERS - A better value is: ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH
|
||||
allowed ciphers = ADH
|
||||
|
||||
; ALLOWED HOSTS - A comma-separated list of allowed hosts. You can use netmasks (/ syntax) or * to create ranges.
|
||||
allowed hosts = 10.59.1.11,centreon.EXEMPLE.LOCAL,
|
||||
|
||||
; BIND TO ADDRESS - Allows you to bind server to a specific local address. This has to be a dotted ip address not a host name. Leaving this blank will bind to all available IP addresses.
|
||||
bind to =
|
||||
|
||||
; CACHE ALLOWED HOSTS - If hostnames should be cached, improves speed and security somewhat but won't allow you to have dynamic IPs for your nagios server.
|
||||
cache allowed hosts = true
|
||||
|
||||
; SSL CERTIFICATE -
|
||||
certificate =
|
||||
|
||||
; PASSWORD - Password used to authenticate against the server
|
||||
password = P@ssw0rd
|
||||
|
||||
; TIMEOUT - Timeout when reading packets on incoming sockets. If the data has not arrived within this time we will bail out.
|
||||
timeout = 80
|
||||
|
||||
; ENABLE SSL ENCRYPTION - This option controls if SSL should be enabled.
|
||||
use ssl = false
|
||||
|
||||
; VERIFY MODE -
|
||||
verify mode = none
|
||||
|
||||
|
||||
; Section for NRPE (NRPEServer.dll) (check_nrpe) protocol options.
|
||||
[/settings/NRPE/server]
|
||||
|
||||
; COMMAND ARGUMENT PROCESSING - This option determines whether or not the we will allow clients to specify arguments to commands that are executed.
|
||||
allow arguments = true
|
||||
|
||||
; COMMAND ALLOW NASTY META CHARS - This option determines whether or not the we will allow clients to specify nasty (as in |`&><'"\[]{}) characters in arguments.
|
||||
allow nasty characters = true
|
||||
|
||||
; PORT NUMBER - Port to use for NRPE.
|
||||
port = 5666
|
||||
|
||||
insecure = true
|
||||
|
||||
; A list of aliases available. An alias is an internal command that has been "wrapped" (to add arguments). Be careful so you don't create loops (ie check_loop=check_a, check_a=check_loop)
|
||||
[/settings/external scripts/alias]
|
||||
|
||||
; alias_cpu - Alias for alias_cpu. To configure this item add a section called: /settings/external scripts/alias/alias_cpu
|
||||
alias_cpu = checkCPU warn=80 crit=90 time=5m time=1m time=30s
|
||||
|
||||
; alias_cpu_ex - Alias for alias_cpu_ex. To configure this item add a section called: /settings/external scripts/alias/alias_cpu_ex
|
||||
alias_cpu_ex = checkCPU warn=$ARG1$ crit=$ARG2$ time=5m time=1m time=30s
|
||||
|
||||
; alias_disk - Alias for alias_disk. To configure this item add a section called: /settings/external scripts/alias/alias_disk
|
||||
alias_disk = CheckDriveSize MinWarn=10% MinCrit=5% CheckAll FilterType=FIXED
|
||||
|
||||
; alias_disk_loose - Alias for alias_disk_loose. To configure this item add a section called: /settings/external scripts/alias/alias_disk_loose
|
||||
alias_disk_loose = CheckDriveSize MinWarn=10% MinCrit=5% CheckAll FilterType=FIXED ignore-unreadable
|
||||
|
||||
; alias_event_log - Alias for alias_event_log. To configure this item add a section called: /settings/external scripts/alias/alias_event_log
|
||||
alias_event_log = CheckEventLog file=application file=system MaxWarn=1 MaxCrit=1 "filter=generated gt -2d AND severity NOT IN ('success', 'informational') AND source != 'SideBySide'" truncate=800 unique descriptions "syntax=%severity%: %source%: %message% (%count%)"
|
||||
|
||||
; alias_file_age - Alias for alias_file_age. To configure this item add a section called: /settings/external scripts/alias/alias_file_age
|
||||
alias_file_age = checkFile2 filter=out "file=$ARG1$" filter-written=>1d MaxWarn=1 MaxCrit=1 "syntax=%filename% %write%"
|
||||
|
||||
; alias_file_size - Alias for alias_file_size. To configure this item add a section called: /settings/external scripts/alias/alias_file_size
|
||||
alias_file_size = CheckFiles "filter=size > $ARG2$" "path=$ARG1$" MaxWarn=1 MaxCrit=1 "syntax=%filename% %size%" max-dir-depth=10
|
||||
|
||||
; alias_mem - Alias for alias_mem. To configure this item add a section called: /settings/external scripts/alias/alias_mem
|
||||
alias_mem = checkMem MaxWarn=80% MaxCrit=90% ShowAll=long type=physical type=virtual type=paged type=page
|
||||
|
||||
; alias_process - Alias for alias_process. To configure this item add a section called: /settings/external scripts/alias/alias_process
|
||||
alias_process = checkProcState "$ARG1$=started"
|
||||
|
||||
; alias_process_count - Alias for alias_process_count. To configure this item add a section called: /settings/external scripts/alias/alias_process_count
|
||||
alias_process_count = checkProcState MaxWarnCount=$ARG2$ MaxCritCount=$ARG3$ "$ARG1$=started"
|
||||
|
||||
; alias_process_hung - Alias for alias_process_hung. To configure this item add a section called: /settings/external scripts/alias/alias_process_hung
|
||||
alias_process_hung = checkProcState MaxWarnCount=1 MaxCritCount=1 "$ARG1$=hung"
|
||||
|
||||
; alias_process_stopped - Alias for alias_process_stopped. To configure this item add a section called: /settings/external scripts/alias/alias_process_stopped
|
||||
alias_process_stopped = checkProcState "$ARG1$=stopped"
|
||||
|
||||
; alias_sched_all - Alias for alias_sched_all. To configure this item add a section called: /settings/external scripts/alias/alias_sched_all
|
||||
alias_sched_all = CheckTaskSched "filter=exit_code ne 0" "syntax=%title%: %exit_code%" warn=>0
|
||||
|
||||
; alias_sched_long - Alias for alias_sched_long. To configure this item add a section called: /settings/external scripts/alias/alias_sched_long
|
||||
alias_sched_long = CheckTaskSched "filter=status = 'running' AND most_recent_run_time < -$ARG1$" "syntax=%title% (%most_recent_run_time%)" warn=>0
|
||||
|
||||
; alias_sched_task - Alias for alias_sched_task. To configure this item add a section called: /settings/external scripts/alias/alias_sched_task
|
||||
alias_sched_task = CheckTaskSched "filter=title eq '$ARG1$' AND exit_code ne 0" "syntax=%title% (%most_recent_run_time%)" warn=>0
|
||||
|
||||
; alias_service - Alias for alias_service. To configure this item add a section called: /settings/external scripts/alias/alias_service
|
||||
alias_service = checkServiceState CheckAll
|
||||
|
||||
; alias_service_ex - Alias for alias_service_ex. To configure this item add a section called: /settings/external scripts/alias/alias_service_ex
|
||||
alias_service_ex = checkServiceState CheckAll "exclude=Net Driver HPZ12" "exclude=Pml Driver HPZ12" exclude=stisvc
|
||||
|
||||
; alias_up - Alias for alias_up. To configure this item add a section called: /settings/external scripts/alias/alias_up
|
||||
alias_up = checkUpTime MinWarn=1d MinWarn=1h
|
||||
|
||||
; alias_updates - Alias for alias_updates. To configure this item add a section called: /settings/external scripts/alias/alias_updates
|
||||
alias_updates = check_updates -warning 0 -critical 0
|
||||
|
||||
; alias_volumes - Alias for alias_volumes. To configure this item add a section called: /settings/external scripts/alias/alias_volumes
|
||||
alias_volumes = CheckDriveSize MinWarn=10% MinCrit=5% CheckAll=volumes FilterType=FIXED
|
||||
|
||||
; alias_volumes_loose - Alias for alias_volumes_loose. To configure this item add a section called: /settings/external scripts/alias/alias_volumes_loose
|
||||
alias_volumes_loose = CheckDriveSize MinWarn=10% MinCrit=5% CheckAll=volumes FilterType=FIXED ignore-unreadable
|
||||
|
||||
; default - Alias for default. To configure this item add a section called: /settings/external scripts/alias/default
|
||||
default =
|
||||
|
||||
|
||||
[/settings/external scripts/wrapped scripts]
|
||||
|
||||
|
||||
|
||||
|
||||
; Section for external scripts configuration options (CheckExternalScripts).
|
||||
[/settings/external scripts]
|
||||
|
||||
; COMMAND ARGUMENT PROCESSING - This option determines whether or not the we will allow clients to specify arguments to commands that are executed.
|
||||
allow arguments = true
|
||||
|
||||
; COMMAND ALLOW NASTY META CHARS - This option determines whether or not the we will allow clients to specify nasty (as in |`&><'"\[]{}) characters in arguments.
|
||||
allow nasty characters = false
|
||||
|
||||
; SCRIPT DIRECTORY - Load all scripts in a directory and use them as commands. Probably dangerous but useful if you have loads of scripts :)
|
||||
script path =
|
||||
|
||||
; COMMAND TIMEOUT - The maximum time in seconds that a command can execute. (if more then this execution will be aborted). NOTICE this only affects external commands not internal ones.
|
||||
timeout = 180
|
||||
|
||||
|
||||
; Files to be included in the configuration
|
||||
[/includes]
|
||||
|
||||
|
||||
; Section for NSClient (NSClientServer.dll) (check_nt) protocol options.
|
||||
[/settings/NSClient/server]
|
||||
|
||||
; PERFORMANCE DATA - Send performance data back to nagios (set this to 0 to remove all performance data).
|
||||
performance data = true
|
||||
|
||||
; PORT NUMBER - Port to use for check_nt.
|
||||
port = 12489
|
||||
|
||||
|
||||
; Configure crash handling properties.
|
||||
[/settings/crash]
|
||||
|
||||
; ARCHIVE CRASHREPORTS - Archive crash reports in the archive folder
|
||||
archive = true
|
||||
|
||||
; folder - The archive folder for crash dumps.
|
||||
archive folder = ${shared-path}/crash-dumps
|
||||
|
||||
; RESTART - Restart the service if it crashes
|
||||
restart = true
|
||||
|
||||
; RESTART SERVICE NAME - The name of the service to restart after a crash
|
||||
restart target = NSClientpp
|
||||
|
||||
; SUBMIT CRASHREPORTS - Submit crash reports to nsclient.org (or your configured submission server)
|
||||
submit = false
|
||||
|
||||
; SUBMISSION URL - The url to submit crash reports to
|
||||
submit url = http://crash.nsclient.org/submit
|
||||
|
||||
|
||||
; Section for the EventLog Checker (CheckEventLog.dll).
|
||||
[/settings/eventlog]
|
||||
|
||||
; BUFFER_SIZE - The size of the buffer to use when getting messages; this affects the speed and maximum size of messages you can receive.
|
||||
buffer size = 131072
|
||||
|
||||
; DEBUG - Log more information when filtering (useful to detect issues with filters); not useful in production as it is a bit of a resource hog.
|
||||
debug = true
|
||||
|
||||
; LOOKUP NAMES - Lookup the names of eventlog files
|
||||
lookup names = true
|
||||
|
||||
; SYNTAX - Set this to use a specific syntax string for all commands (that don't specify one).
|
||||
syntax =
|
||||
|
||||
|
||||
; A set of options to configure the real time checks
|
||||
[/settings/eventlog/real-time]
|
||||
|
||||
; DEBUG - Log missed records (useful to detect issues with filters); not useful in production as it is a bit of a resource hog.
|
||||
debug =true
|
||||
|
||||
; REAL TIME CHECKING - Spawns a background thread which detects issues and reports them back instantly.
|
||||
enabled = false
|
||||
|
||||
; LOGS TO CHECK - Comma separated list of logs to check
|
||||
log = application,system
|
||||
|
||||
; STARTUP AGE - The initial age to scan when starting NSClient++
|
||||
startup age = 30m
|
||||
|
||||
|
||||
; A set of filters to use in real-time mode
|
||||
[/settings/eventlog/real-time/filters]
|
||||
|
||||
|
||||
; A list of scripts available to run from the CheckExternalScripts module. Syntax is: <command>=<script> <arguments>
|
||||
[/settings/external scripts/scripts]
|
||||
|
||||
; default - Alias for default. To configure this item add a section called: /settings/external scripts/scripts/default
|
||||
default =
|
||||
|
||||
; Below, specify the command name and its definition.
|
||||
|
||||
test_ps1 = cmd /c echo scripts\tracertSolstys.ps1; exit($lastexitcode) | powershell.exe -command -
|
||||
|
||||
test_ps2 = cmd /c echo scripts\tracertwan.ps1; exit($lastexitcode) | powershell.exe -command -
|
||||
|
||||
;verif_snapshot = cmd /c echo scripts/CheckSnapVNX.ps1 ; exit($lastexitcode) | powershell.exe -command -
|
||||
verif_snapshot = cmd /c echo scripts/verif_snapshot_psv2.ps1 ; exit($lastexitcode) | powershell.exe -command -
|
||||
verif_snapshot_space_used = cmd /c echo scripts/VSS_Snapshot_Used.ps1 ; exit($lastexitcode) | powershell.exe -command -
|
||||
|
||||
check_windows_updates = cmd /c echo scripts\check_windows_updates.ps1; exit($lastexitcode) | powershell.exe -command -
|
||||
check_wsus = cmd /c echo scripts\check_wsus.ps1; exit($lastexitcode) | powershell.exe -command -
|
||||
check_folder_size=check_folder_size.vbs "$ARG1$" "$ARG2$" "$ARG3$" | cscript.exe -command -
|
||||
check_files=check_files.vbs $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$ $ARG8$ $ARG9$ $ARG10$ $ARG11$ $ARG12$ $ARG13$ $ARG14$ | cscript.exe -command -
|
||||
check_print_spooler=cscript.exe //T:30 //NoLogo scripts\check_print_spooler.vbs $ARG1$ | cscript.exe -command -
|
||||
|
||||
; A list of templates for wrapped scripts
|
||||
[/settings/external scripts/wrappings]
|
||||
|
||||
; BATCH FILE WRAPPING -
|
||||
bat = scripts\\%SCRIPT% %ARGS%
|
||||
|
||||
; POWERSHELL WRAPPING -
|
||||
ps1 = cmd /c echo scripts\\%SCRIPT% %ARGS%; exit($lastexitcode) | powershell.exe -Command -
|
||||
|
||||
; VISUAL BASIC WRAPPING -
|
||||
vbs = cscript.exe //T:30 //NoLogo scripts\lib\wrapper.vbs %SCRIPT% %ARGS%
|
||||
|
||||
; Configure log properties.
|
||||
[/settings/log]
|
||||
|
||||
; DATEMASK - The date/time format used when writing timestamps to the log.
|
||||
date format = %Y-%m-%d %H:%M:%S
|
||||
|
||||
; FILENAME - The file to write log data to. Set this to none to disable log to file.
|
||||
file name = ${exe-path}/nsclient.log
|
||||
|
||||
; LOG LEVEL - Log level to use. Available levels are error,warning,info,debug,trace
|
||||
level = debug
|
||||
|
||||
|
||||
; Configure log file properties.
|
||||
[/settings/log/file]
|
||||
|
||||
; MAXIMUM FILE SIZE - When the file size reaches this it will be truncated to 50%; if set to 0 (default) truncation will be disabled
|
||||
max size = 0
|
||||
|
||||
|
||||
; Section for configuring the shared session.
|
||||
[/settings/shared session]
|
||||
|
||||
; LOG LEVEL - Log level to use
|
||||
enabled = false
|
||||
|
||||
|
||||
; Section for system checks and system settings
|
||||
[/settings/system/windows]
|
||||
|
||||
; DEFAULT LENGTH - Used to define the default interval for range buffer checks (ie. CPU).
|
||||
default buffer length = 1h
|
||||
|
||||
|
||||
; Configure which services have to be in which state
|
||||
[/settings/system/windows/service mapping]
|
||||
|
3
bibliotheque/files/nsclient/scripts/check_60s.bat
Normal file
3
bibliotheque/files/nsclient/scripts/check_60s.bat
Normal file
@ -0,0 +1,3 @@
|
||||
@PING 127.0.0.1 -n 60 > NUL
|
||||
@echo OK: Everything is going to be fine
|
||||
@exit 0
|
230
bibliotheque/files/nsclient/scripts/check_ad.vbs
Normal file
230
bibliotheque/files/nsclient/scripts/check_ad.vbs
Normal file
@ -0,0 +1,230 @@
|
||||
'Script to check the status of a DOMAIN controller and report to Nagios
|
||||
'requires DCDIAG.EXE
|
||||
'Author: Felipe Ferreira
|
||||
'Version: 3.0
|
||||
'
|
||||
'Mauled over by John Jore, j-o-h-n-a-t-j-o-r-e-d-o-t-n-o 16/11/2010 to work on W2K8, x32
|
||||
'as well as remove some, eh... un-needed lines of code, general optimization as well as adding command parameter support
|
||||
'This is where i found the original script, http://felipeferreira.net/?p=315&cpage=1#comments
|
||||
'Tested by JJ on W2K8 SP2, x86
|
||||
' W2K3 R2 SP2, x64
|
||||
'Version 3.0-JJ-V0.2
|
||||
'Todo: Proper error handling
|
||||
' Add /help parameter
|
||||
' Add support for the two tests which require additional input (dcpromo is one such test)
|
||||
'Version 3.0-JJ-V0.3
|
||||
' Removed some surplus language detection code
|
||||
' Including non-working English test on a W2K8 x32 DC
|
||||
' Added support for multi-partition checks like 'crossrefvalidation'. Previously the last status result would mask previous failures
|
||||
' Incorporated Jonathan Vogt's german and multiline tests
|
||||
|
||||
|
||||
'Force all variables to be declared before usage
|
||||
option explicit
|
||||
|
||||
'Array for name and status (Ugly, but redim only works on last dimension, and can't set initial size if redim
|
||||
dim name(), status()
|
||||
redim preserve name(0)
|
||||
redim preserve status(0)
|
||||
redim preserve lock(0)
|
||||
|
||||
'Debug switch
|
||||
dim verbose : verbose = 0
|
||||
|
||||
'Return variables for NAGIOS
|
||||
const intOK = 0
|
||||
const intWarning = 1 'Not used. What dcdiag test would be warning instead of critical?
|
||||
const intCritical = 2
|
||||
const intUnknown = 3
|
||||
|
||||
'Language dependent. Default is English
|
||||
dim strOK : strOK = "passed"
|
||||
dim strNotOK : strNotOk = "failed"
|
||||
|
||||
'Call dcdiag and grab relevant output
|
||||
exec(cmd)
|
||||
|
||||
'Generate NAGIOS compatible output from dcdiag
|
||||
printout()
|
||||
|
||||
'call dcdiag and parse the output
|
||||
sub exec(strCmd)
|
||||
'Declare variables
|
||||
dim objShell : Set objShell = WScript.CreateObject("WScript.Shell")
|
||||
dim objExecObject, lineout, tmpline
|
||||
lineout = ""
|
||||
'Command line options we're using
|
||||
pt strCmd
|
||||
|
||||
Set objExecObject = objShell.Exec(strCmd)
|
||||
'Loop until end of output from dcdiag
|
||||
do While not objExecObject.StdOut.AtEndOfStream
|
||||
tmpline = lcase(objExecObject.StdOut.ReadLine())
|
||||
|
||||
'Check the version of DCDiag being used and change the global 'passed' / 'failed' strings
|
||||
call DetectLang(tmpline)
|
||||
|
||||
if (instr(tmpline, ".....")) then
|
||||
'testresults start with a couple of dots, reset the lineout buffer
|
||||
lineout= tmpline
|
||||
'pt "lineout buffer '" & lineout & "'"
|
||||
else
|
||||
'Else append the next line to the buffer to capture multiline responses
|
||||
lineout = lineout + tmpline
|
||||
'pt "lineout buffer appended '" & lineout & "'"
|
||||
end if
|
||||
|
||||
if instr(lineout, lcase(strOK)) then
|
||||
'we have a strOK String which means we have reached the end of a result output (maybe on newline)
|
||||
call parse(lineout)
|
||||
lineout = ""
|
||||
end if
|
||||
loop
|
||||
|
||||
'Why call this at the end? Is it not pointless as we've looped through the entire output from dcdiag in the above loop?!?
|
||||
'call parse(lineout)
|
||||
end sub
|
||||
|
||||
|
||||
sub DetectLang(txtp)
|
||||
|
||||
'Change from looking for English words if we find the string below:
|
||||
if (instr(lcase(txtp), lcase("Verzeichnisserverdiagnose"))) then 'German
|
||||
pt "Detected German Language, changing the global test strings to look for"
|
||||
strOK = "bestanden"
|
||||
strNotOk = "nicht bestanden"
|
||||
end if
|
||||
|
||||
end sub
|
||||
|
||||
|
||||
sub parse(txtp)
|
||||
'Parse output of dcdiag command and change state of checks
|
||||
dim loop1
|
||||
|
||||
'Is this really required? Or is it for pretty debug output only?
|
||||
txtp = Replace(txtp,chr(10),"") ' Newline
|
||||
txtp = Replace(txtp,chr(13),"") ' CR
|
||||
txtp = Replace(txtp,chr(9),"") ' Tab
|
||||
do while instr(txtp, " ")
|
||||
txtp = Replace(txtp," "," ") ' Some tidy up
|
||||
loop
|
||||
|
||||
' We have to test twice because some localized (e.g. German) outputs simply use 'not', or 'nicht' as a prefix instead of 'passed' / 'failed'
|
||||
if instr(lcase(txtp), lcase(strOK)) then
|
||||
'What are we testing for now?
|
||||
pt "Checking :" & txtp & "' as it contains '" & strOK & "'"
|
||||
'What services are ok? 'By using instr we don't need to strip down text, remove vbCr, VbLf, or get the hostname
|
||||
for loop1 = 0 to Ubound(name)-1
|
||||
if (instr(lcase(txtp), lcase(name(loop1)))) AND (lock(loop1) = FALSE) then
|
||||
status(loop1)="OK"
|
||||
pt "Set the status for test '" & name(loop1) & "' to '" & status(loop1) & "'"
|
||||
end if
|
||||
next
|
||||
end if
|
||||
|
||||
' if we find the strNotOK string then reset to CRITICAL
|
||||
if instr(lcase(txtp), lcase(strNotOK)) then
|
||||
'What are we testing for now?
|
||||
pt txtp
|
||||
for loop1 = 0 to Ubound(name)-1
|
||||
if (instr(lcase(txtp), lcase(name(loop1)))) then
|
||||
status(loop1)="CRITICAL"
|
||||
'Lock the variable so it can't be reset back to success. Required for multi-partition tests like 'crossrefvalidation'
|
||||
lock(loop1)=TRUE
|
||||
pt "Reset the status for test '" & name(loop1) & "' to '" & status(loop1) & "' with a lock '" & lock(loop1) & "'"
|
||||
end if
|
||||
next
|
||||
end if
|
||||
end sub
|
||||
|
||||
'outputs result for NAGIOS
|
||||
sub printout()
|
||||
dim loop1, msg : msg = ""
|
||||
|
||||
for loop1 = 0 to ubound(name)-1
|
||||
msg = msg & name(loop1) & ": " & status(loop1) & ". "
|
||||
next
|
||||
|
||||
'What state are we in? Show and then quit with NAGIOS compatible exit code
|
||||
if instr(msg,"CRITICAL") then
|
||||
wscript.echo "CRITICAL - " & msg
|
||||
wscript.quit(intCritical)
|
||||
else
|
||||
wscript.echo "OK - " & msg
|
||||
wscript.quit(intOK)
|
||||
end if
|
||||
end sub
|
||||
|
||||
'Print messages to screen for debug purposes
|
||||
sub pt(msgTxt)
|
||||
if verbose then
|
||||
wscript.echo msgTXT
|
||||
end if
|
||||
end sub
|
||||
|
||||
'What tests do we run?
|
||||
function cmd()
|
||||
dim loop1, test, tests
|
||||
const intDefaultTests = 6
|
||||
|
||||
cmd = "dcdiag " 'Start with this
|
||||
|
||||
'If no command line parameters, then go with these defaults
|
||||
if Wscript.Arguments.Count = 0 Then
|
||||
redim preserve name(intDefaultTests)
|
||||
redim preserve status(intDefaultTests)
|
||||
redim preserve lock(intDefaultTests)
|
||||
'Test name
|
||||
name(0) = "services"
|
||||
name(1) = "replications"
|
||||
name(2) = "advertising"
|
||||
name(3) = "fsmocheck"
|
||||
name(4) = "ridmanager"
|
||||
name(5) = "machineaccount"
|
||||
|
||||
'Set default status for each named test
|
||||
for loop1 = 0 to (ubound(name)-1)
|
||||
status(loop1) = "CRITICAL"
|
||||
lock(loop1) = FALSE
|
||||
cmd = cmd & "/test:" & name(loop1) & " "
|
||||
next
|
||||
else
|
||||
'User specified which tests to perform.
|
||||
|
||||
for loop1 = 0 to wscript.arguments.count - 1
|
||||
if (instr(lcase(wscript.Arguments(loop1)), lcase("/test"))) then
|
||||
|
||||
'If parameter is wrong, give some hints
|
||||
if len(wscript.arguments(loop1)) < 6 then
|
||||
wscript.echo "Unknown parameter. Provide name of tests to perform like this:"
|
||||
wscript.echo vbTAB & "'cscript //nologo " & Wscript.ScriptName & " /test:advertising,dfsevent'"
|
||||
wscript.quit(intUnknown)
|
||||
end if
|
||||
|
||||
'Strip down the test to individual items
|
||||
tests = right(wscript.arguments(loop1), len(wscript.arguments(loop1))-6)
|
||||
pt "Tests: '" & tests & "'"
|
||||
|
||||
tests = split(tests,",")
|
||||
for each test in tests
|
||||
cmd = cmd & " /test:" & test
|
||||
|
||||
'Expand the array to make room for one more test
|
||||
redim preserve name(ubound(name)+1)
|
||||
redim preserve status(ubound(status)+1)
|
||||
redim preserve lock(ubound(lock)+1)
|
||||
|
||||
'Store name of test and status
|
||||
name(Ubound(name)-1) = test
|
||||
status(Ubound(status)-1) = "CRITICAL" 'Default status. Change to OK if test is ok
|
||||
lock(Ubound(lock)-1) = FALSE 'Don't lock the variable yet.
|
||||
|
||||
'pt "Contents: " & name(Ubound(name)-1) & " " & status(Ubound(status)-1)
|
||||
next
|
||||
end if
|
||||
next
|
||||
end if
|
||||
'We end up with this to test:
|
||||
pt "Command to run: " & cmd
|
||||
end function
|
45
bibliotheque/files/nsclient/scripts/check_battery.vbs
Normal file
45
bibliotheque/files/nsclient/scripts/check_battery.vbs
Normal file
@ -0,0 +1,45 @@
|
||||
' =========================================================
|
||||
' WMI script to return the charge remaining in a laptop battery, using the
|
||||
' EstimatedChargeRemaining property of the Win32_Battery class
|
||||
' =========================================================
|
||||
|
||||
' Required Variables
|
||||
Const PROGNAME = "check_battery"
|
||||
Const VERSION = "0.0.1"
|
||||
|
||||
' Default settings for your script.
|
||||
threshold_warning = "50:"
|
||||
threshold_critical = "20:"
|
||||
strComputer = "."
|
||||
|
||||
' Create the NagiosPlugin object
|
||||
Set np = New NagiosPlugin
|
||||
|
||||
' Define what args that should be used
|
||||
np.add_arg "computer", "Computer name", 0
|
||||
np.add_arg "warning", "warning threshold", 0
|
||||
np.add_arg "critical", "critical threshold", 0
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If Args.Exists("help") Then
|
||||
np.Usage
|
||||
End If
|
||||
|
||||
' If we define /warning /critical on commandline it should override the script default.
|
||||
If Args.Exists("warning") Then threshold_warning = Args("warning")
|
||||
If Args.Exists("critical") Then threshold_critical = Args("critical")
|
||||
If Args.Exists("computer") Then strComputer = Args("computer")
|
||||
np.set_thresholds threshold_warning, threshold_critical
|
||||
|
||||
Set colInstances = np.simple_WMI_CIMV2(strComputer, "SELECT * FROM Win32_Battery")
|
||||
return_code = OK
|
||||
|
||||
For Each objInstance In colInstances
|
||||
if message <> "" then : message = message & ", "
|
||||
if perf <> "" then : perf = perf & ", "
|
||||
message = message & "Battery " & objInstance.Status & " - Charge Remaining = " & objInstance.EstimatedChargeRemaining & "%"
|
||||
perf = perf & "charge=" & objInstance.EstimatedChargeRemaining
|
||||
return_code = np.escalate_check_threshold(return_code, objInstance.EstimatedChargeRemaining)
|
||||
Next
|
||||
' Nice Exit with msg and exitcode
|
||||
np.nagios_exit message & "|" & perf, return_code
|
464
bibliotheque/files/nsclient/scripts/check_files.vbs
Normal file
464
bibliotheque/files/nsclient/scripts/check_files.vbs
Normal file
@ -0,0 +1,464 @@
|
||||
'
|
||||
'
|
||||
' Check files under a given path
|
||||
' ==============================
|
||||
'
|
||||
' based on example scripts found in nsclient++/scripts directory
|
||||
'
|
||||
' Author: werner.fuerst@assmann.at 2010-12-21
|
||||
'
|
||||
' uses NagiosPluginCDbl.vbs from nsclient++/scripts/lib
|
||||
' - modified Version of NagiosPlugin.vbs which came with nsclient++ vers. 0.3.8.76
|
||||
' - compares bounds to double values, so 10 comes after 9 (numeric sorting)
|
||||
' - bounds definition conforms to nagios plugin guidelines (http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT)
|
||||
'
|
||||
' modifications in NSC.ini:
|
||||
' =========================
|
||||
'
|
||||
'
|
||||
' in [modules]:
|
||||
' CheckExternalScripts.dll
|
||||
'
|
||||
' in [NRPE]:
|
||||
' allow_arguments=1
|
||||
' allow_nasty_meta_chars=1
|
||||
' allowed_hosts=x.x.x.x
|
||||
'
|
||||
' in [External Script]:
|
||||
' allow_arguments=1
|
||||
' allow_nasty_meta_chars=1
|
||||
'
|
||||
' in [Script Wrappings]:
|
||||
' vbs=cscript.exe //T:30 //NoLogo scripts\lib\wrapperCDbl.vbs %SCRIPT% %ARGS%
|
||||
'
|
||||
' in [Wrapped Scripts]:
|
||||
' check_files=check_files.vbs $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$ $ARG8$ $ARG9$ $ARG10$ $ARG11$ $ARG12$ $ARG13$ $ARG14$
|
||||
'
|
||||
'
|
||||
' nagios usage:
|
||||
' =============
|
||||
'
|
||||
' define command{
|
||||
' command_name check_nrpe_external
|
||||
' command_line $USER1$/check_nrpe -H $HOSTADDRESS$ -c $ARG1$ -a $ARG2$
|
||||
' }
|
||||
'
|
||||
' define service{
|
||||
' use generic-service
|
||||
' host_name windowsxx
|
||||
' service_description Backup DB2
|
||||
' check_command check_nrpe_external!check_files!/path:"e:\\BACKUP\\DB" /namefilter:"DB2\.DAT" /expectmatch:1 /age:3 /selage:hour /warning:1: /critical:1: /size:9000000000 /weekdays:"2,3"
|
||||
'} }
|
||||
' give alarm when file DB2.DAT in e:\BACKUP\DB was not written until 3 o clock on Monday or Tuesday (alarm if count is less than 1, so threshold is set to 1:)
|
||||
'
|
||||
'
|
||||
'
|
||||
' check_command check_nrpe_external!check_files!/path:"d:\\journal" /searchdepth:1 /selage:ignore /warning:30 /critical:40
|
||||
' give alarm if there are more than 30 files under d:\journal
|
||||
'
|
||||
'
|
||||
' check_command check_nrpe_external!check_files!/path:"T:\\nfs\\interface1" /searchdepth:2 /age:2d /warning:5: /critical:3: /selage:newer /seldat:created
|
||||
' give alarm if there are fewer than 3 files which are newer than 2 days under t:\nfs\interface1, search only 1 subdir down, use the creation date for comparison
|
||||
'
|
||||
'
|
||||
'
|
||||
' args:
|
||||
' =====
|
||||
'
|
||||
'
|
||||
' warning: threshold warning
|
||||
' alert if x...
|
||||
' 10 < 0 or > 10, (outside the range of {0 .. 10})
|
||||
' 10: < 10, (outside {10 .. 8})
|
||||
' ~:10 > 10, (outside the range of {-8 .. 10})
|
||||
' 10:20 < 10 or > 20, (outside the range of {10 .. 20})
|
||||
' @10:20 >= 10 and <= 20, (inside the range of {10 .. 20})
|
||||
' critical: threshold critical
|
||||
' namefilter: regular expressionon on which files have to match
|
||||
' age: files have to be older/newer (see selage) than age
|
||||
' e.g.: 5d: 5 days, 4h: 4 hours, 10n: 10 Minutes, 90s: 90 seconds
|
||||
' selage: older/newer/hour/ignore
|
||||
' older/newer: count only files if the are older/newer
|
||||
' hour: alert if file is not written until hour
|
||||
' ignore: count files independent of their age
|
||||
' searchdepth: search down x subdirs (searchdepth:1 - do not go down in directory hierarchy)
|
||||
' seldat: modified/created
|
||||
' modified: date, when file was written
|
||||
' created: date, when file was created
|
||||
' size: if file is smaller than given size give a warning
|
||||
' expectmatch: if less than expectmatch files correspond to namefilter give a warning
|
||||
' weekdays:
|
||||
' if selage:hour files have to be written only on weekdays (1:sunday, 2:monday, ...)
|
||||
' if selage:newer or selage:older and the timeunit of age is d (days), we add as many days to age as the last weekday is back
|
||||
' e.g.: weekdays:23456 files are written on Monday, Tuesday, Wednesday, Thursday, Friday
|
||||
'
|
||||
|
||||
Const PROGNAME = "check_files"
|
||||
Const VERSION = "0.1.0"
|
||||
|
||||
Dim verbose
|
||||
verbose = 0
|
||||
|
||||
Dim lastfile
|
||||
Dim lastdat
|
||||
Dim lastsize
|
||||
Dim intdif
|
||||
Dim warnsize
|
||||
Dim matchcount
|
||||
Dim weekdays
|
||||
Dim ageint
|
||||
Dim ageunit
|
||||
Dim agestring
|
||||
Dim inlevel
|
||||
|
||||
|
||||
' Default settings for script.
|
||||
threshold_warning = 10
|
||||
threshold_critical = 20
|
||||
alias = "default"
|
||||
agestring = "5d" '5 days
|
||||
ageint = 5
|
||||
ageunit = "d"
|
||||
selage = "newer"
|
||||
namefilter = ""
|
||||
searchdepth = 0
|
||||
seldat = "modified"
|
||||
size = 0
|
||||
expectmatch = 0
|
||||
weekdays = "1,2,3,4,5,6,7"
|
||||
|
||||
' Create the NagiosPlugin object
|
||||
Set np = New NagiosPlugin
|
||||
|
||||
' Define what args that should be used
|
||||
np.add_arg "path", "Path", 1
|
||||
np.add_arg "namefilter", "Filename Filter", 0
|
||||
np.add_arg "age", "Age", 0
|
||||
np.add_arg "selage", "newer, older, hour, ignore", 0
|
||||
np.add_arg "searchdepth", "depth of subdirs to search", 0
|
||||
np.add_arg "seldat", "modified or created", 0
|
||||
np.add_arg "size", "size", 0
|
||||
np.add_arg "warning", "warning threshold", 0
|
||||
np.add_arg "critical", "critical threshold", 0
|
||||
np.add_arg "expectmatch", "expect at least x matches", 0
|
||||
np.add_arg "weekdays", "1,2,3,... 1-Sun 2-Mon 3-Tue...", 0
|
||||
np.add_arg "alias", "Alias", 0
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If Args.Count < 1 Or Args.Exists("help") Or np.parse_args = 0 Then
|
||||
WScript.Echo Args.Count
|
||||
np.Usage
|
||||
End If
|
||||
|
||||
' If we define /warning /critical on commandline it should override the script default.
|
||||
If Args.Exists("warning") Then threshold_warning = Args("warning")
|
||||
If Args.Exists("critical") Then threshold_critical = Args("critical")
|
||||
If Args.Exists("namefilter") Then namefilter = Args("namefilter")
|
||||
If Args.Exists("age") Then agestring = Args("age")
|
||||
If Args.Exists("selage") Then selage = Args("selage")
|
||||
If Args.Exists("searchdepth") Then searchdepth = Cint(Args("searchdepth"))
|
||||
If Args.Exists("seldat") Then seldat = Args("seldat")
|
||||
If Args.Exists("size") Then size = CDbl(Args("size"))
|
||||
If Args.Exists("expectmatch") Then expectmatch = CInt(Args("expectmatch"))
|
||||
If Args.Exists("weekdays") Then weekdays = Args("weekdays")
|
||||
If Args.Exists("alias") Then alias = Args("alias")
|
||||
|
||||
' Set the msg output to be used (OK/WARNING/CRITICAL/UNKNOWN will be applied automaticly)
|
||||
np.set_thresholds threshold_warning, threshold_critical
|
||||
|
||||
' Set ageint and ageunit
|
||||
Set reage = New RegExp
|
||||
reage.IgnoreCase = False
|
||||
reage.Pattern = "^([0-9]+)([dhns]*)$" 'd: days h:hours n:minutes s:seconds
|
||||
|
||||
Set ages = reage.Execute(agestring)
|
||||
|
||||
For Each age In ages
|
||||
pt age & " > " & age.SubMatches(0) & " > " & age.SubMatches(1)
|
||||
ageint = CInt(age.SubMatches(0))
|
||||
ageunit = age.SubMatches(1)
|
||||
If ageunit = "" Then
|
||||
If selage = "hour" Then
|
||||
ageunit = "h"
|
||||
Else
|
||||
ageunit = "d"
|
||||
End If
|
||||
End If
|
||||
Next
|
||||
|
||||
' add some days if this and the days before do not belong to the weekdays
|
||||
If ageunit="d" And (selage="newer" Or selage="older") Then
|
||||
date1 = Now()
|
||||
date00 = DateValue(Now)
|
||||
date10 = DateValue(Now)
|
||||
|
||||
For i = 0 To 7
|
||||
' datex: last day when the file should have been written according to weekdays
|
||||
datex = SubtractDate(date00,i)
|
||||
'date00 = DateValue(datex)
|
||||
wdnowx = WeekDay(datex)
|
||||
If Instr(weekdays,wdnowx) Then
|
||||
' exit for if we found the youngest weekday
|
||||
Exit For
|
||||
End If
|
||||
Next
|
||||
|
||||
datex0 = DateValue(datex)
|
||||
diffd = DateDiff("d",datex0,date10)
|
||||
ageint = ageint + diffd
|
||||
End If
|
||||
|
||||
intdif=0
|
||||
matchcount=0
|
||||
warnsize = 0
|
||||
|
||||
' go down the hierarchy, in intdif we get the number of corresponding files
|
||||
CheckSubdirs Args("path")
|
||||
|
||||
' get return code according to intdif files
|
||||
return_code = np.check_threshold(CDbl(intdif))
|
||||
|
||||
last = ""
|
||||
If Len(namefilter) Then
|
||||
last = "filter: " & namefilter
|
||||
End If
|
||||
|
||||
' if there was only one file...
|
||||
If matchcount = 1 Then
|
||||
last = last & " found one: " & lastfile & " " & lastdat & " " & lastsize
|
||||
End If
|
||||
|
||||
' warning message if file too small or too few of them
|
||||
message = ""
|
||||
If warnsize = matchcount And warnsize > 0 Then
|
||||
If return_code = 0 Then
|
||||
return_code = 1
|
||||
End If
|
||||
message = " TOO SMALL!!! "
|
||||
End If
|
||||
|
||||
If matchcount < expectmatch Then
|
||||
If return_code = 0 Then
|
||||
return_code = 1
|
||||
End If
|
||||
message = " NOT FOUND!!! "
|
||||
End If
|
||||
|
||||
If selage = "ignore" Then
|
||||
agemessage = " count: "
|
||||
ElseIf selage = "newer" Or selage ="older" Then
|
||||
agemessage = selage & " than " & ageint & " " & ageunit & " : "
|
||||
ElseIf selage = "hour" Then
|
||||
agemessage = " written before " & ageint & " o-clock on " & weekdays & " : "
|
||||
Else
|
||||
agemessage = selage & ": " & age & " " & ageunit & " "
|
||||
End If
|
||||
|
||||
' for the performance data format warning and critical thresholds (no :@~)
|
||||
cintw=np.get_threshold_perfdat("warning") 'PerfDat(np.get_threshold("warning"))
|
||||
cintc=np.get_threshold_perfdat("critical")
|
||||
|
||||
' if we watch only a single file we will see the size in the performance data
|
||||
If expectmatch = 1 and matchcount = 1 Then
|
||||
perfdata = "size=" & lastsize
|
||||
Else
|
||||
perfdata = "count=" & Cint(intdif) & ";" & cintw & ";" & cintc
|
||||
End If
|
||||
|
||||
msg = "Testing " & Replace(Args("path"),"\","/") & " " & last & " " & agemessage & intdif & " w:" & np.get_threshold("warning") & " c: " & np.get_threshold("critical") & " " & message & " |" & perfdata
|
||||
|
||||
' Nice Exit with msg and exitcode
|
||||
np.nagios_exit msg, return_code
|
||||
|
||||
|
||||
Sub CheckSubdirs(StartDir)
|
||||
Set FSO = CreateObject("Scripting.FileSystemObject")
|
||||
inlevel = 0
|
||||
|
||||
' go down hierarchy
|
||||
ShowSubfolders FSO.GetFolder(StartDir)
|
||||
|
||||
'Clean up
|
||||
Set FSO = Nothing
|
||||
End Sub
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Sub ShowSubFolders(Folder)
|
||||
'inlevel: we start with 1, so the startdir has level 1!
|
||||
inlevel = inlevel + 1
|
||||
pt "level: " & inlevel & " Folder: " & Folder.Name & " searchdepth: " & searchdepth
|
||||
If (inlevel > searchdepth) And (searchdepth > 0) Then
|
||||
' we are too deep!
|
||||
inlevel = inlevel - 1
|
||||
pt "level2: " & inlevel & " Folder: " & Folder.Name
|
||||
Exit sub
|
||||
End If
|
||||
pt "level3: " & inlevel & " Folder: " & Folder.Name
|
||||
ListFiles Folder
|
||||
If ((inlevel < searchdepth) And (searchdepth > 0)) Or (searchdepth = 0) Then
|
||||
For Each Subfolder in Folder.SubFolders
|
||||
' go further down
|
||||
ShowSubFolders Subfolder
|
||||
Next
|
||||
End If
|
||||
' leave level
|
||||
inlevel = inlevel - 1
|
||||
End Sub
|
||||
|
||||
|
||||
|
||||
Sub ListFiles(Folder)
|
||||
Set colFiles = Folder.Files
|
||||
|
||||
pt "in folder " & Folder.Name
|
||||
For Each File in colFiles
|
||||
matched = true
|
||||
'pt "namefilter: " & namefilter & " len:" & Len(namefilter)
|
||||
' if we defined a namefilter, use it (regexp!)
|
||||
If Len(namefilter) > 0 Then
|
||||
|
||||
filename = File.Name
|
||||
|
||||
'pt "namefilter1: " & filename
|
||||
Set re = new regexp
|
||||
re.IgnoreCase = True
|
||||
re.Pattern = namefilter
|
||||
|
||||
'pt "namefilter2: " & namefilter & " len:" & Len(namefilter)
|
||||
If re.Test(filename) <> true Then
|
||||
matched = false
|
||||
End If
|
||||
End If
|
||||
|
||||
' now test the matched files against the age
|
||||
If matched = true Then
|
||||
|
||||
' count the matched files (we warn later if there are fewer the expectmatch files)
|
||||
matchcount = matchcount + 1
|
||||
pt "match: " & matchcount & " file: " & File.name
|
||||
|
||||
' Date2 can be the DateCeated (backup of old machine, which is not time synchronized, so the created date is the date when the file was backed up to our server)
|
||||
Date1 = Now()
|
||||
If seldat = "created" Then
|
||||
Date2 = File.DateCreated
|
||||
Else
|
||||
Date2 = File.DateLastModified
|
||||
End If
|
||||
|
||||
' time difference between file and now
|
||||
dif = Cint(DateDiff(ageunit,Date2,Date1))
|
||||
|
||||
'remember the last file checked (we use it later in the message)
|
||||
lastfile = File.name
|
||||
lastdat = Date2
|
||||
lastsize = File.Size
|
||||
|
||||
' remember the count of files which where too small
|
||||
If CDbl(File.Size) < CDbl(size) Then
|
||||
warnsize = warnsize + 1
|
||||
End If
|
||||
|
||||
'pt "c0 dif: " & dif & " age: " & age & " selage: " & selage & " seldat: " & seldat
|
||||
' count the numer of files which are newer/older than given age
|
||||
If dif < ageint And selage = "newer" Then
|
||||
intdif = intdif + 1
|
||||
ElseIf dif > ageint And selage = "older" Then
|
||||
intdif = intdif + 1
|
||||
ElseIf selage = "ignore" Then
|
||||
intdif = intdif + 1
|
||||
ElseIf selage = "hour" Then
|
||||
' if selage:hour check if file has been written not after age (here age is the hour)
|
||||
If isnotolder(File) Then
|
||||
intdif = intdif + 1
|
||||
End If
|
||||
End If
|
||||
End If
|
||||
Next
|
||||
End Sub
|
||||
|
||||
|
||||
|
||||
Function isnotolder(File)
|
||||
date1 = Now()
|
||||
date2 = File.DateLastModified
|
||||
date00 = DateValue(Now)
|
||||
date10 = DateValue(Now)
|
||||
date20 = DateValue(date2)
|
||||
|
||||
|
||||
For i = 0 To 7
|
||||
' datex: last day when the file should have been written according to weekdays
|
||||
datex = SubtractDate(date00,i)
|
||||
'date00 = DateValue(datex)
|
||||
wdnowx = WeekDay(datex)
|
||||
If Instr(weekdays,wdnowx) Then
|
||||
' exit for if we found the youngest weekday
|
||||
Exit For
|
||||
End If
|
||||
Next
|
||||
|
||||
|
||||
' we also need the second youngest weekday, when the file should have been written (is this english?)
|
||||
|
||||
date00 = datex
|
||||
|
||||
For i = 1 To 7
|
||||
datex2 = SubtractDate(date00,i)
|
||||
wdnowx2 = WeekDay(datex2)
|
||||
If Instr(weekdays,wdnowx2) Then
|
||||
Exit For
|
||||
End If
|
||||
Next
|
||||
|
||||
diffdx = DateDiff("d",datex,Now) ' days between expected write date and today
|
||||
diffh0 = DateDiff("h",date10,Now) ' hours since midnight
|
||||
diffd = DateDiff("d",date20,datex) ' days between written and expect to be written
|
||||
diffd2 = DateDiff("d",date20,datex2) ' days between second oldest expected write day and today
|
||||
diff1 = DateDiff("h",date20,date2) ' hour of write (we currently do not use it)
|
||||
|
||||
' we expect the best
|
||||
iswritten = 1
|
||||
|
||||
' today to be written an hour passed: should be written today!
|
||||
If diffdx = 0 And diffh0 > ageint And diffd > 0 Then
|
||||
iswritten = 0
|
||||
End If
|
||||
|
||||
' today to be written an hour not passed: should be written at least on second oldest expected day
|
||||
If diffdx = 0 And diffh0 <= ageint And diffd2 > 0 Then
|
||||
iswritten = 0
|
||||
End If
|
||||
|
||||
|
||||
' it should have been written on a day before, diffd gives a positive value: expected day passed
|
||||
If diffdx > 0 And diffd > 0 Then
|
||||
pt "i31: " & diffdx & " " & diffd
|
||||
iswritten = 0
|
||||
End If
|
||||
|
||||
|
||||
' older the 7 days!
|
||||
If diffd > 7 Then
|
||||
pt "i0: " & diffd
|
||||
iswritten = 0
|
||||
End If
|
||||
|
||||
' if none of the bad conditions matched, we still have iswritten=1
|
||||
isnotolder = iswritten
|
||||
End Function
|
||||
|
||||
|
||||
Function SubtractDate(datea,ii)
|
||||
dateb = DateAdd("d",-ii,datea)
|
||||
SubtractDate = dateb
|
||||
End Function
|
||||
|
||||
Function pt(strMsg)
|
||||
if verbose = 1 then
|
||||
wscript.echo strMsg
|
||||
end if
|
||||
end function
|
||||
|
1
bibliotheque/files/nsclient/scripts/check_long.bat
Normal file
1
bibliotheque/files/nsclient/scripts/check_long.bat
Normal file
@ -0,0 +1 @@
|
||||
@for %%a in (1 2 3 4 5 6 7 8 9 10 11 12 13 14 15) do @echo HELLO 12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
|
53
bibliotheque/files/nsclient/scripts/check_no_rdp.bat
Normal file
53
bibliotheque/files/nsclient/scripts/check_no_rdp.bat
Normal file
@ -0,0 +1,53 @@
|
||||
@echo off
|
||||
SET RDP_PORT=%1
|
||||
SET NAGIOSHOST1=%2
|
||||
SET NAGIOSHOST2=%3
|
||||
SET NAGIOSHOST3=%4
|
||||
IF NOT DEFINED RDP_PORT GOTO :defsettings
|
||||
IF %RDP_PORT% EQU "-h" GOTO :usage
|
||||
GOTO check
|
||||
|
||||
:defsettings
|
||||
set RDP_PORT=3389
|
||||
|
||||
:check
|
||||
netstat -a -n | find "%RDP_PORT%" | find "LISTENING" > NUL
|
||||
IF %ERRORLEVEL% NEQ 0 goto portnotfound
|
||||
|
||||
:connection_check
|
||||
IF NOT DEFINED NAGIOSHOST1 GOTO check_s0
|
||||
IF NOT DEFINED NAGIOSHOST2 GOTO check_s1
|
||||
IF NOT DEFINED NAGIOSHOST3 GOTO check_s12
|
||||
GOTO check_s123
|
||||
|
||||
:check_s0
|
||||
netstat -a -n | find "%RDP_PORT%" | find "ESTABLISHED"
|
||||
IF %ERRORLEVEL% NEQ 0 goto noconnections
|
||||
exit /b 2
|
||||
|
||||
:check_s1
|
||||
netstat -a -n | find "%RDP_PORT%" | find "ESTABLISHED" | find /V "%NAGIOSHOST1%"
|
||||
IF %ERRORLEVEL% NEQ 0 goto noconnections
|
||||
exit /b 2
|
||||
|
||||
:check_s12
|
||||
netstat -a -n | find "%RDP_PORT%" | find "ESTABLISHED" | find /V "%NAGIOSHOST1%" | find /V "%NAGIOSHOST2%"
|
||||
IF %ERRORLEVEL% NEQ 0 goto noconnections
|
||||
exit /b 2
|
||||
|
||||
:check_s123
|
||||
netstat -a -n | find "%RDP_PORT%" | find "ESTABLISHED" | find /V "%NAGIOSHOST1%" | find /V "%NAGIOSHOST2%" | find /V "%NAGIOSHOST3%"
|
||||
IF %ERRORLEVEL% NEQ 0 goto noconnections
|
||||
exit /b 2
|
||||
|
||||
:portnotfound
|
||||
echo RDP not listening! Is port %RDP_PORT% (still) correct?
|
||||
exit /b 2
|
||||
|
||||
:usage
|
||||
echo Usage: check_rdp.bat PORT HOST1 HOST2 HOST3
|
||||
exit /b 3
|
||||
|
||||
:noconnections
|
||||
echo OK: No connections.
|
||||
exit /b 0
|
2
bibliotheque/files/nsclient/scripts/check_ok.bat
Normal file
2
bibliotheque/files/nsclient/scripts/check_ok.bat
Normal file
@ -0,0 +1,2 @@
|
||||
@echo OK: Everything is going to be fine
|
||||
@exit 0
|
2
bibliotheque/files/nsclient/scripts/check_ok.sh
Normal file
2
bibliotheque/files/nsclient/scripts/check_ok.sh
Normal file
@ -0,0 +1,2 @@
|
||||
echo OK: Everything is going to be fine
|
||||
exit 0
|
17
bibliotheque/files/nsclient/scripts/check_ping.bat
Normal file
17
bibliotheque/files/nsclient/scripts/check_ping.bat
Normal file
@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
ping -n 1 %1 -w 20000 >NUL
|
||||
IF ERRORLEVEL 2 GOTO unknown
|
||||
IF ERRORLEVEL 1 GOTO err
|
||||
GOTO ok
|
||||
|
||||
:err
|
||||
echo CRITICAL: Ping check failed
|
||||
exit /B 1
|
||||
|
||||
:unknown
|
||||
echo UNKNOWN: Something went wrong
|
||||
exit /B 3
|
||||
|
||||
:ok
|
||||
echo OK: Ping succeded
|
||||
exit /B 0
|
134
bibliotheque/files/nsclient/scripts/check_printer.vbs
Normal file
134
bibliotheque/files/nsclient/scripts/check_printer.vbs
Normal file
@ -0,0 +1,134 @@
|
||||
' =========================================================
|
||||
' WMI script to return the charge remaining in a laptop battery, using the
|
||||
' EstimatedChargeRemaining property of the Win32_Battery class
|
||||
' =========================================================
|
||||
|
||||
' Required Variables
|
||||
Const PROGNAME = "check_printer"
|
||||
Const VERSION = "0.0.1"
|
||||
|
||||
' Default settings for your script.
|
||||
threshold_warning = 50
|
||||
threshold_critical = 20
|
||||
strComputer = "."
|
||||
strPrinter = ""
|
||||
|
||||
' Create the NagiosPlugin object
|
||||
Set np = New NagiosPlugin
|
||||
|
||||
' Define what args that should be used
|
||||
np.add_arg "computer", "Computer name", 0
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If Args.Exists("help") Then
|
||||
np.Usage
|
||||
End If
|
||||
|
||||
' If we define /warning /critical on commandline it should override the script default.
|
||||
If Args.Exists("computer") Then strComputer = Args("computer")
|
||||
|
||||
Set colInstances = np.simple_WMI_CIMV2(strComputer, "SELECT * FROM Win32_Printer")
|
||||
return_code = OK
|
||||
|
||||
perf = ""
|
||||
msg = ""
|
||||
For Each objInstance In colInstances
|
||||
msg = msg & "" & objInstance.Caption & _
|
||||
" {Status: " & printer_status(objInstance.PrinterStatus) & _
|
||||
", State: " & printer_state(objInstance.PrinterState) & "}; "
|
||||
perf = perf & "status=" & objInstance.PrinterStatus & " state=" & objInstance.PrinterState & " "
|
||||
return_code = np.escalate(return_code, check_status(objInstance.PrinterStatus))
|
||||
return_code = np.escalate(return_code, check_state(objInstance.PrinterState))
|
||||
Next
|
||||
|
||||
' Nice Exit with msg and exitcode
|
||||
np.nagios_exit msg, return_code
|
||||
|
||||
|
||||
Public Function printer_status(code)
|
||||
Select Case code
|
||||
case 1: printer_status = "Other"
|
||||
case 2: printer_status = "Unknown"
|
||||
case 3: printer_status = "Idle"
|
||||
case 4: printer_status = "Printing"
|
||||
case 5: printer_status = "WarmUp"
|
||||
case 6: printer_status = "Stopped Printing"
|
||||
case 7: printer_status = "Offline"
|
||||
case else: printer_status = "Undefined"
|
||||
End Select
|
||||
End Function
|
||||
|
||||
Public Function check_status(code)
|
||||
Select Case code
|
||||
case 1: check_status = OK
|
||||
case 2: check_status = UNKNOWN
|
||||
case 3: check_status = OK
|
||||
case 4: check_status = OK
|
||||
case 5: check_status = OK
|
||||
case 6: check_status = OK
|
||||
case 7: check_status = CRITICAL
|
||||
case else: check_status = UNKNOWN
|
||||
End Select
|
||||
End Function
|
||||
|
||||
Public Function printer_state(code)
|
||||
Select Case code
|
||||
case 0: printer_state = "Paused"
|
||||
case 1: printer_state = "Error"
|
||||
case 2: printer_state = "PendingDeletion"
|
||||
case 3: printer_state = "PaperJam"
|
||||
case 4: printer_state = "PaperOut"
|
||||
case 5: printer_state = "ManualFeed"
|
||||
case 6: printer_state = "PaperProblem"
|
||||
case 7: printer_state = "Offline"
|
||||
case 8: printer_state = "IOActive"
|
||||
case 9: printer_state = "Busy"
|
||||
case 10: printer_state = "Printing"
|
||||
case 11: printer_state = "OutputBinFull"
|
||||
case 12: printer_state = "NotAvailable"
|
||||
case 13: printer_state = "Waiting"
|
||||
case 14: printer_state = "Processing"
|
||||
case 15: printer_state = "Initialization"
|
||||
case 16: printer_state = "WarmingUp"
|
||||
case 17: printer_state = "TonerLow"
|
||||
case 18: printer_state = "NoToner"
|
||||
case 19: printer_state = "PagePunt"
|
||||
case 20: printer_state = "UserInterventionRequired"
|
||||
case 21: printer_state = "OutofMemory"
|
||||
case 22: printer_state = "DoorOpen"
|
||||
case 23: printer_state = "Server_Unknown"
|
||||
case 24: printer_state = "PowerSave"
|
||||
case else: printer_state = "Undefined"
|
||||
End Select
|
||||
End Function
|
||||
|
||||
Public Function check_state(code)
|
||||
Select Case code
|
||||
case 0: check_state = OK
|
||||
case 1: check_state = CRITICAL
|
||||
case 2: check_state = OK
|
||||
case 3: check_state = WARNING
|
||||
case 4: check_state = CRITICAL
|
||||
case 5: check_state = OK
|
||||
case 6: check_state = CRITICAL
|
||||
case 7: check_state = CRITICAL
|
||||
case 8: check_state = OK
|
||||
case 9: check_state = OK
|
||||
case 10: check_state = OK
|
||||
case 11: check_state = WARNING
|
||||
case 12: check_state = UNKNOWN
|
||||
case 13: check_state = OK
|
||||
case 14: check_state = OK
|
||||
case 15: check_state = OK
|
||||
case 16: check_state = OK
|
||||
case 17: check_state = WARNING
|
||||
case 18: check_state = CRITICAL
|
||||
case 19: check_state = OK
|
||||
case 20: check_state = CRITICAL
|
||||
case 21: check_state = CRITICAL
|
||||
case 22: check_state = WARNING
|
||||
case 23: check_state = CRITICAL
|
||||
case 24: check_state = OK
|
||||
case else: check_state = UNKNOWN
|
||||
End Select
|
||||
End Function
|
34
bibliotheque/files/nsclient/scripts/check_test.bat
Normal file
34
bibliotheque/files/nsclient/scripts/check_test.bat
Normal file
@ -0,0 +1,34 @@
|
||||
@echo off
|
||||
echo Test arguments are: (%1 %2 %3)
|
||||
IF "%1" == "LONG" GOTO :PRINT_LONG
|
||||
GOTO :PRINT_SHORT
|
||||
|
||||
:PRINT_LONG
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
|
||||
:PRINT_SHORT
|
||||
|
||||
IF "%1" == "CRIT" GOTO :EXIT_CRIT
|
||||
IF "%1" == "WARN" GOTO :EXIT_WARN
|
||||
IF "%1" == "UNKNOWN" GOTO :EXIT_UNKNOWN
|
||||
|
||||
exit /B 0
|
||||
|
||||
:EXIT_WARN
|
||||
exit /B 1
|
||||
|
||||
:EXIT_CRIT
|
||||
exit /B 2
|
||||
|
||||
:EXIT_UNKNOWN
|
||||
exit /B 3
|
28
bibliotheque/files/nsclient/scripts/check_test.ps1
Normal file
28
bibliotheque/files/nsclient/scripts/check_test.ps1
Normal file
@ -0,0 +1,28 @@
|
||||
$a1 = ""
|
||||
$a2 = ""
|
||||
$a3 = ""
|
||||
if ($args.count -gt 0) { $a1 = $args[0] }
|
||||
if ($args.count -gt 1) { $a2 = $args[1] }
|
||||
if ($args.count -gt 2) { $a3 = $args[2] }
|
||||
write-host "Test arguments are: ($a1 ""$a2"" ""$a3"")"
|
||||
|
||||
if ($a1 -eq "LONG") {
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
write-host 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
}
|
||||
|
||||
if ($a1 -eq "WARN") { exit 1 }
|
||||
if ($a1 -eq "CRIT") { exit 2 }
|
||||
if ($a1 -eq "UNKNOWN") { exit 3 }
|
||||
exit 0
|
||||
|
||||
|
19
bibliotheque/files/nsclient/scripts/check_test.sh
Normal file
19
bibliotheque/files/nsclient/scripts/check_test.sh
Normal file
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
echo "Test arguments are: ($1 $2 $3)"
|
||||
if [ "$1" == "LONG" ] ; then
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
echo 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
|
||||
fi
|
||||
if [ "$1" == "CRIT" ] ; then exit 2 ; fi
|
||||
if [ "$1" == "WARN" ] ; then exit 1 ; fi
|
||||
if [ "$1" == "UNKNOWN" ] ; then exit 3 ; fi
|
||||
exit 0
|
40
bibliotheque/files/nsclient/scripts/check_test.vbs
Normal file
40
bibliotheque/files/nsclient/scripts/check_test.vbs
Normal file
@ -0,0 +1,40 @@
|
||||
' =========================================================
|
||||
' Example file of setting up a script to use NagiosPlugin.vbs as base.
|
||||
' =========================================================
|
||||
|
||||
' Required Variables
|
||||
Const PROGNAME = "check_test"
|
||||
Const VERSION = "0.9.0"
|
||||
|
||||
' Default settings for your script.
|
||||
threshold_warning = 10
|
||||
threshold_critical = 20
|
||||
alias = "default"
|
||||
|
||||
' Create the NagiosPlugin object
|
||||
Set np = New NagiosPlugin
|
||||
|
||||
' Define what args that should be used
|
||||
np.add_arg "arg", "Argument", 1
|
||||
np.add_arg "warning", "warning threshold", 0
|
||||
np.add_arg "critical", "critical threshold", 0
|
||||
np.add_arg "alias", "Alias", 0
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If Args.Count < 1 Or Args.Exists("help") Or np.parse_args = 0 Then
|
||||
WScript.Echo Args.Count
|
||||
np.Usage
|
||||
End If
|
||||
|
||||
' If we define /warning /critical on commandline it should override the script default.
|
||||
If Args.Exists("warning") Then threshold_warning = Args("warning")
|
||||
If Args.Exists("critical") Then threshold_critical = Args("critical")
|
||||
If Args.Exists("alias") Then alias = Args("alias")
|
||||
np.set_thresholds threshold_warning, threshold_critical
|
||||
|
||||
' Set the msg output to be used (OK/WARNING/CRITICAL/UNKNOWN will be applied automaticly)
|
||||
return_code = np.check_threshold(Args("arg"))
|
||||
msg = "Testing " & Args.Item("host") & " " & np.get_threshold("warning") & " " & np.get_threshold("critical")
|
||||
|
||||
' Nice Exit with msg and exitcode
|
||||
np.nagios_exit msg, return_code
|
101
bibliotheque/files/nsclient/scripts/check_time.vbs
Normal file
101
bibliotheque/files/nsclient/scripts/check_time.vbs
Normal file
@ -0,0 +1,101 @@
|
||||
' Author: Mattias Ryrl<72>n (mr@op5.com)
|
||||
' Website: http://www.op5.com
|
||||
' Created: 2008-09-18
|
||||
' Updated: 2008-10-09
|
||||
' Version: 0.9.1
|
||||
' Description: Check the offset of your server vs your Active Directory Domain.
|
||||
'
|
||||
' Usage cscript /NoLogo check_ad_time.vbs <domain> "<offset>"
|
||||
'
|
||||
' Changelog:
|
||||
'
|
||||
' 0.9.1:
|
||||
' * Fixed timeformat (i think, needs feedback).
|
||||
' * Changed /domain to /computers, still works to use the AD domain. eg domain.com
|
||||
'
|
||||
' 0.9:
|
||||
' Initial Release
|
||||
'
|
||||
|
||||
Err = 3
|
||||
msg = "UNKNOWN"
|
||||
|
||||
Set Args = WScript.Arguments
|
||||
If WScript.Arguments.Count <= 1 Then
|
||||
Usage()
|
||||
Else
|
||||
domain = Args.Item(0)
|
||||
|
||||
offset = Args.Item(1)
|
||||
|
||||
offset = Replace(offset,",",".")
|
||||
|
||||
Set objShell = CreateObject("Wscript.Shell")
|
||||
strCommand = "C:\windows\system32\w32tm.exe /monitor /computers:" & domain
|
||||
set objProc = objShell.Exec(strCommand)
|
||||
|
||||
input = ""
|
||||
strOutput = ""
|
||||
|
||||
Do While Not objProc.StdOut.AtEndOfStream
|
||||
input = objProc.StdOut.ReadLine
|
||||
|
||||
If InStr(input, "NTP") Then
|
||||
strOutput = input
|
||||
End If
|
||||
Loop
|
||||
|
||||
Set myRegExp = New RegExp
|
||||
myRegExp.IgnoreCase = True
|
||||
myRegExp.Global = True
|
||||
myRegExp.Pattern = " NTP: ([+-]+)([0-9]+).([0-9]+)s offset"
|
||||
|
||||
Set myMatches = myRegExp.Execute(strOutput)
|
||||
|
||||
result = ""
|
||||
dir = ""
|
||||
|
||||
For Each myMatch in myMatches
|
||||
If myMatch.SubMatches.Count > 0 Then
|
||||
For I = 0 To myMatch.SubMatches.Count - 1
|
||||
If I = 0 Then
|
||||
dir = myMatch.SubMatches(I)
|
||||
ElseIf I > 1 Then
|
||||
result = result & "." & myMatch.SubMatches(I)
|
||||
Else
|
||||
result = result & myMatch.SubMatches(I)
|
||||
End If
|
||||
Next
|
||||
End If
|
||||
Next
|
||||
|
||||
If Left(dir, 1) = "-" Then
|
||||
result = CDbl(result)
|
||||
Else
|
||||
result = CDbl("-" & result)
|
||||
End If
|
||||
|
||||
If result > CDbl(offset) OR result < -CDbl(offset) Then
|
||||
Err = 2
|
||||
msg = "NTP CRITICAL: Offset " & result & " secs|offset: " & result & ";0;" & Replace(CDbl(offset),",",".") & ";"
|
||||
Else
|
||||
Err = 0
|
||||
msg = "NTP OK: Offset " & result & " secs|offset: " & result & ";0;" & Replace(CDbl(offset),",",".") & ";"
|
||||
End If
|
||||
End If
|
||||
|
||||
Wscript.Echo msg
|
||||
Wscript.Quit(Err)
|
||||
|
||||
Function Usage()
|
||||
Err = 3
|
||||
WScript.Echo "Usage cscript /NoLogo check_ad_time.vbs <domain> ""<offset>"""
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "domain Name of domain to check roles on"
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "offset total number of seconds offset allowed."
|
||||
Wscript.Echo " will check both positive and negative"
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "Example: cscript /NoLogo check_ad_time.vbs mydomain.com ""0.4"""
|
||||
Wscript.Quit(Err)
|
||||
End Function
|
70
bibliotheque/files/nsclient/scripts/check_updates.vbs
Normal file
70
bibliotheque/files/nsclient/scripts/check_updates.vbs
Normal file
@ -0,0 +1,70 @@
|
||||
' =========================================================
|
||||
' Script to check for updates onwwindows machines.
|
||||
' Shamelessly stolen from Micha<68> Jankowski (fooky@pjwstk.edu.pl) script.
|
||||
' =========================================================
|
||||
|
||||
' Required Variables
|
||||
Const PROGNAME = "check_updates"
|
||||
Const VERSION = "0.0.1"
|
||||
|
||||
' Default settings for your script.
|
||||
threshold_warning = 50
|
||||
threshold_critical = 20
|
||||
|
||||
' Create the NagiosPlugin object
|
||||
Set np = New NagiosPlugin
|
||||
|
||||
' Define what args that should be used
|
||||
np.add_arg "warning", "warning threshold", 0
|
||||
np.add_arg "critical", "critical threshold", 0
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If Args.Exists("help") Then
|
||||
np.Usage
|
||||
End If
|
||||
|
||||
' If we define /warning /critical on commandline it should override the script default.
|
||||
If Args.Exists("warning") Then threshold_warning = Args("warning")
|
||||
If Args.Exists("critical") Then threshold_critical = Args("critical")
|
||||
np.set_thresholds threshold_warning, threshold_critical
|
||||
|
||||
' Check if the Windows Update service is running
|
||||
Set wmi = GetObject("winmgmts://./root/cimv2")
|
||||
If wmi.Get("Win32_Service.Name='wuauserv'").StartMode = "Disabled" Then
|
||||
np.nagios_exit "UNKNOWN: Windows Update service is disabled", UNKNOWN
|
||||
End If
|
||||
|
||||
Set objAutoUpdate = CreateObject("Microsoft.Update.AutoUpdate")
|
||||
|
||||
intResultDetect = objAutoUpdate.DetectNow
|
||||
If intResultDetect <> 0 Then: np.nagios_exit "UNKNOWN: Unable to detect Automatic Updates.", UNKNOWN
|
||||
|
||||
Set objSession = CreateObject("Microsoft.Update.Session")
|
||||
Set objSearcher = objSession.CreateUpdateSearcher
|
||||
|
||||
intUncompleted = 0
|
||||
intUncompletedSoftware = 0
|
||||
|
||||
Set objSysInfo = CreateObject("Microsoft.Update.SystemInfo")
|
||||
If objSysInfo.RebootRequired Then: np.nagios_exit "WARNING: Reboot required.", WARNING
|
||||
|
||||
Set result = objSearcher.Search("IsInstalled = 0 and IsHidden = 0")
|
||||
Set colDownloads = result.Updates
|
||||
|
||||
For i = 0 to colDownloads.Count - 1
|
||||
If colDownloads.Item(i).AutoSelectOnWebsites Then
|
||||
updatesNames = colDownloads.Item(i).Title & "+ " & updatesNames
|
||||
intUncompleted = intUncompleted + 1
|
||||
Else
|
||||
intUncompletedSoftware = intUncompletedSoftware + 1
|
||||
End If
|
||||
Next
|
||||
|
||||
return_code = OK
|
||||
|
||||
If intUncompleted > 0 Then
|
||||
return_code = np.check_threshold(intUncompleted)
|
||||
np.nagios_exit "Number of critical updates not installed: " & intUncompleted & " <br />Number of software updates not installed: " & intUncompletedSoftware & " <br /> Critical updates name: " & updatesNames, return_code
|
||||
Else
|
||||
np.nagios_exit "There is no critical updates <br />Number of software or driver updates not installed: " & intUncompletedSoftware, OK
|
||||
End If
|
89
bibliotheque/files/nsclient/scripts/check_updates.wsf
Normal file
89
bibliotheque/files/nsclient/scripts/check_updates.wsf
Normal file
@ -0,0 +1,89 @@
|
||||
<job>
|
||||
<script language="VBScript">
|
||||
|
||||
Set objShell = CreateObject("WScript.Shell")
|
||||
|
||||
Dim sysroot
|
||||
sysroot = objShell.ExpandEnvironmentStrings("%systemroot%")
|
||||
|
||||
Set objExec = objShell.Exec("cmd.exe /c type " & sysroot & "\SoftwareDistribution\ReportingEvents.log")
|
||||
results = LCase(objExec.StdOut.ReadAll)
|
||||
|
||||
res_split = Split(results, vbCrLf)
|
||||
|
||||
Dim regEx
|
||||
Set regEx = New RegExp
|
||||
regEx.Pattern = "(.)\S*\s*\S*\s*\S*\s*\d\s*(\d*)\s*\S*\s*\S*[0-9\s]*\S*\s*\S*\s*.*\t(.*)"
|
||||
regEx.IgnoreCase = true
|
||||
|
||||
count = 1
|
||||
ReDim arrDyn(1)
|
||||
|
||||
For Each zeile in res_split
|
||||
|
||||
firstsign = regEx.Replace(zeile, "$1")
|
||||
|
||||
If (firstsign = "{") Then
|
||||
|
||||
number = regEx.Replace(zeile, "$2")
|
||||
finish = regEx.Replace(zeile, "$3")
|
||||
|
||||
If (number = 147) Then
|
||||
|
||||
count = count + 1
|
||||
ReDim Preserve arrDyn(count + 1)
|
||||
arrDyn(count + 1) = finish
|
||||
End If
|
||||
|
||||
End If
|
||||
|
||||
Next
|
||||
|
||||
mount_updates = -1
|
||||
|
||||
For x = 0 to UBound(arrDyn)
|
||||
|
||||
If x = UBound(arrDyn) Then
|
||||
end_array = Split(arrDyn(x), " ")
|
||||
mount_updates = end_array(UBound(end_array) - 1)
|
||||
End If
|
||||
|
||||
Next
|
||||
|
||||
Set objSysInfo = CreateObject("Microsoft.Update.SystemInfo")
|
||||
If objSysInfo.RebootRequired Then
|
||||
|
||||
reboot = " Reboot required!"
|
||||
status = 1
|
||||
|
||||
Else
|
||||
|
||||
reboot = " No Reboot required!"
|
||||
status = 0
|
||||
|
||||
End If
|
||||
|
||||
If mount_updates > 0 Then
|
||||
|
||||
If mount_updates = 1 Then
|
||||
|
||||
Wscript.echo("Warning: 1 Update detected!" & reboot)
|
||||
status = 1
|
||||
|
||||
ElseIf mount_updates >= 2 Then
|
||||
|
||||
Wscript.echo("Critical: " & mount_updates & " Updates detected!" & reboot)
|
||||
status = 2
|
||||
|
||||
End If
|
||||
|
||||
Else
|
||||
|
||||
Wscript.echo("OK, 0 Updates detected!" & reboot)
|
||||
|
||||
End If
|
||||
|
||||
Wscript.Quit(status)
|
||||
|
||||
</script>
|
||||
</job>
|
173
bibliotheque/files/nsclient/scripts/check_windows_updates.ps1
Normal file
173
bibliotheque/files/nsclient/scripts/check_windows_updates.ps1
Normal file
@ -0,0 +1,173 @@
|
||||
#################################################################################
|
||||
#
|
||||
# NAME: check_windows_updates.ps1
|
||||
#
|
||||
# COMMENT: Script to check for windows updates with Nagios + NRPE/NSClient++
|
||||
#
|
||||
# Checks:
|
||||
# - how many critical and optional updates are available
|
||||
# - whether the system is waiting for reboot after installed updates
|
||||
#
|
||||
# Features:
|
||||
# - properly handles NRPE's 1024b limitation in return packet
|
||||
# - configurable return states for pending reboot and optional updates
|
||||
# - performance data in return packet shows titles of available critical updates
|
||||
# - caches updates in file to reduce network traffic, also dramatically increases script execution speed
|
||||
#
|
||||
# Return Values for NRPE:
|
||||
# No updates available - OK (0)
|
||||
# Only Hidden Updates - OK (0)
|
||||
# Updates already installed, reboot required - WARNING (1)
|
||||
# Optional updates available - WARNING (1)
|
||||
# Critical updates available - CRITICAL (2)
|
||||
# Script errors - UNKNOWN (3)
|
||||
#
|
||||
# NRPE Handler to use with NSClient++:
|
||||
# [NRPE Handlers]
|
||||
# check_updates=cmd /c echo scripts\check_windows_updates.ps1 $ARG1$ $ARG2$; exit $LastExitCode | powershell.exe -command -
|
||||
#
|
||||
#
|
||||
# IMPORTANT: Please make absolutely sure that your Powershell ExecutionPolicy is set to Remotesigned.
|
||||
# Also note that there are two versions of powershell on a 64bit OS! Depending on the architecture
|
||||
# of your NSClient++ version you have to choose the right one:
|
||||
#
|
||||
# 64bit NSClient++ (installed under C:\Program Files ):
|
||||
# %SystemRoot%\SysWOW64\WindowsPowerShell\v1.0\powershell.exe "Set-ExecutionPolicy RemoteSigned"
|
||||
#
|
||||
# 32bit NSClient++ (installed under C:\Program Files (x86) ):
|
||||
# %SystemRoot%\syswow64\WindowsPowerShell\v1.0\powershell.exe "Set-ExecutionPolicy RemoteSigned"
|
||||
#
|
||||
#
|
||||
# CHANGELOG:
|
||||
# 1.45 2016-08-05 - corrected some typos, added newline after each critical update
|
||||
# 1.44 2016-04-05 - performance data added
|
||||
# 1.42 2015-07-20 - strip unwanted characters from returnString
|
||||
# 1.41 2015-04-24 - removed wuauclt /detectnow if updates available
|
||||
# 1.4 2015-01-14 - configurable return state for pending reboot
|
||||
# 1.3 2013-01-04 - configurable return state for optional updates
|
||||
# 1.2 2011-08-11 - cache updates, periodically update cache file
|
||||
# 1.1 2011-05-11 - hidden updates only -> state OK
|
||||
# - call wuauctl.exe to show available updates to user
|
||||
# 1.0 2011-05-10 - initial version
|
||||
#
|
||||
#################################################################################
|
||||
# Copyright (C) 2011-2015 Christian Kaufmann, ck@tupel7.de
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU General Public License as published by the Free Software
|
||||
# Foundation; either version 3 of the License, or (at your option) any later
|
||||
# version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along with
|
||||
# this program; if not, see <http://www.gnu.org/licenses>.
|
||||
#################################################################################
|
||||
|
||||
$htReplace = New-Object hashtable
|
||||
foreach ($letter in (Write-Output ä ae ö oe ü ue Ä Ae Ö Oe Ü Ue ß ss)) {
|
||||
$foreach.MoveNext() | Out-Null
|
||||
$htReplace.$letter = $foreach.Current
|
||||
}
|
||||
$pattern = "[$(-join $htReplace.Keys)]"
|
||||
|
||||
$returnStateOK = 0
|
||||
$returnStateWarning = 1
|
||||
$returnStateCritical = 2
|
||||
$returnStateUnknown = 3
|
||||
$returnStatePendingReboot = $returnStateWarning
|
||||
$returnStateOptionalUpdates = $returnStateWarning
|
||||
|
||||
$updateCacheFile = "check_windows_updates-cache.xml"
|
||||
$updateCacheExpireHours = "24"
|
||||
|
||||
$logFile = "check_windows_update.log"
|
||||
|
||||
function LogLine( [String]$logFile = $(Throw 'LogLine:$logFile unspecified'),
|
||||
[String]$row = $(Throw 'LogLine:$row unspecified')) {
|
||||
$logDateTime = Get-Date -Format 'yyyy-MM-dd HH:mm:ss'
|
||||
Add-Content -Encoding UTF8 $logFile ($logDateTime + " - " + $row)
|
||||
}
|
||||
|
||||
if (Test-Path "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired"){
|
||||
Write-Host "updates installed, reboot required"
|
||||
if (Test-Path $logFile) {
|
||||
Remove-Item $logFile | Out-Null
|
||||
}
|
||||
if (Test-Path $updateCacheFile) {
|
||||
Remove-Item $updateCacheFile | Out-Null
|
||||
}
|
||||
exit $returnStatePendingReboot
|
||||
}
|
||||
|
||||
if (-not (Test-Path $updateCacheFile)) {
|
||||
LogLine -logFile $logFile -row ("$updateCacheFile not found, creating....")
|
||||
$updateSession = new-object -com "Microsoft.Update.Session"
|
||||
$updates=$updateSession.CreateupdateSearcher().Search(("IsInstalled=0 and Type='Software'")).Updates
|
||||
Export-Clixml -InputObject $updates -Encoding UTF8 -Path $updateCacheFile
|
||||
}
|
||||
|
||||
if ((Get-Date) -gt ((Get-Item $updateCacheFile).LastWriteTime.AddHours($updateCacheExpireHours))) {
|
||||
LogLine -logFile $logFile -row ("update cache expired, updating....")
|
||||
$updateSession = new-object -com "Microsoft.Update.Session"
|
||||
$updates=$updateSession.CreateupdateSearcher().Search(("IsInstalled=0 and Type='Software'")).Updates
|
||||
Export-Clixml -InputObject $updates -Encoding UTF8 -Path $updateCacheFile
|
||||
} else {
|
||||
LogLine -logFile $logFile -row ("using valid cache file....")
|
||||
$updates = Import-Clixml $updateCacheFile
|
||||
}
|
||||
|
||||
$criticalTitles = "";
|
||||
$countCritical = 0;
|
||||
$countOptional = 0;
|
||||
$countHidden = 0;
|
||||
|
||||
if ($updates.Count -eq 0) {
|
||||
Write-Host "OK - no pending updates.|critical=$countCritical;optional=$countOptional;hidden=$countHidden"
|
||||
exit $returnStateOK
|
||||
}
|
||||
|
||||
foreach ($update in $updates) {
|
||||
if ($update.IsHidden) {
|
||||
$countHidden++
|
||||
}
|
||||
elseif ($update.AutoSelectOnWebSites) {
|
||||
$criticalTitles += $update.Title + " `n"
|
||||
$countCritical++
|
||||
} else {
|
||||
$countOptional++
|
||||
}
|
||||
}
|
||||
if (($countCritical + $countOptional) -gt 0) {
|
||||
$returnString = "Updates: $countCritical critical, $countOptional optional" + [Environment]::NewLine + "$criticalTitles"
|
||||
$returnString = [regex]::Replace($returnString, $pattern, { $htReplace[$args[0].value] })
|
||||
|
||||
# 1024 chars max, reserving 48 chars for performance data ->
|
||||
if ($returnString.length -gt 976) {
|
||||
Write-Host ($returnString.SubString(0,975) + "|critical=$countCritical;optional=$countOptional;hidden=$countHidden")
|
||||
} else {
|
||||
Write-Host ($returnString + "|critical=$countCritical;optional=$countOptional;hidden=$countHidden")
|
||||
}
|
||||
}
|
||||
|
||||
#if ($countCritical -gt 0 -or $countOptional -gt 0) {
|
||||
# Start-Process "wuauclt.exe" -ArgumentList "/detectnow" -WindowStyle Hidden
|
||||
#}
|
||||
|
||||
if ($countCritical -gt 0) {
|
||||
exit $returnStateCritical
|
||||
}
|
||||
|
||||
if ($countOptional -gt 0) {
|
||||
exit $returnStateOptionalUpdates
|
||||
}
|
||||
|
||||
if ($countHidden -gt 0) {
|
||||
Write-Host "OK - $countHidden hidden updates.|critical=$countCritical;optional=$countOptional;hidden=$countHidden"
|
||||
exit $returnStateOK
|
||||
}
|
||||
|
||||
Write-Host "UNKNOWN script state"
|
||||
exit $returnStateUnknown
|
149
bibliotheque/files/nsclient/scripts/check_windows_updates.wsf
Normal file
149
bibliotheque/files/nsclient/scripts/check_windows_updates.wsf
Normal file
@ -0,0 +1,149 @@
|
||||
<job>
|
||||
<runtime>
|
||||
<description>
|
||||
Name:
|
||||
check_windows_updates (nrpe_nt-plugin) 1.5 based on check_msupdates (nrpe_nt-plugin) 1.0
|
||||
|
||||
License:
|
||||
The nagios plugins come with ABSOLUTELY NO WARRANTY. You may redistribute
|
||||
copies of the plugins under the terms of the GNU General Public License.
|
||||
For more information about these matters, see the file named COPYING.
|
||||
|
||||
Changelog / Contributors:
|
||||
2009 May - Albrecht Dress (albrecht.dress@arcor.de)
|
||||
2007 June - Micha Jankowski (fooky@pjwstk.edu.pl)
|
||||
|
||||
</description>
|
||||
<named
|
||||
name="h"
|
||||
helpstring="Help"
|
||||
type="simple"
|
||||
required="false"
|
||||
/>
|
||||
<named
|
||||
name="w"
|
||||
helpstring="number of updates before warning status"
|
||||
type="string"
|
||||
required="false"
|
||||
/>
|
||||
<named
|
||||
name="c"
|
||||
helpstring="number of updates before critical status "
|
||||
type="string"
|
||||
required="false"
|
||||
/>
|
||||
</runtime>
|
||||
<script language="VBScript">
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
' Const's and Var's
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
'Cons for return val's
|
||||
Const intOK = 0
|
||||
Const intWarning = 1
|
||||
Const intCritical = 2
|
||||
Const intUnknown = 3
|
||||
|
||||
' Cons for FSO
|
||||
Const ForReading = 1
|
||||
Const ForWriting = 2
|
||||
|
||||
Dim updatesNames
|
||||
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
' Params
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
If Wscript.Arguments.Named.Exists("h") Then
|
||||
Wscript.Echo "Usage: check_windows_updates.wsf /w:1 /c:2"
|
||||
Wscript.Echo "/w: - number of updates before warning status "
|
||||
Wscript.Echo "/c: - number of updates before critical status "
|
||||
End If
|
||||
|
||||
If Wscript.Arguments.Named.Exists("w") Then
|
||||
intWarningLvl = Cint(Wscript.Arguments.Named("w"))
|
||||
Else
|
||||
intWarningLvl = 0
|
||||
End If
|
||||
|
||||
If Wscript.Arguments.Named.Exists("c") Then
|
||||
intCriticLvl = Cint(Wscript.Arguments.Named("c"))
|
||||
Else
|
||||
intCriticLvl = 0
|
||||
End If
|
||||
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
' Main
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
Set objAutoUpdate = CreateObject("Microsoft.Update.AutoUpdate")
|
||||
|
||||
intResultDetect = objAutoUpdate.DetectNow
|
||||
If intResultDetect = 0 Then
|
||||
Else
|
||||
WScript.Echo "WARNING: Unable to detect Automatic Updates."
|
||||
Wscript.Quit(intUnknown)
|
||||
End If
|
||||
|
||||
Set objSession = CreateObject("Microsoft.Update.Session")
|
||||
Set objSearcher = objSession.CreateUpdateSearcher
|
||||
|
||||
|
||||
intImportant = 0
|
||||
intOptional = 0
|
||||
|
||||
Set objSysInfo = CreateObject("Microsoft.Update.SystemInfo")
|
||||
If objSysInfo.RebootRequired Then
|
||||
Wscript.Echo "Reboot required."
|
||||
Wscript.Quit(intWarning)
|
||||
End If
|
||||
|
||||
Set result = objSearcher.Search("IsInstalled = 0 and IsHidden = 0")
|
||||
Set colDownloads = result.Updates
|
||||
|
||||
For Each objEntry in colDownloads
|
||||
if objEntry.AutoSelectOnWebSites Then
|
||||
if intImportant = 0 Then
|
||||
importantNames = objEntry.Title
|
||||
else
|
||||
importantNames = importantNames & "; " & objEntry.Title
|
||||
End If
|
||||
intImportant = intImportant + 1
|
||||
Else
|
||||
If intOptional = 0 Then
|
||||
optionalNames = objEntry.Title
|
||||
Else
|
||||
optionalNames = optionalNames & "; " & objEntry.Title
|
||||
End If
|
||||
intOptional = intOptional + 1
|
||||
End If
|
||||
Next
|
||||
|
||||
If intImportant + intOptional > 0 Then
|
||||
echoStr = "Updates: " & intImportant & " important, " & intOptional & " optional|"
|
||||
If intImportant > 0 Then
|
||||
echoStr = echoStr & "Important: " & importantNames
|
||||
If intOptional > 0 Then
|
||||
echoStr = echoStr & " (note: optional updates not listed)"
|
||||
End If
|
||||
Else
|
||||
echoStr = echoStr & "Optional: " & optionalNames
|
||||
End If
|
||||
WScript.Echo echoStr
|
||||
|
||||
If intImportant > intCriticLvl Then
|
||||
Wscript.Quit(intCritical)
|
||||
End If
|
||||
|
||||
If intImportant > intWarningLvl Then
|
||||
Wscript.Quit(intWarning)
|
||||
End If
|
||||
Wscript.Quit(intOK)
|
||||
Else
|
||||
WScript.Echo "No updates waiting or installing."
|
||||
Wscript.Quit(intOK)
|
||||
End If
|
||||
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
' End
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
</script>
|
||||
</job>
|
||||
|
@ -0,0 +1,2 @@
|
||||
|
||||
|
402
bibliotheque/files/nsclient/scripts/lib/NagiosPlugins.vbs
Normal file
402
bibliotheque/files/nsclient/scripts/lib/NagiosPlugins.vbs
Normal file
@ -0,0 +1,402 @@
|
||||
' Author: Mattias Ryrl<72>n (mr@op5.com)
|
||||
' Website: http://www.op5.com
|
||||
' Created 2008-10-26
|
||||
' Updated 2008-10-26
|
||||
' Description: Class to ease the output from nagios plugins
|
||||
|
||||
' Exitcodes
|
||||
Const OK = 0
|
||||
Const WARNING = 1
|
||||
Const CRITICAL = 2
|
||||
Const UNKNOWN = 3
|
||||
|
||||
Dim return_code ' String to set exitcode
|
||||
Dim msg ' Output string for your check
|
||||
Dim np ' Object to be used with NagiosPlugin Class
|
||||
Dim threshold_warning
|
||||
Dim threshold_critical
|
||||
Dim Args ' List to hold your arguments
|
||||
Dim Timeout
|
||||
Dim ArgList() ' Array for your arguments to be used
|
||||
Dim tmpArgList()
|
||||
|
||||
' Default values
|
||||
return_code = UNKNOWN
|
||||
threshold_warning = "N/A"
|
||||
threshold_critical = "N/A"
|
||||
Timeout = 10
|
||||
ReDim tmpArgList(2)
|
||||
|
||||
' Create alias for arguments method
|
||||
Set Args = WScript.Arguments.Named
|
||||
|
||||
Class NagiosPlugin
|
||||
Public Function nagios_exit (msg, return_code)
|
||||
' Function to exit the plugin with text and exitcode
|
||||
If return_code = 0 Then
|
||||
msg = "OK: " & msg
|
||||
End If
|
||||
|
||||
If return_code = 1 Then
|
||||
msg = "WARNING: " & msg
|
||||
End If
|
||||
|
||||
If return_code = 2 Then
|
||||
msg = "CRITICAL: " & msg
|
||||
End If
|
||||
|
||||
If return_code >= 3 Then
|
||||
msg = "UNKNOWN: " & msg
|
||||
End If
|
||||
Wscript.Echo msg
|
||||
Wscript.Quit(return_code)
|
||||
End Function
|
||||
|
||||
Public Function add_perfdata (label, value, unit, threshold)
|
||||
' Adds perfdata to the output
|
||||
End Function
|
||||
|
||||
Public Function parse_args ()
|
||||
' Start the real parsing to see if we meet all required arguments needed.
|
||||
totalArg = UBound(ArgList)
|
||||
parse_args = 1
|
||||
|
||||
i = 0
|
||||
Do While i < totalArg
|
||||
If ArgList(i,2) = 1 Then
|
||||
If Not Args.Exists(ArgList(i,0)) Then
|
||||
parse_args = 0
|
||||
End If
|
||||
End If
|
||||
i = i + 1
|
||||
Loop
|
||||
End Function
|
||||
|
||||
Public Function add_arg (parameter, help, required)
|
||||
' Add an argument to be used, make it required or optional.
|
||||
totalArg = UBound(tmpArgList)
|
||||
|
||||
if tmpArgList(2) <> "" Then
|
||||
totalArg = totalArg + 3
|
||||
ReDim Preserve tmpArgList(totalArg)
|
||||
|
||||
tmpArgList(totalArg - 2) = parameter
|
||||
tmpArgList(totalArg - 1) = help
|
||||
tmpArgList(totalArg) = required
|
||||
Else
|
||||
tmpArgList(0) = parameter
|
||||
tmpArgList(1) = help
|
||||
tmpArgList(2) = required
|
||||
End If
|
||||
|
||||
Erase ArgList
|
||||
ReDim ArgList(Round(totalArg / 3), 2)
|
||||
|
||||
i = 0
|
||||
subi = 0
|
||||
For Each arg In tmpArgList
|
||||
ArgList(i, subi) = arg
|
||||
If subi >= 2 Then
|
||||
subi = 0
|
||||
i = i + 1
|
||||
Else
|
||||
subi = subi + 1
|
||||
End if
|
||||
Next
|
||||
End Function
|
||||
|
||||
Public Function set_thresholds (warning, critical)
|
||||
' Simple function to set warning/critical thresholds
|
||||
threshold_warning = warning
|
||||
threshold_critical = critical
|
||||
End Function
|
||||
|
||||
Public Function get_threshold (threshold)
|
||||
' Simple function to return the warning and critical threshold
|
||||
If LCase(threshold) = "warning" Then
|
||||
get_threshold = threshold_warning
|
||||
End IF
|
||||
|
||||
If LCase(threshold) = "critical" Then
|
||||
get_threshold = threshold_critical
|
||||
End If
|
||||
End Function
|
||||
|
||||
Public Function escalate_check_threshold (current, value)
|
||||
result = check_threshold (value)
|
||||
escalate_check_threshold = escalate(current, result)
|
||||
End Function
|
||||
|
||||
Public Function escalate (current, newValue)
|
||||
If newValue > current Then
|
||||
escalate = newValue
|
||||
Else
|
||||
escalate = current
|
||||
End If
|
||||
End Function
|
||||
|
||||
|
||||
|
||||
Public Function get_threshold_perfdat(string)
|
||||
|
||||
Dim cintw0
|
||||
Dim cintw
|
||||
Dim x
|
||||
Dim colon
|
||||
|
||||
cintw0=get_threshold(string)
|
||||
x=Replace(cintw0,"~","")
|
||||
cintw0=Replace(x,"@","")
|
||||
|
||||
colon=Instr(cintw0,":")
|
||||
|
||||
If (colon > 1) Then
|
||||
cintw=Left(cintw0,colon-1)
|
||||
Else
|
||||
If (colon=1) Then
|
||||
cintw=Mid(cintw0,2)
|
||||
Else
|
||||
cintw=cintw0
|
||||
End If
|
||||
End If
|
||||
|
||||
get_threshold_perfdat=cintw
|
||||
|
||||
End Function
|
||||
|
||||
|
||||
Public Function check_threshold (value)
|
||||
' Verify the thresholds for warning and critical
|
||||
' Return 0 if ok (don't generate alert)
|
||||
' Return 1 if within warning (generate alert)
|
||||
' Return 2 if within critical (generate alert)
|
||||
|
||||
'Option Range definition Generate an alert if x...
|
||||
'1 10 < 0 or > 10, (outside the range of {0 .. 10})
|
||||
'2 10: < 10, (outside {10 .. infinity})
|
||||
'3 ~:10 > 10, (outside the range of {-infinity .. 10})
|
||||
'4 10:20 < 10 or > 20, (outside the range of {10 .. 20})
|
||||
'5 @10:20 >= 10 and <= 20, (inside the range of {10 .. 20})
|
||||
|
||||
check_threshold = 0
|
||||
|
||||
value = CDbl(value)
|
||||
|
||||
Set re = New RegExp
|
||||
re.IgnoreCase = True
|
||||
|
||||
' Option 1
|
||||
re.Pattern = "^[0-9]+$"
|
||||
If re.Test(get_threshold("warning")) Then
|
||||
warning_nr = parse_range(get_threshold("warning"), value, 1)
|
||||
End If
|
||||
If re.Test(get_threshold("critical")) Then
|
||||
critical_nr = parse_range(get_threshold("critical"), value, 1)
|
||||
End If
|
||||
|
||||
' Option 2
|
||||
re.Pattern = "^[0-9]+:$"
|
||||
If re.Test(get_threshold("warning")) Then
|
||||
warning_nr = parse_range(get_threshold("warning"), value, 2)
|
||||
End If
|
||||
If re.Test(get_threshold("critical")) Then
|
||||
critical_nr = parse_range(get_threshold("critical"), value, 2)
|
||||
End If
|
||||
|
||||
' Option 3
|
||||
re.Pattern = "^~:[0-9]+$"
|
||||
If re.Test(get_threshold("warning")) Then
|
||||
warning_nr = parse_range(get_threshold("warning"), value, 3)
|
||||
End If
|
||||
If re.Test(get_threshold("critical")) Then
|
||||
critical_nr = parse_range(get_threshold("critical"), value, 3)
|
||||
End If
|
||||
|
||||
' Option 4
|
||||
re.Pattern = "^[0-9]+:[0-9]+$"
|
||||
If re.Test(get_threshold("warning")) Then
|
||||
warning_nr = parse_range(get_threshold("warning"), value, 4)
|
||||
End If
|
||||
If re.Test(get_threshold("critical")) Then
|
||||
critical_nr = parse_range(get_threshold("critical"), value, 4)
|
||||
End If
|
||||
|
||||
' Option 5
|
||||
re.Pattern = "^@[0-9]+:[0-9]+$"
|
||||
If re.Test(get_threshold("warning")) Then
|
||||
warning_nr = parse_range(get_threshold("warning"), value, 5)
|
||||
End If
|
||||
If re.Test(get_threshold("critical")) Then
|
||||
critical_nr = parse_range(get_threshold("critical"), value, 5)
|
||||
End If
|
||||
|
||||
If warning_nr > 0 And critical_nr < 1 Then
|
||||
check_threshold = 1
|
||||
End If
|
||||
|
||||
If critical_nr > 0 Then
|
||||
check_threshold = 2
|
||||
End if
|
||||
|
||||
'Wscript.Echo "warning/critical: " & warning_nr & "/" & critical_nr
|
||||
End Function
|
||||
|
||||
Private Function parse_range (threshold, value, myOpt)
|
||||
|
||||
'Option Range definition Generate an alert if x...
|
||||
'1 10 < 0 or > 10, (outside the range of {0 .. 10})
|
||||
'2 10: < 10, (outside {10 .. infinity})
|
||||
'3 ~:10 > 10, (outside the range of {-infinity .. 10})
|
||||
'4 10:20 < 10 or > 20, (outside the range of {10 .. 20})
|
||||
'5 @10:20 >= 10 and <= 20, (inside the range of {10 .. 20})
|
||||
|
||||
parse_range = 3
|
||||
|
||||
Set re = New RegExp
|
||||
re.IgnoreCase = True
|
||||
|
||||
' Make sure that "value" is of type Integer
|
||||
value = CDbl(value)
|
||||
|
||||
Select Case myOpt
|
||||
' Generate an alert if x ...
|
||||
Case 1
|
||||
' outside the range of 0-threshold
|
||||
re.Pattern = "^([0-9]+)$"
|
||||
Set threshold = re.Execute(threshold)
|
||||
|
||||
If value < 0 Or value > CDbl(threshold(0)) Then
|
||||
parse_range = 1
|
||||
Else
|
||||
parse_range = 0
|
||||
End If
|
||||
|
||||
Case 2
|
||||
' outside value -> iinfinity
|
||||
re.Pattern = "^([0-9]+):$"
|
||||
Set threshold = re.Execute(threshold)
|
||||
|
||||
|
||||
For Each thres In threshold
|
||||
'Wscript.Echo "SubMatches(0): " & thres.SubMatches(0) & " val: " & value
|
||||
If value < CDbl(thres.SubMatches(0)) Then
|
||||
parse_range = 1
|
||||
Else
|
||||
parse_range = 0
|
||||
End If
|
||||
Next
|
||||
|
||||
|
||||
|
||||
Case 3
|
||||
' outside the range infinity <- value
|
||||
re.Pattern = "^~:([0-9]+)$"
|
||||
Set threshold = re.Execute(threshold)
|
||||
|
||||
For Each thres In threshold
|
||||
If value > CDbl(thres.SubMatches(0)) Then
|
||||
parse_range = 1
|
||||
Else
|
||||
parse_range = 0
|
||||
End If
|
||||
Next
|
||||
|
||||
|
||||
Case 4
|
||||
' outside the range of value:value
|
||||
re.Pattern = "^([0-9]+):([0-9]+)$"
|
||||
Set threshold = re.Execute(threshold)
|
||||
|
||||
For Each thres In threshold
|
||||
If value < CDbl(thres.SubMatches(0)) Or value > CDbl(thres.SubMatches(1)) Then
|
||||
parse_range = 1
|
||||
Else
|
||||
parse_range = 0
|
||||
End If
|
||||
Next
|
||||
|
||||
Case 5
|
||||
re.Pattern = "^@([0-9]+):([0-9]+)$"
|
||||
Set threshold = re.Execute(threshold)
|
||||
|
||||
For Each thres In threshold
|
||||
If value >= CDbl(thres.SubMatches(0)) And value <= CDbl(thres.SubMatches(1)) Then
|
||||
'Wscript.Echo "Bigger than " & thres.SubMatches(0) & " and smaller than " & thres.SubMatches(1)
|
||||
parse_range = 1
|
||||
Else
|
||||
parse_range = 0
|
||||
End If
|
||||
Next
|
||||
|
||||
End Select
|
||||
End Function
|
||||
|
||||
Public Function Usage
|
||||
' Print the usage output, automaticly build by the add_arg functions.
|
||||
i = 0
|
||||
r = 0
|
||||
o = 0
|
||||
|
||||
Dim reqArgLong()
|
||||
Dim optArgLong()
|
||||
Dim value
|
||||
|
||||
Do While i <= Ubound(ArgList)
|
||||
|
||||
If ArgList(i,0) <> "" Then
|
||||
ReDim Preserve reqArgLong(r)
|
||||
ReDim Preserve optArgLong(o)
|
||||
value = "<value>"
|
||||
|
||||
If Args.Exists(ArgList(i,0)) Then
|
||||
value = Args(ArgList(i,0))
|
||||
End If
|
||||
|
||||
If ArgList(i,2) = 1 Then
|
||||
reqArgShort = reqArgShort & "/" & ArgList(i, 0) & ":" & value & " "
|
||||
reqArgLong(r) = tabilize("/" & ArgList(i, 0), ArgList(i, 1))
|
||||
r = r + 1
|
||||
Else
|
||||
optArgShort = optArgShort & "[/" & ArgList(i, 0) & ":" & value & "] "
|
||||
optArgLong(o) = tabilize("[/" & ArgList(i, 0) & "]", ArgList(i, 1))
|
||||
o = o + 1
|
||||
End If
|
||||
End If
|
||||
i = i + 1
|
||||
Loop
|
||||
Wscript.Echo "Usage: " & PROGNAME & " " & reqArgShort & optArgShort
|
||||
Wscript.Echo ""
|
||||
|
||||
i = 0
|
||||
Do While i <= Ubound(reqArgLong)
|
||||
Wscript.Echo reqArgLong(i)
|
||||
i = i + 1
|
||||
Loop
|
||||
|
||||
i = 0
|
||||
Do While i <= Ubound(optArgLong)
|
||||
Wscript.Echo optArgLong(i)
|
||||
i = i + 1
|
||||
Loop
|
||||
|
||||
Wscript.Quit(UNKNOWN)
|
||||
End Function
|
||||
|
||||
Private Function tabilize (name, txt)
|
||||
' Add some space to make it pretty
|
||||
MaxWith = 30
|
||||
command = Len(name)
|
||||
MaxWith = MaxWith - command
|
||||
|
||||
tabilize = name & space(MaxWith) & txt
|
||||
End Function
|
||||
|
||||
Public Function simple_WMI_CIMV2(strComputer, strQuery)
|
||||
Const wbemFlagReturnImmediately = &h10
|
||||
Const wbemFlagForwardOnly = &h20
|
||||
|
||||
Set objWMIService = GetObject( "winmgmts://" & strComputer & "/root/CIMV2" )
|
||||
Set simple_WMI_CIMV2 = objWMIService.ExecQuery(strQuery, "WQL", wbemFlagReturnImmediately + wbemFlagForwardOnly )
|
||||
' Set simple_WMI_CIMV2 = objWMIService.ExecQuery(strQuery, "WQL")
|
||||
End Function
|
||||
End Class
|
54
bibliotheque/files/nsclient/scripts/lib/wrapper.vbs
Normal file
54
bibliotheque/files/nsclient/scripts/lib/wrapper.vbs
Normal file
@ -0,0 +1,54 @@
|
||||
' =========================================================
|
||||
' Example file of setting up a script to use NagiosPlugin.vbs as base.
|
||||
' =========================================================
|
||||
' Setting up usage of NagiosPlugin Class
|
||||
Dim ret
|
||||
ret = includeFile("scripts\lib\NagiosPlugins.vbs")
|
||||
If ret(0) <> 0 Then
|
||||
WScript.Echo "Failed to load core: " & ret(0) & " - " & ret(1)
|
||||
Wscript.Quit(3)
|
||||
End If
|
||||
|
||||
' If we have no args or arglist contains /help or not all of the required arguments are fulfilled show the usage output,.
|
||||
If WScript.Arguments.Count = 0 Then
|
||||
WScript.Echo "Usage: cscript //NOLOGO [c-script options] wrapper.vbs <script.vbs> [...]" & WScript.Arguments.Count
|
||||
Wscript.Quit(3)
|
||||
End If
|
||||
|
||||
ret = includeFile(WScript.Arguments(0))
|
||||
If ret(0) <> 0 Then
|
||||
WScript.Echo "Failed to run script: " & ret(0) & " - " & ret(1)
|
||||
Wscript.Quit(3)
|
||||
End If
|
||||
|
||||
Function includeFile (file)
|
||||
Dim oFSO, oFile, strFile, eFileName
|
||||
|
||||
Set oFSO = CreateObject ("Scripting.FileSystemObject")
|
||||
eFileName = file
|
||||
If Not oFSO.FileExists(eFileName) Then
|
||||
eFileName = oFSO.getFolder(".") & "\" & file
|
||||
If Not oFSO.FileExists(eFileName) Then
|
||||
eFileName = oFSO.getFolder(".") & "\scripts\" & file
|
||||
If Not oFSO.FileExists(eFileName) Then
|
||||
includeFile = Array("1", "The specified file " & file & " does not exist")
|
||||
Exit Function
|
||||
End If
|
||||
End If
|
||||
End If
|
||||
On Error Resume Next
|
||||
Err.Clear
|
||||
|
||||
Set oFile = oFSO.OpenTextFile(eFileName)
|
||||
If Err.Number <> 0 Then
|
||||
includeFile = Array(Err.Number, "The specified file " & file & " could not be opened for reading.")
|
||||
Exit Function
|
||||
End If
|
||||
On Error GoTo 0
|
||||
strFile = oFile.ReadAll
|
||||
oFile.close
|
||||
Set oFso = Nothing
|
||||
Set oFile = Nothing
|
||||
ExecuteGlobal strFile
|
||||
includeFile = Array("0", "")
|
||||
End Function
|
39
bibliotheque/files/nsclient/scripts/lua/check_cpu_ex.lua
Normal file
39
bibliotheque/files/nsclient/scripts/lua/check_cpu_ex.lua
Normal file
@ -0,0 +1,39 @@
|
||||
function install()
|
||||
-- Used to install this script
|
||||
local conf = nscp.Settings()
|
||||
conf:set_string('/modules', 'CheckSystem', 'enabled')
|
||||
conf:set_string('/modules', 'CheckHelpers', 'enabled')
|
||||
conf:set_string('/modules', 'LUAScript', 'enabled')
|
||||
conf:set_string('/settings/lua/scripts', 'check_cpu_ex', 'check_cpu_ex')
|
||||
conf:save()
|
||||
end
|
||||
|
||||
function setup()
|
||||
-- register our function
|
||||
local reg = nscp.Registry()
|
||||
reg:simple_query('check_cpu_ex', check_cpu_ex, 'Check CPU version which returns top consumers')
|
||||
end
|
||||
|
||||
function check_cpu_ex(command, arguments)
|
||||
local core = nscp.Core()
|
||||
cpu_result, cpu_message, cpu_perf = core:simple_query('check_cpu', arguments)
|
||||
if cpu_result == 'UNKNOWN' then
|
||||
core:log('error', string.format('Invalid return from check_cpu: %s', cpu_result))
|
||||
return cpu_result, cpu_message, cpu_perf
|
||||
end
|
||||
-- Status is good, lets execute check_process and filter_perf.
|
||||
proc_result, proc_message, proc_perf = core:simple_query('filter_perf', {'command=check_process', 'sort=normal', 'limit=5', 'arguments', 'delta=true', 'warn=time>0', 'filter=time>0'})
|
||||
return cpu_result, 'Top preformers: ' .. proc_perf, cpu_perf
|
||||
end
|
||||
|
||||
setup()
|
||||
|
||||
function main(args)
|
||||
cmd = args[0] or ''
|
||||
if cmd == 'install' then
|
||||
install()
|
||||
return 'ok', 'Script installed'
|
||||
else
|
||||
return 'error', 'Usage: .. install'
|
||||
end
|
||||
end
|
29
bibliotheque/files/nsclient/scripts/lua/default_check_mk.lua
Normal file
29
bibliotheque/files/nsclient/scripts/lua/default_check_mk.lua
Normal file
@ -0,0 +1,29 @@
|
||||
|
||||
function client_process(packet)
|
||||
cnt = packet:size_section()
|
||||
nscp.print('Got packets: ' .. cnt)
|
||||
for i = 1, cnt do
|
||||
s = packet:get_section(i)
|
||||
ln = s:size_line()
|
||||
sz = s:get_title()
|
||||
nscp.print(' + ' .. ln .. ': ' .. sz)
|
||||
for j = 1, s:size_line() do
|
||||
ln = s:get_line(j)
|
||||
nscp.print(' + ' .. ln:get_line())
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
function server_process(packet)
|
||||
s = nscp.section()
|
||||
s:set_title("check_mk")
|
||||
s:add_line("Version: 0.0.1")
|
||||
s:add_line("Agent: nsclient++")
|
||||
s:add_line("AgentOS: Windows")
|
||||
packet:add_section(s)
|
||||
return true
|
||||
end
|
||||
|
||||
reg = nscp.check_mk()
|
||||
reg:client_callback(client_process)
|
||||
reg:server_callback(server_process)
|
162
bibliotheque/files/nsclient/scripts/lua/lib/test_helper.lua
Normal file
162
bibliotheque/files/nsclient/scripts/lua/lib/test_helper.lua
Normal file
@ -0,0 +1,162 @@
|
||||
-----------------------------------------------------------------------------
|
||||
-- Imports and dependencies
|
||||
-----------------------------------------------------------------------------
|
||||
local math = require('math')
|
||||
local os = require('os')
|
||||
local string = require('string')
|
||||
local table = require('table')
|
||||
local nscp = require('nscp')
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- Module declaration
|
||||
-----------------------------------------------------------------------------
|
||||
module("test_helper", package.seeall)
|
||||
local valid_chars = {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
|
||||
"0","1","2","3","4","5","6","7","8","9","-"}
|
||||
local core = nscp.Core()
|
||||
|
||||
math.randomseed(os.time())
|
||||
|
||||
function random(len) -- args: smallest and largest possible password lengths, inclusive
|
||||
pass = {}
|
||||
for z = 1,len do
|
||||
case = math.random(1,2)
|
||||
a = math.random(1,#valid_chars)
|
||||
if case == 1 then
|
||||
x=string.upper(valid_chars[a])
|
||||
elseif case == 2 then
|
||||
x=string.lower(valid_chars[a])
|
||||
end
|
||||
table.insert(pass, x)
|
||||
end
|
||||
return(table.concat(pass))
|
||||
end
|
||||
string.random = random
|
||||
|
||||
function status_to_int(status)
|
||||
if status == 'ok' then
|
||||
return 0
|
||||
elseif status == 'warn' then
|
||||
return 1
|
||||
elseif status == 'crit' then
|
||||
return 2
|
||||
elseif status == 'unknown' then
|
||||
return 3
|
||||
else
|
||||
core:log('error', "Unknown status: "..status)
|
||||
return 3
|
||||
end
|
||||
end
|
||||
|
||||
TestResult = { status = true; children = {} }
|
||||
function TestResult:new(o)
|
||||
o = o or {}
|
||||
o["children"] = o["children"] or {}
|
||||
if o["status"] == nil then o["status"] = true end
|
||||
setmetatable(o, self)
|
||||
self.__index = self
|
||||
return o
|
||||
|
||||
end
|
||||
function TestResult:add(result)
|
||||
if not result then
|
||||
error("invalid result")
|
||||
end
|
||||
if not result.status then self.status = false end
|
||||
table.insert(self.children,result)
|
||||
end
|
||||
function TestResult:add_message(result, message)
|
||||
table.insert(self.children,TestResult:new{status=result, message=message})
|
||||
end
|
||||
function TestResult:assert_equals(a, b, message)
|
||||
if a==b then
|
||||
table.insert(self.children,TestResult:new{status=true, message=message})
|
||||
else
|
||||
table.insert(self.children,TestResult:new{status=false, message=message..': '..tostring(a)..' != '..tostring(b)})
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
function TestResult:print(indent)
|
||||
indent = indent or 0
|
||||
local pad = string.rep(' ', indent)
|
||||
if self.status then
|
||||
core:log("info", pad .. "[OK ] - " .. self.message)
|
||||
else
|
||||
core:log("error", pad .. "[ERR] - " .. self.message)
|
||||
end
|
||||
if # self.children > 0 then
|
||||
for i,v in ipairs(self.children) do v:print(indent+2) end
|
||||
end
|
||||
end
|
||||
|
||||
function TestResult:print_failed(indent)
|
||||
indent = indent or 0
|
||||
local pad = string.rep(' ', indent)
|
||||
if not self.status then
|
||||
core:log("error", pad .. "[ERR] - " .. self.message)
|
||||
end
|
||||
if # self.children > 0 then
|
||||
for i,v in ipairs(self.children) do v:print_failed(indent+2) end
|
||||
end
|
||||
end
|
||||
|
||||
function TestResult:count()
|
||||
local ok = 0
|
||||
local err = 0
|
||||
if self.status then
|
||||
ok = ok + 1
|
||||
else
|
||||
err = err + 1
|
||||
end
|
||||
if # self.children > 0 then
|
||||
for i,v in ipairs(self.children) do
|
||||
local lok, lerr = v:count()
|
||||
ok = ok + lok
|
||||
err = err + lerr
|
||||
end
|
||||
end
|
||||
return ok, err
|
||||
end
|
||||
|
||||
|
||||
|
||||
function TestResult:get_nagios()
|
||||
local ok, err = self:count()
|
||||
if not self.status then
|
||||
return 'crit', tostring(err)..' test cases failed', ''
|
||||
else
|
||||
return 'ok', tostring(ok)..' test cases succeded', ''
|
||||
end
|
||||
end
|
||||
|
||||
local test_cases = {}
|
||||
function install_test_manager(cases)
|
||||
test_cases = cases
|
||||
for i=1,# test_cases do
|
||||
test_cases[i]:install({})
|
||||
end
|
||||
return 'ok'
|
||||
end
|
||||
|
||||
local test_cases = {}
|
||||
function init_test_manager(cases)
|
||||
test_cases = cases
|
||||
local reg = nscp.Registry()
|
||||
reg:simple_query('lua_unittest', lua_unittest_handler, 'TODO')
|
||||
end
|
||||
|
||||
function lua_unittest_handler(command, args)
|
||||
local result = TestResult:new{message='Running testsuite'}
|
||||
for i=1,# test_cases do
|
||||
local case_result = TestResult:new{message='Running testsuite'}
|
||||
test_cases[i]:setup()
|
||||
case_result:add(test_cases[i]:run())
|
||||
test_cases[i]:teardown()
|
||||
result:add(case_result)
|
||||
end
|
||||
result:print()
|
||||
core:log("info", "--//Failed tests//---")
|
||||
result:print_failed()
|
||||
return result:get_nagios()
|
||||
end
|
9
bibliotheque/files/nsclient/scripts/lua/noperf.lua
Normal file
9
bibliotheque/files/nsclient/scripts/lua/noperf.lua
Normal file
@ -0,0 +1,9 @@
|
||||
function no_perf(command, args)
|
||||
local core = nscp.Core()
|
||||
nscp.print ('Uhmmn, 111')
|
||||
code, msg, perf = core:simple_query('check_uptime', args)
|
||||
nscp.print ('Uhmmn, 22')
|
||||
return code, msg, ''
|
||||
end
|
||||
local reg = nscp.Registry()
|
||||
reg:simple_query('check_uptime_no_perf', no_perf, 'Wrapped check-uptime which does not yield performance data')
|
55
bibliotheque/files/nsclient/scripts/lua/test.lua
Normal file
55
bibliotheque/files/nsclient/scripts/lua/test.lua
Normal file
@ -0,0 +1,55 @@
|
||||
nscp.print('Loading test script...')
|
||||
|
||||
v = nscp.getSetting('NSCA Agent', 'interval', 'broken')
|
||||
nscp.print('value: ' .. v)
|
||||
|
||||
function test_func_query(command, args)
|
||||
nscp.print('Inside function (query): ' .. command)
|
||||
return 'ok', 'whoops 001', ''
|
||||
end
|
||||
|
||||
function test_func_exec(command, args)
|
||||
nscp.print('Inside function (exec): ' .. command)
|
||||
return 'ok', 'whoops 002'
|
||||
end
|
||||
|
||||
function test_func_submission(command, args)
|
||||
nscp.print('Inside function (exec): ' .. command)
|
||||
return 'ok'
|
||||
end
|
||||
|
||||
|
||||
nscp.execute('version')
|
||||
|
||||
local reg = Registry()
|
||||
reg:simple_function('lua_test', test_func_query, 'this is a command')
|
||||
reg:simple_cmdline('lua_test', test_func_exec)
|
||||
reg:simple_subscription('lua_test', test_func_submission)
|
||||
|
||||
local settings = Settings()
|
||||
|
||||
str = settings:get_string('/settings/lua/scripts', 'testar', 'FOO BAR')
|
||||
nscp.print('Value: (FOO BAR): ' .. str)
|
||||
settings:set_string('/settings/lua/scripts', 'testar', 'BAR FOO')
|
||||
str = settings:get_string('/settings/lua/scripts', 'testar', 'FOO BAR')
|
||||
nscp.print('Value: (BAR FOO): ' .. str)
|
||||
i = settings:get_int('/settings/lua/scripts', 'testar', 123)
|
||||
nscp.print('Value: (123): ' .. i)
|
||||
settings:set_int('/settings/lua/scripts', 'testar', 456)
|
||||
i = settings:get_int('/settings/lua/scripts', 'testar', 789)
|
||||
nscp.print('Value: (456): ' .. i)
|
||||
|
||||
local core = Core()
|
||||
code, msg, perf = core:simple_query('lua_test', {'a', 'b', 'c'})
|
||||
nscp.print('Value: (query): ' .. code)
|
||||
nscp.print('Value: (query): ' .. msg)
|
||||
nscp.print('Value: (query): ' .. perf)
|
||||
code, msgs = core:simple_exec('*', 'lua_test', {'a', 'b', 'c'})
|
||||
nscp.print('Value: (exec): ' .. code)
|
||||
for msg in pairs(msgs) do
|
||||
nscp.print('Value: (exec): ' .. msg)
|
||||
end
|
||||
code, msg = core:simple_submit('lua_test', 'test_lua', 'ok', 'foo', '')
|
||||
nscp.print('Value: (submit): ' .. code)
|
||||
nscp.print('Value: (submit): ' .. msg)
|
||||
|
311
bibliotheque/files/nsclient/scripts/lua/test_ext_script.lua
Normal file
311
bibliotheque/files/nsclient/scripts/lua/test_ext_script.lua
Normal file
@ -0,0 +1,311 @@
|
||||
test = require("test_helper")
|
||||
|
||||
TestExtScript = {
|
||||
requests = {},
|
||||
responses = {}
|
||||
}
|
||||
function TestExtScript:install(arguments)
|
||||
local conf = nscp.Settings()
|
||||
|
||||
conf:set_string('/modules', 'test_extscripts', 'CheckExternalScripts')
|
||||
conf:set_string('/modules', 'luatest', 'LUAScript')
|
||||
|
||||
conf:set_string('/settings/luatest/scripts', 'test_nrpe', 'test_nrpe.lua')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'port', '15666')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'inbox', 'nrpe_test_inbox')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'encryption', '1')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets', 'nrpe_test_local', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client', 'channel', 'nrpe_test_outbox')
|
||||
--conf:save()
|
||||
end
|
||||
|
||||
function TestExtScript:setup()
|
||||
local reg = nscp.Registry()
|
||||
reg:simple_query('check_py_nrpe_test_s', self, self.simple_handler, 'TODO')
|
||||
--reg:query('check_py_nrpe_test', self, self.handler, 'TODO')
|
||||
end
|
||||
function TestExtScript:teardown()
|
||||
end
|
||||
|
||||
function TestExtScript:uninstall()
|
||||
end
|
||||
|
||||
function TestExtScript:help()
|
||||
end
|
||||
|
||||
function TestExtScript:init(plugin_id)
|
||||
end
|
||||
|
||||
function TestExtScript:shutdown()
|
||||
end
|
||||
|
||||
function TestExtScript:has_response(id)
|
||||
return self.responses[id]
|
||||
end
|
||||
|
||||
function TestExtScript:get_response(id)
|
||||
msg = self.requests[id]
|
||||
if msg == nil then
|
||||
msg = TestMessage
|
||||
msg.uuid=id
|
||||
self.responses[id] = msg
|
||||
return msg
|
||||
end
|
||||
return msg
|
||||
end
|
||||
|
||||
function TestExtScript:set_response(msg)
|
||||
self.responses[msg.uuid] = msg
|
||||
end
|
||||
|
||||
function TestExtScript:del_response(id)
|
||||
self.responses[id] = nil
|
||||
end
|
||||
|
||||
function TestExtScript:get_request(id)
|
||||
msg = self.requests[id]
|
||||
if msg == nil then
|
||||
msg = TestMessage
|
||||
msg.uuid=id
|
||||
self.requests[id] = msg
|
||||
return msg
|
||||
end
|
||||
return msg
|
||||
end
|
||||
|
||||
function TestExtScript:set_request(msg)
|
||||
self.requests[msg.uuid] = msg
|
||||
end
|
||||
|
||||
function TestExtScript:del_request(id)
|
||||
self.requests[id] = nil
|
||||
end
|
||||
|
||||
function TestExtScript:simple_handler(command, args)
|
||||
local core = nscp.Core()
|
||||
msg = self:get_response(args[0])
|
||||
msg.got_simple_response = true
|
||||
self:set_response(msg)
|
||||
message = rmsg.message
|
||||
if args[1] then
|
||||
message = string.rep('x', args[1])
|
||||
end
|
||||
rmsg = self:get_request(args[0])
|
||||
return rmsg.status, message, rmsg.perfdata
|
||||
end
|
||||
|
||||
function TestExtScript:handler(req)
|
||||
local msg = self:get_response(args[0])
|
||||
msg.got_response = true
|
||||
self:set_response(msg)
|
||||
end
|
||||
|
||||
function TestExtScript:submit_payload(tag, ssl, length, payload_length, source, status, message, perf, target)
|
||||
local core = nscp.Core()
|
||||
local result = test.TestResult:new{message='Testing NRPE: '..tag..' for '..target}
|
||||
|
||||
local msg = protobuf.Plugin.QueryRequestMessage.new()
|
||||
hdr = msg:get_header()
|
||||
hdr:set_recipient_id(target)
|
||||
hdr:set_command('nrpe_forward')
|
||||
host = hdr:add_hosts()
|
||||
host:set_address("127.0.0.1:15666")
|
||||
host:set_id(target)
|
||||
if target == 'valid' then
|
||||
else
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("use ssl")
|
||||
enc:set_value(tostring(ssl))
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("payload length")
|
||||
enc:set_value(tostring(length))
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("timeout")
|
||||
enc:set_value('10')
|
||||
end
|
||||
|
||||
uid = string.random(12)
|
||||
payload = msg:add_payload()
|
||||
payload:set_command('check_py_nrpe_test_s')
|
||||
payload:set_arguments(1, uid)
|
||||
if payload_length ~= 0 then
|
||||
payload:set_arguments(2, payload_length)
|
||||
end
|
||||
rmsg = self:get_request(uid)
|
||||
rmsg.status = status
|
||||
rmsg.message = message
|
||||
rmsg.perfdata = perf
|
||||
self:set_request(rmsg)
|
||||
serialized = msg:serialized()
|
||||
result_code, response = core:query(serialized)
|
||||
response_message = protobuf.Plugin.QueryResponseMessage.parsefromstring(response)
|
||||
|
||||
|
||||
found = False
|
||||
for i = 0,10 do
|
||||
if (self:has_response(uid)) then
|
||||
rmsg = self:get_response(uid)
|
||||
--#result.add_message(rmsg.got_response, 'Testing to recieve message using %s'%tag)
|
||||
result:add_message(rmsg.got_simple_response, 'Testing to recieve simple message using '..tag)
|
||||
result:add_message(response_message:size_payload() == 1, 'Verify that we only get one payload response for '..tag)
|
||||
pl = response_message:get_payload(1)
|
||||
result:assert_equals(pl:get_result(), test.status_to_int(status), 'Verify that status is sent through '..tag)
|
||||
if payload_length == 0 then
|
||||
result:assert_equals(pl:get_message(), rmsg.message, 'Verify that message is sent through '..tag)
|
||||
else
|
||||
max_len = payload_length
|
||||
if max_len >= length then
|
||||
max_len = length - 1
|
||||
end
|
||||
result:assert_equals(string.len(pl:get_message()), max_len, 'Verify that message length is correct ' .. max_len .. ': ' ..tag)
|
||||
end
|
||||
--#result.assert_equals(rmsg.perfdata, perf, 'Verify that performance data is sent through')
|
||||
self:del_response(uid)
|
||||
found = true
|
||||
break
|
||||
else
|
||||
core:log('info', string.format('Waiting for %s (%s/%s)', uid,tag,target))
|
||||
nscp.sleep(500)
|
||||
end
|
||||
end
|
||||
if (not found) then
|
||||
result:add_message(false, string.format('Failed to find message %s using %s', uid, tag))
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
function TestExtScript:test_one(ssl, length, payload_length, status)
|
||||
tag = string.format("%s/%d/%s", tostring(ssl), length, status)
|
||||
local result = test.TestResult:new{message=string.format('Testing: %s with various targets', tag)}
|
||||
for k,t in pairs({'valid', 'test_rp', 'invalid'}) do
|
||||
result:add(self:submit_payload(tag, ssl, length, payload_length, tag .. 'src' .. tag, status, tag .. 'msg' .. tag, '', t))
|
||||
end
|
||||
return result
|
||||
end
|
||||
|
||||
function TestExtScript:do_one_test(ssl, length)
|
||||
if ssl == nil then ssl = true end
|
||||
length = length or 1024
|
||||
|
||||
local conf = nscp.Settings()
|
||||
local core = nscp.Core()
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'payload length', length)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'use ssl', ssl)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'allow arguments', true)
|
||||
core:reload('test_nrpe_server')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/default', 'address', 'nrpe://127.0.0.1:35666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/default', 'use ssl', not ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'payload length', length*3)
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/invalid', 'address', 'nrpe://127.0.0.1:25666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/invalid', 'use ssl', not ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/invalid', 'payload length', length*2)
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/valid', 'address', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/valid', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/valid', 'payload length', length)
|
||||
core:reload('test_nrpe_client')
|
||||
|
||||
local result = test.TestResult:new{message="Testing "..tostring(ssl)..", "..tostring(length)}
|
||||
result:add(self:test_one(ssl, length, 0, 'unknown'))
|
||||
result:add(self:test_one(ssl, length, 0, 'ok'))
|
||||
result:add(self:test_one(ssl, length, 0, 'warn'))
|
||||
result:add(self:test_one(ssl, length, 0, 'crit'))
|
||||
result:add(self:test_one(ssl, length, length/2, 'ok'))
|
||||
result:add(self:test_one(ssl, length, length, 'ok'))
|
||||
result:add(self:test_one(ssl, length, length*2, 'ok'))
|
||||
return result
|
||||
end
|
||||
|
||||
function TestExtScript:test_timeout(ssl, server_timeout, client_timeout, length)
|
||||
|
||||
local conf = nscp.Settings()
|
||||
local core = nscp.Core()
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'timeout', server_timeout)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'allow arguments', true)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'payload length', length)
|
||||
core:reload('test_nrpe_server')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/default', 'address', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/default', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'timeout', client_timeout)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'payload length', length)
|
||||
|
||||
core:reload('test_nrpe_client')
|
||||
|
||||
local result = test.TestResult:new{message="Testing timeouts ssl: "..tostring(ssl)..", server: "..tostring(server_timeout)..", client: "..tostring(client_timeout)}
|
||||
|
||||
local msg = protobuf.Plugin.QueryRequestMessage.new()
|
||||
hdr = msg:get_header()
|
||||
hdr:set_recipient_id('test')
|
||||
host = hdr:add_hosts()
|
||||
host:set_address("127.0.0.1:15666")
|
||||
host:set_id('test')
|
||||
meta = hdr:add_metadata()
|
||||
meta:set_key("command")
|
||||
meta:set_value('check_py_nrpe_test_s')
|
||||
meta = hdr:add_metadata()
|
||||
meta:set_key("retry")
|
||||
meta:set_value('0')
|
||||
|
||||
uid = string.random(12)
|
||||
payload = msg:add_payload()
|
||||
payload:set_command('nrpe_forward')
|
||||
payload:set_arguments(1, uid)
|
||||
rmsg = self:get_request(uid)
|
||||
rmsg.status = 'ok'
|
||||
rmsg.message = 'Hello: Timeout'
|
||||
rmsg.perfdata = ''
|
||||
self:set_request(rmsg)
|
||||
serialized = msg:serialized()
|
||||
result_code, response = core:query(serialized)
|
||||
response_message = protobuf.Plugin.QueryResponseMessage.parsefromstring(response)
|
||||
|
||||
|
||||
found = False
|
||||
for i = 0,10 do
|
||||
if (self:has_response(uid)) then
|
||||
rmsg = self:get_response(uid)
|
||||
result:add_message(false, string.format('Testing to recieve message using'))
|
||||
self:del_response(uid)
|
||||
found = true
|
||||
break
|
||||
else
|
||||
core:log('error', string.format('Timeout waiting for %s', uid))
|
||||
--sleep(500)
|
||||
end
|
||||
end
|
||||
if (found) then
|
||||
result:add_message(false, string.format('Making sure timeout message was never delivered'))
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
function TestExtScript:run()
|
||||
local result = test.TestResult:new{message="NRPE Test Suite"}
|
||||
result:add(self:do_one_test(true, 1024))
|
||||
result:add(self:do_one_test(false, 1024))
|
||||
result:add(self:do_one_test(true, 4096))
|
||||
result:add(self:do_one_test(true, 65536))
|
||||
result:add(self:do_one_test(true, 1048576))
|
||||
|
||||
result:add(self:test_timeout(false, 30, 1, 1048576000))
|
||||
result:add(self:test_timeout(false, 1, 30, 1048576000))
|
||||
result:add(self:test_timeout(true, 30, 1, 1048576000))
|
||||
result:add(self:test_timeout(true, 1, 30, 1048576000))
|
||||
return result
|
||||
end
|
||||
|
||||
|
||||
instances = { TestNRPE }
|
||||
test.init_test_manager(instances)
|
||||
|
||||
function main(args)
|
||||
return test.install_test_manager(instances)
|
||||
end
|
328
bibliotheque/files/nsclient/scripts/lua/test_nrpe.lua
Normal file
328
bibliotheque/files/nsclient/scripts/lua/test_nrpe.lua
Normal file
@ -0,0 +1,328 @@
|
||||
test = require("test_helper")
|
||||
|
||||
TestMessage = {
|
||||
uuid = nil,
|
||||
source = nil,
|
||||
command = nil,
|
||||
status = nil,
|
||||
message = nil,
|
||||
perfdata = nil,
|
||||
got_simple_response = false,
|
||||
got_response = false
|
||||
}
|
||||
|
||||
TestNRPE = {
|
||||
requests = {},
|
||||
responses = {}
|
||||
}
|
||||
function TestNRPE:install(arguments)
|
||||
local conf = nscp.Settings()
|
||||
|
||||
conf:set_string('/modules', 'test_nrpe_server', 'NRPEServer')
|
||||
conf:set_string('/modules', 'test_nrpe_client', 'NRPEClient')
|
||||
conf:set_string('/modules', 'luatest', 'LUAScript')
|
||||
|
||||
conf:set_string('/settings/luatest/scripts', 'test_nrpe', 'test_nrpe.lua')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'port', '15666')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'inbox', 'nrpe_test_inbox')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'encryption', '1')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_server', 'insecure', 'true')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets', 'nrpe_test_local', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/default', 'insecure', 'true')
|
||||
end
|
||||
|
||||
function TestNRPE:setup()
|
||||
local reg = nscp.Registry()
|
||||
reg:simple_query('check_py_nrpe_test_s', self, self.simple_handler, 'TODO')
|
||||
--reg:query('check_py_nrpe_test', self, self.handler, 'TODO')
|
||||
end
|
||||
function TestNRPE:teardown()
|
||||
end
|
||||
|
||||
function TestNRPE:uninstall()
|
||||
end
|
||||
|
||||
function TestNRPE:help()
|
||||
end
|
||||
|
||||
function TestNRPE:init(plugin_id)
|
||||
end
|
||||
|
||||
function TestNRPE:shutdown()
|
||||
end
|
||||
|
||||
function TestNRPE:has_response(id)
|
||||
return self.responses[id]
|
||||
end
|
||||
|
||||
function TestNRPE:get_response(id)
|
||||
msg = self.requests[id]
|
||||
if msg == nil then
|
||||
msg = TestMessage
|
||||
msg.uuid=id
|
||||
self.responses[id] = msg
|
||||
return msg
|
||||
end
|
||||
return msg
|
||||
end
|
||||
|
||||
function TestNRPE:set_response(msg)
|
||||
self.responses[msg.uuid] = msg
|
||||
end
|
||||
|
||||
function TestNRPE:del_response(id)
|
||||
self.responses[id] = nil
|
||||
end
|
||||
|
||||
function TestNRPE:get_request(id)
|
||||
msg = self.requests[id]
|
||||
if msg == nil then
|
||||
msg = TestMessage
|
||||
msg.uuid=id
|
||||
self.requests[id] = msg
|
||||
return msg
|
||||
end
|
||||
return msg
|
||||
end
|
||||
|
||||
function TestNRPE:set_request(msg)
|
||||
self.requests[msg.uuid] = msg
|
||||
end
|
||||
|
||||
function TestNRPE:del_request(id)
|
||||
self.requests[id] = nil
|
||||
end
|
||||
|
||||
function TestNRPE:simple_handler(command, args)
|
||||
local core = nscp.Core()
|
||||
msg = self:get_response(args[0])
|
||||
msg.got_simple_response = true
|
||||
self:set_response(msg)
|
||||
message = rmsg.message
|
||||
if args[1] then
|
||||
message = string.rep('x', args[1])
|
||||
end
|
||||
rmsg = self:get_request(args[0])
|
||||
return rmsg.status, message, rmsg.perfdata
|
||||
end
|
||||
|
||||
function TestNRPE:handler(req)
|
||||
local msg = self:get_response(args[0])
|
||||
msg.got_response = true
|
||||
self:set_response(msg)
|
||||
end
|
||||
|
||||
function TestNRPE:submit_payload(tag, ssl, length, payload_length, source, status, message, perf, target)
|
||||
local core = nscp.Core()
|
||||
local result = test.TestResult:new{message='Testing NRPE: '..tag..' for '..target}
|
||||
|
||||
local msg = protobuf.Plugin.QueryRequestMessage.new()
|
||||
hdr = msg:get_header()
|
||||
hdr:set_destination_id(target)
|
||||
hdr:set_command("nrpe_forward")
|
||||
host = hdr:add_hosts()
|
||||
host:set_address("127.0.0.1:15666")
|
||||
host:set_id(target)
|
||||
if target == 'valid' then
|
||||
else
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("use ssl")
|
||||
enc:set_value(tostring(ssl))
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("payload length")
|
||||
enc:set_value(tostring(length))
|
||||
enc = host:add_metadata()
|
||||
enc:set_key("timeout")
|
||||
enc:set_value('10')
|
||||
end
|
||||
|
||||
uid = string.random(12)
|
||||
payload = msg:add_payload()
|
||||
payload:set_command('check_py_nrpe_test_s')
|
||||
payload:set_arguments(1, uid)
|
||||
if payload_length ~= 0 then
|
||||
payload:set_arguments(2, payload_length)
|
||||
end
|
||||
rmsg = self:get_request(uid)
|
||||
rmsg.status = status
|
||||
rmsg.message = message
|
||||
rmsg.perfdata = perf
|
||||
self:set_request(rmsg)
|
||||
serialized = msg:serialized()
|
||||
result_code, response = core:query(serialized)
|
||||
response_message = protobuf.Plugin.QueryResponseMessage.parsefromstring(response)
|
||||
|
||||
|
||||
found = False
|
||||
for i = 0,10 do
|
||||
if (self:has_response(uid)) then
|
||||
rmsg = self:get_response(uid)
|
||||
--#result.add_message(rmsg.got_response, 'Testing to recieve message using %s'%tag)
|
||||
result:add_message(rmsg.got_simple_response, 'Testing to recieve simple message using '..tag)
|
||||
result:add_message(response_message:size_payload() == 1, 'Verify that we only get one payload response for '..tag)
|
||||
pl = response_message:get_payload(1)
|
||||
result:assert_equals(pl:get_result(), test.status_to_int(status), 'Verify that status is sent through '..tag)
|
||||
if payload_length == 0 then
|
||||
l = pl:get_lines(1)
|
||||
result:assert_equals(l:get_message(), rmsg.message, 'Verify that message is sent through '..tag)
|
||||
else
|
||||
max_len = payload_length
|
||||
if max_len >= length then
|
||||
max_len = length - 1
|
||||
end
|
||||
l = pl:get_lines(1)
|
||||
result:assert_equals(string.len(l:get_message()), max_len, 'Verify that message length is correct ' .. max_len .. ': ' ..tag)
|
||||
end
|
||||
--#result.assert_equals(rmsg.perfdata, perf, 'Verify that performance data is sent through')
|
||||
self:del_response(uid)
|
||||
found = true
|
||||
break
|
||||
else
|
||||
core:log('info', string.format('Waiting for %s (%s/%s)', uid,tag,target))
|
||||
nscp.sleep(500)
|
||||
end
|
||||
end
|
||||
if (not found) then
|
||||
result:add_message(false, string.format('Failed to find message %s using %s', uid, tag))
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
function TestNRPE:test_one(ssl, length, payload_length, status)
|
||||
tag = string.format("%s/%d/%s", tostring(ssl), length, status)
|
||||
local result = test.TestResult:new{message=string.format('Testing: %s with various targets', tag)}
|
||||
for k,t in pairs({'valid', 'test_rp', 'invalid'}) do
|
||||
result:add(self:submit_payload(tag, ssl, length, payload_length, tag .. 'src' .. tag, status, tag .. 'msg' .. tag, '', t))
|
||||
end
|
||||
result:add(self:submit_payload(tag, ssl, length, payload_length, tag .. 'src' .. tag, status, tag .. 'msg' .. tag, '', 'valid'))
|
||||
return result
|
||||
end
|
||||
|
||||
function TestNRPE:do_one_test(ssl, length)
|
||||
if ssl == nil then ssl = true end
|
||||
length = length or 1024
|
||||
|
||||
local conf = nscp.Settings()
|
||||
local core = nscp.Core()
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'payload length', length)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'use ssl', ssl)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'allow arguments', true)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'extended response', false)
|
||||
core:reload('test_nrpe_server')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/default', 'address', 'nrpe://127.0.0.1:35666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/default', 'ssl', not ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'payload length', length*3)
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/invalid', 'address', 'nrpe://127.0.0.1:25666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/invalid', 'use ssl', not ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/invalid', 'payload length', length*2)
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/valid', 'address', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/valid', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/valid', 'payload length', length)
|
||||
core:reload('test_nrpe_client')
|
||||
|
||||
local result = test.TestResult:new{message="Testing "..tostring(ssl)..", "..tostring(length)}
|
||||
result:add(self:test_one(ssl, length, 0, 'unknown'))
|
||||
result:add(self:test_one(ssl, length, 0, 'ok'))
|
||||
result:add(self:test_one(ssl, length, 0, 'warn'))
|
||||
result:add(self:test_one(ssl, length, 0, 'crit'))
|
||||
result:add(self:test_one(ssl, length, length/2, 'ok'))
|
||||
result:add(self:test_one(ssl, length, length, 'ok'))
|
||||
result:add(self:test_one(ssl, length, length*2, 'ok'))
|
||||
return result
|
||||
end
|
||||
|
||||
function TestNRPE:test_timeout(ssl, server_timeout, client_timeout, length)
|
||||
|
||||
local conf = nscp.Settings()
|
||||
local core = nscp.Core()
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'timeout', server_timeout)
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_server', 'allow arguments', true)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_server', 'payload length', length)
|
||||
core:reload('test_nrpe_server')
|
||||
|
||||
conf:set_string('/settings/NRPE/test_nrpe_client/targets/default', 'address', 'nrpe://127.0.0.1:15666')
|
||||
conf:set_bool('/settings/NRPE/test_nrpe_client/targets/default', 'use ssl', ssl)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'timeout', client_timeout)
|
||||
conf:set_int('/settings/NRPE/test_nrpe_client/targets/default', 'payload length', length)
|
||||
|
||||
core:reload('test_nrpe_client')
|
||||
|
||||
local result = test.TestResult:new{message="Testing timeouts ssl: "..tostring(ssl)..", server: "..tostring(server_timeout)..", client: "..tostring(client_timeout)}
|
||||
|
||||
local msg = protobuf.Plugin.QueryRequestMessage.new()
|
||||
hdr = msg:get_header()
|
||||
hdr:set_destination_id('test')
|
||||
host = hdr:add_hosts()
|
||||
host:set_address("127.0.0.1:15666")
|
||||
host:set_id('test')
|
||||
meta = hdr:add_metadata()
|
||||
meta:set_key("command")
|
||||
meta:set_value('check_py_nrpe_test_s')
|
||||
meta = hdr:add_metadata()
|
||||
meta:set_key("retry")
|
||||
meta:set_value('0')
|
||||
|
||||
uid = string.random(12)
|
||||
payload = msg:add_payload()
|
||||
payload:set_command('nrpe_forward')
|
||||
payload:set_arguments(1, uid)
|
||||
rmsg = self:get_request(uid)
|
||||
rmsg.status = 'ok'
|
||||
rmsg.message = 'Hello: Timeout'
|
||||
rmsg.perfdata = ''
|
||||
self:set_request(rmsg)
|
||||
serialized = msg:serialized()
|
||||
result_code, response = core:query(serialized)
|
||||
response_message = protobuf.Plugin.QueryResponseMessage.parsefromstring(response)
|
||||
|
||||
|
||||
found = False
|
||||
for i = 0,10 do
|
||||
if (self:has_response(uid)) then
|
||||
rmsg = self:get_response(uid)
|
||||
result:add_message(false, string.format('Testing to recieve message using'))
|
||||
self:del_response(uid)
|
||||
found = true
|
||||
break
|
||||
else
|
||||
core:log('error', string.format('Timeout waiting for %s', uid))
|
||||
nscp.sleep(500)
|
||||
end
|
||||
end
|
||||
if (found) then
|
||||
result:add_message(false, string.format('Making sure timeout message was never delivered'))
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
function TestNRPE:run()
|
||||
local result = test.TestResult:new{message="NRPE Test Suite"}
|
||||
result:add(self:do_one_test(true, 1024))
|
||||
result:add(self:do_one_test(false, 1024))
|
||||
result:add(self:do_one_test(true, 4096))
|
||||
result:add(self:do_one_test(true, 65536))
|
||||
result:add(self:do_one_test(true, 1048576))
|
||||
|
||||
result:add(self:test_timeout(false, 30, 1, 104857600))
|
||||
result:add(self:test_timeout(false, 1, 30, 104857600))
|
||||
result:add(self:test_timeout(true, 30, 1, 104857600))
|
||||
result:add(self:test_timeout(true, 1, 30, 104857600))
|
||||
nscp.sleep(500)
|
||||
return result
|
||||
end
|
||||
|
||||
|
||||
instances = { TestNRPE }
|
||||
test.init_test_manager(instances)
|
||||
|
||||
function main(args)
|
||||
return test.install_test_manager(instances)
|
||||
end
|
230
bibliotheque/files/nsclient/scripts/op5/check_ad.vbs
Normal file
230
bibliotheque/files/nsclient/scripts/op5/check_ad.vbs
Normal file
@ -0,0 +1,230 @@
|
||||
'Script to check the status of a DOMAIN controller and report to Nagios
|
||||
'requires DCDIAG.EXE
|
||||
'Author: Felipe Ferreira
|
||||
'Version: 3.0
|
||||
'
|
||||
'Mauled over by John Jore, j-o-h-n-a-t-j-o-r-e-d-o-t-n-o 16/11/2010 to work on W2K8, x32
|
||||
'as well as remove some, eh... un-needed lines of code, general optimization as well as adding command parameter support
|
||||
'This is where i found the original script, http://felipeferreira.net/?p=315&cpage=1#comments
|
||||
'Tested by JJ on W2K8 SP2, x86
|
||||
' W2K3 R2 SP2, x64
|
||||
'Version 3.0-JJ-V0.2
|
||||
'Todo: Proper error handling
|
||||
' Add /help parameter
|
||||
' Add support for the two tests which require additional input (dcpromo is one such test)
|
||||
'Version 3.0-JJ-V0.3
|
||||
' Removed some surplus language detection code
|
||||
' Including non-working English test on a W2K8 x32 DC
|
||||
' Added support for multi-partition checks like 'crossrefvalidation'. Previously the last status result would mask previous failures
|
||||
' Incorporated Jonathan Vogt's german and multiline tests
|
||||
|
||||
|
||||
'Force all variables to be declared before usage
|
||||
option explicit
|
||||
|
||||
'Array for name and status (Ugly, but redim only works on last dimension, and can't set initial size if redim
|
||||
dim name(), status()
|
||||
redim preserve name(0)
|
||||
redim preserve status(0)
|
||||
redim preserve lock(0)
|
||||
|
||||
'Debug switch
|
||||
dim verbose : verbose = 0
|
||||
|
||||
'Return variables for NAGIOS
|
||||
const intOK = 0
|
||||
const intWarning = 1 'Not used. What dcdiag test would be warning instead of critical?
|
||||
const intCritical = 2
|
||||
const intUnknown = 3
|
||||
|
||||
'Lang dependend. Default is english
|
||||
dim strOK : strOK = "passed"
|
||||
dim strNotOK : strNotOk = "failed"
|
||||
|
||||
'Call dcdiag and grab relevant output
|
||||
exec(cmd)
|
||||
|
||||
'Generate NAGIOS compatible output from dcdiag
|
||||
printout()
|
||||
|
||||
'call dcdiag and parse the output
|
||||
sub exec(strCmd)
|
||||
'Declare variables
|
||||
dim objShell : Set objShell = WScript.CreateObject("WScript.Shell")
|
||||
dim objExecObject, lineout, tmpline
|
||||
lineout = ""
|
||||
'Command line options we're using
|
||||
pt strCmd
|
||||
|
||||
Set objExecObject = objShell.Exec(strCmd)
|
||||
'Loop until end of output from dcdiag
|
||||
do While not objExecObject.StdOut.AtEndOfStream
|
||||
tmpline = lcase(objExecObject.StdOut.ReadLine())
|
||||
|
||||
'Check the version of DCDiag being used and change the global 'passed' / 'failed' strings
|
||||
call DetectLang(tmpline)
|
||||
|
||||
if (instr(tmpline, ".....")) then
|
||||
'testresults start with a couple of dots, reset the lineout buffer
|
||||
lineout= tmpline
|
||||
'pt "lineout buffer '" & lineout & "'"
|
||||
else
|
||||
'Else append the next line to the buffer to capture multiline responses
|
||||
lineout = lineout + tmpline
|
||||
'pt "lineout buffer appended '" & lineout & "'"
|
||||
end if
|
||||
|
||||
if instr(lineout, lcase(strOK)) then
|
||||
'we have a strOK String which means we have reached the end of a result output (maybe on newline)
|
||||
call parse(lineout)
|
||||
lineout = ""
|
||||
end if
|
||||
loop
|
||||
|
||||
'Why call this at the end? Is it not pointless as we've looped through the entire output from dcdiag in the above loop?!?
|
||||
'call parse(lineout)
|
||||
end sub
|
||||
|
||||
|
||||
sub DetectLang(txtp)
|
||||
|
||||
'Change from looking for English words if we find the string below:
|
||||
if (instr(lcase(txtp), lcase("Verzeichnisserverdiagnose"))) then 'German
|
||||
pt "Detected German Language, changing the global test strings to look for"
|
||||
strOK = "bestanden"
|
||||
strNotOk = "nicht bestanden"
|
||||
end if
|
||||
|
||||
end sub
|
||||
|
||||
|
||||
sub parse(txtp)
|
||||
'Parse output of dcdiag command and change state of checks
|
||||
dim loop1
|
||||
|
||||
'Is this really required? Or is it for pretty debug output only?
|
||||
txtp = Replace(txtp,chr(10),"") ' Newline
|
||||
txtp = Replace(txtp,chr(13),"") ' CR
|
||||
txtp = Replace(txtp,chr(9),"") ' Tab
|
||||
do while instr(txtp, " ")
|
||||
txtp = Replace(txtp," "," ") ' Some tidy up
|
||||
loop
|
||||
|
||||
' We have to test twice because some localized (e.g. German) outputs simply use 'not', or 'nicht' as a prefix instead of 'passed' / 'failed'
|
||||
if instr(lcase(txtp), lcase(strOK)) then
|
||||
'What are we testing for now?
|
||||
pt "Checking :" & txtp & "' as it contains '" & strOK & "'"
|
||||
'What services are ok? 'By using instr we don't need to strip down text, remove vbCr, VbLf, or get the hostname
|
||||
for loop1 = 0 to Ubound(name)-1
|
||||
if (instr(lcase(txtp), lcase(name(loop1)))) AND (lock(loop1) = FALSE) then
|
||||
status(loop1)="OK"
|
||||
pt "Set the status for test '" & name(loop1) & "' to '" & status(loop1) & "'"
|
||||
end if
|
||||
next
|
||||
end if
|
||||
|
||||
' if we find the strNotOK string then reset to CRITICAL
|
||||
if instr(lcase(txtp), lcase(strNotOK)) then
|
||||
'What are we testing for now?
|
||||
pt txtp
|
||||
for loop1 = 0 to Ubound(name)-1
|
||||
if (instr(lcase(txtp), lcase(name(loop1)))) then
|
||||
status(loop1)="CRITICAL"
|
||||
'Lock the variable so it can't be reset back to success. Required for multi-partition tests like 'crossrefvalidation'
|
||||
lock(loop1)=TRUE
|
||||
pt "Reset the status for test '" & name(loop1) & "' to '" & status(loop1) & "' with a lock '" & lock(loop1) & "'"
|
||||
end if
|
||||
next
|
||||
end if
|
||||
end sub
|
||||
|
||||
'outputs result for NAGIOS
|
||||
sub printout()
|
||||
dim loop1, msg : msg = ""
|
||||
|
||||
for loop1 = 0 to ubound(name)-1
|
||||
msg = msg & name(loop1) & ": " & status(loop1) & ". "
|
||||
next
|
||||
|
||||
'What state are we in? Show and then quit with NAGIOS compatible exit code
|
||||
if instr(msg,"CRITICAL") then
|
||||
wscript.echo "CRITICAL - " & msg
|
||||
wscript.quit(intCritical)
|
||||
else
|
||||
wscript.echo "OK - " & msg
|
||||
wscript.quit(intOK)
|
||||
end if
|
||||
end sub
|
||||
|
||||
'Print messages to screen for debug purposes
|
||||
sub pt(msgTxt)
|
||||
if verbose then
|
||||
wscript.echo msgTXT
|
||||
end if
|
||||
end sub
|
||||
|
||||
'What tests do we run?
|
||||
function cmd()
|
||||
dim loop1, test, tests
|
||||
const intDefaultTests = 6
|
||||
|
||||
cmd = "dcdiag " 'Start with this
|
||||
|
||||
'If no command line parameters, then go with these defaults
|
||||
if Wscript.Arguments.Count = 0 Then
|
||||
redim preserve name(intDefaultTests)
|
||||
redim preserve status(intDefaultTests)
|
||||
redim preserve lock(intDefaultTests)
|
||||
'Test name
|
||||
name(0) = "services"
|
||||
name(1) = "replications"
|
||||
name(2) = "advertising"
|
||||
name(3) = "fsmocheck"
|
||||
name(4) = "ridmanager"
|
||||
name(5) = "machineaccount"
|
||||
|
||||
'Set default status for each named test
|
||||
for loop1 = 0 to (ubound(name)-1)
|
||||
status(loop1) = "CRITICAL"
|
||||
lock(loop1) = FALSE
|
||||
cmd = cmd & "/test:" & name(loop1) & " "
|
||||
next
|
||||
else
|
||||
'User specified which tests to perform.
|
||||
|
||||
for loop1 = 0 to wscript.arguments.count - 1
|
||||
if (instr(lcase(wscript.Arguments(loop1)), lcase("/test"))) then
|
||||
|
||||
'If parameter is wrong, give some hints
|
||||
if len(wscript.arguments(loop1)) < 6 then
|
||||
wscript.echo "Unknown parameter. Provide name of tests to perform like this:"
|
||||
wscript.echo vbTAB & "'cscript //nologo " & Wscript.ScriptName & " /test:advertising,dfsevent'"
|
||||
wscript.quit(intUnknown)
|
||||
end if
|
||||
|
||||
'Strip down the test to individual items
|
||||
tests = right(wscript.arguments(loop1), len(wscript.arguments(loop1))-6)
|
||||
pt "Tests: '" & tests & "'"
|
||||
|
||||
tests = split(tests,",")
|
||||
for each test in tests
|
||||
cmd = cmd & " /test:" & test
|
||||
|
||||
'Expand the array to make room for one more test
|
||||
redim preserve name(ubound(name)+1)
|
||||
redim preserve status(ubound(status)+1)
|
||||
redim preserve lock(ubound(lock)+1)
|
||||
|
||||
'Store name of test and status
|
||||
name(Ubound(name)-1) = test
|
||||
status(Ubound(status)-1) = "CRITICAL" 'Default status. Change to OK if test is ok
|
||||
lock(Ubound(lock)-1) = FALSE 'Don't lock the variable yet.
|
||||
|
||||
'pt "Contents: " & name(Ubound(name)-1) & " " & status(Ubound(status)-1)
|
||||
next
|
||||
end if
|
||||
next
|
||||
end if
|
||||
'We end up with this to test:
|
||||
pt "Command to run: " & cmd
|
||||
end function
|
101
bibliotheque/files/nsclient/scripts/op5/check_time.vbs
Normal file
101
bibliotheque/files/nsclient/scripts/op5/check_time.vbs
Normal file
@ -0,0 +1,101 @@
|
||||
' Author: Mattias Ryrl<72>n (mr@op5.com)
|
||||
' Website: http://www.op5.com
|
||||
' Created: 2008-09-18
|
||||
' Updated: 2008-10-09
|
||||
' Version: 0.9.1
|
||||
' Description: Check the offset of your server vs your Active Directory Domain.
|
||||
'
|
||||
' Usage cscript /NoLogo check_ad_time.vbs <domain> "<offset>"
|
||||
'
|
||||
' Changelog:
|
||||
'
|
||||
' 0.9.1:
|
||||
' * Fixed timeformat (i think, needs feedback).
|
||||
' * Changed /domain to /computers, still works to use the AD domain. eg domain.com
|
||||
'
|
||||
' 0.9:
|
||||
' Initial Release
|
||||
'
|
||||
|
||||
Err = 3
|
||||
msg = "UNKNOWN"
|
||||
|
||||
Set Args = WScript.Arguments
|
||||
If WScript.Arguments.Count <= 1 Then
|
||||
Usage()
|
||||
Else
|
||||
domain = Args.Item(0)
|
||||
|
||||
offset = Args.Item(1)
|
||||
|
||||
offset = Replace(offset,",",".")
|
||||
|
||||
Set objShell = CreateObject("Wscript.Shell")
|
||||
strCommand = "C:\windows\system32\w32tm.exe /monitor /computers:" & domain
|
||||
set objProc = objShell.Exec(strCommand)
|
||||
|
||||
input = ""
|
||||
strOutput = ""
|
||||
|
||||
Do While Not objProc.StdOut.AtEndOfStream
|
||||
input = objProc.StdOut.ReadLine
|
||||
|
||||
If InStr(input, "NTP") Then
|
||||
strOutput = input
|
||||
End If
|
||||
Loop
|
||||
|
||||
Set myRegExp = New RegExp
|
||||
myRegExp.IgnoreCase = True
|
||||
myRegExp.Global = True
|
||||
myRegExp.Pattern = " NTP: ([+-]+)([0-9]+).([0-9]+)s offset"
|
||||
|
||||
Set myMatches = myRegExp.Execute(strOutput)
|
||||
|
||||
result = ""
|
||||
dir = ""
|
||||
|
||||
For Each myMatch in myMatches
|
||||
If myMatch.SubMatches.Count > 0 Then
|
||||
For I = 0 To myMatch.SubMatches.Count - 1
|
||||
If I = 0 Then
|
||||
dir = myMatch.SubMatches(I)
|
||||
ElseIf I > 1 Then
|
||||
result = result & "." & myMatch.SubMatches(I)
|
||||
Else
|
||||
result = result & myMatch.SubMatches(I)
|
||||
End If
|
||||
Next
|
||||
End If
|
||||
Next
|
||||
|
||||
If Left(dir, 1) = "-" Then
|
||||
result = CDbl(result)
|
||||
Else
|
||||
result = CDbl("-" & result)
|
||||
End If
|
||||
|
||||
If result > CDbl(offset) OR result < -CDbl(offset) Then
|
||||
Err = 2
|
||||
msg = "NTP CRITICAL: Offset " & result & " secs|offset: " & result & ";0;" & Replace(CDbl(offset),",",".") & ";"
|
||||
Else
|
||||
Err = 0
|
||||
msg = "NTP OK: Offset " & result & " secs|offset: " & result & ";0;" & Replace(CDbl(offset),",",".") & ";"
|
||||
End If
|
||||
End If
|
||||
|
||||
Wscript.Echo msg
|
||||
Wscript.Quit(Err)
|
||||
|
||||
Function Usage()
|
||||
Err = 3
|
||||
WScript.Echo "Usage cscript /NoLogo check_ad_time.vbs <domain> ""<offset>"""
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "domain Name of domain to check roles on"
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "offset total number of seconds offset allowed."
|
||||
Wscript.Echo " will check both positive and negative"
|
||||
Wscript.Echo ""
|
||||
Wscript.Echo "Example: cscript /NoLogo check_ad_time.vbs mydomain.com ""0.4"""
|
||||
Wscript.Quit(Err)
|
||||
End Function
|
21
bibliotheque/files/nsclient/scripts/op5/restart_service.ps1
Normal file
21
bibliotheque/files/nsclient/scripts/op5/restart_service.ps1
Normal file
@ -0,0 +1,21 @@
|
||||
# Restart Service Script
|
||||
# Please enable external scripts and external scrips variable before use.
|
||||
|
||||
param (
|
||||
[string[]]$serviceName
|
||||
)
|
||||
Foreach ($Service in $ServiceName)
|
||||
{
|
||||
Restart-Service $ServiceName -ErrorAction SilentlyContinue -ErrorVariable ServiceError
|
||||
If (!$ServiceError) {
|
||||
$Time=Get-Date
|
||||
Write-Host "Restarted service $Service at $Time"
|
||||
}
|
||||
If ($ServiceError) {
|
||||
write-host $error[0]
|
||||
exit 3
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
45
bibliotheque/files/nsclient/scripts/op5/services.vbs
Normal file
45
bibliotheque/files/nsclient/scripts/op5/services.vbs
Normal file
@ -0,0 +1,45 @@
|
||||
' Services.vbs
|
||||
' Script to List running autostarted services
|
||||
' www.computerperformance.co.uk/
|
||||
' Author Guy Thomas http://computerperformance.co.uk/
|
||||
' Version 1.5 December 2005
|
||||
'
|
||||
' Modified by Per Asberg Dec 2010, op5 AB, http://www.op5.com
|
||||
' Modified by Peter Ostlin May 2011, op5 AB, http://www.op5.com
|
||||
' -------------------------------------------------------'
|
||||
Option Explicit
|
||||
Dim icnt, cnt, page, start, objWMIService, objItem, objService, strServiceList
|
||||
Dim colListOfServices, strComputer, strService, Args
|
||||
|
||||
'On Error Resume Next
|
||||
|
||||
' ---------------------------------------------------------
|
||||
|
||||
cnt = 0 ' tot count
|
||||
icnt = 0 ' count listed (returned) services
|
||||
page = 20 ' nr of services to include (pagination)
|
||||
start = 0 ' where to start (pagination)
|
||||
|
||||
Set Args = WScript.Arguments.Named
|
||||
|
||||
If Args.Exists("start") Then start = Cint(Args("start"))
|
||||
|
||||
strComputer = "."
|
||||
Set objWMIService = GetObject("winmgmts:" _
|
||||
& "{impersonationLevel=impersonate}!\\" _
|
||||
& strComputer & "\root\cimv2")
|
||||
Set colListOfServices = objWMIService.ExecQuery _
|
||||
("Select * from Win32_Service WHERE StartMode='auto' AND name != 'NSClientpp'")
|
||||
|
||||
' WMI and VBScript loop
|
||||
For Each objService in colListOfServices
|
||||
If icnt < page AND cnt >= start THEN
|
||||
strServiceList = strServiceList & objService.name & ","
|
||||
icnt = icnt +1
|
||||
End if
|
||||
cnt = cnt + 1
|
||||
Next
|
||||
|
||||
WScript.Echo strServiceList
|
||||
|
||||
' End of WMI script to list services
|
2
bibliotheque/files/nsclient/scripts/powershell.ps1
Normal file
2
bibliotheque/files/nsclient/scripts/powershell.ps1
Normal file
@ -0,0 +1,2 @@
|
||||
write-host "WARN: Everything is not going to be fine!"
|
||||
exit 4
|
18
bibliotheque/files/nsclient/scripts/python/badapp.py
Normal file
18
bibliotheque/files/nsclient/scripts/python/badapp.py
Normal file
@ -0,0 +1,18 @@
|
||||
import threading
|
||||
|
||||
class BadThread(threading.Thread):
|
||||
id = -1
|
||||
def __init__(self, id):
|
||||
self.id = id
|
||||
threading.Thread.__init__(self)
|
||||
|
||||
def run(self):
|
||||
i = 0
|
||||
while(True):
|
||||
i = i + 1
|
||||
if i > 100000:
|
||||
print 'Processing: %d'%self.id
|
||||
i = 0
|
||||
|
||||
for x in xrange(1000):
|
||||
BadThread(x).start()
|
771
bibliotheque/files/nsclient/scripts/python/docs.py
Normal file
771
bibliotheque/files/nsclient/scripts/python/docs.py
Normal file
@ -0,0 +1,771 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
from NSCP import Settings, Registry, Core, log, log_debug, log_error, status
|
||||
import plugin_pb2
|
||||
from optparse import OptionParser
|
||||
from sets import Set
|
||||
import os
|
||||
import traceback
|
||||
#import string
|
||||
from jinja2 import Template, Environment
|
||||
import hashlib
|
||||
helper = None
|
||||
|
||||
module_template = u"""# {{module.key}}
|
||||
|
||||
{{module.info.description}}
|
||||
|
||||
{%if module.ext_desc -%}
|
||||
{{ module.ext_desc}}
|
||||
{%- endif %}
|
||||
|
||||
{% if module.queries -%}
|
||||
|
||||
## List of commands
|
||||
|
||||
A list of all available queries (check commands)
|
||||
{% set table = [] -%}
|
||||
{% for key,query in module.queries|dictsort -%}
|
||||
{% if query.info.description.startswith('Alternative name for:') -%}
|
||||
{% set command = query.info.description[22:] -%}
|
||||
{% do table.append([query.key|md_self_link, command|rst_link('query')]) -%}
|
||||
{%- elif query.info.description.startswith('Alias for:') -%}
|
||||
{% set command = query.info.description[11:] -%}
|
||||
{% do table.append([query.key|md_self_link, command|rst_link('query')]) -%}
|
||||
{%- else -%}
|
||||
{% do table.append([query.key|md_self_link, query.info.description|firstline]) -%}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{{table|rst_table('Command', 'Description')}}
|
||||
{%- endif %}
|
||||
|
||||
{% if module.aliases -%}
|
||||
## List of command aliases
|
||||
|
||||
A list of all short hand aliases for queries (check commands)
|
||||
|
||||
{% set table = [] -%}
|
||||
{% for key,query in module.aliases|dictsort -%}
|
||||
{% if query.info.description.startswith('Alternative name for:') -%}
|
||||
{% set command = query.info.description[22:] -%}
|
||||
{% do table.append([query.key, "Alias for: " + command|rst_link('query')]) -%}
|
||||
{%- elif query.info.description.startswith('Alias for:') -%}
|
||||
{% set command = query.info.description[11:] -%}
|
||||
{% do table.append([query.key, "Alias for: " + command|rst_link('query')]) -%}
|
||||
{%- else -%}
|
||||
{% do table.append([query.key, query.info.description|firstline]) -%}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{{table|rst_table('Command', 'Description')}}
|
||||
{%- endif %}
|
||||
|
||||
{% if module.paths -%}
|
||||
## List of Configuration
|
||||
|
||||
{% set table = [] -%}
|
||||
{% set table_adv = [] -%}
|
||||
{% set table_sam = [] -%}
|
||||
{% for k,path in module.paths|dictsort -%}
|
||||
{% set pkey = path.key|md_self_link -%}
|
||||
{% for k,key in path.keys|dictsort -%}
|
||||
{% set kkey = k|md_prefix_lnk(path.key)|md_self_link(k) -%}
|
||||
{% if key.info.sample -%}
|
||||
{% do table_sam.append([pkey, kkey, key.info.title|firstline]) -%}
|
||||
{%- elif key.info.advanced -%}
|
||||
{% do table_adv.append([pkey, kkey, key.info.title|firstline]) -%}
|
||||
{%- else -%}
|
||||
{% do table.append([pkey, kkey, key.info.title|firstline]) -%}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
{% if table -%}
|
||||
### Common Keys
|
||||
|
||||
{{table|rst_table('Path / Section', 'Key', 'Description')}}
|
||||
{%- endif %}
|
||||
{% if table_adv -%}
|
||||
### Advanced keys
|
||||
|
||||
{{table_adv|rst_table('Path / Section', 'Key', 'Description')}}
|
||||
{%- endif %}
|
||||
{% if table_sam -%}
|
||||
### Sample keys
|
||||
|
||||
{{table_sam|rst_table('Path / Section', 'Key', 'Description')}}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
|
||||
{% if module.sample %}
|
||||
# Usage
|
||||
|
||||
_To edit the usage section please edit [this page](https://github.com/mickem/nscp-docs/blob/master/{{module.sample_source}})_
|
||||
|
||||
{{module.sample}}
|
||||
|
||||
{%- endif %}
|
||||
{% if module.queries -%}
|
||||
|
||||
# Queries
|
||||
|
||||
A quick reference for all available queries (check commands) in the {{module.key}} module.
|
||||
|
||||
{% for k,query in module.queries|dictsort -%}
|
||||
|
||||
## {{query.key}}
|
||||
|
||||
{{query.info.description}}
|
||||
|
||||
{% if query.sample -%}
|
||||
### Usage
|
||||
|
||||
_To edit these sample please edit [this page](https://github.com/mickem/nscp-docs/blob/master/{{query.sample_source}})_
|
||||
|
||||
{{query.sample}}
|
||||
{%- endif %}
|
||||
### Usage
|
||||
|
||||
{% set table = [] -%}
|
||||
{% for help in query.params -%}{% if help.content_type == 4 -%}
|
||||
{% do table.append([help.name|md_prefix_lnk(query.key)|md_self_link(help.name),'N/A', help.long_description|firstline]) %}{% else -%}
|
||||
{% do table.append([help.name|md_prefix_lnk(query.key)|md_self_link(help.name),help.default_value, help.long_description|firstline]) %}{%- endif %}
|
||||
{%- endfor %}
|
||||
{{table|rst_table('Option', 'Default Value', 'Description')}}
|
||||
|
||||
{% for help in query.params -%}
|
||||
<a name="{{help.name|md_prefix_lnk(query.key)}}"/>
|
||||
### {{help.name}}
|
||||
|
||||
{% if help.default_value %}
|
||||
**Deafult Value:** {{help.default_value}}
|
||||
{%- endif %}
|
||||
|
||||
**Description:**
|
||||
{%if help.ext -%}
|
||||
{{help.ext.head}}
|
||||
|
||||
{{help.ext.data|rst_table}}
|
||||
|
||||
{{help.ext.tail}}
|
||||
|
||||
{% else -%}
|
||||
{{help.long_description}}
|
||||
{%- endif %}
|
||||
{{'\n'}}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
|
||||
{% if module.paths -%}
|
||||
# Configuration
|
||||
|
||||
{% for pkey,path in module.paths|dictsort -%}
|
||||
<a name="{{path.key}}"/>
|
||||
## {{path.info.title}}
|
||||
|
||||
{{path.info.description}}
|
||||
|
||||
```ini
|
||||
# {{path.info.description|firstline}}
|
||||
[{{path.key}}]
|
||||
{% for kkey,key in path.keys|dictsort -%}
|
||||
{% if key.info.default_value|extract_value -%}
|
||||
{{kkey}}={{key.info.default_value|extract_value}}
|
||||
{% endif %}
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
{% set tbl = [] -%}
|
||||
{% set pkey = path.key|md_self_link -%}
|
||||
{% for k,key in path.keys|dictsort -%}
|
||||
{% set kkey = k|md_prefix_lnk(path.key)|md_self_link(k) -%}
|
||||
{% do tbl.append([kkey, key.info.default_value|extract_value, key.info.title|firstline]) -%}
|
||||
{%- endfor %}
|
||||
{{tbl|rst_table('Key', 'Default Value', 'Description')}}
|
||||
|
||||
|
||||
{% for kkey,key in path.keys|dictsort %}
|
||||
<a name="{{kkey|md_prefix_lnk(path.key)}}"/>
|
||||
### {{kkey}}
|
||||
|
||||
**{{key.info.title|as_text}}**
|
||||
|
||||
{%if key.ext -%}
|
||||
{{key.ext.head}}
|
||||
|
||||
{{key.ext.data|rst_table}}
|
||||
|
||||
{{key.ext.tail}}
|
||||
|
||||
{% else -%}
|
||||
{{key.info.description|as_text}}
|
||||
{%- endif %}
|
||||
|
||||
{% set table = [] -%}
|
||||
{% do table.append(['Path:', path.key|md_self_link(path.key)]) -%}
|
||||
{% do table.append(['Key:', kkey]) -%}
|
||||
{% if key.info.advanced -%}
|
||||
{% do table.append(['Advanced:', 'Yes (means it is not commonly used)']) -%}
|
||||
{%- endif %}
|
||||
{% if key.info.default_value|extract_value -%}
|
||||
{% do table.append(['Default value:', '`' + key.info.default_value|extract_value + '`']) -%}
|
||||
{% else %}
|
||||
{% do table.append(['Default value:', '_N/A_']) -%}
|
||||
{%- endif %}
|
||||
{% if key.info.sample -%}
|
||||
{% do table.append(['Sample key:', 'Yes (This section is only to show how this key is used)']) -%}
|
||||
{%- endif %}
|
||||
{% do table.append(['Used by:', ', '.join(path.info.plugin|sort)]) -%}
|
||||
{{table|rst_table('Key', 'Description')}}
|
||||
|
||||
#### Sample
|
||||
|
||||
```
|
||||
[{{path.key}}]
|
||||
# {{key.info.title}}
|
||||
{{kkey}}={{key.info.default_value|extract_value}}
|
||||
```
|
||||
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{%- endif %}
|
||||
"""
|
||||
|
||||
|
||||
|
||||
index_template = u"""
|
||||
|
||||
# Modules
|
||||
|
||||
{% set table = [] -%}
|
||||
{% for key,module in plugins|dictsort -%}
|
||||
{% do table.append([module.namespace, ('reference/' + module.namespace + '/' + module.key)|md_link(module.key), module.info.description|firstline]) -%}
|
||||
{%- endfor %}
|
||||
{{table|rst_table('Type', 'Module', 'Description')}}
|
||||
|
||||
# Queries
|
||||
|
||||
{% set table = [] -%}
|
||||
{% for mk,module in plugins|dictsort -%}
|
||||
{% for key,query in module.queries|dictsort -%}
|
||||
{% set desc = query.info.description|firstline %}
|
||||
{% if desc.startswith('Alternative name for:') -%}
|
||||
{% set desc = query.info.description[22:]|rst_link('query') -%}
|
||||
{%- elif desc.startswith('Legacy version of ') -%}
|
||||
{% set desc = '**Deprecated** please use: ' + query.info.description[18:]|rst_link('query') -%}
|
||||
{%- elif query.info.description.startswith('Alias for:') -%}
|
||||
{% set desc = query.info.description[11:]|rst_link('query') -%}
|
||||
{%- endif %}
|
||||
{% do table.append([mk, ('reference/' + module.namespace + '/' + module.key + '#' + query.key)|md_link(query.key), desc]) -%}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
{{table|rst_table('Module', 'Command', 'Description')}}
|
||||
"""
|
||||
|
||||
|
||||
samples_template = u""".. default-domain:: nscp
|
||||
|
||||
.. default-domain:: nscp
|
||||
|
||||
===========
|
||||
All samples
|
||||
===========
|
||||
|
||||
A collection of all sample commands
|
||||
|
||||
{% for mk,module in plugins|dictsort -%}
|
||||
{% set vars = {'found': False} -%}
|
||||
{% for qk,query in module.queries|dictsort -%}
|
||||
{% if query.sample -%}
|
||||
{% if vars.update({'found': True}) -%}{%- endif %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{% if vars.found -%}
|
||||
{{mk|rst_heading('=')}}
|
||||
|
||||
{% for qk,query in module.queries|dictsort -%}
|
||||
{% if query.sample -%}
|
||||
{{qk|rst_heading}}
|
||||
|
||||
{{query.info.description|firstline}}
|
||||
|
||||
.. include:: ../samples/{{query.sample}}
|
||||
|
||||
{% endif %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
"""
|
||||
|
||||
def split_argllist(name, desc):
|
||||
extdata = {}
|
||||
spos = desc.find('\n\n')
|
||||
if spos != -1:
|
||||
epos = desc.find('\n\n', spos+2)
|
||||
if epos != -1:
|
||||
pos = desc.find('\t', spos+2, epos)
|
||||
if pos != -1:
|
||||
extdata['head'] = desc[:spos]
|
||||
extdata['tail'] = desc[epos+2:]
|
||||
data = desc[spos+2:epos]
|
||||
rows = data.split('\n')
|
||||
tbl = []
|
||||
for r in rows:
|
||||
tbl.append(r.split('\t'))
|
||||
extdata['data'] = tbl
|
||||
return extdata
|
||||
|
||||
class root_container(object):
|
||||
paths = {}
|
||||
commands = {}
|
||||
aliases = {}
|
||||
plugins = {}
|
||||
windows_modules = ['CheckSystem', 'CheckDisk', 'NSClientServer', 'DotnetPlugins', 'CheckEventLog', 'CheckTaskSched', 'CheckWMI']
|
||||
unix_modules = ['CheckSystemUnix']
|
||||
check_modules = ['CheckExternalScripts', 'CheckHelpers', 'CheckLogFile', 'CheckMKClient', 'CheckMKServer', 'CheckNSCP']
|
||||
client_modules = ['GraphiteClient', 'NRDPClient', 'NRPEClient', 'NRPEServer', 'NSCAClient', 'NSCAServer', 'NSClientServer', 'SMTPClient', 'SyslogClient']
|
||||
generic_modules = ['CommandClient', 'DotnetPlugins', 'LUAScript', 'PythonScript', 'Scheduler', 'SimpleCache', 'SimpleFileWriter', 'WEBServer']
|
||||
|
||||
def __init__(self):
|
||||
self.paths = {}
|
||||
self.commands = {}
|
||||
self.aliases = {}
|
||||
self.plugins = {}
|
||||
|
||||
def append_key(self, info):
|
||||
path = info.node.path
|
||||
if path in self.paths:
|
||||
self.paths[path].append_key(info)
|
||||
else:
|
||||
p = path_container()
|
||||
p.append_key(info)
|
||||
self.paths[path] = p
|
||||
|
||||
def append_path(self, info):
|
||||
path = info.node.path
|
||||
if not path in self.paths:
|
||||
self.paths[path] = path_container(info)
|
||||
|
||||
def append_command(self, info):
|
||||
name = info.name
|
||||
if not name in self.commands:
|
||||
self.commands[name] = command_container(info)
|
||||
|
||||
def append_alias(self, info):
|
||||
name = info.name
|
||||
if not name in self.commands:
|
||||
self.aliases[name] = command_container(info)
|
||||
|
||||
def append_plugin(self, info, folder):
|
||||
name = info.name
|
||||
namespace = ''
|
||||
if name in self.windows_modules:
|
||||
namespace = 'windows'
|
||||
elif name in self.unix_modules:
|
||||
namespace = 'unix'
|
||||
elif name in self.check_modules:
|
||||
namespace = 'check'
|
||||
elif name in self.client_modules:
|
||||
namespace = 'client'
|
||||
elif name in self.generic_modules:
|
||||
namespace = 'generic'
|
||||
else:
|
||||
namespace = 'misc'
|
||||
|
||||
if not name in self.plugins:
|
||||
self.plugins[name] = plugin_container(info, namespace)
|
||||
spath = '%s/samples/%s_samples.md'%(folder, name)
|
||||
self.plugins[name].sample = ''
|
||||
if os.path.exists(spath):
|
||||
with open(spath) as f:
|
||||
self.plugins[name].sample = unicode(f.read(), 'utf8')
|
||||
self.plugins[name].sample_source = 'samples/%s_samples.md'%(name)
|
||||
spath = '%s/samples/%s_desc.md'%(folder, name)
|
||||
self.plugins[name].ext_desc = ''
|
||||
if os.path.exists(spath):
|
||||
with open(spath) as f:
|
||||
self.plugins[name].ext_desc = unicode(f.read(), 'utf8')
|
||||
self.plugins[name].ext_desc_source = 'samples/%s_desc.md'%(name)
|
||||
|
||||
def get_hash(self):
|
||||
ret = {}
|
||||
ret['paths'] = self.paths
|
||||
ret['commands'] = self.commands
|
||||
ret['aliases'] = self.aliases
|
||||
ret['plugins'] = self.plugins
|
||||
return ret
|
||||
|
||||
class path_container(object):
|
||||
keys = {}
|
||||
info = None
|
||||
def __init__(self, info = None):
|
||||
self.keys = {}
|
||||
self.info = info.info
|
||||
|
||||
def append_key(self, info):
|
||||
extdata = split_argllist(info.node.key, info.info.description)
|
||||
if extdata:
|
||||
ninfo = {}
|
||||
ninfo['info'] = info.info
|
||||
ninfo['node'] = info.node
|
||||
ninfo['ext'] = extdata
|
||||
self.keys[info.node.key] = ninfo
|
||||
else:
|
||||
self.keys[info.node.key] = info
|
||||
|
||||
class command_container(object):
|
||||
info = None
|
||||
parameters = None
|
||||
def __init__(self, info = None):
|
||||
self.info = info.info
|
||||
self.parameters = info.parameters
|
||||
|
||||
class plugin_container(object):
|
||||
info = None
|
||||
namespace = ''
|
||||
def __init__(self, info = None, namespace = ''):
|
||||
self.info = info.info
|
||||
self.namespace = namespace
|
||||
|
||||
def first_line(string):
|
||||
return string.strip().split('\n')[0]
|
||||
|
||||
def make_rst_link(name, type, title = None):
|
||||
if title:
|
||||
return ':%s:`%s<%s>`'%(type, title, name)
|
||||
return ':%s:`%s`'%(type, name)
|
||||
|
||||
def make_md_link(name, title = None):
|
||||
if title:
|
||||
return '[%s](%s)'%(title, name)
|
||||
return '[%s](%s)'%(name, name)
|
||||
|
||||
def make_md_self_link(name, title = None):
|
||||
if title:
|
||||
return '[%s](#%s)'%(title, name)
|
||||
return '[%s](#%s)'%(name, name)
|
||||
def make_md_code(name):
|
||||
return '`%s`'%name
|
||||
def make_md_prefix_lnk(value, prefix):
|
||||
return '%s_%s'%(prefix, value)
|
||||
|
||||
def largest_value(a,b):
|
||||
return map(lambda n: n[0] if len(n[0])>len(n[1]) else n[1], zip(a, b))
|
||||
|
||||
def extract_value(value):
|
||||
if value.HasField("string_data"):
|
||||
return value.string_data
|
||||
if value.HasField("int_data"):
|
||||
return '%d'%value.int_data
|
||||
if value.HasField("bool_data"):
|
||||
return "true" if value.bool_data else "false"
|
||||
return ''
|
||||
|
||||
def as_text(value):
|
||||
value = value.replace('\\', '\\\\')
|
||||
value = value.replace('`', '\\`')
|
||||
value = value.replace('|', '\\|')
|
||||
return value
|
||||
|
||||
def block_pad(string, pad, prefix = ''):
|
||||
string = string.strip()
|
||||
if not string:
|
||||
return ''
|
||||
ret = ''
|
||||
for line in string.split('\n'):
|
||||
if line != '\n':
|
||||
ret += (pad * ' ') + prefix + line + '\n'
|
||||
else:
|
||||
ret += line + '\n'
|
||||
return ret.rstrip()
|
||||
|
||||
def render_rst_table(table, *args):
    """Render rows of strings as a pipe table with a separator after row one.

    table: sequence of rows (each a sequence of strings).
    *args: optional header row, prepended before rendering.
    Returns '' for an empty table.
    """
    if not table:
        return ''
    # Fix: the original did table.insert(0, args), mutating the caller's
    # list; work on a copy instead.
    rows = list(table)
    if args:
        rows.insert(0, args)
    # Column widths = longest cell per column. zip(*rows) truncates to the
    # shortest row, matching the original pairwise-zip behaviour. (This also
    # drops the original's bare reduce(), which needs an import on Python 3.)
    widths = [max(len(cell) for cell in col) for col in zip(*rows)]
    ret = ''
    for idx, row in enumerate(rows):
        ret += '|' + ''.join(' ' + cell.ljust(width, ' ') + ' |'
                             for width, cell in zip(widths, row)) + '\n'
        if idx == 0:
            # Separator under the first (header) row.
            ret += '|' + ''.join('-' + ''.ljust(width, '-') + '-|'
                                 for width, _cell in zip(widths, row)) + '\n'
    return ret
|
||||
|
||||
def render_rst_heading(string, char='-', top=False):
    """Render an RST heading: underline *string* with *char*, plus an overline when top=True."""
    rule = char * len(string)
    if top:
        return rule + "\n" + string + "\n" + rule
    return string + "\n" + rule
|
||||
|
||||
def getcommonletters(strlist):
    """Join the characters at every index where all strings in strlist agree.

    Comparison runs only up to the length of the shortest string, and
    disagreeing positions are skipped — so the result is not necessarily a
    contiguous prefix.
    """
    kept = []
    for chars in zip(*strlist):
        if all(c == chars[0] for c in chars):
            kept.append(chars[0])
    return ''.join(kept)
|
||||
|
||||
def calculate_common_head(strlist):
    """Return the stable set of common letters shared by all strings in strlist.

    Iterates getcommonletters() to a fixed point: each intermediate result is
    appended to a working copy of the list and the computation repeats until
    the result stops changing.
    """
    strlist = strlist[:]  # work on a copy so the caller's list is not mutated
    prev = None
    while True:
        common = getcommonletters(strlist)
        if common == prev:
            break
        # Feed the intermediate result back in and iterate again.
        strlist.append(common)
        prev = common

    # Recompute on the augmented list; at the fixed point this equals `common`.
    return getcommonletters(strlist)
|
||||
|
||||
def render_template(hash, template, filename):
    """Render a Jinja template with *hash* and write it to *filename*.

    Creates the target directory if needed. If the file already exists and
    its content hashes identically to the new output, the write is skipped
    so file timestamps are left alone.
    """
    data = template.render(hash).encode('utf8')

    path = os.path.dirname(filename)
    if not os.path.exists(path):
        os.makedirs(path)

    if os.path.exists(filename):
        sha1 = hashlib.sha256(data).digest()
        # Fix: read in binary mode so the comparison is against the same
        # bytes we would write (the original opened in text mode).
        with open(filename, 'rb') as f:
            sha2 = hashlib.sha256(f.read()).digest()
        if sha1 == sha2:
            log_debug("no changes detected in: %s" % filename)
            return

    log_debug('Writing file: %s' % filename)
    # Fix: context manager guarantees the handle is closed even if the
    # write raises (the original used open/close with no try/finally).
    with open(filename, "wb") as f:
        f.write(data)
|
||||
|
||||
class DocumentationHelper(object):
    """Generates reference documentation for an NSClient++-style plugin.

    Queries the settings and registry interfaces over serialized protobuf
    request/response messages, collects plugins/paths/keys/commands/aliases
    into a root container, and renders them through Jinja templates into
    markdown/RST files.

    NOTE(review): SOURCE arrived with indentation stripped; block structure
    below was reconstructed from the code's logic.
    """

    # Identifier handed to Settings/Registry/Core factories.
    plugin_id = None
    plugin_alias = None
    script_alias = None
    # Interface handles, resolved in __init__.
    conf = None
    registry = None
    core = None
    # NOTE(review): `dir` appears unused in this file.
    dir = None
    # Output folder (set from the -o option in main()).
    folder = None
    # Per-command cache for fetch_command(); re-bound per instance in __init__.
    command_cache = {}

    def __init__(self, plugin_id, plugin_alias, script_alias):
        self.plugin_id = plugin_id
        self.plugin_alias = plugin_alias
        self.script_alias = script_alias
        # Resolve the interface singletons for this plugin id.
        self.conf = Settings.get(self.plugin_id)
        self.registry = Registry.get(self.plugin_id)
        self.core = Core.get(self.plugin_id)
        self.command_cache = {}
        self.folder = None

    def build_inventory_request(self, path = '/', recursive = True, keys = False):
        """Build a serialized SettingsRequestMessage asking for the settings
        inventory under *path* (either keys or paths, never both)."""
        message = plugin_pb2.SettingsRequestMessage()
        payload = message.payload.add()
        payload.plugin_id = self.plugin_id
        payload.inventory.node.path = path
        payload.inventory.recursive_fetch = recursive
        payload.inventory.fetch_keys = keys
        # Paths and keys are mutually exclusive in this request.
        payload.inventory.fetch_paths = not keys
        payload.inventory.fetch_samples = True
        payload.inventory.descriptions = True
        return message.SerializeToString()

    def build_command_request(self, type = 1):
        """Build a serialized RegistryRequestMessage fetching all registry
        entries of the given *type* (1=queries, 5=aliases, 7=plugins below)."""
        message = plugin_pb2.RegistryRequestMessage()
        payload = message.payload.add()
        payload.inventory.fetch_all = True
        payload.inventory.type.append(type)
        return message.SerializeToString()

    def get_paths(self):
        """Fetch all settings paths; returns the inventory list or [] on failure.

        NOTE(review): a return code of 1 is treated as success here — confirm
        against the query() API."""
        (code, data) = self.conf.query(self.build_inventory_request())
        if code == 1:
            message = plugin_pb2.SettingsResponseMessage()
            message.ParseFromString(data)
            for payload in message.payload:
                if payload.inventory:
                    log_debug('Found %d paths'%len(payload.inventory))
                    return payload.inventory
        return []

    def get_keys(self, path):
        """Fetch the (non-recursive) settings keys directly under *path*."""
        (code, data) = self.conf.query(self.build_inventory_request(path, False, True))
        if code == 1:
            message = plugin_pb2.SettingsResponseMessage()
            message.ParseFromString(data)
            for payload in message.payload:
                if payload.inventory:
                    log_debug('Found %d keys for %s'%(len(payload.inventory), path))
                    return payload.inventory
        return []

    def get_queries(self):
        """Fetch all registered queries (registry type 1)."""
        log_debug('Fetching queries...')
        (code, data) = self.registry.query(self.build_command_request(1))
        if code == 1:
            message = plugin_pb2.RegistryResponseMessage()
            message.ParseFromString(data)
            for payload in message.payload:
                if payload.inventory:
                    log_debug('Found %d queries'%len(payload.inventory))
                    return payload.inventory
        log_error('No queries found')
        return []

    def get_query_aliases(self):
        """Fetch all query aliases (registry type 5)."""
        log_debug('Fetching aliases...')
        (code, data) = self.registry.query(self.build_command_request(5))
        if code == 1:
            message = plugin_pb2.RegistryResponseMessage()
            message.ParseFromString(data)
            for payload in message.payload:
                if payload.inventory:
                    log_debug('Found %d aliases'%len(payload.inventory))
                    return payload.inventory
        log_error('No aliases found')
        return []

    def get_plugins(self):
        """Fetch all registered plugins (registry type 7)."""
        log_debug('Fetching plugins...')
        (code, data) = self.registry.query(self.build_command_request(7))
        if code == 1:
            message = plugin_pb2.RegistryResponseMessage()
            message.ParseFromString(data)
            for payload in message.payload:
                if payload.inventory:
                    log_debug('Found %d plugins'%len(payload.inventory))
                    return payload.inventory
        log_error('No plugins')
        return []

    def get_info(self):
        """Collect plugins, paths (with their keys), queries and aliases into
        a freshly built root_container."""
        root = root_container()
        for p in self.get_plugins():
            root.append_plugin(p, self.folder)
        for p in self.get_paths():
            root.append_path(p)
            # Keys are fetched per path and appended under it.
            for k in self.get_keys(p.node.path):
                root.append_key(k)
        for p in self.get_queries():
            root.append_command(p)
        for p in self.get_query_aliases():
            root.append_alias(p)
        return root


    def fetch_command(self, module, minfo, command, cinfo):
        """Enrich *cinfo* with parsed parameters and an on-disk sample file.

        Results are cached per command name (NOTE(review): the cache key does
        not include the module — confirm commands are globally unique)."""
        if command in self.command_cache:
            return self.command_cache[command]
        params = []
        for p in cinfo.parameters.parameter:
            # split_argllist (defined elsewhere) extracts structured data
            # from the long description; when it succeeds we expose the
            # parameter as a plain dict instead of the protobuf object.
            extdata = split_argllist(p.name, p.long_description)
            if extdata:
                np = {}
                np['long_description'] = p.long_description
                np['default_value'] = p.default_value
                np['name'] = p.name
                np['ext'] = extdata
                np['content_type'] = p.content_type
                params.append(np)
            else:
                params.append(p)
        cinfo.params = params
        spath = '%s/samples/%s_%s_samples.md'%(self.folder, module, command)
        cinfo.sample = ''
        if os.path.exists(spath):
            with open(spath) as f:
                cinfo.sample = unicode(f.read(), 'utf8')
            cinfo.sample_source = 'samples/%s_%s_samples.md'%(module, command)
        self.command_cache[command] = cinfo
        return cinfo

    def generate_rst(self, input_dir, output_dir):
        """Render one markdown page per module plus index/sample pages."""
        root = self.get_info()
        i = 0

        # Jinja environment with all the helper filters defined above.
        env = Environment(extensions=["jinja2.ext.do",])
        env.filters['firstline'] = first_line
        env.filters['rst_link'] = make_rst_link
        env.filters['md_link'] = make_md_link
        env.filters['md_prefix_lnk'] = make_md_prefix_lnk
        env.filters['md_self_link'] = make_md_self_link
        env.filters['md_code'] = make_md_code
        env.filters['rst_table'] = render_rst_table
        env.filters['rst_heading'] = render_rst_heading
        env.filters['extract_value'] = extract_value
        env.filters['block_pad'] = block_pad
        env.filters['common_head'] = calculate_common_head
        env.filters['as_text'] = as_text

        for (module,minfo) in root.plugins.iteritems():
            out_base_path = '%s/docs/'%output_dir
            sample_base_path = '%s/docs/samples/'%output_dir
            # Namespaced modules land under docs/reference/<namespace>/.
            if minfo.namespace:
                out_base_path = '%s/docs/reference/%s/'%(output_dir, minfo.namespace)
            hash = root.get_hash()
            minfo.key = module
            minfo.queries = {}
            # Pick up a module-level sample file if present (.rst wins over
            # .inc because it is checked last).
            sfile = '%s%s_samples.inc'%(sample_base_path, module)
            if os.path.exists(sfile):
                minfo.sample = os.path.basename(sfile)
            sfile = '%s%s_samples.rst'%(sample_base_path, module)
            if os.path.exists(sfile):
                minfo.sample = os.path.basename(sfile)

            # Attach this module's commands (enriched via fetch_command).
            for (c,cinfo) in sorted(root.commands.iteritems()):
                if module in cinfo.info.plugin:
                    more_info = self.fetch_command(module, minfo, c,cinfo)
                    if more_info:
                        cinfo = more_info
                    sfile = '%s%s_%s_samples.inc'%(sample_base_path, module, c)
                    if os.path.exists(sfile):
                        cinfo.sample = os.path.basename(sfile)
                    #all_samples.append((module, command, sfile))
                    cinfo.key = c
                    minfo.queries[c] = cinfo
            # Attach this module's aliases.
            minfo.aliases = {}
            for (c,cinfo) in sorted(root.aliases.iteritems()):
                if module in cinfo.info.plugin:
                    cinfo.key = c
                    minfo.aliases[c] = cinfo

            # Attach this module's configuration paths.
            minfo.paths = {}
            for (c,cinfo) in sorted(root.paths.iteritems()):
                if module in cinfo.info.plugin:
                    cinfo.key = c
                    minfo.paths[c] = cinfo

            hash['module'] = minfo
            i=i+1
            log_debug('Processing module: %d of %d [%s]'%(i, len(root.plugins), module))

            template = env.from_string(module_template)
            render_template(hash, template, '%s/%s.md'%(out_base_path, module))

        # Global index and samples pages, rendered once after all modules.
        hash = root.get_hash()
        template = env.from_string(index_template)
        render_template(hash, template, '%s/docs/reference/index.md'%output_dir)

        log_debug('%s/samples/index.rst'%output_dir)
        template = env.from_string(samples_template)
        render_template(hash, template, '%s/samples/index.rst'%output_dir)

    def main(self, args):
        """Parse -i/-o options and run the generator."""
        parser = OptionParser(prog="")
        parser.add_option("-o", "--output", help="write report to FILE(s)")
        parser.add_option("-i", "--input", help="Reference folder")
        (options, args) = parser.parse_args(args=args)
        self.folder = options.output
        self.generate_rst(options.input, options.output)
|
||||
|
||||
def __main__(args):
    """Script entry point: delegate to the module-level helper set up by init().

    Returns 0 unconditionally; errors surface as exceptions from main().
    """
    global helper
    helper.main(args)  # fix: dropped the stray trailing semicolon
    return 0
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
    # Plugin lifecycle hook: create the module-level DocumentationHelper
    # instance that __main__ delegates to.
    global helper
    helper = DocumentationHelper(plugin_id, plugin_alias, script_alias)
|
||||
|
||||
def shutdown():
    # Plugin lifecycle hook: drop the module-level helper reference.
    global helper
    helper = None
|
@ -0,0 +1,161 @@
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NOTE(review): descriptors for google/protobuf/compiler/plugin.proto (the
# protoc code-generator plugin protocol). Regenerate with protoc rather than
# editing by hand.

from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)


import google.protobuf.descriptor_pb2

# File-level descriptor built from the serialized FileDescriptorProto below.
DESCRIPTOR = descriptor.FileDescriptor(
  name='google/protobuf/compiler/plugin.proto',
  package='google.protobuf.compiler',
  serialized_pb='\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"}\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xaa\x01\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a>\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t')




# Message descriptor for CodeGeneratorRequest (what protoc sends a plugin).
_CODEGENERATORREQUEST = descriptor.Descriptor(
  name='CodeGeneratorRequest',
  full_name='google.protobuf.compiler.CodeGeneratorRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='file_to_generate', full_name='google.protobuf.compiler.CodeGeneratorRequest.file_to_generate', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='proto_file', full_name='google.protobuf.compiler.CodeGeneratorRequest.proto_file', index=2,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=101,
  serialized_end=226,
)


# Nested message descriptor for CodeGeneratorResponse.File (one output file).
_CODEGENERATORRESPONSE_FILE = descriptor.Descriptor(
  name='File',
  full_name='google.protobuf.compiler.CodeGeneratorResponse.File',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=337,
  serialized_end=399,
)

# Message descriptor for CodeGeneratorResponse (what a plugin sends back).
_CODEGENERATORRESPONSE = descriptor.Descriptor(
  name='CodeGeneratorResponse',
  full_name='google.protobuf.compiler.CodeGeneratorResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='file', full_name='google.protobuf.compiler.CodeGeneratorResponse.file', index=1,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_CODEGENERATORRESPONSE_FILE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=229,
  serialized_end=399,
)

# Cross-link descriptors that could not be resolved at definition time.
_CODEGENERATORREQUEST.fields_by_name['proto_file'].message_type = google.protobuf.descriptor_pb2._FILEDESCRIPTORPROTO
_CODEGENERATORRESPONSE_FILE.containing_type = _CODEGENERATORRESPONSE;
_CODEGENERATORRESPONSE.fields_by_name['file'].message_type = _CODEGENERATORRESPONSE_FILE
DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST
DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE

class CodeGeneratorRequest(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CODEGENERATORREQUEST

  # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)

class CodeGeneratorResponse(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType

  class File(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CODEGENERATORRESPONSE_FILE

    # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)
  DESCRIPTOR = _CODEGENERATORRESPONSE

  # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse)

# @@protoc_insertion_point(module_scope)
|
@ -0,0 +1,598 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Descriptors essentially contain exactly the information found in a .proto
|
||||
file, in types that make this information accessible in Python.
|
||||
"""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
|
||||
from google.protobuf.internal import api_implementation
|
||||
|
||||
|
||||
if api_implementation.Type() == 'cpp':
|
||||
from google.protobuf.internal import cpp_message
|
||||
|
||||
|
||||
# Raised by descriptor operations such as CopyToProto when data is missing.
class Error(Exception):
  """Base error for this module."""
|
||||
|
||||
|
||||
class DescriptorBase(object):

  """Descriptors base class.

  This class is the base of all descriptor classes. It provides common options
  related functionality.

  Attributes:
    has_options: True if the descriptor has non-default options. Usually it
        is not necessary to read this -- just call GetOptions() which will
        happily return the default instance. However, it's sometimes useful
        for efficiency, and also useful inside the protobuf implementation to
        avoid some bootstrapping issues.
  """

  def __init__(self, options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.
    """
    self._options = options
    self._options_class_name = options_class_name

    # Does this descriptor have non-default options?
    self.has_options = options is not None

  def GetOptions(self):
    """Retrieves descriptor options.

    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
      return self._options
    # NOTE(review): imported locally, presumably to avoid a circular import
    # at module load time -- confirm before hoisting to the top of the file.
    from google.protobuf import descriptor_pb2
    try:
      options_class = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    # Cache the default-constructed options so later calls return the same
    # instance.
    self._options = options_class()
    return self._options
|
||||
|
||||
|
||||
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""

  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None):
    """Constructor.

    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name: (str) The class name of the above options.

      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.
      file: (FileDescriptor) Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, options_class_name)

    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if it's the root."""
    desc = self
    while desc.containing_type is not None:
      desc = desc.containing_type
    return desc

  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.

    Args:
      proto: An empty proto instance from descriptor_pb2.

    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    if (self.file is not None and
        self._serialized_start is not None and
        self._serialized_end is not None):
      # Re-parse this descriptor's slice of the file's serialized bytes.
      proto.ParseFromString(self.file.serialized_pb[
          self._serialized_start:self._serialized_end])
    else:
      raise Error('Descriptor does not contain serialization.')
|
||||
|
||||
|
||||
class Descriptor(_NestedDescriptorBase):

  """Descriptor for a protocol message type.

  A Descriptor instance has the following attributes:

    name: (str) Name of this protocol message type.
    full_name: (str) Fully-qualified name of this protocol message type,
      which will include protocol "package" name and the name of any
      enclosing types.

    containing_type: (Descriptor) Reference to the descriptor of the
      type containing us, or None if this is top-level.

    fields: (list of FieldDescriptors) Field descriptors for all
      fields in this type.
    fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "number" attribute in each
      FieldDescriptor.
    fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "name" attribute in each
      FieldDescriptor.

    nested_types: (list of Descriptors) Descriptor references
      for all protocol message types nested within this one.
    nested_types_by_name: (dict str -> Descriptor) Same Descriptor
      objects as in |nested_types|, but indexed by "name" attribute
      in each Descriptor.

    enum_types: (list of EnumDescriptors) EnumDescriptor references
      for all enums contained within this type.
    enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor
      objects as in |enum_types|, but indexed by "name" attribute
      in each EnumDescriptor.
    enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
      from enum value name to EnumValueDescriptor for that value.

    extensions: (list of FieldDescriptor) All extensions defined directly
      within this message type (NOT within a nested type).
    extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
      objects as |extensions|, but indexed by "name" attribute of each
      FieldDescriptor.

    is_extendable: Does this type define any extension ranges?

    options: (descriptor_pb2.MessageOptions) Protocol message options or None
      to use default message options.

    file: (FileDescriptor) Reference to file descriptor.
  """

  def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
               is_extendable=True, extension_ranges=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments to __init__() are as described in the description
    of Descriptor fields above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # FIX: the super call previously passed serialized_end=serialized_start,
    # handing the wrong value to the base class. It was masked only by the
    # direct reassignment of self._serialized_end below.
    super(Descriptor, self).__init__(
        options, 'MessageOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    # We have fields in addition to fields_by_name and fields_by_number,
    # so that:
    # 1. Clients can index fields by "order in which they're listed."
    # 2. Clients can easily iterate over all fields with the terse
    #    syntax: for f in descriptor.fields: ...
    self.fields = fields
    for field in self.fields:
      field.containing_type = self
    self.fields_by_number = dict((f.number, f) for f in fields)
    self.fields_by_name = dict((f.name, f) for f in fields)

    self.nested_types = nested_types
    self.nested_types_by_name = dict((t.name, t) for t in nested_types)

    self.enum_types = enum_types
    for enum_type in self.enum_types:
      enum_type.containing_type = self
    self.enum_types_by_name = dict((t.name, t) for t in enum_types)
    self.enum_values_by_name = dict(
        (v.name, v) for t in enum_types for v in t.values)

    self.extensions = extensions
    for extension in self.extensions:
      extension.extension_scope = self
    self.extensions_by_name = dict((f.name, f) for f in extensions)
    self.is_extendable = is_extendable
    self.extension_ranges = extension_ranges

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.DescriptorProto.

    Args:
      proto: An empty descriptor_pb2.DescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(Descriptor, self).CopyToProto(proto)
|
||||
|
||||
|
||||
# TODO(robinson): We should have aggressive checking here,
|
||||
# for example:
|
||||
# * If you specify a repeated field, you should not be allowed
|
||||
# to specify a default value.
|
||||
# * [Other examples here as needed].
|
||||
#
|
||||
# TODO(robinson): for this and other *Descriptor classes, we
|
||||
# might also want to lock things down aggressively (e.g.,
|
||||
# prevent clients from setting the attributes). Having
|
||||
# stronger invariants here in general will reduce the number
|
||||
# of runtime checks we must do in reflection.py...
|
||||
class FieldDescriptor(DescriptorBase):

  """Descriptor for a single field in a .proto file.

  A FieldDescriptor instance has the following attributes:

    name: (str) Name of this field, exactly as it appears in .proto.
    full_name: (str) Name of this field, including containing scope.  This is
      particularly relevant for extensions.
    index: (int) Dense, 0-indexed index giving the order that this
      field textually appears within its message in the .proto file.
    number: (int) Tag number declared for this field in the .proto file.

    type: (One of the TYPE_* constants below) Declared type.
    cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
      represent this field.

    label: (One of the LABEL_* constants below) Tells whether this
      field is optional, required, or repeated.
    has_default_value: (bool) True if this field has a default value defined,
      otherwise false.
    default_value: (Varies) Default value of this field.  Only
      meaningful for non-repeated scalar fields.  Repeated fields
      should always set this to [], and non-repeated composite
      fields should always set this to None.

    containing_type: (Descriptor) Descriptor of the protocol message
      type that contains this field.  Set by the Descriptor constructor
      if we're passed into one.
      Somewhat confusingly, for extension fields, this is the
      descriptor of the EXTENDED message, not the descriptor
      of the message containing this field.  (See is_extension and
      extension_scope below).
    message_type: (Descriptor) If a composite field, a descriptor
      of the message type contained in this field.  Otherwise, this is None.
    enum_type: (EnumDescriptor) If this field contains an enum, a
      descriptor of that enum.  Otherwise, this is None.

    is_extension: True iff this describes an extension field.
    extension_scope: (Descriptor) Only meaningful if is_extension is True.
      Gives the message that immediately contains this extension field.
      Will be None iff we're a top-level (file-level) extension field.

    options: (descriptor_pb2.FieldOptions) Protocol message field options or
      None to use default field options.
  """

  # Must be consistent with C++ FieldDescriptor::Type enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  TYPE_DOUBLE = 1
  TYPE_FLOAT = 2
  TYPE_INT64 = 3
  TYPE_UINT64 = 4
  TYPE_INT32 = 5
  TYPE_FIXED64 = 6
  TYPE_FIXED32 = 7
  TYPE_BOOL = 8
  TYPE_STRING = 9
  TYPE_GROUP = 10
  TYPE_MESSAGE = 11
  TYPE_BYTES = 12
  TYPE_UINT32 = 13
  TYPE_ENUM = 14
  TYPE_SFIXED32 = 15
  TYPE_SFIXED64 = 16
  TYPE_SINT32 = 17
  TYPE_SINT64 = 18
  MAX_TYPE = 18

  # Must be consistent with C++ FieldDescriptor::CppType enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  CPPTYPE_INT32 = 1
  CPPTYPE_INT64 = 2
  CPPTYPE_UINT32 = 3
  CPPTYPE_UINT64 = 4
  CPPTYPE_DOUBLE = 5
  CPPTYPE_FLOAT = 6
  CPPTYPE_BOOL = 7
  CPPTYPE_ENUM = 8
  CPPTYPE_STRING = 9
  CPPTYPE_MESSAGE = 10
  MAX_CPPTYPE = 10

  # Must be consistent with C++ FieldDescriptor::Label enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  LABEL_OPTIONAL = 1
  LABEL_REQUIRED = 2
  LABEL_REPEATED = 3
  MAX_LABEL = 3

  def __init__(self, name, full_name, index, number, type, cpp_type, label,
               default_value, message_type, enum_type, containing_type,
               is_extension, extension_scope, options=None,
               has_default_value=True):
    """The arguments are as described in the description of FieldDescriptor
    attributes above.

    Note that containing_type may be None, and may be set later if necessary
    (to deal with circular references between message types, for example).
    Likewise for extension_scope.
    """
    super(FieldDescriptor, self).__init__(options, 'FieldOptions')
    self.name = name
    self.full_name = full_name
    self.index = index
    self.number = number
    self.type = type
    self.cpp_type = cpp_type
    self.label = label
    self.has_default_value = has_default_value
    self.default_value = default_value
    self.containing_type = containing_type
    self.message_type = message_type
    self.enum_type = enum_type
    self.is_extension = is_extension
    self.extension_scope = extension_scope
    # When the C++ API implementation is active, mirror this field with the
    # corresponding C++ descriptor so reflection can delegate to it.
    if api_implementation.Type() == 'cpp':
      if is_extension:
        self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name)
      else:
        self._cdescriptor = cpp_message.GetFieldDescriptor(full_name)
    else:
      self._cdescriptor = None
|
||||
|
||||
|
||||
class EnumDescriptor(_NestedDescriptorBase):

  """Descriptor for an enum defined in a .proto file.

  An EnumDescriptor instance has the following attributes:

    name: (str) Name of the enum type.
    full_name: (str) Full name of the type, including package name
      and any enclosing type(s).

    values: (list of EnumValueDescriptors) List of the values
      in this enum.
    values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
      but indexed by the "name" field of each EnumValueDescriptor.
    values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
      but indexed by the "number" field of each EnumValueDescriptor.

    containing_type: (Descriptor) Descriptor of the immediate containing
      type of this enum, or None if this is an enum defined at the
      top level in a .proto file.  Set by Descriptor's constructor
      if we're passed into one.
    file: (FileDescriptor) Reference to file descriptor.
    options: (descriptor_pb2.EnumOptions) Enum options message or
      None to use default enum options.
  """

  def __init__(self, name, full_name, filename, values,
               containing_type=None, options=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments are as described in the attribute description above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # Bug fix: this previously passed serialized_end=serialized_start to the
    # superclass, so the recorded end offset of the serialized enum was wrong.
    # (The correct value is already stored in self._serialized_end below.)
    super(EnumDescriptor, self).__init__(
        options, 'EnumOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    self.values = values
    # Give every value a back-reference to the enum that owns it.
    for value in self.values:
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    self.values_by_number = dict((v.number, v) for v in values)

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.EnumDescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
|
||||
|
||||
|
||||
class EnumValueDescriptor(DescriptorBase):

  """Descriptor for a single value within an enum.

    name: (str) Name of this value.
    index: (int) Dense, 0-indexed index giving the order that this
      value appears textually within its enum in the .proto file.
    number: (int) Actual number assigned to this enum value.
    type: (EnumDescriptor) EnumDescriptor to which this value
      belongs.  Set by EnumDescriptor's constructor if we're
      passed into one.
    options: (descriptor_pb2.EnumValueOptions) Enum value options message or
      None to use default enum value options.
  """

  def __init__(self, name, index, number, type=None, options=None):
    """Arguments are as described in the attribute description above."""
    super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
    # Plain attribute copies; |type| is back-filled by EnumDescriptor when
    # this value is passed into one.
    self.name = name
    self.number = number
    self.index = index
    self.type = type
|
||||
|
||||
|
||||
class ServiceDescriptor(_NestedDescriptorBase):

  """Descriptor for a service.

    name: (str) Name of the service.
    full_name: (str) Full name of the service, including package name.
    index: (int) 0-indexed index giving the order that this service
      definition appears within the .proto file.
    methods: (list of MethodDescriptor) List of methods provided by this
      service.
    options: (descriptor_pb2.ServiceOptions) Service options message or
      None to use default service options.
    file: (FileDescriptor) Reference to file info.
  """

  def __init__(self, name, full_name, index, methods, options=None, file=None,
               serialized_start=None, serialized_end=None):
    super(ServiceDescriptor, self).__init__(
        options, 'ServiceOptions', name, full_name, file,
        None, serialized_start=serialized_start,
        serialized_end=serialized_end)
    self.index = index
    self.methods = methods
    # Give each method a back-reference to the service that owns it.
    for method in self.methods:
      method.containing_service = self

  def FindMethodByName(self, name):
    """Searches for the specified method, and returns its descriptor."""
    # Linear scan; returns None when no method has the requested name.
    for candidate in self.methods:
      if candidate.name == name:
        return candidate
    return None

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.ServiceDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.ServiceDescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(ServiceDescriptor, self).CopyToProto(proto)
|
||||
|
||||
|
||||
class MethodDescriptor(DescriptorBase):

  """Descriptor for a method in a service.

    name: (str) Name of the method within the service.
    full_name: (str) Full name of method.
    index: (int) 0-indexed index of the method inside the service.
    containing_service: (ServiceDescriptor) The service that contains this
      method.
    input_type: The descriptor of the message that this method accepts.
    output_type: The descriptor of the message that this method returns.
    options: (descriptor_pb2.MethodOptions) Method options message or
      None to use default method options.
  """

  def __init__(self, name, full_name, index, containing_service,
               input_type, output_type, options=None):
    """The arguments are as described in the description of MethodDescriptor
    attributes above.

    Note that containing_service may be None, and may be set later if necessary.
    """
    super(MethodDescriptor, self).__init__(options, 'MethodOptions')
    # Straight attribute copies; containing_service may be back-filled by
    # ServiceDescriptor's constructor.
    self.name = name
    self.full_name = full_name
    self.index = index
    self.input_type = input_type
    self.output_type = output_type
    self.containing_service = containing_service
|
||||
|
||||
|
||||
class FileDescriptor(DescriptorBase):
  """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.

    name: name of file, relative to root of source tree.
    package: name of the package
    serialized_pb: (str) Byte string of serialized
      descriptor_pb2.FileDescriptorProto.
  """

  def __init__(self, name, package, options=None, serialized_pb=None):
    """Constructor."""
    super(FileDescriptor, self).__init__(options, 'FileOptions')

    self.message_types_by_name = {}
    self.name = name
    self.package = package
    self.serialized_pb = serialized_pb
    # When the C++ implementation is active and we actually have serialized
    # bytes, register this file with the C++ descriptor pool.
    if (api_implementation.Type() == 'cpp' and
        self.serialized_pb is not None):
      cpp_message.BuildFile(self.serialized_pb)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.FileDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.FileDescriptorProto.
    """
    proto.ParseFromString(self.serialized_pb)
|
||||
|
||||
|
||||
def _ParseOptions(message, string):
|
||||
"""Parses serialized options.
|
||||
|
||||
This helper function is used to parse serialized options in generated
|
||||
proto2 files. It must not be used outside proto2.
|
||||
"""
|
||||
message.ParseFromString(string)
|
||||
return message
|
File diff suppressed because one or more lines are too long
@ -0,0 +1,64 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""
|
||||
This module is the central entity that determines which implementation of the
|
||||
API is used.
|
||||
"""
|
||||
|
||||
__author__ = 'petar@google.com (Petar Petrov)'
|
||||
|
||||
import os
|
||||
# This environment variable can be used to switch to a certain implementation
|
||||
# of the Python API. Right now only 'python' and 'cpp' are valid values. Any
|
||||
# other value will be ignored.
|
||||
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
|
||||
'python')
|
||||
|
||||
|
||||
if _implementation_type != 'python':
|
||||
# For now, by default use the pure-Python implementation.
|
||||
# The code below checks if the C extension is available and
|
||||
# uses it if it is available.
|
||||
_implementation_type = 'cpp'
|
||||
## Determine automatically which implementation to use.
|
||||
#try:
|
||||
# from google.protobuf.internal import cpp_message
|
||||
# _implementation_type = 'cpp'
|
||||
#except ImportError, e:
|
||||
# _implementation_type = 'python'
|
||||
|
||||
|
||||
# Usage of this function is discouraged. Clients shouldn't care which
|
||||
# implementation of the API is in use. Note that there is no guarantee
|
||||
# that differences between APIs will be maintained.
|
||||
# Please don't use this function if possible.
|
||||
def Type():
|
||||
return _implementation_type
|
@ -0,0 +1,259 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains container classes to represent different protocol buffer types.
|
||||
|
||||
This file defines container classes which represent categories of protocol
|
||||
buffer field types which need extra maintenance. Currently these categories
|
||||
are:
|
||||
- Repeated scalar fields - These are all repeated fields which aren't
|
||||
composite (e.g. they are of simple types like int32, string, etc).
|
||||
- Repeated composite fields - Repeated fields which are composite. This
|
||||
includes groups and nested messages.
|
||||
"""
|
||||
|
||||
__author__ = 'petar@google.com (Petar Petrov)'
|
||||
|
||||
|
||||
class BaseContainer(object):

  """Base container class."""

  # Minimizes memory usage and disallows assignment to other attributes.
  __slots__ = ['_message_listener', '_values']

  def __init__(self, message_listener):
    """
    Args:
      message_listener: A MessageListener implementation.
        The RepeatedScalarFieldContainer will call this object's
        Modified() method when it is modified.
    """
    self._message_listener = message_listener
    self._values = []

  def __len__(self):
    """Returns the number of elements in the container."""
    return len(self._values)

  def __getitem__(self, key):
    """Retrieves item by the specified key."""
    return self._values[key]

  def __repr__(self):
    return repr(self._values)

  def __ne__(self, other):
    """Checks if another instance isn't equal to this one."""
    # The concrete classes should define __eq__.
    return not (self == other)

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, sort_function=cmp):
    # NOTE(review): relies on the Python 2 builtin `cmp` and the Python 2
    # list.sort(cmpfunc) signature; not portable to Python 3.
    self._values.sort(sort_function)
|
||||
|
||||
|
||||
class RepeatedScalarFieldContainer(BaseContainer):

  """Simple, type-checked, list-like container for holding repeated scalars."""

  # Disallows assignment to other attributes.
  __slots__ = ['_type_checker']

  def __init__(self, message_listener, type_checker):
    """
    Args:
      message_listener: A MessageListener implementation.
        The RepeatedScalarFieldContainer will call this object's
        Modified() method when it is modified.
      type_checker: A type_checkers.ValueChecker instance to run on elements
        inserted into this container.
    """
    super(RepeatedScalarFieldContainer, self).__init__(message_listener)
    self._type_checker = type_checker

  def append(self, value):
    """Appends an item to the list. Similar to list.append()."""
    self._type_checker.CheckValue(value)
    self._values.append(value)
    # Only notify the listener on the clean->dirty transition.
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def insert(self, key, value):
    """Inserts the item at the specified position. Similar to list.insert()."""
    self._type_checker.CheckValue(value)
    self._values.insert(key, value)
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def extend(self, elem_seq):
    """Extends by appending the given sequence. Similar to list.extend()."""
    if not elem_seq:
      return

    # Type-check everything first so a failure leaves the container untouched.
    checked = []
    for item in elem_seq:
      self._type_checker.CheckValue(item)
      checked.append(item)
    self._values.extend(checked)
    self._message_listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one. We do not check the types of the individual fields.
    """
    self._values.extend(other._values)
    self._message_listener.Modified()

  def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def __setitem__(self, key, value):
    """Sets the item on the specified position."""
    self._type_checker.CheckValue(value)
    self._values[key] = value
    self._message_listener.Modified()

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __setslice__(self, start, stop, values):
    """Sets the subset of items from between the specified indices."""
    checked = []
    for value in values:
      self._type_checker.CheckValue(value)
      checked.append(value)
    self._values[start:stop] = checked
    self._message_listener.Modified()

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    # Special case for the same type which should be common and fast.
    if isinstance(other, self.__class__):
      return other._values == self._values
    # We are presumably comparing against some other sequence type.
    return other == self._values
|
||||
|
||||
|
||||
class RepeatedCompositeFieldContainer(BaseContainer):

  """Simple, list-like container for holding repeated composite fields."""

  # Disallows assignment to other attributes.
  __slots__ = ['_message_descriptor']

  def __init__(self, message_listener, message_descriptor):
    """
    Note that we pass in a descriptor instead of the generated directly,
    since at the time we construct a _RepeatedCompositeFieldContainer we
    haven't yet necessarily initialized the type that will be contained in the
    container.

    Args:
      message_listener: A MessageListener implementation.
        The RepeatedCompositeFieldContainer will call this object's
        Modified() method when it is modified.
      message_descriptor: A Descriptor instance describing the protocol type
        that should be present in this container.  We'll use the
        _concrete_class field of this descriptor when the client calls add().
    """
    super(RepeatedCompositeFieldContainer, self).__init__(message_listener)
    self._message_descriptor = message_descriptor

  def add(self, **kwargs):
    """Adds a new element at the end of the list and returns it. Keyword
    arguments may be used to initialize the element.
    """
    new_element = self._message_descriptor._concrete_class(**kwargs)
    new_element._SetListener(self._message_listener)
    self._values.append(new_element)
    # Only notify the listener on the clean->dirty transition.
    if not self._message_listener.dirty:
      self._message_listener.Modified()
    return new_element

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    # Hoist attribute lookups out of the loop.
    message_class = self._message_descriptor._concrete_class
    listener = self._message_listener
    values = self._values
    for message in elem_seq:
      copy = message_class()
      copy._SetListener(listener)
      copy.MergeFrom(message)
      values.append(copy)
    listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one, copying each individual message.
    """
    self.extend(other._values)

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    return self._values == other._values
|
@ -0,0 +1,616 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains helper functions used to create protocol message classes from
|
||||
Descriptor objects at runtime backed by the protocol buffer C++ API.
|
||||
"""
|
||||
|
||||
__author__ = 'petar@google.com (Petar Petrov)'
|
||||
|
||||
import operator
|
||||
from google.protobuf.internal import _net_proto2___python
|
||||
from google.protobuf import message
|
||||
|
||||
|
||||
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
|
||||
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
|
||||
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
|
||||
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
|
||||
|
||||
|
||||
def GetDescriptorPool():
  """Creates a new DescriptorPool C++ object."""
  return _net_proto2___python.NewCDescriptorPool()


# Module-level pool shared by the lookup helpers below.
_pool = GetDescriptorPool()


def GetFieldDescriptor(full_field_name):
  """Searches for a field descriptor given a full field name."""
  return _pool.FindFieldByName(full_field_name)


def BuildFile(content):
  """Registers a new proto file in the underlying C++ descriptor pool."""
  _net_proto2___python.BuildFile(content)


def GetExtensionDescriptor(full_extension_name):
  """Searches for an extension descriptor given a full field name."""
  return _pool.FindExtensionByName(full_extension_name)


def NewCMessage(full_message_name):
  """Creates a new C++ protocol message by its full name."""
  return _net_proto2___python.NewCMessage(full_message_name)
|
||||
|
||||
|
||||
def ScalarProperty(cdescriptor):
  """Returns a scalar property for the given descriptor.

  The property delegates reads and writes to the owning object's
  _cmsg (the underlying C++ message).
  """

  def Getter(self):
    return self._cmsg.GetScalar(cdescriptor)

  def Setter(self, value):
    self._cmsg.SetScalar(cdescriptor, value)

  return property(Getter, Setter)
|
||||
|
||||
|
||||
def CompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given composite field.

  The sub-message wrapper is created lazily on first access and then
  cached in the owner's _composite_fields dict, keyed by field name.
  """

  def Getter(self):
    sub_message = self._composite_fields.get(cdescriptor.name, None)
    if sub_message is None:
      # Lazily wrap the underlying C++ sub-message and cache the wrapper.
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      sub_message = message_type._concrete_class(__cmessage=cmessage)
      self._composite_fields[cdescriptor.name] = sub_message
    return sub_message

  return property(Getter)
|
||||
|
||||
|
||||
class RepeatedScalarContainer(object):
  """Container for repeated scalar fields."""

  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor

  def _Snapshot(self):
    # Materializes the repeated field as a plain Python list
    # (equivalent to self[:]).
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor,
                                        slice(None, None, None))

  def _Assign(self, values):
    # Writes a whole Python list back into the underlying C++ field.
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def append(self, value):
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)

  def extend(self, sequence):
    for element in sequence:
      self.append(element)

  def insert(self, key, value):
    values = self._Snapshot()
    values.insert(key, value)
    self._Assign(values)

  def remove(self, value):
    values = self._Snapshot()
    values.remove(value)
    self._Assign(values)

  def __setitem__(self, key, value):
    values = self._Snapshot()
    values[key] = value
    self._Assign(values)

  def __getitem__(self, key):
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)

  def __len__(self):
    return len(self._Snapshot())

  def __eq__(self, other):
    if self is other:
      return True
    if not operator.isSequenceType(other):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self._Snapshot()

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, sort_function=cmp):
    # NOTE(review): relies on the Python 2 builtin `cmp` and the Python 2
    # list.sort(cmpfunc) signature; not portable to Python 3.
    values = self._Snapshot()
    values.sort(sort_function)
    self._Assign(values)
|
||||
|
||||
|
||||
def RepeatedScalarProperty(cdescriptor):
  """Returns a Python property for the given repeated scalar field.

  The container wrapper is created lazily on first access and then cached
  in the owner's _composite_fields dict, keyed by field name.  Direct
  assignment to the field raises AttributeError, as for generated messages.
  """

  def Getter(self):
    container = self._composite_fields.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedScalarContainer(self, cdescriptor)
      self._composite_fields[cdescriptor.name] = container
    return container

  def Setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(Getter, Setter, doc=doc)
|
||||
|
||||
|
||||
class RepeatedCompositeContainer(object):

  """Container for repeated composite fields."""

  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor, subclass):
    # msg: owning Python message wrapper; _cmsg is its underlying C++
    # message.  subclass: concrete Python class used to wrap elements.
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor

  def add(self, **kwargs):
    # Allocate a new sub-message inside the C++ parent and wrap it.  The
    # owner reference keeps the parent alive while the wrapper exists.
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)

  def MergeFrom(self, other):
    # other[:] materializes the other container's wrappers before merging.
    for message in other[:]:
      self.add().MergeFrom(message)

  def __getitem__(self, key):
    # Supports both a single index (returns one wrapper) and a slice
    # (returns a list of wrappers) -- the C++ call returns a list only for
    # slices.
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)

    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)

  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    # Element-wise comparison through full-slice materialization.
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages

  def __hash__(self):
    # Mutable container: explicitly unhashable.
    raise TypeError('unhashable object')

  def sort(self, sort_function=cmp):
    # Sort is done by computing the target permutation in Python, then
    # applying it as a sequence of in-place swaps on the C++ message.
    messages = []
    for index in range(len(self)):
      # messages[i][0] is where the i-th element of the new array has to come
      # from.
      # messages[i][1] is where the i-th element of the old array has to go.
      messages.append([index, 0, self[index]])
    messages.sort(lambda x,y: sort_function(x[2], y[2]))

    # Remember which position each elements has to move to.
    for i in range(len(messages)):
      messages[messages[i][0]][1] = i

    # Apply the transposition.
    for i in range(len(messages)):
      from_position = messages[i][0]
      if i == from_position:
        continue
      self._cmsg.SwapRepeatedFieldElements(
          self._cfield_descriptor, i, from_position)
      messages[messages[i][1]][0] = from_position
||||
def RepeatedCompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given repeated composite field."""
  field_name = cdescriptor.name

  def _GetContainer(self):
    # One lazily-created container per message instance, cached in
    # _composite_fields so repeated attribute access reuses it.
    cached = self._composite_fields.get(field_name, None)
    if cached is not None:
      return cached
    fresh = RepeatedCompositeContainer(
        self, cdescriptor, message_type._concrete_class)
    self._composite_fields[field_name] = fresh
    return fresh

  def _RejectAssignment(self, new_value):
    # Direct assignment to a repeated field is forbidden; callers must use
    # the container's mutating methods instead.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % field_name)

  return property(
      _GetContainer, _RejectAssignment,
      doc='Magic attribute generated for "%s" proto field.' % field_name)
||||
class ExtensionDict(object):

  """Extension dictionary added to each protocol message."""

  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    # Cache of Python-side handles (wrappers/containers) keyed by the
    # extension FieldDescriptor.
    self._values = {}

  def __setitem__(self, extension, value):
    # Deferred import to avoid a circular dependency at module load time.
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # Only optional scalar extensions may be assigned directly.
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value

  def __getitem__(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))

    cdescriptor = extension._cdescriptor
    # Singular scalars are read straight from the C++ message; everything
    # else goes through a cached Python handle.
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      return self._cmsg.GetScalar(cdescriptor)

    ext = self._values.get(extension, None)
    if ext is not None:
      return ext

    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext

  def ClearExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    # Drop the stale cached handle, if any.
    if extension in self._values:
      del self._values[extension]

  def HasExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)

  def _CreateNewHandle(self, extension):
    # Builds the Python-side handle for a non-scalar extension: a wrapped
    # sub-message, or a repeated container of the appropriate kind.
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)

    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    assert False
    return None
||||
def NewMessage(message_descriptor, dictionary):
  """Creates a new protocol message *class*."""
  # Populate the class dictionary before the class object exists.  Order
  # matters: _AddDescriptors derives __slots__ from what is already present.
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
||||
def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__)."""
  # Despite the name, this configures the *class*: per-extension registry,
  # __init__, message methods, and extension field-number constants.
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
||||
def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  # Map field name -> C field descriptor, looked up by full name.
  dictionary['__descriptors'] = {}
  for field in message_descriptor.fields:
    dictionary['__descriptors'][field.name] = GetFieldDescriptor(
        field.full_name)

  # __slots__ is one slot per field plus the fixed internal attributes.
  # (iterkeys is Python 2 only.)
  dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions']
||||
def _AddEnumValues(message_descriptor, dictionary):
|
||||
"""Sets class-level attributes for all enum fields defined in this message.
|
||||
|
||||
Args:
|
||||
message_descriptor: Descriptor object for this message type.
|
||||
dictionary: Class dictionary that should be populated.
|
||||
"""
|
||||
for enum_type in message_descriptor.enum_types:
|
||||
for enum_value in enum_type.values:
|
||||
dictionary[enum_value.name] = enum_value.number
|
||||
|
||||
|
||||
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
  """Adds class attributes for the nested extensions."""
  extension_dict = message_descriptor.extensions_by_name
  # (iteritems is Python 2 only.)  Each nested extension becomes a class
  # attribute; a name collision would silently shadow, hence the assert.
  for extension_name, extension_field in extension_dict.iteritems():
    assert extension_name not in dictionary
    dictionary[extension_name] = extension_field
||||
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls."""

  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)

    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)

  def Init(self, **kwargs):
    """Message constructor."""
    # '__cmessage' lets internal code wrap an existing C++ message instead
    # of allocating a fresh one.
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage is None:
      self._cmsg = NewCMessage(message_descriptor.full_name)
    else:
      self._cmsg = cmessage

    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner is not None:
      self._owner = owner

    self.Extensions = ExtensionDict(self)
    self._composite_fields = {}

    # Remaining keyword arguments are field initializers.  (iteritems is
    # Python 2 only.)
    for field_name, field_value in kwargs.iteritems():
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if field_cdescriptor is None:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          for val in field_value:
            getattr(self, field_name).add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)

  # Hide generated-code origins from introspection tools.
  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init
||||
def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension."""
  # A MessageSet extension is an optional message-typed extension whose
  # message type is its own extension scope, inside a container with the
  # message_set_wire_format option set.
  return (field.is_extension and
          field.containing_type.has_options and
          field.containing_type.GetOptions().message_set_wire_format and
          field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)
||||
def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class."""
  # NOTE: every function defined in this scope (conditionally including the
  # extension helpers) is attached to cls by the locals() loop below --
  # do not define stray locals here.
  if message_descriptor.is_extendable:

    def ClearExtension(self, extension):
      self.Extensions.ClearExtension(extension)

    def HasExtension(self, extension):
      return self.Extensions.HasExtension(extension)

  def HasField(self, field_name):
    return self._cmsg.HasField(field_name)

  def ClearField(self, field_name):
    # Drop any cached Python-side container before clearing in C++.
    if field_name in self._composite_fields:
      del self._composite_fields[field_name]
    self._cmsg.ClearField(field_name)

  def Clear(self):
    return self._cmsg.Clear()

  def IsInitialized(self, errors=None):
    # When an errors list is supplied, it is filled with the missing-field
    # names on failure.
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      errors.extend(self.FindInitializationErrors());
    return False

  def SerializeToString(self):
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message is missing required fields: ' +
          ','.join(self.FindInitializationErrors()))
    return self._cmsg.SerializeToString()

  def SerializePartialToString(self):
    return self._cmsg.SerializePartialToString()

  def ParseFromString(self, serialized):
    self.Clear()
    self.MergeFromString(serialized)

  def MergeFromString(self, serialized):
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class.")
    self._cmsg.MergeFrom(msg._cmsg)

  def CopyFrom(self, msg):
    self._cmsg.CopyFrom(msg._cmsg)

  def ByteSize(self):
    return self._cmsg.ByteSize()

  def SetInParent(self):
    return self._cmsg.SetInParent()

  def ListFields(self):
    # Returns (descriptor, value) pairs for all set fields and extensions,
    # ordered by field number.
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields

  def FindInitializationErrors(self):
    return self._cmsg.FindInitializationErrors()

  def __str__(self):
    return self._cmsg.DebugString()

  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def __unicode__(self):
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  # Attach the local methods to the message class.
  # (iteritems is Python 2 only.)
  for key, value in locals().copy().iteritems():
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)

  # Static methods:

  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(string):
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)
||||
def _AddPropertiesForExtensions(message_descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  # Despite the docstring, this only adds the *_FIELD_NUMBER constants for
  # extensions declared on this message.  (iteritems is Python 2 only.)
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    constant_name = extension_name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, extension_field.number)
@ -0,0 +1,714 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Code for decoding protocol buffer primitives.
|
||||
|
||||
This code is very similar to encoder.py -- read the docs for that module first.
|
||||
|
||||
A "decoder" is a function with the signature:
|
||||
Decode(buffer, pos, end, message, field_dict)
|
||||
The arguments are:
|
||||
buffer: The string containing the encoded message.
|
||||
pos: The current position in the string.
|
||||
end: The position in the string where the current message ends. May be
|
||||
less than len(buffer) if we're reading a sub-message.
|
||||
message: The message object into which we're parsing.
|
||||
field_dict: message._fields (avoids a hashtable lookup).
|
||||
The decoder reads the field and stores it into field_dict, returning the new
|
||||
buffer position. A decoder for a repeated field may proactively decode all of
|
||||
the elements of that field, if they appear consecutively.
|
||||
|
||||
Note that decoders may throw any of the following:
|
||||
IndexError: Indicates a truncated message.
|
||||
struct.error: Unpacking of a fixed-width field failed.
|
||||
message.DecodeError: Other errors.
|
||||
|
||||
Decoders are expected to raise an exception if they are called with pos > end.
|
||||
This allows callers to be lax about bounds checking: it's fine to read past
|
||||
"end" as long as you are sure that someone else will notice and throw an
|
||||
exception later on.
|
||||
|
||||
Something up the call stack is expected to catch IndexError and struct.error
|
||||
and convert them to message.DecodeError.
|
||||
|
||||
Decoders are constructed using decoder constructors with the signature:
|
||||
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
|
||||
The arguments are:
|
||||
field_number: The field number of the field we want to decode.
|
||||
is_repeated: Is the field a repeated field? (bool)
|
||||
is_packed: Is the field a packed field? (bool)
|
||||
key: The key to use when looking up the field within field_dict.
|
||||
(This is actually the FieldDescriptor but nothing in this
|
||||
file should depend on that.)
|
||||
new_default: A function which takes a message object as a parameter and
|
||||
returns a new instance of the default value for this field.
|
||||
(This is called for repeated fields and sub-messages, when an
|
||||
instance does not already exist.)
|
||||
|
||||
As with encoders, we define a decoder constructor for every type of field.
|
||||
Then, for every field of every message class we construct an actual decoder.
|
||||
That decoder goes into a dict indexed by tag, so when we decode a message
|
||||
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
|
||||
"""
|
||||
|
||||
__author__ = 'kenton@google.com (Kenton Varda)'
|
||||
|
||||
import struct
|
||||
from google.protobuf.internal import encoder
|
||||
from google.protobuf.internal import wire_format
|
||||
from google.protobuf import message
|
||||
|
||||
|
||||
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0  # inf * 0 yields NaN.


# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
|
||||
|
||||
def _VarintDecoder(mask):
|
||||
"""Return an encoder for a basic varint value (does not include tag).
|
||||
|
||||
Decoded values will be bitwise-anded with the given mask before being
|
||||
returned, e.g. to limit them to 32 bits. The returned decoder does not
|
||||
take the usual "end" parameter -- the caller is expected to do bounds checking
|
||||
after the fact (often the caller can defer such checking until later). The
|
||||
decoder returns a (value, new_pos) pair.
|
||||
"""
|
||||
|
||||
local_ord = ord
|
||||
def DecodeVarint(buffer, pos):
|
||||
result = 0
|
||||
shift = 0
|
||||
while 1:
|
||||
b = local_ord(buffer[pos])
|
||||
result |= ((b & 0x7f) << shift)
|
||||
pos += 1
|
||||
if not (b & 0x80):
|
||||
result &= mask
|
||||
return (result, pos)
|
||||
shift += 7
|
||||
if shift >= 64:
|
||||
raise _DecodeError('Too many bytes when decoding varint.')
|
||||
return DecodeVarint
|
||||
|
||||
|
||||
def _SignedVarintDecoder(mask):
|
||||
"""Like _VarintDecoder() but decodes signed values."""
|
||||
|
||||
local_ord = ord
|
||||
def DecodeVarint(buffer, pos):
|
||||
result = 0
|
||||
shift = 0
|
||||
while 1:
|
||||
b = local_ord(buffer[pos])
|
||||
result |= ((b & 0x7f) << shift)
|
||||
pos += 1
|
||||
if not (b & 0x80):
|
||||
if result > 0x7fffffffffffffff:
|
||||
result -= (1 << 64)
|
||||
result |= ~mask
|
||||
else:
|
||||
result &= mask
|
||||
return (result, pos)
|
||||
shift += 7
|
||||
if shift >= 64:
|
||||
raise _DecodeError('Too many bytes when decoding varint.')
|
||||
return DecodeVarint
|
||||
|
||||
|
||||
# Default 64-bit decoders.
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)

# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
||||
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

  The tag's raw bytes are returned rather than the decoded integer: the
  bytes can be used directly as a hash-table key to find the matching
  decoder, trading pure-Python varint decoding for a C-level string lookup.
  In a low-level language decoding would be cheaper, but not in Python.
  """

  tag_start = pos
  # Skip every continuation byte (high bit set), then take the final byte.
  while ord(buffer[pos]) & 0x80:
    pos += 1
  pos += 1
  return (buffer[tag_start:pos], pos)
|
||||
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
    wire_type: The field's wire type.
    decode_value: A function which decodes an individual value, e.g.
      _DecodeVarint()
  """

  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        # Packed encoding: a single length-delimited blob of values.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          del value[-1]   # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField

  return SpecificDecoder
||||
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like SimpleDecoder but additionally invokes modify_value on every value
  before storing it.  Usually modify_value is ZigZagDecode.
  """

  # Delegating to _SimpleDecoder is marginally slower than duplicating its
  # code here, but not by enough to matter.

  def InnerDecode(buffer, pos):
    raw_value, next_pos = decode_value(buffer, pos)
    return (modify_value(raw_value), next_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
||||
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
    wire_type: The field's wire type.
    format: The format string to pass to struct.unpack().
  """

  # Width is fixed by the format string, so it can be computed once here.
  value_size = struct.calcsize(format)
  local_unpack = struct.unpack

  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
  # not enough to make a significant difference.

  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.

  def InnerDecode(buffer, pos):
    new_pos = pos + value_size
    result = local_unpack(format, buffer[pos:new_pos])[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
||||
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 32-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    # (Python 2 string-byte comparisons; byte 3 is the high-order byte.)
    if ((float_bytes[3] in '\x7F\xFF')
        and (float_bytes[2] >= '\x80')):
      # If at least one significand bit is set...
      if float_bytes[0:3] != '\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3] == '\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
||||
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 64-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number.  In Python 2.4, struct.unpack will treat it
    # as inf or -inf.  To avoid that, we treat it specially.
    # (Python 2 string-byte comparisons; byte 7 is the high-order byte.)
    if ((double_bytes[7] in '\x7F\xFF')
        and (double_bytes[6] >= '\xF0')
        and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
||||
# --------------------------------------------------------------------
# Concrete decoder constructors, one per protobuf scalar type.


Int32Decoder = EnumDecoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)

Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

# sint32/sint64 use zig-zag encoding on the wire.
SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
||||
|
||||
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  Each wire value is a length-delimited byte run decoded to a unicode
  object via UTF-8.
  """

  local_DecodeVarint = _DecodeVarint
  local_unicode = unicode

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated container on first use.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while True:
        (length, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + length
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
        # Predict that the next tag is another copy of the same repeated
        # field; if the prediction fails (or we hit the end), stop here.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (length, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + length
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
      return new_pos
    return DecodeField
|
||||
|
||||
|
||||
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Each wire value is a length-delimited run stored as a raw slice of the
  input buffer (no transcoding).
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated container on first use.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while True:
        (length, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + length
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated
        # field; if the prediction fails (or we hit the end), stop here.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (length, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + length
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
|
||||
|
||||
|
||||
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  A group value is a bracketed sub-message: it is parsed with the
  sub-message's _InternalParse and must be terminated by the matching
  END_GROUP tag.
  """

  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated container exactly once.  The
      # container's identity cannot change while we parse into it, so the
      # previous per-iteration re-fetch inside the loop was redundant work.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos + end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated
        # field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos + end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
|
||||
|
||||
|
||||
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a message field.

  A message value is a varint length followed by that many bytes of
  serialized sub-message, parsed with the sub-message's _InternalParse.
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated container exactly once.  The
      # container's identity cannot change while we parse into it, so the
      # previous per-iteration re-fetch inside the loop was redundant work.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated
        # field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
|
||||
|
||||
|
||||
# --------------------------------------------------------------------

MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)

def MessageSetItemDecoder(extensions_by_number):
  """Returns a decoder for a MessageSet item.

  The parameter is the _extensions_by_number map for the message class.

  The message set message looks like this:
      message MessageSet {
        repeated group Item = 1 {
          required int32 type_id = 2;
          required string message = 3;
        }
      }
  """

  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # Use the pre-bound local alias; it exists precisely to avoid the
        # module-global lookup on this hot path (the code previously called
        # the global SkipField, leaving local_SkipField unused).
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')

    if pos > end:
      raise _DecodeError('Truncated message.')

    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = extensions_by_number.get(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')

    return pos

  return DecodeItem
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Optimization is not as heavy here because calls to SkipField() are rare,
|
||||
# except for handling end-group tags.
|
||||
|
||||
def _SkipVarint(buffer, pos, end):
|
||||
"""Skip a varint value. Returns the new position."""
|
||||
|
||||
while ord(buffer[pos]) & 0x80:
|
||||
pos += 1
|
||||
pos += 1
|
||||
if pos > end:
|
||||
raise _DecodeError('Truncated message.')
|
||||
return pos
|
||||
|
||||
def _SkipFixed64(buffer, pos, end):
|
||||
"""Skip a fixed64 value. Returns the new position."""
|
||||
|
||||
pos += 8
|
||||
if pos > end:
|
||||
raise _DecodeError('Truncated message.')
|
||||
return pos
|
||||
|
||||
def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited value.  Returns the new position."""

  # The value is its varint length followed by that many payload bytes.
  (length, pos) = _DecodeVarint(buffer, pos)
  new_pos = pos + length
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
|
||||
|
||||
def _SkipGroup(buffer, pos, end):
  """Skip sub-group.  Returns the new position."""

  while True:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    skipped = SkipField(buffer, pos, end, tag_bytes)
    if skipped == -1:
      # SkipField returns -1 for an end-group tag: the group is finished.
      return pos
    pos = skipped
|
||||
|
||||
def _EndGroup(buffer, pos, end):
|
||||
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
|
||||
|
||||
return -1
|
||||
|
||||
def _SkipFixed32(buffer, pos, end):
|
||||
"""Skip a fixed32 value. Returns the new position."""
|
||||
|
||||
pos += 4
|
||||
if pos > end:
|
||||
raise _DecodeError('Truncated message.')
|
||||
return pos
|
||||
|
||||
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""

  # Wire types 6 and 7 are unassigned; reaching here means corrupt input.
  raise _DecodeError('Tag had invalid wire type.')
|
||||
|
||||
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Dispatch table indexed by the 3-bit wire type (values 0-7); entries 6
  # and 7 correspond to unassigned wire types and always raise.
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]

  wiretype_mask = wire_format.TAG_TYPE_MASK
  local_ord = ord

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an
        end-group tag (in which case the calling loop should break).
    """

    # The wire type is always in the first byte since varints are
    # little-endian.
    wire_type = local_ord(tag_bytes[0]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField

SkipField = _FieldSkipper()
|
@ -0,0 +1,769 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Code for encoding protocol message primitives.
|
||||
|
||||
Contains the logic for encoding every logical protocol field type
|
||||
into one of the 5 physical wire types.
|
||||
|
||||
This code is designed to push the Python interpreter's performance to the
|
||||
limits.
|
||||
|
||||
The basic idea is that at startup time, for every field (i.e. every
|
||||
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
|
||||
sizer takes a value of this field's type and computes its byte size. The
|
||||
encoder takes a writer function and a value. It encodes the value into byte
|
||||
strings and invokes the writer function to write those strings. Typically the
|
||||
writer function is the write() method of a cStringIO.
|
||||
|
||||
We try to do as much work as possible when constructing the writer and the
|
||||
sizer rather than when calling them. In particular:
|
||||
* We copy any needed global functions to local variables, so that we do not need
|
||||
to do costly global table lookups at runtime.
|
||||
* Similarly, we try to do any attribute lookups at startup time if possible.
|
||||
* Every field's tag is encoded to bytes at startup, since it can't change at
|
||||
runtime.
|
||||
* Whatever component of the field size we can compute at startup, we do.
|
||||
* We *avoid* sharing code if doing so would make the code slower and not sharing
|
||||
does not burden us too much. For example, encoders for repeated fields do
|
||||
not just call the encoders for singular fields in a loop because this would
|
||||
add an extra function call overhead for every loop iteration; instead, we
|
||||
manually inline the single-value encoder into the loop.
|
||||
* If a Python function lacks a return statement, Python actually generates
|
||||
instructions to pop the result of the last statement off the stack, push
|
||||
None onto the stack, and then return that. If we really don't care what
|
||||
value is returned, then we can save two instructions by returning the
|
||||
result of the last statement. It looks funny but it helps.
|
||||
* We assume that type and bounds checking has happened at a higher level.
|
||||
"""
|
||||
|
||||
__author__ = 'kenton@google.com (Kenton Varda)'
|
||||
|
||||
import struct
|
||||
from google.protobuf.internal import wire_format
|
||||
|
||||
|
||||
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
# Negative infinity, derived from the positive constant above.
_NEG_INF = -_POS_INF
|
||||
|
||||
|
||||
def _VarintSize(value):
|
||||
"""Compute the size of a varint value."""
|
||||
if value <= 0x7f: return 1
|
||||
if value <= 0x3fff: return 2
|
||||
if value <= 0x1fffff: return 3
|
||||
if value <= 0xfffffff: return 4
|
||||
if value <= 0x7ffffffff: return 5
|
||||
if value <= 0x3ffffffffff: return 6
|
||||
if value <= 0x1ffffffffffff: return 7
|
||||
if value <= 0xffffffffffffff: return 8
|
||||
if value <= 0x7fffffffffffffff: return 9
|
||||
return 10
|
||||
|
||||
|
||||
def _SignedVarintSize(value):
|
||||
"""Compute the size of a signed varint value."""
|
||||
if value < 0: return 10
|
||||
if value <= 0x7f: return 1
|
||||
if value <= 0x3fff: return 2
|
||||
if value <= 0x1fffff: return 3
|
||||
if value <= 0xfffffff: return 4
|
||||
if value <= 0x7ffffffff: return 5
|
||||
if value <= 0x3ffffffffff: return 6
|
||||
if value <= 0x1ffffffffffff: return 7
|
||||
if value <= 0xffffffffffffff: return 8
|
||||
if value <= 0x7fffffffffffffff: return 9
|
||||
return 10
|
||||
|
||||
|
||||
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # Just pass in type 0, since the type won't affect the tag+type size.
  packed_tag = wire_format.PackTag(field_number, 0)
  return _VarintSize(packed_tag)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
# In this section we define some generic sizers.  Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.


def _SimpleSizer(compute_value_size):
  """A sizer which uses the function compute_value_size to compute the size of
  each value.  Typically compute_value_size is _VarintSize."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        total = 0
        for element in value:
          total += compute_value_size(element)
        # One tag plus a varint length prefix precede the packed payload.
        return total + local_VarintSize(total) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # One tag per element.
        total = tag_size * len(value)
        for element in value:
          total += compute_value_size(element)
        return total
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(value)
      return FieldSize

  return SpecificSizer
|
||||
|
||||
|
||||
def _ModifiedSizer(compute_value_size, modify_value):
  """Like SimpleSizer, but modify_value is invoked on each value before it is
  passed to compute_value_size.  modify_value is typically ZigZagEncode."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        total = 0
        for element in value:
          total += compute_value_size(modify_value(element))
        # One tag plus a varint length prefix precede the packed payload.
        return total + local_VarintSize(total) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # One tag per element.
        total = tag_size * len(value)
        for element in value:
          total += compute_value_size(modify_value(element))
        return total
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(modify_value(value))
      return FieldSize

  return SpecificSizer
|
||||
|
||||
|
||||
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field.  The input is the size
  of one value."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        payload = len(value) * value_size
        return payload + local_VarintSize(payload) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Tag + value size is constant per element; precompute it.
      element_size = value_size + tag_size
      def RepeatedFieldSize(value):
        return len(value) * element_size
      return RepeatedFieldSize
    else:
      field_size = value_size + tag_size
      def FieldSize(value):
        return field_size
      return FieldSize

  return SpecificSizer
|
||||
|
||||
|
||||
# ====================================================================
# Here we declare a sizer constructor for each field type.  Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.


# Signed varint types share a sizer; enums are sized like int32.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)

UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)

# sint32/sint64 apply ZigZag before measuring the varint.
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)

# Fixed-width types have constant per-value sizes.
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)

# A bool always encodes as a single varint byte.
BoolSizer = _FixedSizer(1)
|
||||
|
||||
|
||||
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for element in value:
        # Sizes are measured on the UTF-8 encoding, not the unicode length.
        encoded_len = local_len(element.encode('utf-8'))
        total += local_VarintSize(encoded_len) + encoded_len
      return total
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      encoded_len = local_len(value.encode('utf-8'))
      return tag_size + local_VarintSize(encoded_len) + encoded_len
    return FieldSize
|
||||
|
||||
|
||||
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for element in value:
        # Raw length of the byte run, plus its varint length prefix.
        run_len = local_len(element)
        total += local_VarintSize(run_len) + run_len
      return total
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      run_len = local_len(value)
      return tag_size + local_VarintSize(run_len) + run_len
    return FieldSize
|
||||
|
||||
|
||||
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""

  # Groups are bracketed by START_GROUP and END_GROUP tags, hence the
  # doubled tag size.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for element in value:
        total += element.ByteSize()
      return total
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize
|
||||
|
||||
|
||||
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for element in value:
        # Sub-message payload plus its varint length prefix.
        payload_len = element.ByteSize()
        total += local_VarintSize(payload_len) + payload_len
      return total
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      payload_len = value.ByteSize()
      return tag_size + local_VarintSize(payload_len) + payload_len
    return FieldSize
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
# MessageSet is special.


def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.

  The message set message looks like this:
      message MessageSet {
        repeated group Item = 1 {
          required int32 type_id = 2;
          required string message = 3;
        }
      }
  """
  # Everything except the sub-message payload and its varint length prefix
  # has constant size: the Item start/end group tags, the type_id tag and
  # value, and the message field tag.
  static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                 _TagSize(3))
  local_VarintSize = _VarintSize

  def FieldSize(value):
    payload_len = value.ByteSize()
    return static_size + local_VarintSize(payload_len) + payload_len

  return FieldSize
|
||||
|
||||
|
||||
# ====================================================================
|
||||
# Encoders!
|
||||
|
||||
|
||||
def _VarintEncoder():
|
||||
"""Return an encoder for a basic varint value (does not include tag)."""
|
||||
|
||||
local_chr = chr
|
||||
def EncodeVarint(write, value):
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
while value:
|
||||
write(local_chr(0x80|bits))
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
return write(local_chr(bits))
|
||||
|
||||
return EncodeVarint
|
||||
|
||||
|
||||
def _SignedVarintEncoder():
|
||||
"""Return an encoder for a basic signed varint value (does not include
|
||||
tag)."""
|
||||
|
||||
local_chr = chr
|
||||
def EncodeSignedVarint(write, value):
|
||||
if value < 0:
|
||||
value += (1 << 64)
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
while value:
|
||||
write(local_chr(0x80|bits))
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
return write(local_chr(bits))
|
||||
|
||||
return EncodeSignedVarint
|
||||
|
||||
|
||||
# Module-level encoder instances shared by all the constructors below.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
|
||||
|
||||
|
||||
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes.  This is only
  called at startup time so it doesn't need to be fast."""

  chunks = []
  _EncodeVarint(chunks.append, value)
  return "".join(chunks)
|
||||
|
||||
|
||||
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes.  Only called at startup."""

  packed_tag = wire_format.PackTag(field_number, wire_type)
  return _VarintBytes(packed_tag)
|
||||
|
||||
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.


def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      encode_value:  A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size:  A function which computes the size of an individual
        value, e.g. _VarintSize().
  """

  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # The packed payload is preceded by its total byte size as a varint,
        # so measure first, then emit.
        payload_size = 0
        for element in value:
          payload_size += compute_value_size(element)
        local_EncodeVarint(write, payload_size)
        for element in value:
          encode_value(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        # Non-packed repeated fields get one tag per element.
        for element in value:
          write(tag_bytes)
          encode_value(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return encode_value(write, value)
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
|
||||
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
  """Like SimpleEncoder but additionally invokes modify_value on every value
  before passing it to encode_value.  Usually modify_value is ZigZagEncode."""

  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # Measure the modified values first to emit the length prefix.
        payload_size = 0
        for element in value:
          payload_size += compute_value_size(modify_value(element))
        local_EncodeVarint(write, payload_size)
        for element in value:
          encode_value(write, modify_value(element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          encode_value(write, modify_value(element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return encode_value(write, modify_value(value))
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
|
||||
def _StructPackEncoder(wire_type, format):
  """Return a constructor for an encoder for a fixed-width field.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """

  value_size = struct.calcsize(format)

  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # Fixed-width values let us compute the payload size up front.
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          write(local_struct_pack(format, element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          write(local_struct_pack(format, element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return write(local_struct_pack(format, value))
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
|
||||
def _FloatingPointEncoder(wire_type, format):
  """Return a constructor for an encoder for float fields.

  This is like StructPackEncoder, but catches errors that may be due to
  passing non-finite floating-point values to struct.pack, and makes a
  second attempt to encode those values.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """

  value_size = struct.calcsize(format)
  if value_size == 4:
    def EncodeNonFiniteOrRaise(write, value):
      # Remember that the serialized form uses little-endian byte order.
      if value == _POS_INF:
        write('\x00\x00\x80\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x80\xFF')
      elif value != value:  # NaN
        write('\x00\x00\xC0\x7F')
      else:
        # Not a non-finite value; re-raise whatever struct.pack threw.
        raise
  elif value_size == 8:
    def EncodeNonFiniteOrRaise(write, value):
      if value == _POS_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\xFF')
      elif value != value:  # NaN
        write('\x00\x00\x00\x00\x00\x00\xF8\x7F')
      else:
        raise
  else:
    raise ValueError('Can\'t encode floating-point values that are '
                     '%d bytes long (only 4 or 8)' % value_size)

  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          # This try/except block is going to be faster than any code that
          # we could write to check whether element is finite.
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        try:
          write(local_struct_pack(format, value))
        except SystemError:
          EncodeNonFiniteOrRaise(write, value)
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
|
||||
# ====================================================================
|
||||
# Here we declare an encoder constructor for each field type. These work
|
||||
# very similarly to sizer constructors, described earlier.
|
||||
|
||||
|
||||
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
|
||||
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
|
||||
|
||||
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
|
||||
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
|
||||
|
||||
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
|
||||
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
|
||||
wire_format.ZigZagEncode)
|
||||
|
||||
# Note that Python conveniently guarantees that when using the '<' prefix on
|
||||
# formats, they will also have the same size across all platforms (as opposed
|
||||
# to without the prefix, where their sizes depend on the C compiler's basic
|
||||
# type sizes).
|
||||
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
|
||||
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
|
||||
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
|
||||
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
|
||||
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
|
||||
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
|
||||
|
||||
|
||||
def BoolEncoder(field_number, is_repeated, is_packed):
|
||||
"""Returns an encoder for a boolean field."""
|
||||
|
||||
false_byte = chr(0)
|
||||
true_byte = chr(1)
|
||||
if is_packed:
|
||||
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
|
||||
local_EncodeVarint = _EncodeVarint
|
||||
def EncodePackedField(write, value):
|
||||
write(tag_bytes)
|
||||
local_EncodeVarint(write, len(value))
|
||||
for element in value:
|
||||
if element:
|
||||
write(true_byte)
|
||||
else:
|
||||
write(false_byte)
|
||||
return EncodePackedField
|
||||
elif is_repeated:
|
||||
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
|
||||
def EncodeRepeatedField(write, value):
|
||||
for element in value:
|
||||
write(tag_bytes)
|
||||
if element:
|
||||
write(true_byte)
|
||||
else:
|
||||
write(false_byte)
|
||||
return EncodeRepeatedField
|
||||
else:
|
||||
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
|
||||
def EncodeField(write, value):
|
||||
write(tag_bytes)
|
||||
if value:
|
||||
return write(true_byte)
|
||||
return write(false_byte)
|
||||
return EncodeField
|
||||
|
||||
|
||||
def StringEncoder(field_number, is_repeated, is_packed):
|
||||
"""Returns an encoder for a string field."""
|
||||
|
||||
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
|
||||
local_EncodeVarint = _EncodeVarint
|
||||
local_len = len
|
||||
assert not is_packed
|
||||
if is_repeated:
|
||||
def EncodeRepeatedField(write, value):
|
||||
for element in value:
|
||||
encoded = element.encode('utf-8')
|
||||
write(tag)
|
||||
local_EncodeVarint(write, local_len(encoded))
|
||||
write(encoded)
|
||||
return EncodeRepeatedField
|
||||
else:
|
||||
def EncodeField(write, value):
|
||||
encoded = value.encode('utf-8')
|
||||
write(tag)
|
||||
local_EncodeVarint(write, local_len(encoded))
|
||||
return write(encoded)
|
||||
return EncodeField
|
||||
|
||||
|
||||
def BytesEncoder(field_number, is_repeated, is_packed):
|
||||
"""Returns an encoder for a bytes field."""
|
||||
|
||||
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
|
||||
local_EncodeVarint = _EncodeVarint
|
||||
local_len = len
|
||||
assert not is_packed
|
||||
if is_repeated:
|
||||
def EncodeRepeatedField(write, value):
|
||||
for element in value:
|
||||
write(tag)
|
||||
local_EncodeVarint(write, local_len(element))
|
||||
write(element)
|
||||
return EncodeRepeatedField
|
||||
else:
|
||||
def EncodeField(write, value):
|
||||
write(tag)
|
||||
local_EncodeVarint(write, local_len(value))
|
||||
return write(value)
|
||||
return EncodeField
|
||||
|
||||
|
||||
def GroupEncoder(field_number, is_repeated, is_packed):
|
||||
"""Returns an encoder for a group field."""
|
||||
|
||||
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
|
||||
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
|
||||
assert not is_packed
|
||||
if is_repeated:
|
||||
def EncodeRepeatedField(write, value):
|
||||
for element in value:
|
||||
write(start_tag)
|
||||
element._InternalSerialize(write)
|
||||
write(end_tag)
|
||||
return EncodeRepeatedField
|
||||
else:
|
||||
def EncodeField(write, value):
|
||||
write(start_tag)
|
||||
value._InternalSerialize(write)
|
||||
return write(end_tag)
|
||||
return EncodeField
|
||||
|
||||
|
||||
def MessageEncoder(field_number, is_repeated, is_packed):
|
||||
"""Returns an encoder for a message field."""
|
||||
|
||||
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
|
||||
local_EncodeVarint = _EncodeVarint
|
||||
assert not is_packed
|
||||
if is_repeated:
|
||||
def EncodeRepeatedField(write, value):
|
||||
for element in value:
|
||||
write(tag)
|
||||
local_EncodeVarint(write, element.ByteSize())
|
||||
element._InternalSerialize(write)
|
||||
return EncodeRepeatedField
|
||||
else:
|
||||
def EncodeField(write, value):
|
||||
write(tag)
|
||||
local_EncodeVarint(write, value.ByteSize())
|
||||
return value._InternalSerialize(write)
|
||||
return EncodeField
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# As before, MessageSet is special.
|
||||
|
||||
|
||||
def MessageSetItemEncoder(field_number):
|
||||
"""Encoder for extensions of MessageSet.
|
||||
|
||||
The message set message looks like this:
|
||||
message MessageSet {
|
||||
repeated group Item = 1 {
|
||||
required int32 type_id = 2;
|
||||
required string message = 3;
|
||||
}
|
||||
}
|
||||
"""
|
||||
start_bytes = "".join([
|
||||
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
|
||||
TagBytes(2, wire_format.WIRETYPE_VARINT),
|
||||
_VarintBytes(field_number),
|
||||
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
|
||||
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
|
||||
local_EncodeVarint = _EncodeVarint
|
||||
|
||||
def EncodeField(write, value):
|
||||
write(start_bytes)
|
||||
local_EncodeVarint(write, value.ByteSize())
|
||||
value._InternalSerialize(write)
|
||||
return write(end_bytes)
|
||||
|
||||
return EncodeField
|
@ -0,0 +1,78 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Defines a listener interface for observing certain
|
||||
state transitions on Message objects.
|
||||
|
||||
Also defines a null implementation of this interface.
|
||||
"""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
|
||||
class MessageListener(object):
|
||||
|
||||
"""Listens for modifications made to a message. Meant to be registered via
|
||||
Message._SetListener().
|
||||
|
||||
Attributes:
|
||||
dirty: If True, then calling Modified() would be a no-op. This can be
|
||||
used to avoid these calls entirely in the common case.
|
||||
"""
|
||||
|
||||
def Modified(self):
|
||||
"""Called every time the message is modified in such a way that the parent
|
||||
message may need to be updated. This currently means either:
|
||||
(a) The message was modified for the first time, so the parent message
|
||||
should henceforth mark the message as present.
|
||||
(b) The message's cached byte size became dirty -- i.e. the message was
|
||||
modified for the first time after a previous call to ByteSize().
|
||||
Therefore the parent should also mark its byte size as dirty.
|
||||
Note that (a) implies (b), since new objects start out with a client cached
|
||||
size (zero). However, we document (a) explicitly because it is important.
|
||||
|
||||
Modified() will *only* be called in response to one of these two events --
|
||||
not every time the sub-message is modified.
|
||||
|
||||
Note that if the listener's |dirty| attribute is true, then calling
|
||||
Modified at the moment would be a no-op, so it can be skipped. Performance-
|
||||
sensitive callers should check this attribute directly before calling since
|
||||
it will be true most of the time.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class NullMessageListener(object):
|
||||
|
||||
"""No-op MessageListener implementation."""
|
||||
|
||||
def Modified(self):
|
||||
pass
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,286 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Provides type checking routines.
|
||||
|
||||
This module defines type checking utilities in the forms of dictionaries:
|
||||
|
||||
VALUE_CHECKERS: A dictionary of field types and a value validation object.
|
||||
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
|
||||
function.
|
||||
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
|
||||
function.
|
||||
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
|
||||
coresponding wire types.
|
||||
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
|
||||
function.
|
||||
"""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
from google.protobuf.internal import decoder
|
||||
from google.protobuf.internal import encoder
|
||||
from google.protobuf.internal import wire_format
|
||||
from google.protobuf import descriptor
|
||||
|
||||
_FieldDescriptor = descriptor.FieldDescriptor
|
||||
|
||||
|
||||
def GetTypeChecker(cpp_type, field_type):
|
||||
"""Returns a type checker for a message field of the specified types.
|
||||
|
||||
Args:
|
||||
cpp_type: C++ type of the field (see descriptor.py).
|
||||
field_type: Protocol message field type (see descriptor.py).
|
||||
|
||||
Returns:
|
||||
An instance of TypeChecker which can be used to verify the types
|
||||
of values assigned to a field of the specified type.
|
||||
"""
|
||||
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
|
||||
field_type == _FieldDescriptor.TYPE_STRING):
|
||||
return UnicodeValueChecker()
|
||||
return _VALUE_CHECKERS[cpp_type]
|
||||
|
||||
|
||||
# None of the typecheckers below make any attempt to guard against people
|
||||
# subclassing builtin types and doing weird things. We're not trying to
|
||||
# protect against malicious clients here, just people accidentally shooting
|
||||
# themselves in the foot in obvious ways.
|
||||
|
||||
class TypeChecker(object):
|
||||
|
||||
"""Type checker used to catch type errors as early as possible
|
||||
when the client is setting scalar fields in protocol messages.
|
||||
"""
|
||||
|
||||
def __init__(self, *acceptable_types):
|
||||
self._acceptable_types = acceptable_types
|
||||
|
||||
def CheckValue(self, proposed_value):
|
||||
if not isinstance(proposed_value, self._acceptable_types):
|
||||
message = ('%.1024r has type %s, but expected one of: %s' %
|
||||
(proposed_value, type(proposed_value), self._acceptable_types))
|
||||
raise TypeError(message)
|
||||
|
||||
|
||||
# IntValueChecker and its subclasses perform integer type-checks
|
||||
# and bounds-checks.
|
||||
class IntValueChecker(object):
|
||||
|
||||
"""Checker used for integer fields. Performs type-check and range check."""
|
||||
|
||||
def CheckValue(self, proposed_value):
|
||||
if not isinstance(proposed_value, (int, long)):
|
||||
message = ('%.1024r has type %s, but expected one of: %s' %
|
||||
(proposed_value, type(proposed_value), (int, long)))
|
||||
raise TypeError(message)
|
||||
if not self._MIN <= proposed_value <= self._MAX:
|
||||
raise ValueError('Value out of range: %d' % proposed_value)
|
||||
|
||||
|
||||
class UnicodeValueChecker(object):
|
||||
|
||||
"""Checker used for string fields."""
|
||||
|
||||
def CheckValue(self, proposed_value):
|
||||
if not isinstance(proposed_value, (str, unicode)):
|
||||
message = ('%.1024r has type %s, but expected one of: %s' %
|
||||
(proposed_value, type(proposed_value), (str, unicode)))
|
||||
raise TypeError(message)
|
||||
|
||||
# If the value is of type 'str' make sure that it is in 7-bit ASCII
|
||||
# encoding.
|
||||
if isinstance(proposed_value, str):
|
||||
try:
|
||||
unicode(proposed_value, 'ascii')
|
||||
except UnicodeDecodeError:
|
||||
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
|
||||
'encoding. Non-ASCII strings must be converted to '
|
||||
'unicode objects before being added.' %
|
||||
(proposed_value))
|
||||
|
||||
|
||||
class Int32ValueChecker(IntValueChecker):
|
||||
# We're sure to use ints instead of longs here since comparison may be more
|
||||
# efficient.
|
||||
_MIN = -2147483648
|
||||
_MAX = 2147483647
|
||||
|
||||
|
||||
class Uint32ValueChecker(IntValueChecker):
|
||||
_MIN = 0
|
||||
_MAX = (1 << 32) - 1
|
||||
|
||||
|
||||
class Int64ValueChecker(IntValueChecker):
|
||||
_MIN = -(1 << 63)
|
||||
_MAX = (1 << 63) - 1
|
||||
|
||||
|
||||
class Uint64ValueChecker(IntValueChecker):
|
||||
_MIN = 0
|
||||
_MAX = (1 << 64) - 1
|
||||
|
||||
|
||||
# Type-checkers for all scalar CPPTYPEs.
|
||||
_VALUE_CHECKERS = {
|
||||
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
|
||||
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
|
||||
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
|
||||
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
|
||||
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
|
||||
float, int, long),
|
||||
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
|
||||
float, int, long),
|
||||
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
|
||||
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
|
||||
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
|
||||
}
|
||||
|
||||
|
||||
# Map from field type to a function F, such that F(field_num, value)
|
||||
# gives the total byte size for a value of the given type. This
|
||||
# byte size includes tag information and any other additional space
|
||||
# associated with serializing "value".
|
||||
TYPE_TO_BYTE_SIZE_FN = {
|
||||
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
|
||||
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
|
||||
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
|
||||
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
|
||||
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
|
||||
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
|
||||
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
|
||||
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
|
||||
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
|
||||
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
|
||||
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
|
||||
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
|
||||
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
|
||||
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
|
||||
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
|
||||
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
|
||||
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
|
||||
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
|
||||
}
|
||||
|
||||
|
||||
# Maps from field types to encoder constructors.
|
||||
TYPE_TO_ENCODER = {
|
||||
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
|
||||
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
|
||||
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
|
||||
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
|
||||
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
|
||||
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
|
||||
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
|
||||
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
|
||||
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
|
||||
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
|
||||
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
|
||||
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
|
||||
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
|
||||
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
|
||||
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
|
||||
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
|
||||
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
|
||||
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
|
||||
}
|
||||
|
||||
|
||||
# Maps from field types to sizer constructors.
|
||||
TYPE_TO_SIZER = {
|
||||
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
|
||||
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
|
||||
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
|
||||
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
|
||||
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
|
||||
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
|
||||
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
|
||||
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
|
||||
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
|
||||
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
|
||||
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
|
||||
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
|
||||
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
|
||||
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
|
||||
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
|
||||
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
|
||||
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
|
||||
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
|
||||
}
|
||||
|
||||
|
||||
# Maps from field type to a decoder constructor.
|
||||
TYPE_TO_DECODER = {
|
||||
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
|
||||
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
|
||||
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
|
||||
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
|
||||
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
|
||||
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
|
||||
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
|
||||
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
|
||||
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
|
||||
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
|
||||
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
|
||||
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
|
||||
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
|
||||
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
|
||||
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
|
||||
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
|
||||
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
|
||||
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
|
||||
}
|
||||
|
||||
# Maps from field type to expected wiretype.
|
||||
FIELD_TYPE_TO_WIRE_TYPE = {
|
||||
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
|
||||
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
|
||||
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
|
||||
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
|
||||
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_STRING:
|
||||
wire_format.WIRETYPE_LENGTH_DELIMITED,
|
||||
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
|
||||
_FieldDescriptor.TYPE_MESSAGE:
|
||||
wire_format.WIRETYPE_LENGTH_DELIMITED,
|
||||
_FieldDescriptor.TYPE_BYTES:
|
||||
wire_format.WIRETYPE_LENGTH_DELIMITED,
|
||||
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
|
||||
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
|
||||
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
|
||||
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
|
||||
}
|
@ -0,0 +1,268 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Constants and static functions to support protocol buffer wire format."""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
import struct
|
||||
from google.protobuf import descriptor
|
||||
from google.protobuf import message
|
||||
|
||||
|
||||
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
|
||||
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
|
||||
|
||||
# These numbers identify the wire type of a protocol buffer value.
|
||||
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
|
||||
# tag-and-type to store one of these WIRETYPE_* constants.
|
||||
# These values must match WireType enum in google/protobuf/wire_format.h.
|
||||
WIRETYPE_VARINT = 0
|
||||
WIRETYPE_FIXED64 = 1
|
||||
WIRETYPE_LENGTH_DELIMITED = 2
|
||||
WIRETYPE_START_GROUP = 3
|
||||
WIRETYPE_END_GROUP = 4
|
||||
WIRETYPE_FIXED32 = 5
|
||||
_WIRETYPE_MAX = 5
|
||||
|
||||
|
||||
# Bounds for various integer types.
|
||||
INT32_MAX = int((1 << 31) - 1)
|
||||
INT32_MIN = int(-(1 << 31))
|
||||
UINT32_MAX = (1 << 32) - 1
|
||||
|
||||
INT64_MAX = (1 << 63) - 1
|
||||
INT64_MIN = -(1 << 63)
|
||||
UINT64_MAX = (1 << 64) - 1
|
||||
|
||||
# "struct" format strings that will encode/decode the specified formats.
|
||||
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
|
||||
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
|
||||
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
|
||||
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
|
||||
|
||||
|
||||
# We'll have to provide alternate implementations of AppendLittleEndian*() on
|
||||
# any architectures where these checks fail.
|
||||
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
|
||||
raise AssertionError('Format "I" is not a 32-bit number.')
|
||||
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
|
||||
raise AssertionError('Format "Q" is not a 64-bit number.')
|
||||
|
||||
|
||||
def PackTag(field_number, wire_type):
|
||||
"""Returns an unsigned 32-bit integer that encodes the field number and
|
||||
wire type information in standard protocol message wire format.
|
||||
|
||||
Args:
|
||||
field_number: Expected to be an integer in the range [1, 1 << 29)
|
||||
wire_type: One of the WIRETYPE_* constants.
|
||||
"""
|
||||
if not 0 <= wire_type <= _WIRETYPE_MAX:
|
||||
raise message.EncodeError('Unknown wire type: %d' % wire_type)
|
||||
return (field_number << TAG_TYPE_BITS) | wire_type
|
||||
|
||||
|
||||
def UnpackTag(tag):
|
||||
"""The inverse of PackTag(). Given an unsigned 32-bit number,
|
||||
returns a (field_number, wire_type) tuple.
|
||||
"""
|
||||
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
|
||||
|
||||
|
||||
def ZigZagEncode(value):
|
||||
"""ZigZag Transform: Encodes signed integers so that they can be
|
||||
effectively used with varint encoding. See wire_format.h for
|
||||
more details.
|
||||
"""
|
||||
if value >= 0:
|
||||
return value << 1
|
||||
return (value << 1) ^ (~0)
|
||||
|
||||
|
||||
def ZigZagDecode(value):
|
||||
"""Inverse of ZigZagEncode()."""
|
||||
if not value & 0x1:
|
||||
return value >> 1
|
||||
return (value >> 1) ^ (~0)
|
||||
|
||||
|
||||
|
||||
# The *ByteSize() functions below return the number of bytes required to
|
||||
# serialize "field number + type" information and then serialize the value.
|
||||
|
||||
|
||||
def Int32ByteSize(field_number, int32):
|
||||
return Int64ByteSize(field_number, int32)
|
||||
|
||||
|
||||
def Int32ByteSizeNoTag(int32):
|
||||
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
|
||||
|
||||
|
||||
def Int64ByteSize(field_number, int64):
|
||||
# Have to convert to uint before calling UInt64ByteSize().
|
||||
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
|
||||
|
||||
|
||||
def UInt32ByteSize(field_number, uint32):
|
||||
return UInt64ByteSize(field_number, uint32)
|
||||
|
||||
|
||||
def UInt64ByteSize(field_number, uint64):
|
||||
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
|
||||
|
||||
|
||||
def SInt32ByteSize(field_number, int32):
|
||||
return UInt32ByteSize(field_number, ZigZagEncode(int32))
|
||||
|
||||
|
||||
def SInt64ByteSize(field_number, int64):
|
||||
return UInt64ByteSize(field_number, ZigZagEncode(int64))
|
||||
|
||||
|
||||
def Fixed32ByteSize(field_number, fixed32):
|
||||
return TagByteSize(field_number) + 4
|
||||
|
||||
|
||||
def Fixed64ByteSize(field_number, fixed64):
|
||||
return TagByteSize(field_number) + 8
|
||||
|
||||
|
||||
def SFixed32ByteSize(field_number, sfixed32):
|
||||
return TagByteSize(field_number) + 4
|
||||
|
||||
|
||||
def SFixed64ByteSize(field_number, sfixed64):
|
||||
return TagByteSize(field_number) + 8
|
||||
|
||||
|
||||
def FloatByteSize(field_number, flt):
|
||||
return TagByteSize(field_number) + 4
|
||||
|
||||
|
||||
def DoubleByteSize(field_number, double):
|
||||
return TagByteSize(field_number) + 8
|
||||
|
||||
|
||||
def BoolByteSize(field_number, b):
|
||||
return TagByteSize(field_number) + 1
|
||||
|
||||
|
||||
def EnumByteSize(field_number, enum):
|
||||
return UInt32ByteSize(field_number, enum)
|
||||
|
||||
|
||||
def StringByteSize(field_number, string):
|
||||
return BytesByteSize(field_number, string.encode('utf-8'))
|
||||
|
||||
|
||||
def BytesByteSize(field_number, b):
|
||||
return (TagByteSize(field_number)
|
||||
+ _VarUInt64ByteSizeNoTag(len(b))
|
||||
+ len(b))
|
||||
|
||||
|
||||
def GroupByteSize(field_number, message):
|
||||
return (2 * TagByteSize(field_number) # START and END group.
|
||||
+ message.ByteSize())
|
||||
|
||||
|
||||
def MessageByteSize(field_number, message):
|
||||
return (TagByteSize(field_number)
|
||||
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
|
||||
+ message.ByteSize())
|
||||
|
||||
|
||||
def MessageSetItemByteSize(field_number, msg):
|
||||
# First compute the sizes of the tags.
|
||||
# There are 2 tags for the beginning and ending of the repeated group, that
|
||||
# is field number 1, one with field number 2 (type_id) and one with field
|
||||
# number 3 (message).
|
||||
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
|
||||
|
||||
# Add the number of bytes for type_id.
|
||||
total_size += _VarUInt64ByteSizeNoTag(field_number)
|
||||
|
||||
message_size = msg.ByteSize()
|
||||
|
||||
# The number of bytes for encoding the length of the message.
|
||||
total_size += _VarUInt64ByteSizeNoTag(message_size)
|
||||
|
||||
# The size of the message.
|
||||
total_size += message_size
|
||||
return total_size
|
||||
|
||||
|
||||
def TagByteSize(field_number):
|
||||
"""Returns the bytes required to serialize a tag with this field number."""
|
||||
# Just pass in type 0, since the type won't affect the tag+type size.
|
||||
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
|
||||
|
||||
|
||||
# Private helper function for the *ByteSize() functions above.
|
||||
|
||||
def _VarUInt64ByteSizeNoTag(uint64):
|
||||
"""Returns the number of bytes required to serialize a single varint
|
||||
using boundary value comparisons. (unrolled loop optimization -WPierce)
|
||||
uint64 must be unsigned.
|
||||
"""
|
||||
if uint64 <= 0x7f: return 1
|
||||
if uint64 <= 0x3fff: return 2
|
||||
if uint64 <= 0x1fffff: return 3
|
||||
if uint64 <= 0xfffffff: return 4
|
||||
if uint64 <= 0x7ffffffff: return 5
|
||||
if uint64 <= 0x3ffffffffff: return 6
|
||||
if uint64 <= 0x1ffffffffffff: return 7
|
||||
if uint64 <= 0xffffffffffffff: return 8
|
||||
if uint64 <= 0x7fffffffffffffff: return 9
|
||||
if uint64 > UINT64_MAX:
|
||||
raise message.EncodeError('Value out of range: %d' % uint64)
|
||||
return 10
|
||||
|
||||
|
||||
NON_PACKABLE_TYPES = (
|
||||
descriptor.FieldDescriptor.TYPE_STRING,
|
||||
descriptor.FieldDescriptor.TYPE_GROUP,
|
||||
descriptor.FieldDescriptor.TYPE_MESSAGE,
|
||||
descriptor.FieldDescriptor.TYPE_BYTES
|
||||
)
|
||||
|
||||
|
||||
def IsTypePackable(field_type):
|
||||
"""Return true iff packable = true is valid for fields of this type.
|
||||
|
||||
Args:
|
||||
field_type: a FieldDescriptor::Type value.
|
||||
|
||||
Returns:
|
||||
True iff fields of this type are packable.
|
||||
"""
|
||||
return field_type not in NON_PACKABLE_TYPES
|
@ -0,0 +1,268 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# TODO(robinson): We should just make these methods all "pure-virtual" and move
|
||||
# all implementation out, into reflection.py for now.
|
||||
|
||||
|
||||
"""Contains an abstract base class for protocol messages."""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
|
||||
class Error(Exception): pass
|
||||
class DecodeError(Error): pass
|
||||
class EncodeError(Error): pass
|
||||
|
||||
|
||||
class Message(object):
|
||||
|
||||
"""Abstract base class for protocol messages.
|
||||
|
||||
Protocol message classes are almost always generated by the protocol
|
||||
compiler. These generated types subclass Message and implement the methods
|
||||
shown below.
|
||||
|
||||
TODO(robinson): Link to an HTML document here.
|
||||
|
||||
TODO(robinson): Document that instances of this class will also
|
||||
have an Extensions attribute with __getitem__ and __setitem__.
|
||||
Again, not sure how to best convey this.
|
||||
|
||||
TODO(robinson): Document that the class must also have a static
|
||||
RegisterExtension(extension_field) method.
|
||||
Not sure how to best express at this point.
|
||||
"""
|
||||
|
||||
# TODO(robinson): Document these fields and methods.
|
||||
|
||||
__slots__ = []
|
||||
|
||||
DESCRIPTOR = None
|
||||
|
||||
def __deepcopy__(self, memo=None):
|
||||
clone = type(self)()
|
||||
clone.MergeFrom(self)
|
||||
return clone
|
||||
|
||||
def __eq__(self, other_msg):
|
||||
raise NotImplementedError
|
||||
|
||||
def __ne__(self, other_msg):
|
||||
# Can't just say self != other_msg, since that would infinitely recurse. :)
|
||||
return not self == other_msg
|
||||
|
||||
def __hash__(self):
|
||||
raise TypeError('unhashable object')
|
||||
|
||||
def __str__(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def __unicode__(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def MergeFrom(self, other_msg):
|
||||
"""Merges the contents of the specified message into current message.
|
||||
|
||||
This method merges the contents of the specified message into the current
|
||||
message. Singular fields that are set in the specified message overwrite
|
||||
the corresponding fields in the current message. Repeated fields are
|
||||
appended. Singular sub-messages and groups are recursively merged.
|
||||
|
||||
Args:
|
||||
other_msg: Message to merge into the current message.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def CopyFrom(self, other_msg):
|
||||
"""Copies the content of the specified message into the current message.
|
||||
|
||||
The method clears the current message and then merges the specified
|
||||
message using MergeFrom.
|
||||
|
||||
Args:
|
||||
other_msg: Message to copy into the current one.
|
||||
"""
|
||||
if self is other_msg:
|
||||
return
|
||||
self.Clear()
|
||||
self.MergeFrom(other_msg)
|
||||
|
||||
def Clear(self):
|
||||
"""Clears all data that was set in the message."""
|
||||
raise NotImplementedError
|
||||
|
||||
def SetInParent(self):
|
||||
"""Mark this as present in the parent.
|
||||
|
||||
This normally happens automatically when you assign a field of a
|
||||
sub-message, but sometimes you want to make the sub-message
|
||||
present while keeping it empty. If you find yourself using this,
|
||||
you may want to reconsider your design."""
|
||||
raise NotImplementedError
|
||||
|
||||
def IsInitialized(self):
|
||||
"""Checks if the message is initialized.
|
||||
|
||||
Returns:
|
||||
The method returns True if the message is initialized (i.e. all of its
|
||||
required fields are set).
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
# TODO(robinson): MergeFromString() should probably return None and be
|
||||
# implemented in terms of a helper that returns the # of bytes read. Our
|
||||
# deserialization routines would use the helper when recursively
|
||||
# deserializing, but the end user would almost always just want the no-return
|
||||
# MergeFromString().
|
||||
|
||||
def MergeFromString(self, serialized):
|
||||
"""Merges serialized protocol buffer data into this message.
|
||||
|
||||
When we find a field in |serialized| that is already present
|
||||
in this message:
|
||||
- If it's a "repeated" field, we append to the end of our list.
|
||||
- Else, if it's a scalar, we overwrite our field.
|
||||
- Else, (it's a nonrepeated composite), we recursively merge
|
||||
into the existing composite.
|
||||
|
||||
TODO(robinson): Document handling of unknown fields.
|
||||
|
||||
Args:
|
||||
serialized: Any object that allows us to call buffer(serialized)
|
||||
to access a string of bytes using the buffer interface.
|
||||
|
||||
TODO(robinson): When we switch to a helper, this will return None.
|
||||
|
||||
Returns:
|
||||
The number of bytes read from |serialized|.
|
||||
For non-group messages, this will always be len(serialized),
|
||||
but for messages which are actually groups, this will
|
||||
generally be less than len(serialized), since we must
|
||||
stop when we reach an END_GROUP tag. Note that if
|
||||
we *do* stop because of an END_GROUP tag, the number
|
||||
of bytes returned does not include the bytes
|
||||
for the END_GROUP tag information.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def ParseFromString(self, serialized):
|
||||
"""Like MergeFromString(), except we clear the object first."""
|
||||
self.Clear()
|
||||
self.MergeFromString(serialized)
|
||||
|
||||
def SerializeToString(self):
|
||||
"""Serializes the protocol message to a binary string.
|
||||
|
||||
Returns:
|
||||
A binary string representation of the message if all of the required
|
||||
fields in the message are set (i.e. the message is initialized).
|
||||
|
||||
Raises:
|
||||
message.EncodeError if the message isn't initialized.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def SerializePartialToString(self):
|
||||
"""Serializes the protocol message to a binary string.
|
||||
|
||||
This method is similar to SerializeToString but doesn't check if the
|
||||
message is initialized.
|
||||
|
||||
Returns:
|
||||
A string representation of the partial message.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
# TODO(robinson): Decide whether we like these better
|
||||
# than auto-generated has_foo() and clear_foo() methods
|
||||
# on the instances themselves. This way is less consistent
|
||||
# with C++, but it makes reflection-type access easier and
|
||||
# reduces the number of magically autogenerated things.
|
||||
#
|
||||
# TODO(robinson): Be sure to document (and test) exactly
|
||||
# which field names are accepted here. Are we case-sensitive?
|
||||
# What do we do with fields that share names with Python keywords
|
||||
# like 'lambda' and 'yield'?
|
||||
#
|
||||
# nnorwitz says:
|
||||
# """
|
||||
# Typically (in python), an underscore is appended to names that are
|
||||
# keywords. So they would become lambda_ or yield_.
|
||||
# """
|
||||
def ListFields(self):
|
||||
"""Returns a list of (FieldDescriptor, value) tuples for all
|
||||
fields in the message which are not empty. A singular field is non-empty
|
||||
if HasField() would return true, and a repeated field is non-empty if
|
||||
it contains at least one element. The fields are ordered by field
|
||||
number"""
|
||||
raise NotImplementedError
|
||||
|
||||
def HasField(self, field_name):
|
||||
"""Checks if a certain field is set for the message. Note if the
|
||||
field_name is not defined in the message descriptor, ValueError will be
|
||||
raised."""
|
||||
raise NotImplementedError
|
||||
|
||||
def ClearField(self, field_name):
|
||||
raise NotImplementedError
|
||||
|
||||
def HasExtension(self, extension_handle):
|
||||
raise NotImplementedError
|
||||
|
||||
def ClearExtension(self, extension_handle):
|
||||
raise NotImplementedError
|
||||
|
||||
def ByteSize(self):
|
||||
"""Returns the serialized size of this message.
|
||||
Recursively calls ByteSize() on all contained messages.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def _SetListener(self, message_listener):
|
||||
"""Internal method used by the protocol message implementation.
|
||||
Clients should not call this directly.
|
||||
|
||||
Sets a listener that this message will call on certain state transitions.
|
||||
|
||||
The purpose of this method is to register back-edges from children to
|
||||
parents at runtime, for the purpose of setting "has" bits and
|
||||
byte-size-dirty bits in the parent and ancestor objects whenever a child or
|
||||
descendant object is modified.
|
||||
|
||||
If the client wants to disconnect this Message from the object tree, she
|
||||
explicitly sets callback to None.
|
||||
|
||||
If message_listener is None, unregisters any existing listener. Otherwise,
|
||||
message_listener must implement the MessageListener interface in
|
||||
internal/message_listener.py, and we discard any listener registered
|
||||
via a previous _SetListener() call.
|
||||
"""
|
||||
raise NotImplementedError
|
@ -0,0 +1,142 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# This code is meant to work on Python 2.4 and above only.
|
||||
|
||||
"""Contains a metaclass and helper functions used to create
|
||||
protocol message classes from Descriptor objects at runtime.
|
||||
|
||||
Recall that a metaclass is the "type" of a class.
|
||||
(A class is to a metaclass what an instance is to a class.)
|
||||
|
||||
In this case, we use the GeneratedProtocolMessageType metaclass
|
||||
to inject all the useful functionality into the classes
|
||||
output by the protocol compiler at compile-time.
|
||||
|
||||
The upshot of all this is that the real implementation
|
||||
details for ALL pure-Python protocol buffers are *here in
|
||||
this file*.
|
||||
"""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
|
||||
from google.protobuf.internal import api_implementation
|
||||
from google.protobuf import descriptor as descriptor_mod
|
||||
_FieldDescriptor = descriptor_mod.FieldDescriptor
|
||||
|
||||
|
||||
if api_implementation.Type() == 'cpp':
|
||||
from google.protobuf.internal import cpp_message
|
||||
_NewMessage = cpp_message.NewMessage
|
||||
_InitMessage = cpp_message.InitMessage
|
||||
else:
|
||||
from google.protobuf.internal import python_message
|
||||
_NewMessage = python_message.NewMessage
|
||||
_InitMessage = python_message.InitMessage
|
||||
|
||||
|
||||
class GeneratedProtocolMessageType(type):
|
||||
|
||||
"""Metaclass for protocol message classes created at runtime from Descriptors.
|
||||
|
||||
We add implementations for all methods described in the Message class. We
|
||||
also create properties to allow getting/setting all fields in the protocol
|
||||
message. Finally, we create slots to prevent users from accidentally
|
||||
"setting" nonexistent fields in the protocol message, which then wouldn't get
|
||||
serialized / deserialized properly.
|
||||
|
||||
The protocol compiler currently uses this metaclass to create protocol
|
||||
message classes at runtime. Clients can also manually create their own
|
||||
classes at runtime, as in this example:
|
||||
|
||||
mydescriptor = Descriptor(.....)
|
||||
class MyProtoClass(Message):
|
||||
__metaclass__ = GeneratedProtocolMessageType
|
||||
DESCRIPTOR = mydescriptor
|
||||
myproto_instance = MyProtoClass()
|
||||
myproto.foo_field = 23
|
||||
...
|
||||
"""
|
||||
|
||||
# Must be consistent with the protocol-compiler code in
|
||||
# proto2/compiler/internal/generator.*.
|
||||
_DESCRIPTOR_KEY = 'DESCRIPTOR'
|
||||
|
||||
def __new__(cls, name, bases, dictionary):
|
||||
"""Custom allocation for runtime-generated class types.
|
||||
|
||||
We override __new__ because this is apparently the only place
|
||||
where we can meaningfully set __slots__ on the class we're creating(?).
|
||||
(The interplay between metaclasses and slots is not very well-documented).
|
||||
|
||||
Args:
|
||||
name: Name of the class (ignored, but required by the
|
||||
metaclass protocol).
|
||||
bases: Base classes of the class we're constructing.
|
||||
(Should be message.Message). We ignore this field, but
|
||||
it's required by the metaclass protocol
|
||||
dictionary: The class dictionary of the class we're
|
||||
constructing. dictionary[_DESCRIPTOR_KEY] must contain
|
||||
a Descriptor object describing this protocol message
|
||||
type.
|
||||
|
||||
Returns:
|
||||
Newly-allocated class.
|
||||
"""
|
||||
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
|
||||
_NewMessage(descriptor, dictionary)
|
||||
superclass = super(GeneratedProtocolMessageType, cls)
|
||||
|
||||
new_class = superclass.__new__(cls, name, bases, dictionary)
|
||||
setattr(descriptor, '_concrete_class', new_class)
|
||||
return new_class
|
||||
|
||||
def __init__(cls, name, bases, dictionary):
|
||||
"""Here we perform the majority of our work on the class.
|
||||
We add enum getters, an __init__ method, implementations
|
||||
of all Message methods, and properties for all fields
|
||||
in the protocol type.
|
||||
|
||||
Args:
|
||||
name: Name of the class (ignored, but required by the
|
||||
metaclass protocol).
|
||||
bases: Base classes of the class we're constructing.
|
||||
(Should be message.Message). We ignore this field, but
|
||||
it's required by the metaclass protocol
|
||||
dictionary: The class dictionary of the class we're
|
||||
constructing. dictionary[_DESCRIPTOR_KEY] must contain
|
||||
a Descriptor object describing this protocol message
|
||||
type.
|
||||
"""
|
||||
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
|
||||
_InitMessage(descriptor, cls)
|
||||
superclass = super(GeneratedProtocolMessageType, cls)
|
||||
superclass.__init__(name, bases, dictionary)
|
@ -0,0 +1,226 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""DEPRECATED: Declares the RPC service interfaces.
|
||||
|
||||
This module declares the abstract interfaces underlying proto2 RPC
|
||||
services. These are intended to be independent of any particular RPC
|
||||
implementation, so that proto2 services can be used on top of a variety
|
||||
of implementations. Starting with version 2.3.0, RPC implementations should
|
||||
not try to build on these, but should instead provide code generator plugins
|
||||
which generate code specific to the particular RPC implementation. This way
|
||||
the generated code can be more appropriate for the implementation in use
|
||||
and can avoid unnecessary layers of indirection.
|
||||
"""
|
||||
|
||||
__author__ = 'petar@google.com (Petar Petrov)'
|
||||
|
||||
|
||||
class RpcException(Exception):
|
||||
"""Exception raised on failed blocking RPC method call."""
|
||||
pass
|
||||
|
||||
|
||||
class Service(object):
|
||||
|
||||
"""Abstract base interface for protocol-buffer-based RPC services.
|
||||
|
||||
Services themselves are abstract classes (implemented either by servers or as
|
||||
stubs), but they subclass this base interface. The methods of this
|
||||
interface can be used to call the methods of the service without knowing
|
||||
its exact type at compile time (analogous to the Message interface).
|
||||
"""
|
||||
|
||||
def GetDescriptor():
|
||||
"""Retrieves this service's descriptor."""
|
||||
raise NotImplementedError
|
||||
|
||||
def CallMethod(self, method_descriptor, rpc_controller,
|
||||
request, done):
|
||||
"""Calls a method of the service specified by method_descriptor.
|
||||
|
||||
If "done" is None then the call is blocking and the response
|
||||
message will be returned directly. Otherwise the call is asynchronous
|
||||
and "done" will later be called with the response value.
|
||||
|
||||
In the blocking case, RpcException will be raised on error.
|
||||
|
||||
Preconditions:
|
||||
* method_descriptor.service == GetDescriptor
|
||||
* request is of the exact same classes as returned by
|
||||
GetRequestClass(method).
|
||||
* After the call has started, the request must not be modified.
|
||||
* "rpc_controller" is of the correct type for the RPC implementation being
|
||||
used by this Service. For stubs, the "correct type" depends on the
|
||||
RpcChannel which the stub is using.
|
||||
|
||||
Postconditions:
|
||||
* "done" will be called when the method is complete. This may be
|
||||
before CallMethod() returns or it may be at some point in the future.
|
||||
* If the RPC failed, the response value passed to "done" will be None.
|
||||
Further details about the failure can be found by querying the
|
||||
RpcController.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def GetRequestClass(self, method_descriptor):
|
||||
"""Returns the class of the request message for the specified method.
|
||||
|
||||
CallMethod() requires that the request is of a particular subclass of
|
||||
Message. GetRequestClass() gets the default instance of this required
|
||||
type.
|
||||
|
||||
Example:
|
||||
method = service.GetDescriptor().FindMethodByName("Foo")
|
||||
request = stub.GetRequestClass(method)()
|
||||
request.ParseFromString(input)
|
||||
service.CallMethod(method, request, callback)
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def GetResponseClass(self, method_descriptor):
|
||||
"""Returns the class of the response message for the specified method.
|
||||
|
||||
This method isn't really needed, as the RpcChannel's CallMethod constructs
|
||||
the response protocol message. It's provided anyway in case it is useful
|
||||
for the caller to know the response type in advance.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class RpcController(object):
|
||||
|
||||
"""An RpcController mediates a single method call.
|
||||
|
||||
The primary purpose of the controller is to provide a way to manipulate
|
||||
settings specific to the RPC implementation and to find out about RPC-level
|
||||
errors. The methods provided by the RpcController interface are intended
|
||||
to be a "least common denominator" set of features which we expect all
|
||||
implementations to support. Specific implementations may provide more
|
||||
advanced features (e.g. deadline propagation).
|
||||
"""
|
||||
|
||||
# Client-side methods below
|
||||
|
||||
def Reset(self):
|
||||
"""Resets the RpcController to its initial state.
|
||||
|
||||
After the RpcController has been reset, it may be reused in
|
||||
a new call. Must not be called while an RPC is in progress.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def Failed(self):
|
||||
"""Returns true if the call failed.
|
||||
|
||||
After a call has finished, returns true if the call failed. The possible
|
||||
reasons for failure depend on the RPC implementation. Failed() must not
|
||||
be called before a call has finished. If Failed() returns true, the
|
||||
contents of the response message are undefined.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def ErrorText(self):
|
||||
"""If Failed is true, returns a human-readable description of the error."""
|
||||
raise NotImplementedError
|
||||
|
||||
def StartCancel(self):
|
||||
"""Initiate cancellation.
|
||||
|
||||
Advises the RPC system that the caller desires that the RPC call be
|
||||
canceled. The RPC system may cancel it immediately, may wait awhile and
|
||||
then cancel it, or may not even cancel the call at all. If the call is
|
||||
canceled, the "done" callback will still be called and the RpcController
|
||||
will indicate that the call failed at that time.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
# Server-side methods below
|
||||
|
||||
def SetFailed(self, reason):
|
||||
"""Sets a failure reason.
|
||||
|
||||
Causes Failed() to return true on the client side. "reason" will be
|
||||
incorporated into the message returned by ErrorText(). If you find
|
||||
you need to return machine-readable information about failures, you
|
||||
should incorporate it into your response protocol buffer and should
|
||||
NOT call SetFailed().
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def IsCanceled(self):
|
||||
"""Checks if the client cancelled the RPC.
|
||||
|
||||
If true, indicates that the client canceled the RPC, so the server may
|
||||
as well give up on replying to it. The server should still call the
|
||||
final "done" callback.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def NotifyOnCancel(self, callback):
|
||||
"""Sets a callback to invoke on cancel.
|
||||
|
||||
Asks that the given callback be called when the RPC is canceled. The
|
||||
callback will always be called exactly once. If the RPC completes without
|
||||
being canceled, the callback will be called after completion. If the RPC
|
||||
has already been canceled when NotifyOnCancel() is called, the callback
|
||||
will be called immediately.
|
||||
|
||||
NotifyOnCancel() must be called no more than once per request.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class RpcChannel(object):
|
||||
|
||||
"""Abstract interface for an RPC channel.
|
||||
|
||||
An RpcChannel represents a communication line to a service which can be used
|
||||
to call that service's methods. The service may be running on another
|
||||
machine. Normally, you should not use an RpcChannel directly, but instead
|
||||
construct a stub {@link Service} wrapping it. Example:
|
||||
|
||||
Example:
|
||||
RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
|
||||
RpcController controller = rpcImpl.Controller()
|
||||
MyService service = MyService_Stub(channel)
|
||||
service.MyMethod(controller, request, callback)
|
||||
"""
|
||||
|
||||
def CallMethod(self, method_descriptor, rpc_controller,
|
||||
request, response_class, done):
|
||||
"""Calls the method identified by the descriptor.
|
||||
|
||||
Call the given method of the remote service. The signature of this
|
||||
procedure looks the same as Service.CallMethod(), but the requirements
|
||||
are less strict in one important way: the request object doesn't have to
|
||||
be of any specific class as long as its descriptor is method.input_type.
|
||||
"""
|
||||
raise NotImplementedError
|
@ -0,0 +1,284 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains metaclasses used to create protocol service and service stub
|
||||
classes from ServiceDescriptor objects at runtime.
|
||||
|
||||
The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
|
||||
inject all useful functionality into the classes output by the protocol
|
||||
compiler at compile-time.
|
||||
"""
|
||||
|
||||
__author__ = 'petar@google.com (Petar Petrov)'
|
||||
|
||||
|
||||
class GeneratedServiceType(type):
|
||||
|
||||
"""Metaclass for service classes created at runtime from ServiceDescriptors.
|
||||
|
||||
Implementations for all methods described in the Service class are added here
|
||||
by this class. We also create properties to allow getting/setting all fields
|
||||
in the protocol message.
|
||||
|
||||
The protocol compiler currently uses this metaclass to create protocol service
|
||||
classes at runtime. Clients can also manually create their own classes at
|
||||
runtime, as in this example:
|
||||
|
||||
mydescriptor = ServiceDescriptor(.....)
|
||||
class MyProtoService(service.Service):
|
||||
__metaclass__ = GeneratedServiceType
|
||||
DESCRIPTOR = mydescriptor
|
||||
myservice_instance = MyProtoService()
|
||||
...
|
||||
"""
|
||||
|
||||
_DESCRIPTOR_KEY = 'DESCRIPTOR'
|
||||
|
||||
def __init__(cls, name, bases, dictionary):
|
||||
"""Creates a message service class.
|
||||
|
||||
Args:
|
||||
name: Name of the class (ignored, but required by the metaclass
|
||||
protocol).
|
||||
bases: Base classes of the class being constructed.
|
||||
dictionary: The class dictionary of the class being constructed.
|
||||
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
|
||||
describing this protocol service type.
|
||||
"""
|
||||
# Don't do anything if this class doesn't have a descriptor. This happens
|
||||
# when a service class is subclassed.
|
||||
if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
|
||||
return
|
||||
descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
|
||||
service_builder = _ServiceBuilder(descriptor)
|
||||
service_builder.BuildService(cls)
|
||||
|
||||
|
||||
class GeneratedServiceStubType(GeneratedServiceType):
|
||||
|
||||
"""Metaclass for service stubs created at runtime from ServiceDescriptors.
|
||||
|
||||
This class has similar responsibilities as GeneratedServiceType, except that
|
||||
it creates the service stub classes.
|
||||
"""
|
||||
|
||||
_DESCRIPTOR_KEY = 'DESCRIPTOR'
|
||||
|
||||
def __init__(cls, name, bases, dictionary):
|
||||
"""Creates a message service stub class.
|
||||
|
||||
Args:
|
||||
name: Name of the class (ignored, here).
|
||||
bases: Base classes of the class being constructed.
|
||||
dictionary: The class dictionary of the class being constructed.
|
||||
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
|
||||
describing this protocol service type.
|
||||
"""
|
||||
super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
|
||||
# Don't do anything if this class doesn't have a descriptor. This happens
|
||||
# when a service stub is subclassed.
|
||||
if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
|
||||
return
|
||||
descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
|
||||
service_stub_builder = _ServiceStubBuilder(descriptor)
|
||||
service_stub_builder.BuildServiceStub(cls)
|
||||
|
||||
|
||||
class _ServiceBuilder(object):
|
||||
|
||||
"""This class constructs a protocol service class using a service descriptor.
|
||||
|
||||
Given a service descriptor, this class constructs a class that represents
|
||||
the specified service descriptor. One service builder instance constructs
|
||||
exactly one service class. That means all instances of that class share the
|
||||
same builder.
|
||||
"""
|
||||
|
||||
def __init__(self, service_descriptor):
|
||||
"""Initializes an instance of the service class builder.
|
||||
|
||||
Args:
|
||||
service_descriptor: ServiceDescriptor to use when constructing the
|
||||
service class.
|
||||
"""
|
||||
self.descriptor = service_descriptor
|
||||
|
||||
def BuildService(self, cls):
|
||||
"""Constructs the service class.
|
||||
|
||||
Args:
|
||||
cls: The class that will be constructed.
|
||||
"""
|
||||
|
||||
# CallMethod needs to operate with an instance of the Service class. This
|
||||
# internal wrapper function exists only to be able to pass the service
|
||||
# instance to the method that does the real CallMethod work.
|
||||
def _WrapCallMethod(srvc, method_descriptor,
|
||||
rpc_controller, request, callback):
|
||||
return self._CallMethod(srvc, method_descriptor,
|
||||
rpc_controller, request, callback)
|
||||
self.cls = cls
|
||||
cls.CallMethod = _WrapCallMethod
|
||||
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
|
||||
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
|
||||
cls.GetRequestClass = self._GetRequestClass
|
||||
cls.GetResponseClass = self._GetResponseClass
|
||||
for method in self.descriptor.methods:
|
||||
setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
|
||||
|
||||
def _CallMethod(self, srvc, method_descriptor,
|
||||
rpc_controller, request, callback):
|
||||
"""Calls the method described by a given method descriptor.
|
||||
|
||||
Args:
|
||||
srvc: Instance of the service for which this method is called.
|
||||
method_descriptor: Descriptor that represent the method to call.
|
||||
rpc_controller: RPC controller to use for this method's execution.
|
||||
request: Request protocol message.
|
||||
callback: A callback to invoke after the method has completed.
|
||||
"""
|
||||
if method_descriptor.containing_service != self.descriptor:
|
||||
raise RuntimeError(
|
||||
'CallMethod() given method descriptor for wrong service type.')
|
||||
method = getattr(srvc, method_descriptor.name)
|
||||
return method(rpc_controller, request, callback)
|
||||
|
||||
def _GetRequestClass(self, method_descriptor):
|
||||
"""Returns the class of the request protocol message.
|
||||
|
||||
Args:
|
||||
method_descriptor: Descriptor of the method for which to return the
|
||||
request protocol message class.
|
||||
|
||||
Returns:
|
||||
A class that represents the input protocol message of the specified
|
||||
method.
|
||||
"""
|
||||
if method_descriptor.containing_service != self.descriptor:
|
||||
raise RuntimeError(
|
||||
'GetRequestClass() given method descriptor for wrong service type.')
|
||||
return method_descriptor.input_type._concrete_class
|
||||
|
||||
def _GetResponseClass(self, method_descriptor):
|
||||
"""Returns the class of the response protocol message.
|
||||
|
||||
Args:
|
||||
method_descriptor: Descriptor of the method for which to return the
|
||||
response protocol message class.
|
||||
|
||||
Returns:
|
||||
A class that represents the output protocol message of the specified
|
||||
method.
|
||||
"""
|
||||
if method_descriptor.containing_service != self.descriptor:
|
||||
raise RuntimeError(
|
||||
'GetResponseClass() given method descriptor for wrong service type.')
|
||||
return method_descriptor.output_type._concrete_class
|
||||
|
||||
def _GenerateNonImplementedMethod(self, method):
|
||||
"""Generates and returns a method that can be set for a service methods.
|
||||
|
||||
Args:
|
||||
method: Descriptor of the service method for which a method is to be
|
||||
generated.
|
||||
|
||||
Returns:
|
||||
A method that can be added to the service class.
|
||||
"""
|
||||
return lambda inst, rpc_controller, request, callback: (
|
||||
self._NonImplementedMethod(method.name, rpc_controller, callback))
|
||||
|
||||
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
|
||||
"""The body of all methods in the generated service class.
|
||||
|
||||
Args:
|
||||
method_name: Name of the method being executed.
|
||||
rpc_controller: RPC controller used to execute this method.
|
||||
callback: A callback which will be invoked when the method finishes.
|
||||
"""
|
||||
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
|
||||
callback(None)
|
||||
|
||||
|
||||
class _ServiceStubBuilder(object):
|
||||
|
||||
"""Constructs a protocol service stub class using a service descriptor.
|
||||
|
||||
Given a service descriptor, this class constructs a suitable stub class.
|
||||
A stub is just a type-safe wrapper around an RpcChannel which emulates a
|
||||
local implementation of the service.
|
||||
|
||||
One service stub builder instance constructs exactly one class. It means all
|
||||
instances of that class share the same service stub builder.
|
||||
"""
|
||||
|
||||
def __init__(self, service_descriptor):
|
||||
"""Initializes an instance of the service stub class builder.
|
||||
|
||||
Args:
|
||||
service_descriptor: ServiceDescriptor to use when constructing the
|
||||
stub class.
|
||||
"""
|
||||
self.descriptor = service_descriptor
|
||||
|
||||
def BuildServiceStub(self, cls):
|
||||
"""Constructs the stub class.
|
||||
|
||||
Args:
|
||||
cls: The class that will be constructed.
|
||||
"""
|
||||
|
||||
def _ServiceStubInit(stub, rpc_channel):
|
||||
stub.rpc_channel = rpc_channel
|
||||
self.cls = cls
|
||||
cls.__init__ = _ServiceStubInit
|
||||
for method in self.descriptor.methods:
|
||||
setattr(cls, method.name, self._GenerateStubMethod(method))
|
||||
|
||||
def _GenerateStubMethod(self, method):
|
||||
return (lambda inst, rpc_controller, request, callback=None:
|
||||
self._StubMethod(inst, method, rpc_controller, request, callback))
|
||||
|
||||
def _StubMethod(self, stub, method_descriptor,
|
||||
rpc_controller, request, callback):
|
||||
"""The body of all service methods in the generated stub class.
|
||||
|
||||
Args:
|
||||
stub: Stub instance.
|
||||
method_descriptor: Descriptor of the invoked method.
|
||||
rpc_controller: Rpc controller to execute the method.
|
||||
request: Request protocol message.
|
||||
callback: A callback to execute when the method finishes.
|
||||
Returns:
|
||||
Response message (in case of blocking call).
|
||||
"""
|
||||
return stub.rpc_channel.CallMethod(
|
||||
method_descriptor, rpc_controller, request,
|
||||
method_descriptor.output_type._concrete_class, callback)
|
@ -0,0 +1,691 @@
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# http://code.google.com/p/protobuf/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains routines for printing protocol messages in text format."""
|
||||
|
||||
__author__ = 'kenton@google.com (Kenton Varda)'
|
||||
|
||||
import cStringIO
|
||||
import re
|
||||
|
||||
from collections import deque
|
||||
from google.protobuf.internal import type_checkers
|
||||
from google.protobuf import descriptor
|
||||
|
||||
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
|
||||
'PrintFieldValue', 'Merge' ]
|
||||
|
||||
|
||||
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
|
||||
# float('inf') does not work on Windows (pre-2.6).
|
||||
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
|
||||
_NAN = _INFINITY * 0
|
||||
|
||||
|
||||
class ParseError(Exception):
|
||||
"""Thrown in case of ASCII parsing error."""
|
||||
|
||||
|
||||
def MessageToString(message, as_utf8=False, as_one_line=False):
|
||||
out = cStringIO.StringIO()
|
||||
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
|
||||
result = out.getvalue()
|
||||
out.close()
|
||||
if as_one_line:
|
||||
return result.rstrip()
|
||||
return result
|
||||
|
||||
|
||||
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
|
||||
for field, value in message.ListFields():
|
||||
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
|
||||
for element in value:
|
||||
PrintField(field, element, out, indent, as_utf8, as_one_line)
|
||||
else:
|
||||
PrintField(field, value, out, indent, as_utf8, as_one_line)
|
||||
|
||||
|
||||
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
|
||||
"""Print a single field name/value pair. For repeated fields, the value
|
||||
should be a single element."""
|
||||
|
||||
out.write(' ' * indent);
|
||||
if field.is_extension:
|
||||
out.write('[')
|
||||
if (field.containing_type.GetOptions().message_set_wire_format and
|
||||
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
|
||||
field.message_type == field.extension_scope and
|
||||
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
|
||||
out.write(field.message_type.full_name)
|
||||
else:
|
||||
out.write(field.full_name)
|
||||
out.write(']')
|
||||
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
|
||||
# For groups, use the capitalized name.
|
||||
out.write(field.message_type.name)
|
||||
else:
|
||||
out.write(field.name)
|
||||
|
||||
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
|
||||
# The colon is optional in this case, but our cross-language golden files
|
||||
# don't include it.
|
||||
out.write(': ')
|
||||
|
||||
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
|
||||
if as_one_line:
|
||||
out.write(' ')
|
||||
else:
|
||||
out.write('\n')
|
||||
|
||||
|
||||
def PrintFieldValue(field, value, out, indent=0,
|
||||
as_utf8=False, as_one_line=False):
|
||||
"""Print a single field value (not including name). For repeated fields,
|
||||
the value should be a single element."""
|
||||
|
||||
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
|
||||
if as_one_line:
|
||||
out.write(' { ')
|
||||
PrintMessage(value, out, indent, as_utf8, as_one_line)
|
||||
out.write('}')
|
||||
else:
|
||||
out.write(' {\n')
|
||||
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
|
||||
out.write(' ' * indent + '}')
|
||||
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
|
||||
out.write(field.enum_type.values_by_number[value].name)
|
||||
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
|
||||
out.write('\"')
|
||||
if type(value) is unicode:
|
||||
out.write(_CEscape(value.encode('utf-8'), as_utf8))
|
||||
else:
|
||||
out.write(_CEscape(value, as_utf8))
|
||||
out.write('\"')
|
||||
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
|
||||
if value:
|
||||
out.write("true")
|
||||
else:
|
||||
out.write("false")
|
||||
else:
|
||||
out.write(str(value))
|
||||
|
||||
|
||||
def Merge(text, message):
|
||||
"""Merges an ASCII representation of a protocol message into a message.
|
||||
|
||||
Args:
|
||||
text: Message ASCII representation.
|
||||
message: A protocol buffer message to merge into.
|
||||
|
||||
Raises:
|
||||
ParseError: On ASCII parsing problems.
|
||||
"""
|
||||
tokenizer = _Tokenizer(text)
|
||||
while not tokenizer.AtEnd():
|
||||
_MergeField(tokenizer, message)
|
||||
|
||||
|
||||
def _MergeField(tokenizer, message):
|
||||
"""Merges a single protocol message field into a message.
|
||||
|
||||
Args:
|
||||
tokenizer: A tokenizer to parse the field name and values.
|
||||
message: A protocol message to record the data.
|
||||
|
||||
Raises:
|
||||
ParseError: In case of ASCII parsing problems.
|
||||
"""
|
||||
message_descriptor = message.DESCRIPTOR
|
||||
if tokenizer.TryConsume('['):
|
||||
name = [tokenizer.ConsumeIdentifier()]
|
||||
while tokenizer.TryConsume('.'):
|
||||
name.append(tokenizer.ConsumeIdentifier())
|
||||
name = '.'.join(name)
|
||||
|
||||
if not message_descriptor.is_extendable:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Message type "%s" does not have extensions.' %
|
||||
message_descriptor.full_name)
|
||||
field = message.Extensions._FindExtensionByName(name)
|
||||
if not field:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Extension "%s" not registered.' % name)
|
||||
elif message_descriptor != field.containing_type:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Extension "%s" does not extend message type "%s".' % (
|
||||
name, message_descriptor.full_name))
|
||||
tokenizer.Consume(']')
|
||||
else:
|
||||
name = tokenizer.ConsumeIdentifier()
|
||||
field = message_descriptor.fields_by_name.get(name, None)
|
||||
|
||||
# Group names are expected to be capitalized as they appear in the
|
||||
# .proto file, which actually matches their type names, not their field
|
||||
# names.
|
||||
if not field:
|
||||
field = message_descriptor.fields_by_name.get(name.lower(), None)
|
||||
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
|
||||
field = None
|
||||
|
||||
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
|
||||
field.message_type.name != name):
|
||||
field = None
|
||||
|
||||
if not field:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Message type "%s" has no field named "%s".' % (
|
||||
message_descriptor.full_name, name))
|
||||
|
||||
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
|
||||
tokenizer.TryConsume(':')
|
||||
|
||||
if tokenizer.TryConsume('<'):
|
||||
end_token = '>'
|
||||
else:
|
||||
tokenizer.Consume('{')
|
||||
end_token = '}'
|
||||
|
||||
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
|
||||
if field.is_extension:
|
||||
sub_message = message.Extensions[field].add()
|
||||
else:
|
||||
sub_message = getattr(message, field.name).add()
|
||||
else:
|
||||
if field.is_extension:
|
||||
sub_message = message.Extensions[field]
|
||||
else:
|
||||
sub_message = getattr(message, field.name)
|
||||
sub_message.SetInParent()
|
||||
|
||||
while not tokenizer.TryConsume(end_token):
|
||||
if tokenizer.AtEnd():
|
||||
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
|
||||
_MergeField(tokenizer, sub_message)
|
||||
else:
|
||||
_MergeScalarField(tokenizer, message, field)
|
||||
|
||||
|
||||
def _MergeScalarField(tokenizer, message, field):
|
||||
"""Merges a single protocol message scalar field into a message.
|
||||
|
||||
Args:
|
||||
tokenizer: A tokenizer to parse the field value.
|
||||
message: A protocol message to record the data.
|
||||
field: The descriptor of the field to be merged.
|
||||
|
||||
Raises:
|
||||
ParseError: In case of ASCII parsing problems.
|
||||
RuntimeError: On runtime errors.
|
||||
"""
|
||||
tokenizer.Consume(':')
|
||||
value = None
|
||||
|
||||
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
|
||||
descriptor.FieldDescriptor.TYPE_SINT32,
|
||||
descriptor.FieldDescriptor.TYPE_SFIXED32):
|
||||
value = tokenizer.ConsumeInt32()
|
||||
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
|
||||
descriptor.FieldDescriptor.TYPE_SINT64,
|
||||
descriptor.FieldDescriptor.TYPE_SFIXED64):
|
||||
value = tokenizer.ConsumeInt64()
|
||||
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
|
||||
descriptor.FieldDescriptor.TYPE_FIXED32):
|
||||
value = tokenizer.ConsumeUint32()
|
||||
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
|
||||
descriptor.FieldDescriptor.TYPE_FIXED64):
|
||||
value = tokenizer.ConsumeUint64()
|
||||
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
|
||||
descriptor.FieldDescriptor.TYPE_DOUBLE):
|
||||
value = tokenizer.ConsumeFloat()
|
||||
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
|
||||
value = tokenizer.ConsumeBool()
|
||||
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
|
||||
value = tokenizer.ConsumeString()
|
||||
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
|
||||
value = tokenizer.ConsumeByteString()
|
||||
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
|
||||
# Enum can be specified by a number (the enum value), or by
|
||||
# a string literal (the enum name).
|
||||
enum_descriptor = field.enum_type
|
||||
if tokenizer.LookingAtInteger():
|
||||
number = tokenizer.ConsumeInt32()
|
||||
enum_value = enum_descriptor.values_by_number.get(number, None)
|
||||
if enum_value is None:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Enum type "%s" has no value with number %d.' % (
|
||||
enum_descriptor.full_name, number))
|
||||
else:
|
||||
identifier = tokenizer.ConsumeIdentifier()
|
||||
enum_value = enum_descriptor.values_by_name.get(identifier, None)
|
||||
if enum_value is None:
|
||||
raise tokenizer.ParseErrorPreviousToken(
|
||||
'Enum type "%s" has no value named %s.' % (
|
||||
enum_descriptor.full_name, identifier))
|
||||
value = enum_value.number
|
||||
else:
|
||||
raise RuntimeError('Unknown field type %d' % field.type)
|
||||
|
||||
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
|
||||
if field.is_extension:
|
||||
message.Extensions[field].append(value)
|
||||
else:
|
||||
getattr(message, field.name).append(value)
|
||||
else:
|
||||
if field.is_extension:
|
||||
message.Extensions[field] = value
|
||||
else:
|
||||
setattr(message, field.name, value)
|
||||
|
||||
|
||||
class _Tokenizer(object):
|
||||
"""Protocol buffer ASCII representation tokenizer.
|
||||
|
||||
This class handles the lower level string parsing by splitting it into
|
||||
meaningful tokens.
|
||||
|
||||
It was directly ported from the Java protocol buffer API.
|
||||
"""
|
||||
|
||||
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
|
||||
_TOKEN = re.compile(
|
||||
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
|
||||
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
|
||||
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
|
||||
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
|
||||
_IDENTIFIER = re.compile('\w+')
|
||||
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
|
||||
type_checkers.Int32ValueChecker(),
|
||||
type_checkers.Uint64ValueChecker(),
|
||||
type_checkers.Int64ValueChecker()]
|
||||
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
|
||||
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
|
||||
|
||||
def __init__(self, text_message):
|
||||
self._text_message = text_message
|
||||
|
||||
self._position = 0
|
||||
self._line = -1
|
||||
self._column = 0
|
||||
self._token_start = None
|
||||
self.token = ''
|
||||
self._lines = deque(text_message.split('\n'))
|
||||
self._current_line = ''
|
||||
self._previous_line = 0
|
||||
self._previous_column = 0
|
||||
self._SkipWhitespace()
|
||||
self.NextToken()
|
||||
|
||||
def AtEnd(self):
|
||||
"""Checks the end of the text was reached.
|
||||
|
||||
Returns:
|
||||
True iff the end was reached.
|
||||
"""
|
||||
return self.token == ''
|
||||
|
||||
def _PopLine(self):
|
||||
while len(self._current_line) <= self._column:
|
||||
if not self._lines:
|
||||
self._current_line = ''
|
||||
return
|
||||
self._line += 1
|
||||
self._column = 0
|
||||
self._current_line = self._lines.popleft()
|
||||
|
||||
def _SkipWhitespace(self):
|
||||
while True:
|
||||
self._PopLine()
|
||||
match = self._WHITESPACE.match(self._current_line, self._column)
|
||||
if not match:
|
||||
break
|
||||
length = len(match.group(0))
|
||||
self._column += length
|
||||
|
||||
def TryConsume(self, token):
|
||||
"""Tries to consume a given piece of text.
|
||||
|
||||
Args:
|
||||
token: Text to consume.
|
||||
|
||||
Returns:
|
||||
True iff the text was consumed.
|
||||
"""
|
||||
if self.token == token:
|
||||
self.NextToken()
|
||||
return True
|
||||
return False
|
||||
|
||||
def Consume(self, token):
|
||||
"""Consumes a piece of text.
|
||||
|
||||
Args:
|
||||
token: Text to consume.
|
||||
|
||||
Raises:
|
||||
ParseError: If the text couldn't be consumed.
|
||||
"""
|
||||
if not self.TryConsume(token):
|
||||
raise self._ParseError('Expected "%s".' % token)
|
||||
|
||||
def LookingAtInteger(self):
|
||||
"""Checks if the current token is an integer.
|
||||
|
||||
Returns:
|
||||
True iff the current token is an integer.
|
||||
"""
|
||||
if not self.token:
|
||||
return False
|
||||
c = self.token[0]
|
||||
return (c >= '0' and c <= '9') or c == '-' or c == '+'
|
||||
|
||||
def ConsumeIdentifier(self):
|
||||
"""Consumes protocol message field identifier.
|
||||
|
||||
Returns:
|
||||
Identifier string.
|
||||
|
||||
Raises:
|
||||
ParseError: If an identifier couldn't be consumed.
|
||||
"""
|
||||
result = self.token
|
||||
if not self._IDENTIFIER.match(result):
|
||||
raise self._ParseError('Expected identifier.')
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeInt32(self):
|
||||
"""Consumes a signed 32bit integer number.
|
||||
|
||||
Returns:
|
||||
The integer parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If a signed 32bit integer couldn't be consumed.
|
||||
"""
|
||||
try:
|
||||
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
|
||||
except ValueError, e:
|
||||
raise self._IntegerParseError(e)
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeUint32(self):
|
||||
"""Consumes an unsigned 32bit integer number.
|
||||
|
||||
Returns:
|
||||
The integer parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If an unsigned 32bit integer couldn't be consumed.
|
||||
"""
|
||||
try:
|
||||
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
|
||||
except ValueError, e:
|
||||
raise self._IntegerParseError(e)
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeInt64(self):
|
||||
"""Consumes a signed 64bit integer number.
|
||||
|
||||
Returns:
|
||||
The integer parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If a signed 64bit integer couldn't be consumed.
|
||||
"""
|
||||
try:
|
||||
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
|
||||
except ValueError, e:
|
||||
raise self._IntegerParseError(e)
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeUint64(self):
|
||||
"""Consumes an unsigned 64bit integer number.
|
||||
|
||||
Returns:
|
||||
The integer parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If an unsigned 64bit integer couldn't be consumed.
|
||||
"""
|
||||
try:
|
||||
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
|
||||
except ValueError, e:
|
||||
raise self._IntegerParseError(e)
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeFloat(self):
|
||||
"""Consumes an floating point number.
|
||||
|
||||
Returns:
|
||||
The number parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If a floating point number couldn't be consumed.
|
||||
"""
|
||||
text = self.token
|
||||
if self._FLOAT_INFINITY.match(text):
|
||||
self.NextToken()
|
||||
if text.startswith('-'):
|
||||
return -_INFINITY
|
||||
return _INFINITY
|
||||
|
||||
if self._FLOAT_NAN.match(text):
|
||||
self.NextToken()
|
||||
return _NAN
|
||||
|
||||
try:
|
||||
result = float(text)
|
||||
except ValueError, e:
|
||||
raise self._FloatParseError(e)
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def ConsumeBool(self):
|
||||
"""Consumes a boolean value.
|
||||
|
||||
Returns:
|
||||
The bool parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If a boolean value couldn't be consumed.
|
||||
"""
|
||||
if self.token in ('true', 't', '1'):
|
||||
self.NextToken()
|
||||
return True
|
||||
elif self.token in ('false', 'f', '0'):
|
||||
self.NextToken()
|
||||
return False
|
||||
else:
|
||||
raise self._ParseError('Expected "true" or "false".')
|
||||
|
||||
def ConsumeString(self):
|
||||
"""Consumes a string value.
|
||||
|
||||
Returns:
|
||||
The string parsed.
|
||||
|
||||
Raises:
|
||||
ParseError: If a string value couldn't be consumed.
|
||||
"""
|
||||
bytes = self.ConsumeByteString()
|
||||
try:
|
||||
return unicode(bytes, 'utf-8')
|
||||
except UnicodeDecodeError, e:
|
||||
raise self._StringParseError(e)
|
||||
|
||||
def ConsumeByteString(self):
|
||||
"""Consumes a byte array value.
|
||||
|
||||
Returns:
|
||||
The array parsed (as a string).
|
||||
|
||||
Raises:
|
||||
ParseError: If a byte array value couldn't be consumed.
|
||||
"""
|
||||
list = [self._ConsumeSingleByteString()]
|
||||
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
|
||||
list.append(self._ConsumeSingleByteString())
|
||||
return "".join(list)
|
||||
|
||||
def _ConsumeSingleByteString(self):
|
||||
"""Consume one token of a string literal.
|
||||
|
||||
String literals (whether bytes or text) can come in multiple adjacent
|
||||
tokens which are automatically concatenated, like in C or Python. This
|
||||
method only consumes one token.
|
||||
"""
|
||||
text = self.token
|
||||
if len(text) < 1 or text[0] not in ('\'', '"'):
|
||||
raise self._ParseError('Exptected string.')
|
||||
|
||||
if len(text) < 2 or text[-1] != text[0]:
|
||||
raise self._ParseError('String missing ending quote.')
|
||||
|
||||
try:
|
||||
result = _CUnescape(text[1:-1])
|
||||
except ValueError, e:
|
||||
raise self._ParseError(str(e))
|
||||
self.NextToken()
|
||||
return result
|
||||
|
||||
def _ParseInteger(self, text, is_signed=False, is_long=False):
|
||||
"""Parses an integer.
|
||||
|
||||
Args:
|
||||
text: The text to parse.
|
||||
is_signed: True if a signed integer must be parsed.
|
||||
is_long: True if a long integer must be parsed.
|
||||
|
||||
Returns:
|
||||
The integer value.
|
||||
|
||||
Raises:
|
||||
ValueError: Thrown Iff the text is not a valid integer.
|
||||
"""
|
||||
pos = 0
|
||||
if text.startswith('-'):
|
||||
pos += 1
|
||||
|
||||
base = 10
|
||||
if text.startswith('0x', pos) or text.startswith('0X', pos):
|
||||
base = 16
|
||||
elif text.startswith('0', pos):
|
||||
base = 8
|
||||
|
||||
# Do the actual parsing. Exception handling is propagated to caller.
|
||||
result = int(text, base)
|
||||
|
||||
# Check if the integer is sane. Exceptions handled by callers.
|
||||
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
|
||||
checker.CheckValue(result)
|
||||
return result
|
||||
|
||||
def ParseErrorPreviousToken(self, message):
|
||||
"""Creates and *returns* a ParseError for the previously read token.
|
||||
|
||||
Args:
|
||||
message: A message to set for the exception.
|
||||
|
||||
Returns:
|
||||
A ParseError instance.
|
||||
"""
|
||||
return ParseError('%d:%d : %s' % (
|
||||
self._previous_line + 1, self._previous_column + 1, message))
|
||||
|
||||
def _ParseError(self, message):
|
||||
"""Creates and *returns* a ParseError for the current token."""
|
||||
return ParseError('%d:%d : %s' % (
|
||||
self._line + 1, self._column - len(self.token) + 1, message))
|
||||
|
||||
def _IntegerParseError(self, e):
|
||||
return self._ParseError('Couldn\'t parse integer: ' + str(e))
|
||||
|
||||
def _FloatParseError(self, e):
|
||||
return self._ParseError('Couldn\'t parse number: ' + str(e))
|
||||
|
||||
def _StringParseError(self, e):
|
||||
return self._ParseError('Couldn\'t parse string: ' + str(e))
|
||||
|
||||
def NextToken(self):
|
||||
"""Reads the next meaningful token."""
|
||||
self._previous_line = self._line
|
||||
self._previous_column = self._column
|
||||
|
||||
self._column += len(self.token)
|
||||
self._SkipWhitespace()
|
||||
|
||||
if not self._lines and len(self._current_line) <= self._column:
|
||||
self.token = ''
|
||||
return
|
||||
|
||||
match = self._TOKEN.match(self._current_line, self._column)
|
||||
if match:
|
||||
token = match.group(0)
|
||||
self.token = token
|
||||
else:
|
||||
self.token = self._current_line[self._column]
|
||||
|
||||
|
||||
# text.encode('string_escape') does not seem to satisfy our needs as it
|
||||
# encodes unprintable characters using two-digit hex escapes whereas our
|
||||
# C++ unescaping function allows hex escapes to be any length. So,
|
||||
# "\0011".encode('string_escape') ends up being "\\x011", which will be
|
||||
# decoded in C++ as a single-character string with char code 0x11.
|
||||
def _CEscape(text, as_utf8):
|
||||
def escape(c):
|
||||
o = ord(c)
|
||||
if o == 10: return r"\n" # optional escape
|
||||
if o == 13: return r"\r" # optional escape
|
||||
if o == 9: return r"\t" # optional escape
|
||||
if o == 39: return r"\'" # optional escape
|
||||
|
||||
if o == 34: return r'\"' # necessary escape
|
||||
if o == 92: return r"\\" # necessary escape
|
||||
|
||||
# necessary escapes
|
||||
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
|
||||
return c
|
||||
return "".join([escape(c) for c in text])
|
||||
|
||||
|
||||
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
|
||||
|
||||
|
||||
def _CUnescape(text):
|
||||
def ReplaceHex(m):
|
||||
return chr(int(m.group(0)[2:], 16))
|
||||
# This is required because the 'string_escape' encoding doesn't
|
||||
# allow single-digit hex escapes (like '\xf').
|
||||
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
|
||||
return result.decode('string_escape')
|
412
bibliotheque/files/nsclient/scripts/python/lib/test_helper.py
Normal file
412
bibliotheque/files/nsclient/scripts/python/lib/test_helper.py
Normal file
@ -0,0 +1,412 @@
|
||||
from NSCP import Settings, Registry, Core, log, log_debug, log_error, status
|
||||
import os
|
||||
import inspect
|
||||
|
||||
test_manager = None
|
||||
|
||||
def install_testcases(tests, args = []):
|
||||
test_manager = create_test_manager()
|
||||
test_manager.add(tests)
|
||||
test_manager.install()
|
||||
|
||||
def init_testcases(plugin_id, plugin_alias, script_alias, tests):
|
||||
test_manager = create_test_manager(plugin_id, plugin_alias, script_alias)
|
||||
test_manager.add(tests)
|
||||
test_manager.init()
|
||||
|
||||
def shutdown_testcases():
|
||||
if get_test_manager():
|
||||
get_test_manager().shutdown()
|
||||
destroy_test_manager()
|
||||
|
||||
def get_test_manager():
|
||||
global test_manager
|
||||
return test_manager
|
||||
|
||||
def destroy_test_manager():
|
||||
global test_manager
|
||||
if test_manager:
|
||||
test_manager.destroy()
|
||||
test_manager = None
|
||||
|
||||
def create_test_manager(plugin_id = 0, plugin_alias = '', script_alias = ''):
|
||||
global test_manager
|
||||
if not test_manager:
|
||||
test_manager = TestManager(plugin_id, plugin_alias, script_alias)
|
||||
|
||||
reg = Registry.get(plugin_id)
|
||||
|
||||
reg.simple_cmdline('help', display_help)
|
||||
reg.simple_cmdline('install_python_test', install_tests)
|
||||
reg.simple_cmdline('run_python_test', run_tests)
|
||||
|
||||
reg.simple_function('py_unittest', run_tests, 'Run python unit test suite')
|
||||
reg.simple_function('py_unittest_show_ok', set_show_ok, 'Set verbouse log')
|
||||
reg.simple_function('py_unittest_add_case', add_case, 'Set which cases to run')
|
||||
|
||||
return test_manager
|
||||
|
||||
def add_test_suite(suites):
|
||||
mgr = get_test_manager()
|
||||
if isinstance(suites, (list)):
|
||||
for s in suites:
|
||||
mgr.add(s)
|
||||
else:
|
||||
mgr.add(suites)
|
||||
|
||||
def install_tests(arguments = []):
|
||||
get_test_manager().install(arguments)
|
||||
return (status.OK, 'installed?')
|
||||
|
||||
def run_tests(arguments = []):
|
||||
result = get_test_manager().run(arguments)
|
||||
return result.return_nagios(get_test_manager().show_all)
|
||||
|
||||
def set_show_ok(arguments = []):
|
||||
get_test_manager().set_show_ok()
|
||||
return (status.OK, 'Done')
|
||||
|
||||
def add_case(arguments = []):
|
||||
get_test_manager().add_case(arguments)
|
||||
return (status.OK, 'Done')
|
||||
|
||||
def display_help(arguments = []):
|
||||
return (status.OK, 'TODO')
|
||||
|
||||
class Callable:
|
||||
def __init__(self, anycallable):
|
||||
self.__call__ = anycallable
|
||||
|
||||
class SingletonHelper:
|
||||
klass = None
|
||||
def __init__(self, klass):
|
||||
self.klass = klass
|
||||
def __call__(self, *args, **kw):
|
||||
if not self.klass._instance:
|
||||
self.klass._instance = self.klass()
|
||||
return self.klass._instance
|
||||
|
||||
def setup_singleton(klass, src = None):
|
||||
klass.getInstance = SingletonHelper(klass)
|
||||
if not src:
|
||||
cf = inspect.currentframe()
|
||||
if cf:
|
||||
bf = cf.f_back
|
||||
if bf:
|
||||
src = bf.f_code.co_filename
|
||||
klass.__source__ = src
|
||||
|
||||
class BasicTest(object):
|
||||
|
||||
_instance = None
|
||||
getInstance = None
|
||||
__source__ = ''
|
||||
|
||||
def desc(self):
|
||||
return 'TODO: Describe: %s'%self.title()
|
||||
|
||||
def title(self):
|
||||
return self._instance.__class__.__name__
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
None
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def run_test(self):
|
||||
result = TestResult('run_test')
|
||||
result.add_message(False, 'TODO add implementation')
|
||||
return result
|
||||
|
||||
def install(self, arguments):
|
||||
conf = Settings.get()
|
||||
conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
fn = os.path.basename(self.__source__)
|
||||
(sn, ext) = os.path.splitext(fn)
|
||||
conf.register_key('/settings/pytest/scripts', sn, 'string', 'UNIT TEST SCRIPT: %s'%self.title(), 'A script for running unittests for: %s'%self.desc(), fn)
|
||||
conf.set_string('/settings/pytest/scripts', sn, fn)
|
||||
|
||||
conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id):
|
||||
None
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
def require_boot(self):
|
||||
return False
|
||||
|
||||
class TestResultEntry:
|
||||
status = False
|
||||
desc = 'Unassigned result'
|
||||
error = None
|
||||
def __init__(self, status, desc, error):
|
||||
self.status = status
|
||||
self.desc = desc
|
||||
self.error = error
|
||||
|
||||
def log(self, show_all = False, prefix = '', indent = 0):
|
||||
if self.status:
|
||||
if show_all:
|
||||
log('%s%s%s'%(prefix, ''.rjust(indent, ' '), self))
|
||||
log_debug('%s%s%s'%(prefix, ''.rjust(indent, ' '), self))
|
||||
else:
|
||||
log_error('%s%s%s'%(prefix, ''.rjust(indent, ' '), self))
|
||||
|
||||
def is_ok(self):
|
||||
return self.status
|
||||
|
||||
def count(self):
|
||||
if self.status:
|
||||
return (1, 1)
|
||||
return (1, 0)
|
||||
|
||||
def contains(self, other):
|
||||
if self == other:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
if self.status:
|
||||
return 'OK: %s'%self.desc
|
||||
else:
|
||||
return 'ERROR: %s (%s)'%(self.desc, self.error)
|
||||
|
||||
class TestResultCollection(TestResultEntry):
|
||||
|
||||
status = True
|
||||
title = None
|
||||
children = []
|
||||
def __init__(self, title, list = None):
|
||||
self.title = title
|
||||
self.children = []
|
||||
if list:
|
||||
self.extend(list)
|
||||
|
||||
def log(self, show_all = False, prefix = '', indent = 0):
|
||||
start = '%s%s'%(prefix, ''.rjust(indent, ' '))
|
||||
if self.status:
|
||||
if show_all:
|
||||
log('%s%s'%(start, self))
|
||||
log_debug('%s%s'%(start, self))
|
||||
else:
|
||||
log_error('%s%s'%(start, self))
|
||||
for c in self.children:
|
||||
c.log(show_all, prefix, indent+1)
|
||||
|
||||
def is_ok(self):
|
||||
return self.status
|
||||
|
||||
def count(self):
|
||||
total_count = 0
|
||||
ok_count = 0
|
||||
#if self.status:
|
||||
# ok_count = 1
|
||||
|
||||
for c in self.children:
|
||||
(total, ok) = c.count()
|
||||
total_count = total_count + total
|
||||
ok_count = ok_count + ok
|
||||
|
||||
return (total_count, ok_count)
|
||||
|
||||
def contains(self, other):
|
||||
for c in self.children:
|
||||
if c.contains(other):
|
||||
return True
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
if self.status:
|
||||
return 'OK: %s'%self.title
|
||||
else:
|
||||
(total, ok) = self.count()
|
||||
return 'ERROR: %s (%d/%d)'%(self.title, ok, total)
|
||||
|
||||
def extend(self, lst):
|
||||
if isinstance(lst, list):
|
||||
if self.status:
|
||||
for c in lst:
|
||||
if not c.is_ok():
|
||||
self.status = False
|
||||
|
||||
for c in lst:
|
||||
if c.contains(self):
|
||||
log_error('Attempting to add a list with me in it')
|
||||
return
|
||||
self.children.extend(lst)
|
||||
else:
|
||||
self.append(lst)
|
||||
|
||||
def append(self, entry):
|
||||
if not entry:
|
||||
log_error('Attempting to add invalid entry (None)')
|
||||
elif entry == self:
|
||||
log_error('Attempting to add self to self')
|
||||
else:
|
||||
if self.status and not entry.is_ok():
|
||||
self.status = False
|
||||
self.children.append(entry)
|
||||
|
||||
class ArgumentParserError(Exception): pass
|
||||
|
||||
import argparse
|
||||
|
||||
class ThrowingArgumentParser(argparse.ArgumentParser):
|
||||
def error(self, message):
|
||||
raise ArgumentParserError(message)
|
||||
|
||||
class TestResult(TestResultCollection):
|
||||
|
||||
def __init__(self, title = 'DUMMY TITLE'):
|
||||
TestResultCollection.__init__(self, title)
|
||||
|
||||
def add_message(self, status, message, error = None):
|
||||
e = TestResultEntry(status, message, error)
|
||||
e.log()
|
||||
self.append(e)
|
||||
|
||||
def assert_equals(self, s1, s2, msg):
|
||||
self.add_message(s1 == s2, msg, '"%s" != "%s"'%(s1, s2))
|
||||
|
||||
def assert_gt(self, v1, v2, msg):
|
||||
self.add_message(v1 > v2, msg, '%d should be greater then %d'%(v1, v2))
|
||||
def assert_lt(self, v1, v2, msg):
|
||||
self.add_message(v1 < v2, msg, '%d should be less then %d'%(v1, v2))
|
||||
|
||||
def assert_contains(self, s1, s2, msg):
|
||||
if s1 == s2:
|
||||
self.add_message(s1 in s2 or s2 in s1, msg, '"%s" (contains) "%s"'%(s1, s2))
|
||||
elif s1 == None or s2 == None:
|
||||
self.add_message(False, msg, '"%s" (contains) "%s"'%(s1, s2))
|
||||
else:
|
||||
self.add_message(s1 in s2 or s2 in s1, msg, '"%s" (contains) "%s"'%(s1, s2))
|
||||
|
||||
def assert_not_contains(self, s1, s2, msg):
|
||||
if s1 == s2:
|
||||
self.add_message(False, msg, '"%s" (equals) "%s"'%(s1, s2))
|
||||
elif s1 == None or s2 == None:
|
||||
self.add_message(True, msg, '"%s" (is null?) "%s"'%(s1, s2))
|
||||
else:
|
||||
self.add_message(not (s1 in s2 or s2 in s1), msg, '"%s" (does not contains) "%s"'%(s1, s2))
|
||||
|
||||
def add_entry(self, e):
|
||||
self.append(e)
|
||||
|
||||
def add(self, result):
|
||||
self.extend(result)
|
||||
|
||||
def return_nagios(self, show_all = False):
|
||||
(total, ok) = self.count()
|
||||
self.log(show_all, ' | ')
|
||||
if total == ok:
|
||||
return (status.OK, "OK: %d test(s) successfull"%(total))
|
||||
else:
|
||||
return (status.CRITICAL, "ERROR: %d/%d test(s) failed"%(total-ok, total))
|
||||
|
||||
class TestManager:
|
||||
|
||||
suites = []
|
||||
prefix = ''
|
||||
plugin_id = None
|
||||
plugin_alias = None
|
||||
script_alias = None
|
||||
show_all = False
|
||||
cases = []
|
||||
|
||||
def __init__(self, plugin_id = 0, plugin_alias = '', script_alias = ''):
|
||||
if script_alias:
|
||||
self.prefix = '%s_'%script_alias
|
||||
self.plugin_id = plugin_id
|
||||
self.plugin_alias = plugin_alias
|
||||
self.script_alias = script_alias
|
||||
self.suites = []
|
||||
self.show_all = False
|
||||
self.cases = []
|
||||
|
||||
def set_show_ok(self):
|
||||
self.show_all = True
|
||||
|
||||
def add_case(self, cases):
|
||||
self.cases.extend(cases)
|
||||
|
||||
def add(self, suite):
|
||||
if isinstance(suite, list):
|
||||
for s in suite:
|
||||
self.add(s)
|
||||
else:
|
||||
if not suite in self.suites:
|
||||
self.suites.append(suite)
|
||||
|
||||
def run_suite(self, suite):
|
||||
result = TestResult('Running suite: %s'%suite.title())
|
||||
for c in list:
|
||||
result.add(run_test(plugin_id, prefix, c))
|
||||
return result
|
||||
|
||||
def run(self, arguments = []):
|
||||
result = TestResult('Test result for %d suites'%len(self.suites))
|
||||
for suite in self.suites:
|
||||
instance = suite.getInstance()
|
||||
instance.setup(self.plugin_id, self.prefix)
|
||||
suite_result = TestResult('Running suite: %s'%instance.title())
|
||||
if self.cases:
|
||||
suite_result.append(instance.run_test(self.cases))
|
||||
else:
|
||||
suite_result.append(instance.run_test())
|
||||
result.append(suite_result)
|
||||
result.add_message(suite_result.is_ok(), 'Result from suite: %s'%instance.title())
|
||||
instance.teardown()
|
||||
return result
|
||||
|
||||
def init(self):
|
||||
for suite in self.suites:
|
||||
instance = suite.getInstance()
|
||||
instance.init(self.plugin_id, self.prefix)
|
||||
|
||||
def destroy(self):
|
||||
self.suites = []
|
||||
self.prefix = ''
|
||||
self.plugin_id = None
|
||||
self.plugin_alias = None
|
||||
self.script_alias = None
|
||||
|
||||
def install(self, arguments = []):
|
||||
boot = False
|
||||
for suite in self.suites:
|
||||
instance = suite.getInstance()
|
||||
instance.install(arguments)
|
||||
if instance.require_boot():
|
||||
boot = True
|
||||
|
||||
#core = Core.get()
|
||||
#core.reload('service')
|
||||
#(code, msg, perf) = core.simple_query('py_unittest', [])
|
||||
|
||||
log('-+---==(TEST INSTALLER)==---------------------------------------------------+-')
|
||||
log(' | Setup nessecary configuration for running test |')
|
||||
log(' | This includes: Loading the PythonScript module at startup |')
|
||||
log(' | To use this please run nsclient++ in "test mode" like so: |')
|
||||
if boot:
|
||||
log(' | nscp client --boot --query py_unittest |')
|
||||
else:
|
||||
log(' | nscp client --query py_unittest |')
|
||||
log('-+--------------------------------------------------------==(DAS ENDE!)==---+-')
|
||||
|
||||
def shutdown(self):
|
||||
for suite in self.suites:
|
||||
instance = suite.getInstance()
|
||||
instance.uninstall()
|
||||
for suite in self.suites:
|
||||
instance = suite.getInstance()
|
||||
instance.shutdown()
|
||||
|
||||
|
109
bibliotheque/files/nsclient/scripts/python/sample.py
Normal file
109
bibliotheque/files/nsclient/scripts/python/sample.py
Normal file
@ -0,0 +1,109 @@
|
||||
from NSCP import Settings, Registry, Core, log, log_err, status
|
||||
|
||||
plugin_id = 0
|
||||
|
||||
world_status = 'safe'
|
||||
show_metrics = False
|
||||
world_count = 0
|
||||
def get_help(arguments):
|
||||
if arguments:
|
||||
log("Wicked: we got some args")
|
||||
return (status.OK, 'Need help? Sorry, Im not help full my friend...')
|
||||
|
||||
def check_world(arguments):
|
||||
global world_status, world_count
|
||||
world_count = world_count + 1
|
||||
if world_status == 'safe':
|
||||
return (status.OK, 'The world is fine!')
|
||||
return (status.CRITICAL, 'My god its full of stars: %s'%world_status)
|
||||
|
||||
def break_world(arguments):
|
||||
global world_status, world_count
|
||||
world_count = world_count + 1
|
||||
world_status = 'bad'
|
||||
log_err('Now why did you have to go and do this...')
|
||||
return (status.OK, 'Please, help me! I am trapped in here, please, my good... I want to get out.... please... ple...AAAAaarrrg...')
|
||||
|
||||
def fix_world(arguments):
|
||||
global world_status, world_count
|
||||
world_count = world_count + 1
|
||||
world_status = 'safe'
|
||||
return (status.OK, 'Wicked! Safe!')
|
||||
|
||||
def save_world(arguments):
|
||||
global world_status, plugin_id
|
||||
conf = Settings.get(plugin_id)
|
||||
conf.set_string('/settings/cool script', 'world', world_status)
|
||||
conf.save()
|
||||
return (status.OK, 'The world is saved: %s'%world_status)
|
||||
|
||||
def fun_show_metrics(arguments):
|
||||
global show_metrics
|
||||
if len(arguments) > 0:
|
||||
if arguments[0] == "true":
|
||||
show_metrics = True
|
||||
return (status.OK, 'Metrics displayed enabled')
|
||||
else:
|
||||
show_metrics = False
|
||||
return (status.OK, 'Metrics displayed disabled')
|
||||
return (status.UNKNOWN, 'Usage: show_metrics <true|false>')
|
||||
|
||||
def __main__(args):
|
||||
get_help(args)
|
||||
|
||||
def submit_metrics(list, request):
|
||||
global show_metrics
|
||||
if show_metrics:
|
||||
for k,v in list.iteritems():
|
||||
log("Got metrics: %s = %s"%(k,v))
|
||||
|
||||
def fetch_metrics():
|
||||
global world_status, world_count
|
||||
return { "number.of.times": world_count, "world": world_status}
|
||||
|
||||
def init(pid, plugin_alias, script_alias):
|
||||
global world_status, plugin_id
|
||||
plugin_id = pid
|
||||
|
||||
conf = Settings.get(plugin_id)
|
||||
conf.register_path('/settings/cool script', "Sample script config", "This is a sample script which demonstrates how to interact with NSClient++")
|
||||
conf.register_key('/settings/cool script', 'world', 'string', "A key", "Never ever change this key: or the world will break", "safe")
|
||||
|
||||
world_status = conf.get_string('/settings/cool script', 'world', 'true')
|
||||
if world_status != 'safe':
|
||||
log('My god: its full of stars: %s'%world_status)
|
||||
|
||||
log('Adding a simple function/cmd line')
|
||||
reg = Registry.get(plugin_id)
|
||||
reg.simple_cmdline('help', get_help)
|
||||
|
||||
reg.simple_function('check_world', check_world, 'Check if the world is safe')
|
||||
reg.simple_function('break_world', break_world, 'Break the world')
|
||||
reg.simple_function('fix_world', fix_world, 'Fix the world')
|
||||
reg.simple_function('save_world', save_world, 'Save the world')
|
||||
|
||||
reg.simple_function('show_metrics', fun_show_metrics, 'Enable displaying metrics or not')
|
||||
|
||||
reg.submit_metrics(submit_metrics)
|
||||
reg.fetch_metrics(fetch_metrics)
|
||||
|
||||
#core.simple_submit('%stest'%prefix, 'test.py', status.WARNING, 'hello', '')
|
||||
#core.simple_submit('test', 'test.py', status.WARNING, 'hello', '')
|
||||
|
||||
#(ret, list) = core.simple_exec('%stest'%prefix, ['a', 'b', 'c'])
|
||||
#for l in list:
|
||||
# log('-- %s --'%l)
|
||||
|
||||
#log('Testing to register settings keys')
|
||||
#conf.register_path('hello', 'PYTHON SETTINGS', 'This is stuff for python')
|
||||
#conf.register_key('hello', 'python', 'int', 'KEY', 'This is a key', '42')
|
||||
|
||||
#log('Testing to get key (nonexistant): %d' % conf.get_int('hello', 'python', -1))
|
||||
#conf.set_int('hello', 'python', 4)
|
||||
#log('Testing to get it (after setting it): %d' % conf.get_int('hello', 'python', -1))
|
||||
|
||||
#log('Saving configuration...')
|
||||
#conf.save()
|
||||
|
||||
def shutdown():
|
||||
log('Unloading script...')
|
@ -0,0 +1,21 @@
|
||||
from NSCP import Core, Registry, log, log_error
|
||||
plugin_id = -1
|
||||
|
||||
def __main__(args):
|
||||
global plugin_id
|
||||
# List all namespaces recursivly
|
||||
core = Core.get(plugin_id)
|
||||
(ret, ns_msgs) = core.simple_exec('CheckWMI', 'wmi', ['--list-all-ns'])
|
||||
if len(ns_msgs) == 0:
|
||||
log_error("Failed to execute WMI command is CheckWMI enabled?")
|
||||
else:
|
||||
for ns in ns_msgs[0].splitlines():
|
||||
# List all classes in each namespace
|
||||
(ret, cls_msgs) = core.simple_exec('any', 'wmi', ['--list-classes', '--simple', '--namespace', ns])
|
||||
for cls in cls_msgs[0].splitlines():
|
||||
log( '%s : %s'%(ns, cls))
|
||||
|
||||
|
||||
def init(pid, plugin_alias, script_alias):
|
||||
global plugin_id
|
||||
plugin_id = pid
|
2
bibliotheque/files/nsclient/scripts/python/test.py
Normal file
2
bibliotheque/files/nsclient/scripts/python/test.py
Normal file
@ -0,0 +1,2 @@
|
||||
|
||||
|
30
bibliotheque/files/nsclient/scripts/python/test_all.py
Normal file
30
bibliotheque/files/nsclient/scripts/python/test_all.py
Normal file
@ -0,0 +1,30 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
|
||||
from sys import path
|
||||
import os
|
||||
path.append(os.getcwd() + '/scripts/python')
|
||||
|
||||
from test_nsca import NSCAServerTest
|
||||
from test_nrpe import NRPEServerTest
|
||||
from test_python import PythonTest
|
||||
|
||||
#
|
||||
all_tests = [NSCAServerTest, PythonTest, NRPEServerTest]
|
||||
if os.name == 'nt':
|
||||
from test_eventlog import EventLogTest
|
||||
from test_w32_system import Win32SystemTest
|
||||
from test_w32_file import Win32FileTest
|
||||
from test_w32_wmi import Win32WMITest
|
||||
from test_external_script import ExternalScriptTest
|
||||
#all_tests.extend([EventLogTest, Win32SystemTest, Win32FileTest, Win32WMITest])
|
||||
all_tests.extend([Win32SystemTest, Win32FileTest, Win32WMITest, ExternalScriptTest])
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
294
bibliotheque/files/nsclient/scripts/python/test_eventlog.py
Normal file
294
bibliotheque/files/nsclient/scripts/python/test_eventlog.py
Normal file
@ -0,0 +1,294 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
import plugin_pb2
|
||||
from types import *
|
||||
import socket
|
||||
import unicodedata
|
||||
import uuid
|
||||
|
||||
|
||||
class Message:
|
||||
uuid = None
|
||||
channel = None
|
||||
source = None
|
||||
command = None
|
||||
status = None
|
||||
message = None
|
||||
perf = None
|
||||
tag = None
|
||||
delivered = False
|
||||
|
||||
def __init__(self, channel = None, source = None, command = None, status = None, message = None, perf = None, tag = None):
|
||||
if not channel:
|
||||
self.uuid = str(uuid.uuid4())
|
||||
self.channel = channel
|
||||
self.source = source
|
||||
self.command = command
|
||||
self.status = status
|
||||
self.message = message
|
||||
self.perf = perf
|
||||
self.delivered = False
|
||||
self.tag = tag
|
||||
|
||||
def copy_from(self, other):
|
||||
self.uuid = other.uuid
|
||||
self.channel = other.channel
|
||||
self.source = other.source
|
||||
self.command = other.command
|
||||
self.status = other.status
|
||||
self.message = other.message
|
||||
self.perf = other.perf
|
||||
self.delivered = other.delivered
|
||||
self.tag = other.tag
|
||||
|
||||
def __str__(self):
|
||||
return 'Message: %s (%s, %s, %s)'%(self.uuid, self.channel, self.status, self.message)
|
||||
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
|
||||
class EventLogTest(BasicTest):
|
||||
instance = None
|
||||
key = ''
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
last_tag = []
|
||||
got_simple_response = None
|
||||
message_count = 0
|
||||
messages = []
|
||||
|
||||
class SingletonHelper:
|
||||
def __call__( self, *args, **kw ) :
|
||||
if EventLogTest.instance is None :
|
||||
object = EventLogTest()
|
||||
EventLogTest.instance = object
|
||||
return EventLogTest.instance
|
||||
|
||||
getInstance = SingletonHelper()
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for eventlog'
|
||||
|
||||
def title(self):
|
||||
return 'EventLog test'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.key = '_%stest_command'%prefix
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.reg.simple_subscription('pytest_evlog_01', EventLogTest.simple_inbox_handler_01)
|
||||
self.reg.simple_subscription('pytest_evlog_02', EventLogTest.simple_inbox_handler_02)
|
||||
|
||||
def simple_inbox_handler_01(channel, source, command, code, message, perf):
|
||||
instance = EventLogTest.getInstance()
|
||||
return instance.simple_inbox_handler_wrapped(channel, source, command, code, message, perf, '001')
|
||||
simple_inbox_handler_01 = Callable(simple_inbox_handler_01)
|
||||
|
||||
def simple_inbox_handler_02(channel, source, command, code, message, perf):
|
||||
instance = EventLogTest.getInstance()
|
||||
return instance.simple_inbox_handler_wrapped(channel, source, command, code, message, perf, '002')
|
||||
simple_inbox_handler_02 = Callable(simple_inbox_handler_02)
|
||||
|
||||
def simple_inbox_handler_wrapped(self, channel, source, command, status, message, perf, tag):
|
||||
msg = Message(channel, source, command, status, message, perf, tag)
|
||||
msg.delivered = True
|
||||
self.messages.append(msg)
|
||||
log('Recieved: %s'%msg)
|
||||
return True
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def test_create(self, source, id, level, severity, category, facility, arguments):
|
||||
result = TestResult('Creating log message: i:%d, l:%s, s:%s, c:%d, f:%d'%(id, level, severity, category, facility))
|
||||
args = ['--source', source,
|
||||
'--id', id, # Any number (corresponds with message identifier) -- Identifies message
|
||||
'--level', level, # error(1), warning(2), success(0), info(4), auditSuccess(8), auditFailure(10) -- Loglevel severity (ie log level)
|
||||
'--severity', severity, # success(0), informational(1), warning(2), error(3) -- Developer severity (ie classification)
|
||||
'--category', category, #
|
||||
'--facility', facility #
|
||||
]
|
||||
for f in arguments:
|
||||
args.append('--argument')
|
||||
args.append(f)
|
||||
(ret, msg) = self.core.simple_exec('eventlog', 'insert', args)
|
||||
result.assert_equals(ret, 0, 'return code')
|
||||
result.assert_equals(len(msg), 1, 'Message length')
|
||||
if len(msg) == 1:
|
||||
result.assert_equals(msg[0], 'Message reported successfully', 'Status message')
|
||||
return result
|
||||
|
||||
|
||||
def test_w_expected(self, filter, syntax, expected):
|
||||
result = TestResult('Validating filter: %s (%d)'%(filter, expected))
|
||||
(res, msg, perf) = self.core.simple_query('CheckEventLog', ['file=Application', 'debug=false', 'warn=gt:%d'%expected, 'crit=gt:%d'%expected, 'filter=%s'%filter, 'syntax=%s'%syntax, 'scan-range=-10m', 'top-syntax=${status} ${count}==%d: ${list}'%expected])
|
||||
result.assert_equals(res, status.OK, "Validate status OK for %s"%filter)
|
||||
(res, msg, perf) = self.core.simple_query('CheckEventLog', ['file=Application', 'debug=false', 'warn=eq:%d'%expected, 'crit=gt:%d'%expected, 'filter=%s'%filter, 'syntax=%s'%syntax, 'scan-range=-10m', 'top-syntax=${status} ${count}==%d: ${list}'%expected])
|
||||
result.assert_equals(res, status.WARNING, "Validate status OK for %s"%filter)
|
||||
(res, msg, perf) = self.core.simple_query('CheckEventLog', ['file=Application', 'debug=false', 'warn=eq:%d'%expected, 'crit=eq:%d'%expected, 'filter=%s'%filter, 'syntax=%s'%syntax, 'scan-range=-10m', 'top-syntax=${status} ${count}==%d: ${list}'%expected])
|
||||
result.assert_equals(res, status.CRITICAL, "Validate status CRIT for %s"%filter)
|
||||
return result
|
||||
|
||||
def test_syntax(self, filter, syntax, expected):
|
||||
result = TestResult('Validating syntax: %s'%syntax)
|
||||
(res, msg, perf) = self.core.simple_query('CheckEventLog', ['file=Application', 'warn=ne:1', 'filter=%s'%filter, 'syntax=%s'%syntax, 'descriptions', 'scan-range=-10m'])
|
||||
result.assert_equals(msg, expected, "Validate message rendering syntax: %s"%msg)
|
||||
return result
|
||||
|
||||
def run_test(self):
|
||||
result = TestResult('Checking CheckEventLog')
|
||||
cache = TestResult('Checking CheckEventLog CACHE')
|
||||
|
||||
sleep(2000)
|
||||
|
||||
#(res, msg, perf) = self.core.simple_query('CheckEventLogCACHE', ['warn=eq:1', 'crit=eq:2'])
|
||||
#cache.assert_equals(res, status.OK, "Validate cache is empty")
|
||||
#cache.assert_equals(msg, 'Eventlog check ok', "Validate cache is ok: %s"%msg)
|
||||
|
||||
|
||||
a_list = ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
|
||||
result.add(self.test_create('Application Error', 1000, 'error', 'success', 0, 0, a_list))
|
||||
result.add(self.test_create('Application Error', 1000, 'warning', 'informational', 1, 5, a_list))
|
||||
result.add(self.test_create('Application Error', 1000, 'success', 'warning', 2, 5, a_list))
|
||||
result.add(self.test_create('Application Error', 1000, 'info', 'error', 3, 5, a_list))
|
||||
for x in range(1,10):
|
||||
log('Waiting...%d/4.'%len(self.messages))
|
||||
sleep(100)
|
||||
if len(self.messages) == 4:
|
||||
break
|
||||
log('Recieved %d messages.'%len(self.messages))
|
||||
result.assert_equals(len(self.messages), 4, 'Verify that all 4 messages are sent through')
|
||||
|
||||
for msg in self.messages:
|
||||
if msg.message.startswith('X1'):
|
||||
r = TestResult('Validating message X1')
|
||||
r.assert_equals(msg.message, 'X1 warning Application Error: ', 'Verify message')
|
||||
r.assert_equals(msg.channel, 'pytest_evlog_01', 'Verify channel')
|
||||
r.assert_equals(msg.tag, '001', 'Verify tag')
|
||||
r.assert_equals(msg.status, status.WARNING, 'Verify status')
|
||||
result.add(r)
|
||||
elif msg.message.startswith('X2'):
|
||||
r = TestResult('Validating message X2')
|
||||
r.assert_equals(msg.message, 'X2 success Application Error: ', 'Verify message')
|
||||
r.assert_equals(msg.channel, 'pytest_evlog_02', 'Verify channel')
|
||||
r.assert_equals(msg.tag, '002', 'Verify tag')
|
||||
r.assert_equals(msg.status, status.CRITICAL, 'Verify status')
|
||||
result.add(r)
|
||||
elif msg.message.startswith('X3'):
|
||||
r = TestResult('Validating message X3')
|
||||
r.assert_equals(msg.message, 'X3 info Application Error: ', 'Verify message')
|
||||
r.assert_equals(msg.channel, 'pytest_evlog_01', 'Verify channel')
|
||||
r.assert_equals(msg.tag, '001', 'Verify tag')
|
||||
r.assert_equals(msg.status, status.UNKNOWN, 'Verify status')
|
||||
result.add(r)
|
||||
elif msg.message.startswith('X4'):
|
||||
r = TestResult('Validating message X4')
|
||||
r.assert_equals(msg.message, 'X4 error Application Error: ', 'Verify message')
|
||||
r.assert_equals(msg.channel, 'pytest_evlog_01', 'Verify channel')
|
||||
r.assert_equals(msg.tag, '001', 'Verify tag')
|
||||
r.assert_equals(msg.status, status.OK, 'Verify status')
|
||||
result.add(r)
|
||||
|
||||
#(res, msg, perf) = self.core.simple_query('CheckEventLogCACHE', ['warn=eq:1', 'crit=eq:4'])
|
||||
#cache.assert_equals(res, status.CRITICAL, "Validate cache has items: %s"%msg)
|
||||
#cache.assert_equals(msg, 'X4 error Application Error: , X1 warning Application Error: , X2 success Application Error: , X3 info Application Error: , eventlog: 4 = critical', "Validate cache message")
|
||||
#cache.assert_equals(perf, "'eventlog'=4;1;4", "Validate cache performance")
|
||||
#(res, msg, perf) = self.core.simple_query('CheckEventLogCACHE', ['warn=eq:1', 'crit=eq:2'])
|
||||
#cache.assert_equals(res, status.OK, "Validate cache is empty (again)")
|
||||
#cache.assert_equals(msg, 'Eventlog check ok', "Validate cache is ok: %s"%msg)
|
||||
|
||||
#result.add(cache)
|
||||
|
||||
r = TestResult('Checking filters')
|
||||
r.add(self.test_w_expected('id = 1000 and generated gt 1m', '%generated%', 0))
|
||||
r.add(self.test_w_expected('id = 1000 and generated gt -1m', '%generated%', 4))
|
||||
r.add(self.test_w_expected('id = 1000 and generated gt -1m and id = 1000', '%generated%: %id%, %category%', 4))
|
||||
r.add(self.test_w_expected('id = 1000 and generated gt -1m and category = 1', '%category%', 1))
|
||||
r.add(self.test_w_expected('id = 1000 and generated gt -1m and category = 0', '%category%', 1))
|
||||
r.add(self.test_w_expected("id = 1000 and generated gt -1m and level = 'error'", '%level%', 1))
|
||||
r.add(self.test_w_expected("id = 1000 and generated gt -1m and level = 'warning'", '%level%', 1))
|
||||
result.add(r)
|
||||
|
||||
r = TestResult('Checking syntax')
|
||||
r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 0', '%source% - %type% - %category%', 'Application Error - error - 0'))
|
||||
r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 1', '%source% - %type% - %category%', 'Application Error - warning - 1'))
|
||||
r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 2', '%source% - %type% - %category%', 'Application Error - information - 2'))
|
||||
r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 3', '%source% - %type% - %category%', 'Application Error - information - 3'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 0', '%facility% - %qualifier% - %customer%', '0 - 0 - 0'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 1', '%facility% - %qualifier% - %customer%', '5 - 5 - 0'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 0', '%rawid% - %severity% - %log%', '1000 - success - Application'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 1', '%rawid% - %severity% - %log%', '1074070504 - informational - Application'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 2', '%rawid% - %severity% - %log%', '2147812328 - warning - Application'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 3', '%rawid% - %severity% - %log%', '3221554152 - error - Application'))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 0', '%id% - %strings%', '1000 - a, a, a, a, a, a, a, a, a, a, a, a, a, '))
|
||||
#r.add(self.test_syntax('id = 1000 and generated gt -2m and category = 1', '%id% - %strings%', '1000 - a, a, a, a, a, a, a, a, a, a, a, a, a, '))
|
||||
result.add(r)
|
||||
|
||||
return result
|
||||
|
||||
def install_filter(self, conf, path, target, filter = None, syntax = '%type% %source%: %message%', severity = 'OK', lang = 'english', age = '5s'):
    """Write one real-time eventlog filter section under `path`.

    Each non-empty keyword argument becomes a `set_string` entry; the
    `log`/`debug` keys are always written.  `conf` is the Settings object,
    `target` the channel the filter reports to (may be None/empty to skip).
    """
    # Optional keys, written only when a truthy value was supplied.
    optional_settings = [
        ('filter', filter),
        ('target', target),
        ('language', lang),
        ('maximum age', age),
        ('detail syntax', syntax),
        ('severity', severity),
    ]
    for key, value in optional_settings:
        if value:
            conf.set_string(path, key, value)
    # Always monitor the application log with debug output enabled.
    conf.set_string(path, 'log', 'application')
    conf.set_string(path, 'debug', 'true')
|
||||
|
||||
def install(self, arguments):
    """Register the eventlog + python-script modules and the real-time
    filter sections used by this suite, then persist the settings."""
    conf = self.conf
    conf.set_string('/modules', 'pytest_eventlog', 'CheckEventLog')
    conf.set_string('/modules', 'pytest', 'PythonScript')
    conf.set_string('/settings/pytest/scripts', 'test_eventlog', 'test_eventlog.py')

    realtime = '/settings/pytest_eventlog/real-time'
    conf.set_string(realtime, 'enabled', 'true')

    self.install_filter(conf, realtime + '/filters/default', 'pytest_evlog_01', 'id = 1000 and category = 0', '%type% %source%: %message%', 'OK')
    base_syntax = '${id},${category} ${source}: ${message}'

    # Four filters, one per category, each tagged X1..X4 so run_test can
    # identify which filter produced a given message.
    self.install_filter(conf, realtime + '/filters/py_test_001', 'pytest_evlog_01', 'id = 1000 and category = 1', 'X1 %s'%base_syntax, 'WARNING')
    self.install_filter(conf, realtime + '/filters/py_test_002', 'pytest_evlog_02', 'id = 1000 and category = 2', 'X2 %s'%base_syntax, 'CRITICAL')
    self.install_filter(conf, realtime + '/filters/py_test_003', None, 'id = 1000 and category = 3', 'X3 %s'%base_syntax, 'UNKNOWN')
    self.install_filter(conf, realtime + '/filters/py_test_004', None, None, 'X4 %s'%base_syntax, None)

    conf.set_string(realtime, 'maximum age', '5s')
    conf.set_string(realtime, 'debug', 'true')
    conf.set_string(realtime, 'enable active', 'true')

    conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
setup_singleton(EventLogTest)
|
||||
|
||||
all_tests = [EventLogTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
@ -0,0 +1,204 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
import sys, difflib
|
||||
import os
|
||||
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
import plugin_pb2
|
||||
from types import *
|
||||
import socket
|
||||
import uuid
|
||||
import unicodedata
|
||||
|
||||
import threading
|
||||
sync = threading.RLock()
|
||||
|
||||
|
||||
LONG_OUTPUT = """Test arguments are: (LONG "$ARG2$" "$ARG3$")\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n
|
||||
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"""
|
||||
|
||||
|
||||
class ExternalScriptTest(BasicTest):
|
||||
instance = None
|
||||
key = ''
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
_responses = {}
|
||||
_requests = {}
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.key = '_%stest_command'%prefix
|
||||
self.reg = Registry.get(plugin_id)
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def do_one_test(self, script, expected = status.OK, message = "Foo Bar", args=[], cleanup=True):
    """Run one external-script query and compare status and message.

    On non-Windows platforms (when `cleanup` is set) the expected message is
    normalised first: shells strip the quotes and the unexpanded $ARGn$
    placeholders collapse to a bare '$'; CR/CRLF differences are folded away.
    NOTE(review): the source this was recovered from had lost its
    indentation — the newline normalisation is assumed to belong to the
    cleanup branch; confirm against the original file.
    """
    result = TestResult('%s (%s)'%(script, args))
    (ret, msg, perf) = self.core.simple_query(script, args)
    if cleanup and os.name != 'nt':
        message = message.replace('"', '')
        for placeholder in ('$ARG1$', '$ARG2$', '$ARG3$'):
            message = message.replace(placeholder, '$')
        message = message.replace('\r', '\n').replace('\n\n', '\n')
        msg = msg.replace('\r', '\n').replace('\n\n', '\n')
    result.assert_equals(ret, expected, 'Validate return code for %s'%script)
    result.assert_equals(msg, message, 'Validate return message for %s'%script)
    if msg != message:
        # Log a line diff so a mismatch is easy to pinpoint in the output.
        for line in difflib.ndiff(msg.splitlines(1), message.splitlines(1)):
            log_error(line)
    return result
|
||||
|
||||
def run_test(self):
|
||||
ret = TestResult('External scripts test suite')
|
||||
result = TestResult('Arguments NOT allowed')
|
||||
result.add(self.do_one_test('tes_script_ok', message='OK: Everything is going to be fine'))
|
||||
result.add(self.do_one_test('tes_script_test', message='Test arguments are: ( )'))
|
||||
|
||||
result.add(self.do_one_test('tes_sa_test', status.OK, 'Test arguments are: ("ARG1" "ARG 2" "A R G 3")'))
|
||||
|
||||
|
||||
result.add(self.do_one_test('tes_script_test', status.UNKNOWN, 'Arguments not allowed see nsclient.log for details', ['NOT ALLOWED']))
|
||||
|
||||
ret.add(result)
|
||||
self.conf.set_string('/settings/test_external_scripts', 'allow arguments', 'true')
|
||||
self.core.reload('test_external_scripts')
|
||||
|
||||
result = TestResult('Arguments allowed')
|
||||
|
||||
if os.name == 'nt':
|
||||
tests = ['bat', 'ps1']
|
||||
else:
|
||||
tests = ['sh']
|
||||
for t in tests:
|
||||
script = 'tes_sca_%s'%t
|
||||
subresult = TestResult(t)
|
||||
subresult.add(self.do_one_test(script, status.OK, 'Test arguments are: (OK "$ARG2$" "$ARG3$")', ['OK']))
|
||||
subresult.add(self.do_one_test(script, status.WARNING, 'Test arguments are: (WARN "$ARG2$" "$ARG3$")', ['WARN']))
|
||||
subresult.add(self.do_one_test(script, status.CRITICAL, 'Test arguments are: (CRIT "$ARG2$" "$ARG3$")', ['CRIT']))
|
||||
subresult.add(self.do_one_test(script, status.UNKNOWN, 'Test arguments are: (UNKNOWN "$ARG2$" "$ARG3$")', ['UNKNOWN']))
|
||||
subresult.add(self.do_one_test(script, status.OK, 'Test arguments are: (OK "String with space" "A long long option with many spaces")', ['OK', 'String with space', 'A long long option with many spaces']))
|
||||
subresult.add(self.do_one_test(script, status.OK, LONG_OUTPUT, ['LONG']))
|
||||
|
||||
subresult.add(self.do_one_test(script, status.UNKNOWN, 'Request contained illegal characters set /settings/external scripts/allow nasty characters=true!', ['OK', '$$$ \\ \\', '$$$ \\ \\']))
|
||||
|
||||
result.add(subresult)
|
||||
|
||||
subresult = TestResult('Upper and lower case')
|
||||
subresult.add(self.do_one_test("tes_upper_LOWER", status.OK, 'OK: Everything is going to be fine', ['OK']))
|
||||
subresult.add(self.do_one_test("alias_UPPER_lower", status.OK, 'OK: Everything is going to be fine', ['OK']))
|
||||
|
||||
result.add(subresult)
|
||||
|
||||
ret.add(result)
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts', 'allow nasty characters', 'true')
|
||||
self.core.reload('test_external_scripts')
|
||||
|
||||
result = TestResult('Nasty Arguments allowed')
|
||||
for t in tests:
|
||||
script = 'tes_sca_%s'%t
|
||||
subresult = TestResult(t)
|
||||
if os.name == 'nt':
|
||||
subresult.add(self.do_one_test(script, status.OK, 'Test arguments are: (OK "$$$ \\ \\" "$$$ \\ \\")', ['OK', '$$$ \\ \\', '$$$ \\ \\'], False))
|
||||
else:
|
||||
subresult.add(self.do_one_test(script, status.OK, 'Test arguments are: (OK $ \\ " $ ")', ['OK', '$ \\ \\', '$ \\ \\'], False))
|
||||
result.add(subresult)
|
||||
|
||||
ret.add(result)
|
||||
return ret
|
||||
|
||||
def install(self, arguments):
|
||||
self.conf.set_string('/modules', 'test_external_scripts', 'CheckExternalScripts')
|
||||
self.conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
|
||||
self.conf.set_string('/settings/pytest/scripts', 'test_external_script', 'test_external_script.py')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts', 'allow arguments', 'false')
|
||||
self.conf.set_string('/settings/test_external_scripts', 'allow nasty characters', 'false')
|
||||
|
||||
if os.name == 'nt':
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_UPPER_lower', 'scripts\\check_ok.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_ok', 'scripts\\check_ok.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_long', 'scripts\\check_long.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_test', 'scripts\\check_test.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_bat', 'scripts\\check_test.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_ps1', 'cmd /c echo scripts\\check_test.ps1; exit($lastexitcode) | powershell.exe -command -')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_vbs', 'cscript.exe //T:30 //NoLogo scripts\\\\lib\\\\wrapper.vbs scripts\\check_test.vbs')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sa_test', 'scripts\\check_test.bat "ARG1" "ARG 2" "A R G 3"')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sa_ps1', 'scripts\\check_test.ps1 "ARG1" "ARG 2" "A R G 3"')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sa_vbs', 'scripts\\check_test.vbs "ARG1" "ARG 2" "A R G 3"')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sca_bat', 'scripts\\check_test.bat $ARG1$ "$ARG2$" "$ARG3$"')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sca_ps1', 'cmd /c echo scripts\\check_test.ps1 \'$ARG1$\' \'$ARG2$\' \'$ARG3$\'; exit($lastexitcode) | powershell.exe -command -')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sca_vbs', 'scripts\\check_test.vbs $ARG1$ "$ARG2$" "$ARG3$"')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts/wrapped scripts', 'tes_ws_bat', 'check_test.bat')
|
||||
self.conf.set_string('/settings/test_external_scripts/wrapped scripts', 'tes_ws_ps1', 'check_test.ps1')
|
||||
self.conf.set_string('/settings/test_external_scripts/wrapped scripts', 'tes_ws_vbs', 'check_test.vbs')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts/wrappings', 'vbs', 'cscript.exe //T:30 //NoLogo scripts\\\\lib\\\\wrapper.vbs %SCRIPT% %ARGS%')
|
||||
self.conf.set_string('/settings/test_external_scripts/wrappings', 'ps1', 'cmd /c echo scripts\\\\%SCRIPT% %ARGS%; exit($lastexitcode) | powershell.exe -command -')
|
||||
self.conf.set_string('/settings/test_external_scripts/wrappings', 'bat', 'scripts\\\\%SCRIPT% %ARGS%')
|
||||
|
||||
else:
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_UPPER_lower', 'scripts/check_ok.sh')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_ok', 'scripts/check_ok.sh')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_long', 'scripts/check_long.sh')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_sh', 'scripts/check_test.sh')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_script_test', 'scripts/check_test.sh')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sa_test', 'scripts/check_test.sh "ARG1" "ARG 2" "A R G 3"')
|
||||
self.conf.set_string('/settings/test_external_scripts/scripts', 'tes_sca_sh', 'scripts/check_test.sh $ARG1$ "$ARG2$" "$ARG3$"')
|
||||
|
||||
self.conf.set_string('/settings/test_external_scripts/alias', 'tes_alias_ok', 'tes_script_test')
|
||||
self.conf.set_string('/settings/test_external_scripts/alias', 'tes_aa_ok', 'tes_script_test "ARG1" "ARG 2" "A R G 3"')
|
||||
self.conf.set_string('/settings/test_external_scripts/alias', 'alias_UPPER_lower', 'tes_UPPER_lower')
|
||||
|
||||
self.conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
def require_boot(self):
|
||||
return True
|
||||
|
||||
setup_singleton(ExternalScriptTest)
|
||||
|
||||
all_tests = [ExternalScriptTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
161
bibliotheque/files/nsclient/scripts/python/test_log_file.py
Normal file
161
bibliotheque/files/nsclient/scripts/python/test_log_file.py
Normal file
@ -0,0 +1,161 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
import uuid
|
||||
import os
|
||||
|
||||
|
||||
def create_test_data(file):
    """Write the fixed six-row CSV fixture (id,group,text) consumed by the
    check_logfile filter/boundary tests."""
    rows = [
        (1, 'A', 'Test 1'),
        (2, 'A', 'Test 2'),
        (3, 'B', 'Test 1'),
        (4, 'B', 'Test 1'),
        (5, 'C', 'Test 1'),
        (6, 'B', 'Test 2'),
    ]
    with open(file, "w") as f:
        for num, group, text in rows:
            f.write("%d,%s,%s\n" % (num, group, text))
|
||||
def delete_file(file):
    """Best-effort removal of `file`: logs (rather than raises) on failure,
    and is a silent no-op when the file does not exist.

    Fix: the original used the Python-2-only form
    ``except OSError, (errno, strerror):`` — a syntax error under Python 3 —
    and never used the unpacked errno/strerror anyway.  The bare
    ``except OSError:`` below is valid on both Python 2 and 3.
    """
    if os.path.exists(file):
        try:
            os.remove(file)
        except OSError:
            log('Failed to delete: %s'%file)
|
||||
class LogFileTest(BasicTest):
|
||||
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
|
||||
def __init__(self):
|
||||
self.temp_path = None
|
||||
self.work_file = None
|
||||
None
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for check_file module'
|
||||
|
||||
def title(self):
|
||||
return 'Win32File tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.temp_path = self.core.expand_path('${temp}')
|
||||
log('Temp: %s'%self.temp_path)
|
||||
self.work_path = os.path.join(self.temp_path, '%s.txt'%uuid.uuid4())
|
||||
log('Work: %s'%self.work_path)
|
||||
create_test_data(self.work_path)
|
||||
|
||||
def teardown(self):
|
||||
delete_file(self.work_path)
|
||||
|
||||
def get_count(self, perf):
    """Pull the integer count out of a perf string 'label=count;warn;crit'.

    Returns -1 when `perf` is empty/None or has nothing after the '='.
    (A malformed string with the wrong number of '=' or ';' separators
    raises, exactly as before.)
    """
    if not perf:
        return -1
    label, data = perf.split('=')
    if not data:
        return -1
    fields = data.split(';')
    count, warn, crit = fields
    return int(count)
||||
|
||||
def check_files(self, filter, text, expected):
    """Run check_logfile over the fixture file with `filter` and verify the
    matched-line count equals `expected` (and that the check returns OK,
    since warn/crit are both set to 'count gt expected')."""
    alias = '%s: %s'%(text, filter)
    result = TestResult('Checking %s'%alias)
    args = [
        'file=%s'%self.work_path,
        'column-split=,',
        'filter=%s'%filter,
        'warn=count gt %d'%expected,
        'crit=count gt %d'%expected,
    ]
    (ret, msg, perf) = self.core.simple_query('check_logfile', args)
    log("%s : %s -- %s"%(filter, msg, perf))
    count = self.get_count(perf)
    result.add_message(count == expected, '%s - number of files'%filter, 'got %s expected %s'%(count, expected))
    # NOTE(review): the second label below keeps the literal '%s -- status'
    # of the original (the placeholder was never formatted there either).
    result.add_message(ret == status.OK, '%s -- status', 'got %s expected OK'%ret)
    return result
||||
|
||||
def check_bound(self, filter, warn, crit, expected):
    """Run check_logfile with explicit warn/crit expressions and verify the
    resulting status equals `expected`."""
    alias = '%s/%s/%s'%(filter, warn, crit)
    result = TestResult('Checking %s'%alias)
    args = [
        'file=%s'%self.work_path,
        'column-split=,',
        'filter=%s'%filter,
        'warn=%s'%warn,
        'crit=%s'%crit,
    ]
    (ret, msg, perf) = self.core.simple_query('check_logfile', args)
    log("%s : %s -- %s"%(filter, msg, perf))
    result.add_message(ret == expected, 'Check status', 'Invalid check status: %s'%ret)
    return result
|
||||
|
||||
def run_filter_operator_test(self):
    """Exercise the check_logfile filter language against the six-row
    fixture: string equality, like/not like, and numeric comparisons.
    Expected counts follow from the fixture written by create_test_data."""
    cases = [
        ('none', 'Count all lines', 7),
        ("column2 = 'A'", 'Count all A', 2),
        ("column2 = 'B'", 'Count all B', 3),
        ("column2 = 'C'", 'Count all C', 1),
        ("column3 = 'Test 1'", 'Count all T1', 4),
        ("column3 like 'Test'", 'Count all T', 6),
        ("column3 not like '1'", 'Count all T', 3),
        ("column1 > 1", 'Count all B', 5),
        ("column1 > 3", 'Count all B', 3),
        ("column1 > 5", 'Count all B', 1),
        ("column1 < 1", 'Count all B', 1),
        ("column1 < 3", 'Count all B', 3),
        ("column1 < 5", 'Count all B', 5),
        ("column1 = 1", 'Count all B', 1),
        ("column1 = 3", 'Count all B', 1),
        ("column1 = 5", 'Count all B', 1),
        ("column1 != 1", 'Count all B', 6),
        ("column1 != 3", 'Count all B', 6),
        ("column1 != 5", 'Count all B', 6),
    ]
    result = TestResult('Filter tests')
    for filter, text, expected in cases:
        result.add(self.check_files(filter, text, expected))
    return result
||||
|
||||
def run_boundry_test(self):
    """Exercise warn/crit boundary expressions: matched-row counts crossing
    the thresholds must yield the corresponding WARNING/CRITICAL/OK."""
    cases = [
        ('none', 'count > 1', 'none', status.WARNING),
        ('none', 'none', 'count > 1', status.CRITICAL),
        ('column1 > 5', 'count > 2', 'count > 5', status.OK),
        ('column1 > 4', 'count > 2', 'count > 5', status.OK),
        ('column1 > 3', 'count > 2', 'count > 5', status.WARNING),
        ('column1 > 2', 'count > 2', 'count > 5', status.WARNING),
        ('column1 > 1', 'count > 2', 'count > 5', status.WARNING),
        ('column1 > 0', 'count > 2', 'count > 5', status.CRITICAL),
        # Thresholds expressed on a column value rather than the count.
        ('column1 > 5', 'column1 = 3', 'none', status.OK),
        ('column1 > 0', 'column1 = 3', 'none', status.WARNING),
    ]
    result = TestResult('Boundry tests')
    for filter, warn, crit, expected in cases:
        result.add(self.check_bound(filter, warn, crit, expected))
    return result
|
||||
|
||||
def run_test(self):
    """Run both sub-suites and roll their results into one TestResult."""
    result = TestResult('Test')
    # NOTE(review): every other suite in this file aggregates with
    # result.add(...); confirm TestResult.append exists in test_helper or
    # switch these two calls to add().  Kept as-is to preserve behaviour.
    for sub_result in (self.run_filter_operator_test(), self.run_boundry_test()):
        result.append(sub_result)
    return result
||||
|
||||
def install(self, arguments):
|
||||
conf = self.conf
|
||||
conf.set_string('/modules', 'test_disk', 'CheckLogFile')
|
||||
conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
conf.set_string('/settings/pytest/scripts', 'test_logfile', 'test_log_file.py')
|
||||
conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
setup_singleton(LogFileTest)
|
||||
|
||||
all_tests = [LogFileTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
284
bibliotheque/files/nsclient/scripts/python/test_nrpe.py
Normal file
284
bibliotheque/files/nsclient/scripts/python/test_nrpe.py
Normal file
@ -0,0 +1,284 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
import sys
|
||||
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
import plugin_pb2
|
||||
from types import *
|
||||
import socket
|
||||
import uuid
|
||||
import unicodedata
|
||||
|
||||
import threading
|
||||
sync = threading.RLock()
|
||||
|
||||
def isOpen(ip, port):
    """Return True when a TCP connection to (ip, port) can be established.

    Fixes over the original: the socket is now always closed (the old code
    leaked the descriptor on the failure path and never called close() at
    all), and the bare ``except:`` — which also swallowed KeyboardInterrupt
    and SystemExit — is narrowed to Exception.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    except Exception:
        return False
    finally:
        s.close()
||||
|
||||
class NRPEMessage:
    """Mutable record of one NRPE request/response pair, keyed by `uuid`."""
    uuid = None                  # normalised command id used as dict key
    source = None
    command = None               # command string exactly as received
    status = None
    message = None
    perfdata = None
    got_simple_response = False  # set when the simple handler saw this id
    got_response = False         # set when the protobuf handler saw this id

    def __init__(self, command):
        # BUG FIX: the original tested ``type(command) == 'unicode'`` —
        # comparing a type object against a string literal — which is always
        # False, so the ascii normalisation branch was dead code.  Comparing
        # the type *name* keeps the Python-2 intent without referencing the
        # `unicode` builtin (absent on Python 3, where the branch is simply
        # never taken).
        if type(command).__name__ == 'unicode':
            try:
                # NOTE(review): decode() on a Py2 unicode object presumably
                # intended encode(); left as in the original.
                self.uuid = command.decode('ascii', 'replace')
            except UnicodeDecodeError:
                self.uuid = command
        else:
            self.uuid = command
        self.command = command

    def __str__(self):
        return 'Message: %s (%s, %s, %s)'%(self.uuid, self.source, self.command, self.status)
||||
|
||||
class NRPEServerTest(BasicTest):
|
||||
instance = None
|
||||
key = ''
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
_responses = {}
|
||||
_requests = {}
|
||||
|
||||
def has_response(self, id):
    """True when a response with this id has been recorded (lock-guarded)."""
    with sync:
        found = id in self._responses
    return found
||||
|
||||
def get_response(self, id):
    """Fetch the response for `id`, creating and registering a blank
    NRPEMessage on first access (lock-guarded)."""
    with sync:
        try:
            return self._responses[id]
        except KeyError:
            msg = NRPEMessage(id)
            self._responses[id] = msg
            return msg
||||
|
||||
def set_response(self, msg):
    """Store (or overwrite) `msg` in the response map, keyed by its uuid."""
    with sync:
        self._responses[msg.uuid] = msg
||||
|
||||
def del_response(self, id):
    """Remove the response entry for `id` (KeyError if absent, as before)."""
    with sync:
        del self._responses[id]
||||
|
||||
def get_request(self, id):
    """Fetch the request for `id`, creating and registering a blank
    NRPEMessage on first access (lock-guarded)."""
    with sync:
        try:
            return self._requests[id]
        except KeyError:
            msg = NRPEMessage(id)
            self._requests[id] = msg
            return msg
||||
|
||||
def set_request(self, msg):
    """Store `msg` in the request map, clearing its response flags first so
    a reused message starts from a clean state."""
    msg.got_simple_response = False
    msg.got_response = False
    with sync:
        self._requests[msg.uuid] = msg
||||
|
||||
def del_request(self, id):
    """Remove the request entry for `id` (KeyError if absent, as before)."""
    with sync:
        del self._requests[id]
||||
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for NRPE protocol'
|
||||
|
||||
def title(self):
|
||||
return 'NRPE Client/Server test'
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.key = '_%stest_command'%prefix
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg.simple_function('check_py_nrpe_test_s', NRPEServerTest.simple_handler, 'TODO')
|
||||
self.reg.function('check_py_nrpe_test', NRPEServerTest.handler, 'TODO')
|
||||
|
||||
def simple_handler(arguments):
|
||||
instance = NRPEServerTest.getInstance()
|
||||
return instance.simple_handler_wrapped(arguments)
|
||||
simple_handler = Callable(simple_handler)
|
||||
|
||||
def handler(channel, request):
|
||||
instance = NRPEServerTest.getInstance()
|
||||
return instance.handler_wrapped(channel, request)
|
||||
handler = Callable(handler)
|
||||
|
||||
def simple_handler_wrapped(self, arguments):
|
||||
log('Got simple message %s'%arguments)
|
||||
msg = self.get_response(arguments[0])
|
||||
msg.got_simple_response = True
|
||||
self.set_response(msg)
|
||||
rmsg = self.get_request(arguments[0])
|
||||
return (rmsg.status, rmsg.message, rmsg.perfdata)
|
||||
|
||||
def handler_wrapped(self, channel, request):
|
||||
log_error('DISCARDING message on %s'%(channel))
|
||||
|
||||
message = plugin_pb2.SubmitRequestMessage()
|
||||
message.ParseFromString(request)
|
||||
command = message.payload[0].command
|
||||
log('Got message %s on %s'%(command, channel))
|
||||
|
||||
msg = self.get_response(command)
|
||||
msg.got_response = True
|
||||
self.set_response(msg)
|
||||
return None
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def submit_payload(self, alias, ssl, length, source, status, msg, perf, target):
|
||||
message = plugin_pb2.QueryRequestMessage()
|
||||
|
||||
message.header.destination_id = target
|
||||
message.header.command = 'nrpe_forward'
|
||||
host = message.header.hosts.add()
|
||||
host.address = "127.0.0.1:15666"
|
||||
host.id = target
|
||||
if (target == 'valid'):
|
||||
pass
|
||||
else:
|
||||
enc = host.metadata.add()
|
||||
enc.key = "use ssl"
|
||||
enc.value = '%s'%ssl
|
||||
enc = host.metadata.add()
|
||||
enc.key = "payload length"
|
||||
enc.value = '%d'%length
|
||||
enc = host.metadata.add()
|
||||
enc.key = "timeout"
|
||||
enc.value = '5'
|
||||
|
||||
uid = str(uuid.uuid4())
|
||||
payload = message.payload.add()
|
||||
payload.command = 'check_py_nrpe_test_s'
|
||||
payload.arguments.append(uid)
|
||||
rmsg = self.get_request(uid)
|
||||
rmsg.status = status
|
||||
rmsg.message = msg
|
||||
rmsg.perfdata = perf
|
||||
self.set_request(rmsg)
|
||||
(result_code, response) = self.core.query('ignored', message.SerializeToString())
|
||||
response_message = plugin_pb2.QueryResponseMessage()
|
||||
response_message.ParseFromString(response)
|
||||
result = TestResult('Testing NRPE: %s for %s'%(alias, target))
|
||||
|
||||
found = False
|
||||
for i in range(0,10):
|
||||
if self.has_response(uid):
|
||||
rmsg = self.get_response(uid)
|
||||
#result.add_message(rmsg.got_response, 'Testing to recieve message using %s'%alias)
|
||||
result.add_message(rmsg.got_simple_response, 'Testing to recieve simple message using %s'%alias)
|
||||
result.add_message(len(response_message.payload) == 1, 'Verify that we only get one payload response for %s'%alias, '%s != 1'%len(response_message.payload))
|
||||
if len(response_message.payload) == 1 and len(response_message.payload[0].lines) == 1:
|
||||
result.assert_equals(response_message.payload[0].result, status, 'Verify that status is sent through %s'%alias)
|
||||
result.assert_equals(response_message.payload[0].lines[0].message, msg, 'Verify that message is sent through %s'%alias)
|
||||
#result.assert_equals(rmsg.perfdata, perf, 'Verify that performance data is sent through')
|
||||
self.del_response(uid)
|
||||
found = True
|
||||
break
|
||||
else:
|
||||
log('Waiting for %s (%s/%s)'%(uid,alias,target))
|
||||
sleep(500)
|
||||
if not found:
|
||||
result.add_message(False, 'Testing to recieve message using %s'%alias)
|
||||
return result
|
||||
|
||||
def test_one(self, ssl=True, length=1024, state = status.UNKNOWN, tag = 'TODO'):
|
||||
result = TestResult('Testing NRPE: %s/%s/%s with various targets'%(ssl, length, tag))
|
||||
for t in ['valid', 'test_rp', 'invalid']:
|
||||
result.add(self.submit_payload('%s/%s/%s'%(ssl, length, tag), ssl, length, '%ssrc%s'%(tag, tag), state, '%smsg%s'%(tag, tag), '', t))
|
||||
return result
|
||||
|
||||
def do_one_test(self, ssl=True, length=1024):
    """Configure the NRPE server and client targets for one ssl/length
    combination, reload both modules, then run every status through them."""
    settings = self.conf

    server_section = '/settings/NRPE/test_nrpe_server'
    settings.set_int(server_section, 'payload length', length)
    settings.set_bool(server_section, 'use ssl', ssl)
    settings.set_bool(server_section, 'allow arguments', True)
    # TODO: settings.set_string(server_section, 'certificate', ssl)
    self.core.reload('test_nrpe_server')

    # (target name, address, ssl flag, payload length) — 'default' and
    # 'invalid' are deliberately mis-configured so only 'valid' should work.
    target_setup = (
        ('default', 'nrpe://127.0.0.1:35666', not ssl, length*3),
        ('invalid', 'nrpe://127.0.0.1:25666', not ssl, length*2),
        ('valid', 'nrpe://127.0.0.1:15666', ssl, length),
    )
    for name, address, use_ssl, payload_len in target_setup:
        section = '/settings/NRPE/test_nrpe_client/targets/' + name
        settings.set_string(section, 'address', address)
        settings.set_bool(section, 'use ssl', use_ssl)
        settings.set_int(section, 'payload length', payload_len)
    self.core.reload('test_nrpe_client')

    result = TestResult('ssl=%s, length=%s'%(ssl, length))
    result.add_message(isOpen('127.0.0.1', 15666), 'Checking that port is open (server is up)')
    for one_state, one_tag in ((status.UNKNOWN, 'unknown'), (status.OK, 'ok'),
                               (status.WARNING, 'warn'), (status.CRITICAL, 'crit')):
        result.add(self.test_one(ssl, length, state = one_state, tag = one_tag))
    return result
|
||||
|
||||
def run_test(self):
    """Run the NRPE suite over both ssl modes and increasing payload sizes."""
    result = TestResult()
    for kwargs in ({'ssl': True},
                   {'ssl': False},
                   {'ssl': True, 'length': 4096},
                   {'ssl': True, 'length': 65536},
                   {'ssl': True, 'length': 1048576}):
        result.add(self.do_one_test(**kwargs))
    return result
|
||||
|
||||
def install(self, arguments):
    """Write the static module/server/client configuration and persist it."""
    settings = self.conf
    for section, key, value in (
            # Load the modules under test plus the python runtime.
            ('/modules', 'test_nrpe_server', 'NRPEServer'),
            ('/modules', 'test_nrpe_client', 'NRPEClient'),
            ('/modules', 'pytest', 'PythonScript'),
            # Register this script with the python runner.
            ('/settings/pytest/scripts', 'test_nrpe', 'test_nrpe.py'),
            # Static server settings.
            ('/settings/NRPE/test_nrpe_server', 'port', '15666'),
            ('/settings/NRPE/test_nrpe_server', 'inbox', 'nrpe_test_inbox'),
            ('/settings/NRPE/test_nrpe_server', 'encryption', '1'),
            # Static client settings.
            ('/settings/NRPE/test_nrpe_client/targets', 'nrpe_test_local', 'nrpe://127.0.0.1:15666'),
            ('/settings/NRPE/test_nrpe_client', 'channel', 'nrpe_test_outbox'),
    ):
        settings.set_string(section, key, value)
    settings.save()
|
||||
|
||||
def uninstall(self):
    """Nothing to clean up for this test case."""
    pass
|
||||
|
||||
def help(self):
    """No extra help text is provided for this test case."""
    pass
|
||||
|
||||
def shutdown(self):
    """Nothing to tear down on shutdown."""
    pass
|
||||
|
||||
def require_boot(self):
    # The NRPE server/client modules must be loaded at boot for this
    # test to be able to reload them.
    return True
|
||||
|
||||
# Register the test case as a singleton so the static plugin callbacks can
# find the running instance.
setup_singleton(NRPEServerTest)

# All test cases exposed by this script.
all_tests = [NRPEServerTest]

def __main__(args):
    # Entry point used when the script is executed to install the test cases.
    install_testcases(all_tests)

def init(plugin_id, plugin_alias, script_alias):
    # Called by NSClient++ when the script is loaded.
    init_testcases(plugin_id, plugin_alias, script_alias, all_tests)

def shutdown():
    # Called by NSClient++ when the script is unloaded.
    shutdown_testcases()
|
363
bibliotheque/files/nsclient/scripts/python/test_nsca.py
Normal file
363
bibliotheque/files/nsclient/scripts/python/test_nsca.py
Normal file
@ -0,0 +1,363 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, log_debug, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
import plugin_pb2
|
||||
from types import *
|
||||
import socket
|
||||
import uuid
|
||||
import unicodedata
|
||||
|
||||
import threading
# Guards the shared response map; NSCP handlers run on plugin worker threads.
sync = threading.RLock()
|
||||
|
||||
def isOpen(ip, port):
    """Return True when a TCP connection to (ip, port) can be established.

    Any failure (refused, timeout, bad address) yields False; the bare
    except is intentional best-effort probing.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    except:
        return False
    finally:
        s.close()  # fix: the original leaked the socket on both paths
|
||||
|
||||
class NSCAMessage:
    """Tracking record for one NSCA message, keyed by its command/uuid."""
    # Class-level defaults; __init__ overwrites all of them per instance.
    uuid = None
    source = None
    command = None
    status = None
    message = None
    perfdata = None
    got_response = False
    got_simple_response = False

    def __init__(self, command):
        # Fix: the original compared type(command) to the *string* 'unicode',
        # which is never true, so the ascii-normalisation branch was dead.
        # Compare the type name instead (preserves the Python 2 intent; on
        # Python 3 the else branch runs, as before).
        if type(command).__name__ == 'unicode':
            try:
                self.uuid = command.decode('ascii', 'replace')
            except UnicodeDecodeError:
                self.uuid = command
        else:
            self.uuid = command
        #self.uuid = unicodedata.normalize('NFKD', command).encode('ascii','ignore')
        self.command = command
        self.source = None
        self.status = None
        self.message = None
        self.perfdata = None
        self.got_response = False
        self.got_simple_response = False

    def copy_changed_attributes(self, other):
        # Merge fields another (partial) record has set. Truthy-only checks:
        # empty strings or a 0 status do not overwrite existing values.
        if other.source:
            self.source = other.source
        if other.status:
            self.status = other.status
        if other.message:
            self.message = other.message
        if other.perfdata:
            self.perfdata = other.perfdata
        if other.got_response:
            self.got_response = True
        if other.got_simple_response:
            self.got_simple_response = True

    def __str__(self):
        return 'Message: %s (%s, %s, %s)'%(self.uuid, self.source, self.command, self.status)
|
||||
|
||||
class NSCAServerTest(BasicTest):
    """End-to-end NSCA test: a local NSCA server and client are configured
    against each other and payloads are pushed through every supported
    crypto/payload-length combination."""
    instance = None     # singleton instance (see setup_singleton)
    key = ''            # per-run command key prefix
    reg = None          # NSCP Registry handle
    conf = None         # NSCP Settings handle
    core = None         # NSCP Core handle
    _responses = {}     # uuid -> NSCAMessage, shared with handler threads

    def has_response(self, id):
        # Thread-safe membership probe.
        with sync:
            return id in self._responses

    def get_response(self, id):
        # Fetch (or lazily create) the tracking record for a message id.
        with sync:
            if id in self._responses:
                return self._responses[id]
            msg = NSCAMessage(id)
            self._responses[id] = msg
            return msg

    def set_response(self, msg):
        # Merge a (possibly partial) delivery into the tracking map.
        with sync:
            if msg.uuid in self._responses:
                self._responses[msg.uuid].copy_changed_attributes(msg)
            else:
                self._responses[msg.uuid] = msg

    def del_response(self, id):
        # Drop a consumed record; raises KeyError when absent.
        with sync:
            del self._responses[id]

    def desc(self):
        return 'Testcase for NSCA protocol'

    def title(self):
        return 'NSCA Server test'

    def setup(self, plugin_id, prefix):
        # Subscribe both the simple and the raw handler to the server inbox.
        self.key = '_%stest_command'%prefix
        self.reg = Registry.get(plugin_id)
        self.reg.simple_subscription('nsca_test_inbox', NSCAServerTest.simple_inbox_handler)
        self.reg.subscription('nsca_test_inbox', NSCAServerTest.inbox_handler)

    def simple_inbox_handler(channel, source, command, code, message, perf):
        # Static trampoline into the singleton (NSCP callbacks are unbound).
        instance = NSCAServerTest.getInstance()
        return instance.simple_inbox_handler_wrapped(channel, source, command, code, message, perf)
    simple_inbox_handler = Callable(simple_inbox_handler)

    def inbox_handler(channel, request):
        # Static trampoline for the raw (protobuf) subscription.
        instance = NSCAServerTest.getInstance()
        return instance.inbox_handler_wrapped(channel, request)
    inbox_handler = Callable(inbox_handler)

    def simple_inbox_handler_wrapped(self, channel, source, command, status, message, perf):
        # Record a fully-decoded message arriving on the simple channel.
        log_debug('Got message %s on %s'%(command, channel))
        msg = NSCAMessage(command)
        msg.source = source
        msg.status = status
        msg.message = message
        msg.perfdata = perf
        msg.got_simple_response = True
        self.set_response(msg)
        return True

    def inbox_handler_wrapped(self, channel, request):
        # Record the raw protobuf delivery; only single-payload messages
        # are considered valid.
        message = plugin_pb2.SubmitRequestMessage()
        message.ParseFromString(request)
        if len(message.payload) != 1:
            log_error("Got invalid message on channel: %s"%channel)
            return None
        command = message.payload[0].command
        log_debug('Got message %s on %s'%(command, channel))

        msg = NSCAMessage(command)
        msg.got_response = True
        self.set_response(msg)
        return None

    def teardown(self):
        None

    def wait_and_validate(self, uuid, result, msg, perf, tag):
        # Poll up to ~2s for the first delivery, then up to ~5s more for both
        # the raw and the simple delivery, and validate the content.
        found = False
        for i in range(0,10):
            if not self.has_response(uuid):
                log_debug('Waiting for %s (%d/10)'%(uuid, i+1))
                sleep(200)
            else:
                log_debug('Got response %s'%uuid)
                found = True
                break
        if not found:
            result.add_message(False, 'Failed to recieve message %s using %s'%(uuid, tag))
            return False

        for i in range(0,10):
            rmsg = self.get_response(uuid)
            if not rmsg.got_simple_response or not rmsg.got_response:
                log_debug('Waiting for delayed response %s s/m: %s/%s - (%d/10)'%(uuid, rmsg.got_simple_response, rmsg.got_response, i+1))
                sleep(500)
            else:
                log_debug('Got delayed response %s'%uuid)
                break

        result.add_message(rmsg.got_response, 'Testing to recieve message using %s'%tag)
        result.add_message(rmsg.got_simple_response, 'Testing to recieve simple message using %s'%tag)
        result.assert_equals(rmsg.command, uuid, 'Verify that command is sent through using %s'%tag)
        result.assert_contains(rmsg.message, msg, 'Verify that message is sent through using %s'%tag)

        #result.assert_equals(rmsg.last_source, source, 'Verify that source is sent through')
        #result.assert_equals(rmsg.perfdata, perf, 'Verify that performance data is sent through using %s'%tag)
        self.del_response(uuid)
        return True

    def submit_payload(self, encryption, target, length, source, status, msg, perf, tag):
        # Build a SubmitRequestMessage and push it through the core API.
        message = plugin_pb2.SubmitRequestMessage()

        message.header.recipient_id = target
        message.channel = 'nsca_test_outbox'
        host = message.header.hosts.add()
        host.id = target
        if (target == 'valid'):
            pass
        else:
            # NOTE(review): reconstructed nesting — the transport-override
            # metadata appears to belong to the non-'valid' branch (mirrors
            # submit_via_exec); confirm against upstream test_nsca.py.
            host.address = "127.0.0.1:15667"
            enc = host.metadata.add()
            enc.key = "encryption"
            enc.value = encryption
            enc = host.metadata.add()
            enc.key = "password"
            enc.value = 'pwd-%s'%encryption
            enc = host.metadata.add()
            enc.key = "payload length"
            enc.value = '%d'%length

        uid = str(uuid.uuid4())
        payload = message.payload.add()
        payload.result = status
        payload.command = uid
        line = payload.lines.add()
        line.message = '%s - %s'%(uid, msg)
        payload.source = source
        (result_code, err) = self.core.submit('nsca_test_outbox', message.SerializeToString())

        result = TestResult('Testing payload submission (via API): %s'%tag)
        result.assert_equals(result_code, True, 'Submission (%s) return ok status'%tag)
        result.assert_equals(err, 'Submission successful', 'Submission (%s) returned correct status'%tag)
        self.wait_and_validate(uid, result, msg, perf, '%s/spb'%tag)
        return result

    def submit_via_exec(self, encryption, target, length, source, status, msg, perf, tag):
        # Same submission, but through the command-line 'nsca_submit' wrapper.
        uid = str(uuid.uuid4())

        args = [
            #'--exec', 'submit',
            '--alias', uid,
            '--result', '%d'%status,
            '--retries', '0',
            '--message', '%s - %s'%(uid, msg),
            '--target', target,
        ]
        if (target == 'valid'):
            pass
        else:
            args.extend([
                '--address', '127.0.0.1:15667',
                '--encryption', encryption,
                '--password', 'pwd-%s'%encryption,
                '--payload-length', '%d'%length,
            ])
        (result_code, result_message) = self.core.simple_exec('test_nsca_client', 'nsca_submit', args)
        result = TestResult('Testing payload submission (via command line exec): %s'%tag)

        result.add_message(result_code == 0, 'Testing to send message using %s/exec:1'%tag)
        result.add_message(len(result_message) == 1, 'Testing to send message using %s/exec:2'%tag)
        if len(result_message) == 1:
            result.assert_equals(result_message[0], "Submission successful", 'Testing to send message using %s/exec:3'%tag)
        self.wait_and_validate(uid, result, msg, perf, '%s/exec'%tag)
        return result

    def test_one_crypto_full(self, encryption, state, key, target, length):
        # Exercise both submission paths for one crypto/state/target tuple.
        result = TestResult('Testing %s/%s'%(encryption, key))
        result.add(self.submit_payload(encryption, target, length, '%ssrc%s'%(key, key), state, '%smsg%s'%(key, key), '', '%s/%s/%d/%s'%(state, encryption, length, target)))
        result.add(self.submit_via_exec(encryption, target, length, '%ssrc%s'%(key, key), state, '%smsg%s'%(key, key), '', '%s/%s/%d/%s'%(state, encryption, length, target)))
        return result

    def test_one_crypto(self, crypto, length=512):
        # Reconfigure server and client for one crypto/length pair; only the
        # 'valid' target matches the server's encryption/password/port.
        log('Testing: %s %d'%(crypto, length))
        conf = self.conf
        conf.set_string('/settings/NSCA/test_nsca_server', 'encryption', '%s'%crypto)
        conf.set_string('/settings/NSCA/test_nsca_server', 'password', 'pwd-%s'%crypto)
        conf.set_int('/settings/NSCA/test_nsca_server', 'payload length', length)
        self.core.reload('test_nsca_server')

        conf.set_string('/settings/NSCA/test_nsca_client/targets/default', 'address', 'nsca://127.0.0.1:35667')
        conf.set_string('/settings/NSCA/test_nsca_client/targets/default', 'encryption', '%s'%crypto)
        conf.set_string('/settings/NSCA/test_nsca_client/targets/default', 'password', 'default-%s'%crypto)
        conf.set_int('/settings/NSCA/test_nsca_client/targets/default', 'payload length', length*3)

        conf.set_string('/settings/NSCA/test_nsca_client/targets/invalid', 'address', 'nsca://127.0.0.1:25667')
        conf.set_string('/settings/NSCA/test_nsca_client/targets/invalid', 'encryption', 'none')
        conf.set_string('/settings/NSCA/test_nsca_client/targets/invalid', 'password', 'invalid-%s'%crypto)
        conf.set_int('/settings/NSCA/test_nsca_client/targets/invalid', 'payload length', length*2)

        conf.set_string('/settings/NSCA/test_nsca_client/targets/valid', 'address', 'nsca://127.0.0.1:15667')
        conf.set_string('/settings/NSCA/test_nsca_client/targets/valid', 'encryption', '%s'%crypto)
        conf.set_string('/settings/NSCA/test_nsca_client/targets/valid', 'password', 'pwd-%s'%crypto)
        conf.set_int('/settings/NSCA/test_nsca_client/targets/valid', 'payload length', length)
        self.core.reload('test_nsca_client')

        result = TestResult('Testing: %s/%d'%(crypto, length))
        result.add_message(isOpen('localhost', 15667), 'Checking that port is open')
        for target in ['valid', 'test_rp', 'invalid']:
            result.add(self.test_one_crypto_full(crypto, status.UNKNOWN, 'unknown', target, length))
            result.add(self.test_one_crypto_full(crypto, status.OK, 'ok', target, length))
            result.add(self.test_one_crypto_full(crypto, status.WARNING, 'warn', target, length))
            result.add(self.test_one_crypto_full(crypto, status.CRITICAL, 'crit', target, length))
        return result

    def run_test(self, cases=None):
        # cases: optional list of 'crypto' or 'crypto-length' filter strings;
        # when omitted every crypto is run at every length.
        result = TestResult()
        cryptos = ["none", "xor", "des", "3des", "cast128", "xtea", "blowfish", "twofish", "rc2", "aes", "aes256", "aes192", "aes128", "serpent", "gost", "3way"]
        for c in cryptos:
            run_l = None
            run_this = False
            if cases:
                tmp_l = None
                for case in cases:
                    if '-' in case:
                        # NOTE(review): maxsplit=2 would yield three fields for
                        # a two-dash case string — presumably cases contain at
                        # most one dash; confirm.
                        (run_c, tmp_l) = case.split('-', 2)
                    else:
                        run_c = case
                    if c == run_c:
                        run_l = int(tmp_l) if tmp_l else None
                        run_this = True
                if not run_this:
                    result.add_message(True, 'Ignoring: %s-*'%c)
                    continue
            for l in [128, 512, 1024, 4096]:
                if not run_l or run_l == l:
                    result.add(self.test_one_crypto(c, l))
                else:
                    result.add_message(True, 'Ignoring: %s-%s'%(c, l))

        return result

    def install(self, arguments):
        # Persist static module/server/client configuration.
        conf = self.conf
        conf.set_string('/modules', 'test_nsca_server', 'NSCAServer')
        conf.set_string('/modules', 'test_nsca_client', 'NSCAClient')
        conf.set_string('/modules', 'pytest', 'PythonScript')

        conf.set_string('/settings/pytest/scripts', 'test_nsca', 'test_nsca.py')

        conf.set_string('/settings/NSCA/test_nsca_server', 'port', '15667')
        conf.set_string('/settings/NSCA/test_nsca_server', 'inbox', 'nsca_test_inbox')
        conf.set_string('/settings/NSCA/test_nsca_server', 'encryption', '1')

        conf.set_string('/settings/NSCA/test_nsca_client', 'channel', 'nsca_test_outbox')

        conf.save()

    def uninstall(self):
        None

    def help(self):
        None

    def init(self, plugin_id, prefix):
        # Cache per-plugin handles used by the test run.
        self.key = '_%stest_command'%prefix
        self.reg = Registry.get(plugin_id)
        self.core = Core.get(plugin_id)
        self.conf = Settings.get(plugin_id)

    def shutdown(self):
        None

    def require_boot(self):
        return True
|
||||
|
||||
|
||||
# Register the test case as a singleton so the static callbacks can find it.
setup_singleton(NSCAServerTest)

# All test cases exposed by this script.
all_tests = [NSCAServerTest]

def __main__(args):
    # Entry point used when the script is executed to install the test cases.
    install_testcases(all_tests)

def init(plugin_id, plugin_alias, script_alias):
    # Called by NSClient++ when the script is loaded.
    init_testcases(plugin_id, plugin_alias, script_alias, all_tests)

def shutdown():
    # Called by NSClient++ when the script is unloaded.
    shutdown_testcases()
|
273
bibliotheque/files/nsclient/scripts/python/test_nscp.py
Normal file
273
bibliotheque/files/nsclient/scripts/python/test_nscp.py
Normal file
@ -0,0 +1,273 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
import sys
|
||||
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
import plugin_pb2
|
||||
from types import *
|
||||
import socket
|
||||
import uuid
|
||||
import unicodedata
|
||||
|
||||
import threading
# Guards the shared response/request maps across handler threads.
sync = threading.RLock()

# Module-wide core handle (no plugin id: the global core).
core = Core.get()
|
||||
|
||||
def isOpen(ip, port):
    """Return True when a TCP connection to (ip, port) can be established.

    Any failure (refused, timeout, bad address) yields False; the bare
    except is intentional best-effort probing.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    except:
        return False
    finally:
        s.close()  # fix: the original leaked the socket on both paths
|
||||
|
||||
class NSCPMessage:
    """Tracking record for one NSCP query/response, keyed by command/uuid."""
    # Class-level defaults; __init__ only sets uuid and command, the rest
    # are filled in by the test as it observes deliveries.
    uuid = None
    source = None
    command = None
    status = None
    message = None
    perfdata = None
    got_simple_response = False
    got_response = False

    def __init__(self, command):
        # Fix: the original compared type(command) to the *string* 'unicode',
        # which is never true, so the ascii-normalisation branch was dead.
        # Compare the type name instead (preserves the Python 2 intent; on
        # Python 3 the else branch runs, as before).
        if type(command).__name__ == 'unicode':
            try:
                self.uuid = command.decode('ascii', 'replace')
            except UnicodeDecodeError:
                self.uuid = command
        else:
            self.uuid = command
        #self.uuid = unicodedata.normalize('NFKD', command).encode('ascii','ignore')
        self.command = command

    def __str__(self):
        return 'Message: %s (%s, %s, %s)'%(self.uuid, self.source, self.command, self.status)
|
||||
|
||||
class NSCPServerTest(BasicTest):
    """Round-trip NSCP test: queries are forwarded through a local NSCP
    client/server pair and the scripted replies are validated."""
    instance = None     # singleton instance
    key = ''            # per-run command key prefix
    reg = None          # NSCP Registry handle
    _responses = {}     # uuid -> NSCPMessage (deliveries observed)
    _requests = {}      # uuid -> NSCPMessage (replies to script back)

    def has_response(self, id):
        # Thread-safe membership probe.
        with sync:
            return id in self._responses

    def get_response(self, id):
        # Fetch (or lazily create) the response record for an id.
        with sync:
            if id in self._responses:
                return self._responses[id]
            msg = NSCPMessage(id)
            self._responses[id] = msg
            return msg

    def set_response(self, msg):
        with sync:
            self._responses[msg.uuid] = msg

    def del_response(self, id):
        with sync:
            del self._responses[id]

    def get_request(self, id):
        # Fetch (or lazily create) the scripted reply for an id.
        with sync:
            if id in self._requests:
                return self._requests[id]
            msg = NSCPMessage(id)
            self._requests[id] = msg
            return msg

    def set_request(self, msg):
        with sync:
            self._requests[msg.uuid] = msg

    def del_request(self, id):
        with sync:
            del self._requests[id]

    def desc(self):
        return 'Testcase for NSCP protocol'

    def title(self):
        return 'NSCP Client/Server test'

    def setup(self, plugin_id, prefix):
        # Register the query handlers the NSCP server will dispatch to.
        self.key = '_%stest_command'%prefix
        self.reg = Registry.get(plugin_id)
        self.reg.simple_function('check_py_nscp_test_s', NSCPServerTest.simple_handler, 'TODO')
        self.reg.function('check_py_nscp_test', NSCPServerTest.handler, 'TODO')

    def simple_handler(arguments):
        # Static trampoline into the singleton.
        instance = NSCPServerTest.getInstance()
        return instance.simple_handler_wrapped(arguments)
    simple_handler = Callable(simple_handler)

    def handler(channel, request):
        # Static trampoline for the raw (protobuf) function.
        instance = NSCPServerTest.getInstance()
        return instance.handler_wrapped(channel, request)
    handler = Callable(handler)

    def simple_handler_wrapped(self, arguments):
        # Mark the uuid (first argument) as delivered and echo back the
        # scripted status/message/perfdata for it.
        log('Got simple message %s'%arguments)
        msg = self.get_response(arguments[0])
        msg.got_simple_response = True
        self.set_response(msg)
        rmsg = self.get_request(arguments[0])
        return (rmsg.status, rmsg.message, rmsg.perfdata)

    def handler_wrapped(self, channel, request):
        # Raw handler: only records that the message arrived.
        log_error('DISCARDING message on %s'%(channel))

        message = plugin_pb2.SubmitRequestMessage()
        message.ParseFromString(request)
        command = message.payload[0].command
        log('Got message %s on %s'%(command, channel))

        msg = self.get_response(command)
        msg.got_response = True
        self.set_response(msg)
        return None

    def teardown(self):
        None

    def submit_payload(self, alias, ssl, source, status, msg, perf, target):
        # Forward a query through 'nscp_forward' and verify the scripted
        # reply comes back intact.
        message = plugin_pb2.QueryRequestMessage()

        message.header.version = plugin_pb2.Common.VERSION_1
        message.header.recipient_id = target
        host = message.header.hosts.add()
        host.address = "127.0.0.1:15668"
        host.id = target
        if (target == 'valid'):
            pass
        else:
            # NOTE(review): reconstructed nesting — the transport overrides
            # appear to belong to the non-'valid' branch; confirm upstream.
            enc = host.metadata.add()
            enc.key = "use ssl"
            enc.value = '%s'%ssl
            enc = host.metadata.add()
            enc.key = "timeout"
            enc.value = '5'

        uid = str(uuid.uuid4())
        payload = message.payload.add()
        payload.command = 'check_py_nscp_test_s'
        payload.arguments.append(uid)
        # Script the reply the simple handler should produce for this uid.
        rmsg = self.get_request(uid)
        rmsg.status = status
        rmsg.message = msg
        rmsg.perfdata = perf
        self.set_request(rmsg)
        (result_code, response) = core.query('nscp_forward', message.SerializeToString())
        response_message = plugin_pb2.QueryResponseMessage()
        response_message.ParseFromString(response)
        result = TestResult('Testing NSCP: %s for %s'%(alias, target))

        found = False
        for i in range(0,10):
            if self.has_response(uid):
                rmsg = self.get_response(uid)
                #result.add_message(rmsg.got_response, 'Testing to recieve message using %s'%alias)
                result.add_message(rmsg.got_simple_response, 'Testing to recieve simple message using %s'%alias)
                result.add_message(len(response_message.payload) == 1, 'Verify that we only get one payload response for %s'%alias, '%s != 1'%len(response_message.payload))
                result.assert_equals(response_message.payload[0].result, status, 'Verify that status is sent through %s'%alias)
                result.assert_equals(response_message.payload[0].message, msg, 'Verify that message is sent through %s'%alias)
                #result.assert_equals(rmsg.perfdata, perf, 'Verify that performance data is sent through')
                self.del_response(uid)
                found = True
                break
            else:
                log('Waiting for %s (%s/%s)'%(uid,alias,target))
                sleep(500)
        if not found:
            result.add_message(False, 'Testing to recieve message using %s'%alias)
        return result

    def test_one(self, ssl=True, state = status.UNKNOWN, tag = 'TODO'):
        # Only the 'valid' target is currently exercised.
        result = TestResult('Testing NSCP: %s/%s with various targets'%(ssl, tag))
        #for t in ['valid', 'test_rp', 'invalid']:
        for t in ['valid']:
            result.add(self.submit_payload('%s/%s'%(ssl, tag), ssl, '%ssrc%s'%(tag, tag), state, '%smsg%s'%(tag, tag), '', t))
        return result

    def do_one_test(self, ssl=True):
        # Configure the server and all client targets for one ssl mode.
        conf = Settings.get()
        conf.set_bool('/settings/nscp/test_nscp_server', 'use ssl', ssl)
        conf.set_bool('/settings/nscp/test_nscp_server', 'allow arguments', True)
        # TODO: conf.set_string('/settings/nscp/test_nscp_server', 'certificate', ssl)
        core.reload('test_nscp_server')

        conf.set_string('/settings/nscp/test_nscp_client/targets/default', 'address', 'nscp://127.0.0.1:35668')
        conf.set_bool('/settings/nscp/test_nscp_client/targets/default', 'use ssl', not ssl)

        conf.set_string('/settings/nscp/test_nscp_client/targets/invalid', 'address', 'nscp://127.0.0.1:25668')
        conf.set_bool('/settings/nscp/test_nscp_client/targets/invalid', 'use ssl', not ssl)

        conf.set_string('/settings/nscp/test_nscp_client/targets/valid', 'address', 'nscp://127.0.0.1:15668')
        conf.set_bool('/settings/nscp/test_nscp_client/targets/valid', 'use ssl', ssl)
        core.reload('test_nscp_client')

        result = TestResult()
        #result.add_message(isOpen('127.0.0.1', 15668), 'Checking that port is open (server is up)')
        #result.add(self.test_one(ssl, length, state = status.UNKNOWN, tag = 'unknown'))
        result.add(self.test_one(ssl, state = status.OK, tag = 'ok'))
        #result.add(self.test_one(ssl, length, state = status.WARNING, tag = 'warn'))
        #result.add(self.test_one(ssl, length, state = status.CRITICAL, tag = 'crit'))
        return result

    def run_test(self):
        # Only the non-ssl path is currently enabled.
        result = TestResult()
        result.add(self.do_one_test(ssl=False))
        #result.add(self.do_one_test(ssl=True))
        #result.add(self.do_one_test(ssl=True, length=4096))
        #result.add(self.do_one_test(ssl=True, length=65536))
        #result.add(self.do_one_test(ssl=True, length=1048576))
        return result

    def install(self, arguments):
        # Persist static module/server/client configuration.
        conf = Settings.get()
        conf.set_string('/modules', 'test_nscp_server', 'NSCPServer')
        conf.set_string('/modules', 'test_nscp_client', 'NSCPClient')
        conf.set_string('/modules', 'pytest', 'PythonScript')

        conf.set_string('/settings/pytest/scripts', 'test_nscp', 'test_nscp.py')

        conf.set_string('/settings/nscp/test_nscp_server', 'port', '15668')
        conf.set_string('/settings/nscp/test_nscp_server', 'inbox', 'nscp_test_inbox')

        conf.set_string('/settings/nscp/test_nscp_client/targets', 'nscp_test_local', 'nscp://127.0.0.1:15668')
        conf.set_string('/settings/nscp/test_nscp_client', 'channel', 'nscp_test_outbox')

        conf.save()

    def uninstall(self):
        None

    def help(self):
        None

    def init(self, plugin_id):
        None

    def shutdown(self):
        None

    def require_boot(self):
        return True
|
||||
|
||||
# Register the test case as a singleton so the static callbacks can find it.
setup_singleton(NSCPServerTest)

# All test cases exposed by this script.
all_tests = [NSCPServerTest]

def __main__():
    # Entry point used when the script is executed to install the test cases.
    install_testcases(all_tests)

def init(plugin_id, plugin_alias, script_alias):
    # Called by NSClient++ when the script is loaded.
    init_testcases(plugin_id, plugin_alias, script_alias, all_tests)

def shutdown():
    # Called by NSClient++ when the script is unloaded.
    shutdown_testcases()
|
140
bibliotheque/files/nsclient/scripts/python/test_pb.py
Normal file
140
bibliotheque/files/nsclient/scripts/python/test_pb.py
Normal file
@ -0,0 +1,140 @@
|
||||
from NSCP import Settings, Registry, Core, log, status
|
||||
from test_helper import Callable, TestResult, get_test_manager, create_test_manager
|
||||
from types import *
|
||||
|
||||
class ChannelTest:
    """In-process test of channel plumbing: whatever is submitted on the test
    channel must be observable via the matching query command."""
    instance = None
    channel = ''        # per-run channel/command name
    reg = None          # NSCP Registry handle

    # Last values seen by the submission handler.
    last_channel = ''
    last_command = ''
    last_status = status.UNKNOWN
    last_message = ''
    last_perf = ''

    instance = None
    class SingletonHelper:
        # Callable singleton accessor (pre-dates the setup_singleton helper).
        def __call__( self, *args, **kw ) :
            if ChannelTest.instance is None :
                object = ChannelTest()
                ChannelTest.instance = object
            return ChannelTest.instance

    getInstance = SingletonHelper()

    def title(self):
        return 'Channel Test'

    def desc(self):
        return 'Testing that channels work'

    def test_submission_handler_001(channel, source, command, code, message, perf):
        # Static submission callback: record what arrived.
        log('Got messgae on %s'%channel)
        instance = ChannelTest.getInstance()
        instance.set_last(channel, command, code, message, perf)
    test_submission_handler_001 = Callable(test_submission_handler_001)

    def test_command_handler_001(arguments):
        # Static query callback: echo back the last recorded values.
        instance = ChannelTest.getInstance()
        return (instance.last_status, '%s'%instance.last_message, '%s'%instance.last_perf)
    test_command_handler_001 = Callable(test_command_handler_001)

    def setup(self, plugin_id, prefix):
        # One name serves both as the submission channel and the query command.
        self.channel = '_%stest_channel'%prefix
        self.reg = Registry.get(plugin_id)
        self.reg.simple_subscription(self.channel, ChannelTest.test_submission_handler_001)
        self.reg.simple_function(self.channel, ChannelTest.test_command_handler_001, 'This is a sample command')

    def teardown(self):
        None
        #self.reg.unregister_simple_subscription('%s_001'%self.channel)
        #self.reg.unregister_simple_function('%s_001'%self.channel)

    def reset_last(self):
        # Clear recorded state before each round trip.
        self.last_channel = None
        self.last_command = None
        self.last_status = None
        self.last_message = None
        self.last_perf = None

    def set_last(self, channel, command, status, message, perf):
        self.last_channel = channel
        self.last_command = command
        self.last_status = status
        self.last_message = message
        self.last_perf = perf

    def test_simple(self, command, code, message, perf, tag):
        # Submit on the channel, then query it, asserting both round trips.
        result = TestResult()
        core = Core.get()
        self.reset_last()
        (ret, msg) = core.simple_submit(self.channel, '%s'%command, code, '%s'%message, '%s'%perf)
        result.add_message(ret, 'Testing channels: %s'%tag, msg)
        r1 = TestResult()
        r1.assert_equals(self.last_status, code, 'Return code')
        r1.assert_equals(self.last_message, message, 'Message')
        r1.assert_equals(self.last_perf, perf, 'Performance data')
        result.add(r1)

        self.set_last('', '', code, message, perf)
        (retcode, retmessage, retperf) = core.simple_query(self.channel, [])
        result.add_message(True, 'Testing queries: %s'%tag)
        r2 = TestResult()
        r2.assert_equals(self.last_status, code, 'Return code')
        r2.assert_equals(self.last_message, message, 'Message')
        r2.assert_equals(self.last_perf, perf, 'Performance data')
        result.add(r2)
        return result

    def run_test(self):
        # Round-trip every status plus increasingly complex perf-data forms.
        result = TestResult()
        result.add(self.test_simple('foobar', status.OK, 'qwerty', '', 'simple ok'))
        result.add(self.test_simple('foobar', status.WARNING, 'foobar', '', 'simple warning'))
        result.add(self.test_simple('foobar', status.CRITICAL, 'test', '', 'simple critical'))
        result.add(self.test_simple('foobar', status.UNKNOWN, '1234567890', '', 'simple unknown'))
        result.add(self.test_simple('foobar', status.OK, 'qwerty', "'foo'=5%", 'simple performance data 001'))
        result.add(self.test_simple('foobar', status.OK, 'qwerty', "'foo'=5%;10", 'simple performance data 002'))
        result.add(self.test_simple('foobar', status.OK, 'qwerty', "'foo'=5%;10;23", 'simple performance data 003'))
        result.add(self.test_simple('foobar', status.OK, 'qwerty', "'foo'=5%;10;23;10;78", 'simple performance data 004'))
        result.add(self.test_simple('foobar', status.OK, 'qwerty', "'foo'=5%;10;23;10;78 'bar'=1k;2;3", 'simple performance data 005'))
        return result

    def install(self, arguments):
        # Persist the minimal module/script configuration.
        conf = Settings.get()
        conf.set_string('/modules', 'pytest', 'PythonScript')

        conf.set_string('/settings/pytest/scripts', 'test_pb', 'test_pb.py')

        conf.save()

    def uninstall(self):
        None

    def help(self):
        None

    def init(self, plugin_id):
        None

    def shutdown(self):
        None
|
||||
|
||||
|
||||
all_tests = [ChannelTest]
|
||||
|
||||
def __main__():
|
||||
test_manager = create_test_manager()
|
||||
test_manager.add(all_tests)
|
||||
test_manager.install()
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
test_manager = create_test_manager(plugin_id, plugin_alias, script_alias)
|
||||
test_manager.add(all_tests)
|
||||
|
||||
test_manager.init()
|
||||
|
||||
def shutdown():
|
||||
test_manager = get_test_manager()
|
||||
test_manager.shutdown()
|
117
bibliotheque/files/nsclient/scripts/python/test_python.py
Normal file
117
bibliotheque/files/nsclient/scripts/python/test_python.py
Normal file
@ -0,0 +1,117 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, log_debug, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
from time import time
|
||||
|
||||
install_checks = 100
|
||||
time_to_run = 30
|
||||
|
||||
class PythonTest(BasicTest):
|
||||
|
||||
noop_count = 0
|
||||
stress_count = 0
|
||||
|
||||
key = ''
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
|
||||
def noop_handler(arguments):
|
||||
instance = PythonTest.getInstance()
|
||||
instance.noop_count = instance.noop_count + 1
|
||||
return (status.OK, 'Got call %d'%instance.noop_count, '')
|
||||
noop_handler = Callable(noop_handler)
|
||||
|
||||
|
||||
def stress_handler(channel, source, command, code, message, perf):
|
||||
instance = PythonTest.getInstance()
|
||||
instance.stress_count = instance.stress_count + 1
|
||||
log_debug('Got message %d/%d on %s'%(instance.stress_count, instance.noop_count, channel))
|
||||
stress_handler = Callable(stress_handler)
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for python script module'
|
||||
|
||||
def title(self):
|
||||
return 'PythonScript tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
log('Loading Python unit tests')
|
||||
self.key = '_%stest_command'%prefix
|
||||
self.reg.simple_function('py_stress_noop', PythonTest.noop_handler, 'This is a simple noop command')
|
||||
self.reg.simple_subscription('py_stress_test', PythonTest.stress_handler)
|
||||
self.conf.set_string('/settings/test_scheduler', 'threads', '50')
|
||||
self.core.reload('test_scheduler')
|
||||
|
||||
|
||||
def teardown(self):
|
||||
self.conf.set_string('/settings/test_scheduler', 'threads', '0')
|
||||
self.core.reload('test_scheduler')
|
||||
None
|
||||
|
||||
def run_test(self):
|
||||
result = TestResult()
|
||||
start = time()
|
||||
total_count = install_checks*time_to_run/5
|
||||
while self.stress_count < total_count:
|
||||
log('Waiting for %d: %d/%d'%(total_count, self.stress_count, self.noop_count))
|
||||
old_stress_count = self.stress_count
|
||||
old_noop_count = self.noop_count
|
||||
sleep(5000)
|
||||
result.add_message(True, 'Commands/second: %d/%d'%( (self.stress_count-old_stress_count)/5, (self.noop_count-old_noop_count)/5 ) )
|
||||
elapsed = (time() - start)
|
||||
if elapsed == 0:
|
||||
elapsed = 1
|
||||
result.add_message(True, 'Summary Collected %d instance in %d seconds: %d/s'%(self.stress_count, elapsed, self.stress_count/elapsed))
|
||||
return result
|
||||
|
||||
def install(self, arguments):
|
||||
self.conf.set_string('/modules', 'test_scheduler', 'Scheduler')
|
||||
self.conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
|
||||
self.conf.set_string('/settings/pytest/scripts', 'test_python', 'test_python.py')
|
||||
|
||||
base_path = '/settings/test_scheduler'
|
||||
self.conf.set_string(base_path, 'threads', '0')
|
||||
|
||||
default_path = '%s/schedules/default'%base_path
|
||||
self.conf.set_string(default_path, 'channel', 'py_stress_test')
|
||||
#self.conf.set_string(default_path, 'alias', 'stress')
|
||||
self.conf.set_string(default_path, 'command', 'py_stress_noop')
|
||||
self.conf.set_string(default_path, 'interval', '5s')
|
||||
for i in range(1, install_checks):
|
||||
alias = 'stress_python_%i'%i
|
||||
self.conf.set_string('%s/schedules'%(base_path), alias, 'py_stress_noop')
|
||||
|
||||
self.conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.key = '_%stest_command'%prefix
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
None
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
setup_singleton(PythonTest)
|
||||
|
||||
all_tests = [PythonTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
||||
|
18
bibliotheque/files/nsclient/scripts/python/test_sample.py
Normal file
18
bibliotheque/files/nsclient/scripts/python/test_sample.py
Normal file
@ -0,0 +1,18 @@
|
||||
from test_helper import BasicTest, TestResult, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
|
||||
class SampleTest(BasicTest):
|
||||
pass
|
||||
|
||||
setup_singleton(SampleTest)
|
||||
|
||||
all_tests = [SampleTest]
|
||||
|
||||
def __main__():
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
||||
|
240
bibliotheque/files/nsclient/scripts/python/test_stress.py
Normal file
240
bibliotheque/files/nsclient/scripts/python/test_stress.py
Normal file
@ -0,0 +1,240 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_debug, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
from time import time
|
||||
import random
|
||||
import os
|
||||
|
||||
is_windows = False
|
||||
if os.name == 'nt':
|
||||
is_windows = True
|
||||
|
||||
check_per_second = 1000
|
||||
#time_to_run = 'infinate'
|
||||
time_to_run = 60 # in seconds
|
||||
use_threads = 100
|
||||
|
||||
route_via_nsca = True
|
||||
route_via_nrpe = True
|
||||
route_via_python = False
|
||||
|
||||
prefix = 'stress'
|
||||
|
||||
|
||||
class StressTest(BasicTest):
|
||||
|
||||
check_count = 0
|
||||
results_count = 0
|
||||
|
||||
sched_alias = 'test_sched_%s'%prefix
|
||||
nsca_server_alias = 'test_nsca_s_%s'%prefix
|
||||
nsca_client_alias = 'test_nsca_c_%s'%prefix
|
||||
nrpe_server_alias = 'test_nrpe_s_%s'%prefix
|
||||
nrpe_client_alias = 'test_nrpe_c_%s'%prefix
|
||||
python_channel = 'test_stress_%s_py'%prefix
|
||||
nsca_channel = 'test_stress_%s_nsca'%prefix
|
||||
command = 'test_stress_%s'%prefix
|
||||
nsca_port = 15568
|
||||
nrpe_port = 15566
|
||||
sched_base_path = '/settings/%s'%sched_alias
|
||||
|
||||
background = False
|
||||
|
||||
|
||||
checks = [
|
||||
['CheckCPU', ['MaxWarn=20', 'MaxCrit=20', '10s']],
|
||||
['CheckMEM', ['MaxWarn=20', 'MaxCrit=20']]
|
||||
]
|
||||
|
||||
def get_random_check(self):
|
||||
return random.choice(self.checks)
|
||||
|
||||
def random_check_handler(arguments):
|
||||
instance = StressTest.getInstance()
|
||||
return instance.wrapped_random_check_handler(arguments)
|
||||
random_check_handler = Callable(random_check_handler)
|
||||
|
||||
def wrapped_random_check_handler(self, arguments):
|
||||
global is_windows
|
||||
if is_windows:
|
||||
check = []
|
||||
if route_via_nrpe:
|
||||
# host=127.0.0.1 port=15566 command=CheckCPU arguments=MaxWarn=20 arguments=8s
|
||||
check_data = self.get_random_check()
|
||||
check[0] = 'nrpe_query'
|
||||
check[1] = ['host=127.0.0.1', 'port=%d'%self.nrpe_port, 'command=%s'%check_data[0]]
|
||||
for arg in check_data[1]:
|
||||
check[1].append('arguments=%s'%arg)
|
||||
else:
|
||||
check = self.get_random_check()
|
||||
self.check_count = self.check_count + 1
|
||||
return self.core.simple_query(check[0], check[1])
|
||||
else:
|
||||
return (status.OK, 'Got call %d'%(self.check_count), '')
|
||||
|
||||
def on_stress_handler(channel, source, command, code, message, perf):
|
||||
instance = StressTest.getInstance()
|
||||
instance.wrapped_on_stress_handler(channel, source, command, code, message, perf)
|
||||
on_stress_handler = Callable(on_stress_handler)
|
||||
|
||||
def wrapped_on_stress_handler(self, channel, source, command, code, message, perf):
|
||||
check = self.get_random_check()
|
||||
self.results_count = self.results_count + 1
|
||||
log_debug('Got result %s <%d/%d> on %s'%(message, self.results_count, self.check_count, channel))
|
||||
return None
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for stresstest script module'
|
||||
|
||||
def title(self):
|
||||
return 'StressTest tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg.simple_function(self.command, StressTest.random_check_handler, 'This is a simple noop command')
|
||||
self.reg.simple_subscription(self.python_channel, StressTest.on_stress_handler)
|
||||
self.conf.set_string(self.sched_base_path, 'threads', '%d'%use_threads)
|
||||
self.core.reload('%s,delayed'%self.sched_alias)
|
||||
|
||||
|
||||
def teardown(self):
|
||||
if not self.background:
|
||||
self.conf.set_string(self.sched_base_path, 'threads', '0')
|
||||
self.core.reload(self.sched_alias)
|
||||
|
||||
def run_test(self):
|
||||
global time_to_run, check_per_second
|
||||
result = TestResult()
|
||||
start = time()
|
||||
if isinstance(time_to_run, str) and time_to_run == 'infinate':
|
||||
time_to_run = -1
|
||||
elif isinstance(time_to_run, str):
|
||||
time_to_run = 5
|
||||
|
||||
if time_to_run == -1:
|
||||
total_count = -1
|
||||
else:
|
||||
total_count = check_per_second*time_to_run
|
||||
|
||||
if time_to_run != -1:
|
||||
self.background = False
|
||||
last_major = 0
|
||||
while self.results_count < total_count:
|
||||
old_stress_count = self.results_count
|
||||
old_noop_count = self.check_count
|
||||
sleep(5000)
|
||||
result.add_message(True, 'Commands/second: %d/%d'%( (self.results_count-old_stress_count)/5, (self.check_count-old_noop_count)/5 ) )
|
||||
if (self.results_count*100/total_count) > last_major + 10:
|
||||
last_major = last_major + 10
|
||||
log('%d%% Complete: %d checks per second <%d/%d>'%(self.results_count*100/total_count, (self.results_count-old_stress_count)/5, self.results_count, total_count))
|
||||
elapsed = (time() - start)
|
||||
if elapsed == 0:
|
||||
elapsed = 1
|
||||
result.add_message(True, 'Summary Collected %d instance in %d seconds: %d/s'%(self.results_count, elapsed, self.results_count/elapsed))
|
||||
else:
|
||||
self.background = True
|
||||
result.add_message(True, 'Test running in background, run py_unittest_collect to collect results at any time.')
|
||||
return result
|
||||
|
||||
def install(self, arguments):
|
||||
global is_windows, route_via_python, route_via_nsca, use_threads
|
||||
|
||||
# Configure required modules
|
||||
self.conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
self.conf.set_string('/modules', self.sched_alias, 'Scheduler')
|
||||
if is_windows:
|
||||
self.conf.set_string('/modules', 'CheckSystem', 'enabled')
|
||||
self.conf.set_string('/modules', 'CheckHelpers', 'enabled')
|
||||
|
||||
if route_via_nsca:
|
||||
self.conf.set_string('/modules', self.nsca_server_alias, 'NSCAServer')
|
||||
self.conf.set_string('/modules', self.nsca_client_alias, 'NSCAClient')
|
||||
|
||||
# Configure NSCA Server
|
||||
self.conf.set_string('/settings/NSCA/%s'%self.nsca_server_alias, 'port', '%d'%self.nsca_port)
|
||||
self.conf.set_string('/settings/NSCA/%s'%self.nsca_server_alias, 'inbox', self.python_channel)
|
||||
self.conf.set_string('/settings/NSCA/%s'%self.nsca_server_alias, 'encryption', 'xor')
|
||||
self.conf.set_string('/settings/NSCA/%s'%self.nsca_server_alias, 'password', 'HelloWorld')
|
||||
|
||||
# Configure NSCA Client
|
||||
self.conf.set_string('/settings/NSCA/%s/targets/default'%self.nsca_client_alias, 'address', 'nsca://127.0.0.1:%d'%self.nsca_port)
|
||||
self.conf.set_string('/settings/NSCA/%s/targets/default'%self.nsca_client_alias, 'encryption', 'xor')
|
||||
self.conf.set_string('/settings/NSCA/%s/targets/default'%self.nsca_client_alias, 'password', 'HelloWorld')
|
||||
self.conf.set_string('/settings/NSCA/%s'%self.nsca_client_alias, 'channel', self.nsca_channel)
|
||||
|
||||
if route_via_nrpe:
|
||||
self.conf.set_string('/modules', self.nrpe_server_alias, 'NRPEServer')
|
||||
self.conf.set_string('/modules', self.nrpe_client_alias, 'NRPEClient')
|
||||
|
||||
# Configure NRPE Server
|
||||
self.conf.set_string('/settings/NRPE/%s'%self.nrpe_server_alias, 'port', '%d'%self.nrpe_port)
|
||||
self.conf.set_string('/settings/NRPE/%s'%self.nrpe_server_alias, 'allow arguments', 'true')
|
||||
|
||||
# Configure NRPE Client
|
||||
self.conf.set_string('/settings/NRPE/%s/targets/default'%self.nsca_client_alias, 'address', 'nrpe://127.0.0.1:%d'%self.nrpe_port)
|
||||
|
||||
# Configure python
|
||||
self.conf.set_string('/settings/pytest/scripts', 'test_stress', 'test_stress.py')
|
||||
|
||||
# Configure Scheduler
|
||||
if route_via_python:
|
||||
self.conf.set_string(self.sched_base_path, 'threads', '0')
|
||||
else:
|
||||
self.conf.set_string(self.sched_base_path, 'threads', '50')
|
||||
|
||||
default_path = '%s/schedules/default'%self.sched_base_path
|
||||
if route_via_nsca:
|
||||
self.conf.set_string(default_path, 'channel', self.nsca_channel)
|
||||
else:
|
||||
self.conf.set_string(default_path, 'channel', self.python_channel)
|
||||
|
||||
self.conf.set_string(default_path, 'alias', 'stress')
|
||||
#self.conf.set_string(default_path, 'target', 'stress_001')
|
||||
|
||||
use_command = self.command
|
||||
if not route_via_python:
|
||||
if route_via_nrpe:
|
||||
use_command = 'nrpe_query host=127.0.0.1 port=%d command=CheckOK'%self.nrpe_port
|
||||
else:
|
||||
use_command = 'CheckOK'
|
||||
|
||||
self.conf.set_string(default_path, 'command', use_command)
|
||||
self.conf.set_string(default_path, 'interval', '5s')
|
||||
log_debug('Adding %d checks'%int(check_per_second*5))
|
||||
for i in range(1, int(check_per_second*5)+1):
|
||||
|
||||
alias = 'stress_python_%i'%i
|
||||
self.conf.set_string('%s/schedules'%(self.sched_base_path), alias, use_command)
|
||||
|
||||
self.conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
None
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
def require_boot(self):
|
||||
return True
|
||||
|
||||
setup_singleton(StressTest)
|
||||
|
||||
all_tests = [StressTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
||||
|
210
bibliotheque/files/nsclient/scripts/python/test_w32_file.py
Normal file
210
bibliotheque/files/nsclient/scripts/python/test_w32_file.py
Normal file
@ -0,0 +1,210 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
import random
|
||||
import subprocess
|
||||
import uuid
|
||||
import os
|
||||
import sys, stat, datetime, time
|
||||
|
||||
class Win32FileTest(BasicTest):
|
||||
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
|
||||
def __init__(self):
|
||||
self.test_data = [
|
||||
['test.001', 4, -5, ''],
|
||||
['test-001.txt', 4, -5, ''],
|
||||
['test-002.txt', 12, -5, ''],
|
||||
['test-003.txt', 32, -10, ''],
|
||||
['test-004.txt', 4, -10, ''],
|
||||
['test-005.txt', 4, 0, ''],
|
||||
['test-006.txt', 4, 5, ''],
|
||||
['test-007.txt', 4, 5, '001/002/003'],
|
||||
['test-008.txt', 4, 5, '001/002'],
|
||||
['test-009.txt', 4, 5, '001']
|
||||
]
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for w32 check_file module'
|
||||
|
||||
def title(self):
|
||||
return 'Win32File tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.temp_path = self.core.expand_path('${temp}')
|
||||
log('Temp: %s'%self.temp_path)
|
||||
self.work_path = os.path.join(self.temp_path, '%s'%uuid.uuid4())
|
||||
log('Work: %s'%self.work_path)
|
||||
os.mkdir(self.work_path)
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def get_real_filename(self, name, path):
|
||||
if path != '':
|
||||
folder = os.path.join(self.work_path, path)
|
||||
return (folder, os.path.join(folder, name))
|
||||
else:
|
||||
return (self.work_path, os.path.join(self.work_path, name))
|
||||
|
||||
def create_file(self, name, size, time_offset, path = ''):
|
||||
(folder, file_name) = self.get_real_filename(name, path)
|
||||
if not os.path.exists(folder):
|
||||
os.makedirs(folder)
|
||||
|
||||
if not os.path.exists(file_name):
|
||||
f = open(file_name, 'w')
|
||||
for x in range(0,size):
|
||||
f.write('%d'%(x%10))
|
||||
f.close()
|
||||
|
||||
today = datetime.datetime.now()
|
||||
pastday = today + datetime.timedelta(minutes=time_offset)
|
||||
atime = int(time.mktime(pastday.timetuple()))
|
||||
times = (atime,atime)
|
||||
os.utime(file_name,times)
|
||||
|
||||
def delete_file(self, name, path = ''):
|
||||
(folder, file_name) = self.get_real_filename(name, path)
|
||||
if os.path.exists(file_name):
|
||||
try:
|
||||
os.remove(file_name)
|
||||
except OSError, (errno, strerror):
|
||||
log('Failed to delete: %s'%file_name)
|
||||
if os.path.exists(folder):
|
||||
try:
|
||||
os.rmdir(folder)
|
||||
except OSError, (errno, strerror):
|
||||
None
|
||||
|
||||
def setup_files(self):
|
||||
for data in self.test_data:
|
||||
self.create_file(data[0], data[1], data[2], data[3])
|
||||
|
||||
def cleanup_files(self):
|
||||
for data in self.test_data:
|
||||
self.delete_file(data[0], data[3])
|
||||
for data in self.test_data:
|
||||
self.delete_file(data[0], data[3])
|
||||
if os.path.exists(self.work_path):
|
||||
try:
|
||||
os.rmdir(self.work_path)
|
||||
except OSError, (errno, strerror):
|
||||
log('Failed to delete folder: %s'%self.work_path)
|
||||
log('Failed to delete folder: %s'%errno)
|
||||
|
||||
def get_count(self,perf):
|
||||
if not perf:
|
||||
return -1
|
||||
(title, data) = perf.split('=')
|
||||
if not data:
|
||||
return -1
|
||||
(count, warn, crit) = data.split(';')
|
||||
return int(count)
|
||||
|
||||
def check_files(self, filter, text, expected, extra_args):
|
||||
self.setup_files()
|
||||
alias = '%s: %s'%(text, filter)
|
||||
result = TestResult('Checking %s'%alias)
|
||||
args = ['path=%s'%self.work_path, 'filter=%s'%filter, 'syntax=%filename%: %size% %write%', 'warn=gt:1', 'crit=gt:3']
|
||||
args.extend(extra_args)
|
||||
(ret, msg, perf) = self.core.simple_query('CheckFiles', args)
|
||||
#log("Messge: %s"%msg)
|
||||
#log("Perf: %s"%perf)
|
||||
count = self.get_count(perf)
|
||||
result.add_message(count == expected, 'Check that we get correct number of files', 'Invalid result: got %s expected %s'%(count, expected))
|
||||
if expected > 3:
|
||||
result.add_message(ret == status.CRITICAL, 'Check that we get correct status back (CRIT)', 'We did not get a CRIT back as expected: %s'%ret)
|
||||
elif expected > 1:
|
||||
result.add_message(ret == status.WARNING, 'Check that we get correct status back (WARN)', 'We did not get a WARN back as expected: %s'%ret)
|
||||
elif expected > 0:
|
||||
result.add_message(ret == status.OK, 'Check that we get correct status back (OK)', 'We did not get a OK back as expected: %s'%ret)
|
||||
else:
|
||||
result.add_message(ret == status.UNKNOWN, 'Check that we get correct status back (UNKNOWN)', 'We did not get a UNKNOWN back as expected: %s'%ret)
|
||||
return result
|
||||
|
||||
def check_no_files(self):
|
||||
self.setup_files()
|
||||
result = TestResult('Checking no files')
|
||||
args = ['path=%s\\aaa.txt'%self.work_path]
|
||||
(ret, msg, perf) = self.core.simple_query('check_files', args)
|
||||
#log("Messge: %s"%msg)
|
||||
#log("Perf: %s"%perf)
|
||||
result.add_message(ret == status.UNKNOWN, 'Check that we get correct status back', 'Return status was wrong: %s'%ret)
|
||||
#count = self.get_count(perf)
|
||||
result.assert_equals(msg, 'No files found', 'Validate return message')
|
||||
|
||||
return result
|
||||
|
||||
def run_test(self):
|
||||
result = TestResult('Testing W32 file systems')
|
||||
|
||||
# Check size
|
||||
result.add(self.check_files('size gt 0b', 'Count all files (not folders)', 10, []))
|
||||
result.add(self.check_files('size gt 4b', 'Count all files > 4b', 2, []))
|
||||
result.add(self.check_files('size lt 5b', 'Count all files < 5b', 11, []))
|
||||
result.add(self.check_files('size eq 4b', 'Count all files = 4b', 8, []))
|
||||
result.add(self.check_files('size ne 4b', 'Count all files!= 4b', 5, []))
|
||||
result.add(self.check_files('size lt 4m', 'Count all files < 5m', 13, []))
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders', 3, []))
|
||||
|
||||
# Check flags (recursive, pattern)
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders (non recursivly)', 3, ['max-dir-depth=0']))
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders (recurse 1)', 1, ['max-dir-depth=1']))
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders (recurse 1)', 2, ['max-dir-depth=2']))
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders (recurse 1)', 3, ['max-dir-depth=3']))
|
||||
result.add(self.check_files('size eq 0b', 'Count all folders (recurse 1)', 3, ['max-dir-depth=4']))
|
||||
result.add(self.check_files('size gt 0b', 'Count all files (*.txt)', 9, ['pattern=*.txt']))
|
||||
result.add(self.check_files('size gt 0b', 'Count all files (*.foo)', 0, ['pattern=*.foo']))
|
||||
|
||||
# Check dates
|
||||
result.add(self.check_files('written ge -5m', 'Count all files (*.txt, >-5m)', 7, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written le -5m', 'Count all files (*.txt, <-5m)', 4, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written lt -9m', 'Count all files (*.txt, <-9m)', 2, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written gt -9m', 'Count all files (*.txt, >-9m)', 7, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written lt -1m', 'Count all files (*.txt, <-1m)', 4, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written gt -9m and written lt -1m', 'Count all files (*.txt, >-10m<-5m)', 2, ['pattern=*.txt']))
|
||||
result.add(self.check_files('written gt 0m', 'Count all files (*.txt, >0m)', 4, ['pattern=*.txt']))
|
||||
|
||||
result.add(self.check_no_files())
|
||||
self.cleanup_files()
|
||||
|
||||
return result
|
||||
|
||||
def install(self, arguments):
|
||||
conf = self.conf
|
||||
conf.set_string('/modules', 'test_disk', 'CheckDisk')
|
||||
conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
conf.set_string('/settings/pytest/scripts', 'test_w32file', 'test_w32_file.py')
|
||||
conf.save()
|
||||
|
||||
def uninstall(self):
|
||||
None
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
def shutdown(self):
|
||||
None
|
||||
|
||||
setup_singleton(Win32FileTest)
|
||||
|
||||
all_tests = [Win32FileTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
shutdown_testcases()
|
107
bibliotheque/files/nsclient/scripts/python/test_w32_schetask.py
Normal file
107
bibliotheque/files/nsclient/scripts/python/test_w32_schetask.py
Normal file
@ -0,0 +1,107 @@
|
||||
from NSCP import Settings, Registry, Core, log, log_debug, status, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
import random
|
||||
import subprocess
|
||||
import uuid
|
||||
import os
|
||||
import sys, stat, datetime, time
|
||||
from subprocess import check_output
|
||||
|
||||
class Win32SchedTaskTest(BasicTest):
|
||||
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for w32 check_wmi module'
|
||||
|
||||
def title(self):
|
||||
return 'Win32File tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def get_count(self,perf):
|
||||
if not perf:
|
||||
return -1
|
||||
(title, data) = perf.split('=')
|
||||
if not data:
|
||||
return -1
|
||||
(count, warn, crit) = data.split(';')
|
||||
return int(count)
|
||||
|
||||
def check_ts_query(self, task, code):
|
||||
result = TestResult('Checking task %s'%task)
|
||||
for i in [0, 1, 2, 3, 4]:
|
||||
# check_tasksched "filter=title = 'NSCPSample_CRIT'" "warn=exit_code != 3"
|
||||
args = ["filter=title = 'NSCPSample_%s'"%task,
|
||||
"warn=exit_code = %d"%i]
|
||||
log_debug(', '.join(args))
|
||||
(ret, msg, perf) = self.core.simple_query('check_tasksched', args)
|
||||
|
||||
if i == code:
|
||||
result.assert_equals(ret, status.WARNING, 'Verify WARN result: %s'%msg)
|
||||
else:
|
||||
result.assert_equals(ret, status.OK, 'Verify OK result: %s'%msg)
|
||||
|
||||
return result
|
||||
|
||||
def run_test(self):
|
||||
result = TestResult('Testing W32 task scheduler')
|
||||
for (state, code) in [('OK', 0), ('WARN', 1), ('CRIT', 2), ('LONG', 0)]:
|
||||
result.add(self.check_ts_query(state, code))
|
||||
return result
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
t = datetime.datetime.fromtimestamp(time.mktime(time.localtime()))
|
||||
t = t + datetime.timedelta(seconds=60)
|
||||
tm = time.strftime("%H:%M", t.timetuple())
|
||||
folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
log("Adding scheduled tasks")
|
||||
for state in ['OK', 'WARN', 'CRIT', 'LONG']:
|
||||
cmd = "schtasks.exe /Create /SC DAILY /TN NSCPSample_%s /TR \"%s\\check_test.bat %s\" /ST %s /F"%(state, folder, state, tm)
|
||||
log_debug(cmd)
|
||||
check_output(cmd)
|
||||
log("Waiting 1 minute (for tasks to run)")
|
||||
time.sleep(60)
|
||||
|
||||
def install(self, arguments):
|
||||
conf = self.conf
|
||||
conf.set_string('/modules', 'test_tsch', 'CheckTaskSched')
|
||||
conf.set_string('/modules', 'pytest', 'PythonScript')
|
||||
conf.set_string('/settings/pytest/scripts', 'test_w32_tsch', __file__)
|
||||
conf.save()
|
||||
|
||||
def teardown(self):
|
||||
for state in ['OK', 'WARN', 'CRIT', 'LONG']:
|
||||
log_debug("schtasks.exe /Delete /TN NSCPSample_%s /F"%state)
|
||||
check_output("schtasks.exe /Delete /TN NSCPSample_%s /F"%state)
|
||||
|
||||
def help(self):
|
||||
None
|
||||
|
||||
def init(self, plugin_id, prefix):
|
||||
self.plugin_id = plugin_id
|
||||
self.reg = Registry.get(plugin_id)
|
||||
self.core = Core.get(plugin_id)
|
||||
self.conf = Settings.get(plugin_id)
|
||||
|
||||
|
||||
setup_singleton(Win32SchedTaskTest)
|
||||
|
||||
all_tests = [Win32SchedTaskTest]
|
||||
|
||||
def __main__(args):
|
||||
install_testcases(all_tests)
|
||||
|
||||
def init(plugin_id, plugin_alias, script_alias):
|
||||
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
|
||||
|
||||
def shutdown():
|
||||
log("shutdown")
|
||||
shutdown_testcases()
|
166
bibliotheque/files/nsclient/scripts/python/test_w32_system.py
Normal file
166
bibliotheque/files/nsclient/scripts/python/test_w32_system.py
Normal file
@ -0,0 +1,166 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
import random
|
||||
import subprocess
|
||||
|
||||
class Win32SystemTest(BasicTest):
|
||||
|
||||
reg = None
|
||||
conf = None
|
||||
core = None
|
||||
|
||||
def desc(self):
|
||||
return 'Testcase for w32 check_system module'
|
||||
|
||||
def title(self):
|
||||
return 'Win32System tests'
|
||||
|
||||
def setup(self, plugin_id, prefix):
|
||||
self.reg = Registry.get(plugin_id)
|
||||
|
||||
def teardown(self):
|
||||
None
|
||||
|
||||
def get_expected_state(self, existing, sign, asked):
|
||||
if existing == 0:
|
||||
return status.OK
|
||||
if sign == 'eq':
|
||||
if existing == asked:
|
||||
return status.CRITICAL
|
||||
return status.OK
|
||||
if sign == 'gt':
|
||||
if existing > asked:
|
||||
return status.CRITICAL
|
||||
return status.OK
|
||||
if sign == 'lt':
|
||||
if existing < asked:
|
||||
return status.CRITICAL
|
||||
return status.OK
|
||||
if sign == 'ne':
|
||||
if existing != asked:
|
||||
return status.CRITICAL
|
||||
return status.OK
|
||||
return status.UNKNOWN
|
||||
|
||||
def test_one_proc_int(self, proc, actual, asked):
|
||||
result = TestResult('Checking one state %d/%d'%(actual, asked))
|
||||
for s in ['eq', 'gt', 'lt', 'ne']:
|
||||
(retcode, retmessage, retperf) = self.core.simple_query('check_process', ['empty-state=OK', 'show-all', 'crit=count %s %d'%(s, asked), "filter=exe='%s'"%proc])
|
||||
expected = self.get_expected_state(actual, s, asked)
|
||||
result.add_message(retcode == expected, 'Process: %s (%d %s %d): %s'%(proc, actual, s, asked, retmessage), '%s != %s'%(retcode, expected))
|
||||
return result
|
||||
|
||||
def run_test_proc(self):
|
||||
master = TestResult('Checking check_process')
|
||||
|
||||
result = TestResult('0 notepads running')
|
||||
for j in range(0,3):
|
||||
result.append(self.test_one_proc_int('notepad.exe', 0, j))
|
||||
master.add(result)
|
||||
|
||||
pids = []
|
||||
for i in range(1,4):
|
||||
result = TestResult('%d notepads running'%i)
|
||||
log('Starting notepad...')
|
||||
handle = subprocess.Popen('notepad.exe', shell=False)
|
||||
sleep(500)
|
||||
pids.append(handle.pid)
|
||||
for j in range(0,3):
|
||||
result.append(self.test_one_proc_int('notepad.exe', i, j))
|
||||
master.add(result)
|
||||
|
||||
for p in pids:
|
||||
subprocess.Popen("taskkill /F /T /PID %i"%p , shell=True)
|
||||
|
||||
return master
|
||||
|
||||
def check_and_lookup_index(self, index):
|
||||
result = TestResult('Validating index: %s'%index)
|
||||
(result_code, result_message) = self.core.simple_exec('any', 'pdh', ['--lookup-name', '%s'%index, '--porcelain'])
|
||||
result.assert_equals(result_code, 0, 'Result code')
|
||||
result.assert_equals(len(result_message), 1, 'result length')
|
||||
result.add_message(len(result_message[0])>0, 'result length')
|
||||
name = result_message[0]
|
||||
|
||||
(result_code, result_message) = self.core.simple_exec('any', 'pdh', ['--lookup-index', name, '--porcelain'])
|
||||
result.assert_equals(result_code, 0, 'Result code')
|
||||
result.assert_equals(len(result_message), 1, 'result length')
|
||||
result.add_message(len(result_message[0])>0, 'result length')
|
||||
result.assert_equals(result_message[0], '%s'%index, 'result length')
|
||||
|
||||
return result, name
|
||||
|
||||
def check_counter(self, counter, args):
    """Run CheckCounter for ``counter`` with ``args`` and sanity-check the reply."""
    outcome = TestResult('Checking counter: %s'%counter)
    # NB: extends the caller-supplied argument list in place.
    args.append('Counter=%s'%counter)
    code, message, perf = self.core.simple_query('CheckCounter', args)
    checks = [
        (code != status.UNKNOWN, 'Return code: %s'%code),
        (len(message) > 0, 'Returned message: %s'%message),
        (len(perf) > 0, 'Performance data: %s'%perf),
    ]
    for ok, label in checks:
        outcome.add_message(ok, label)
    return outcome
|
||||
|
||||
def run_test_counters(self):
    """Exercise the CheckCounter query and pdh counter-index lookups.

    Lists all PDH counters, validates two counter indexes by
    round-tripping them through name lookup, then queries a counter by
    index form and by resolved-name form.

    Returns:
        TestResult aggregating all sub-results.
    """
    result = TestResult('Checking CheckCounter')
    (result_code, result_message) = self.core.simple_exec('any', 'pdh', ['--list', '--all'])
    count = 0
    data = []
    # NOTE(review): each iteration overwrites data/count, so only the
    # last message contributes — presumably simple_exec returns a single
    # entry here; confirm.
    for m in result_message:
        data = m.splitlines()
        count = len(data)
    result.add_message(count > 0, 'Managed to retrieve counters: %d'%count)
    if len(data) == 0:
        result.add_message(False, 'Failed to find counters: %s'%result_message)
    counters = []  # NOTE(review): never used below

    # Validate two fixed counter indexes and remember their names.
    (subres, name1) = self.check_and_lookup_index(4)
    result.add(subres)
    (subres, name2) = self.check_and_lookup_index(26)
    result.add(subres)

    # Query the same counter three ways: by raw index path, by index
    # path with the explicit 'index' flag, and by resolved names.
    result.add(self.check_counter('\\4\\26', ['ShowAll', 'MaxWarn=10']))
    result.add(self.check_counter('\\4\\26', ['index', 'ShowAll', 'MaxWarn=10']))
    result.add(self.check_counter('\\%s\\%s'%(name1, name2), ['ShowAll', 'MaxWarn=10']))
    return result
|
||||
|
||||
def run_test(self):
    """Run every Win32 system test suite and aggregate the results."""
    overall = TestResult('Testing W32 systems')
    for suite in (self.run_test_proc, self.run_test_counters):
        overall.add(suite())
    return overall
|
||||
|
||||
def install(self, arguments):
    """Register the modules and script this test case needs, then persist."""
    settings = self.conf
    entries = (
        ('/modules', 'test_system', 'CheckSystem'),
        ('/modules', 'pytest', 'PythonScript'),
        ('/settings/pytest/scripts', 'test_w32sys', 'test_w32_system.py'),
    )
    for path, key, value in entries:
        settings.set_string(path, key, value)
    settings.save()
|
||||
|
||||
def uninstall(self):
    """Nothing to clean up for this test case."""
    # Idiom fix: a bare `None` expression statement was a confusing
    # no-op; `pass` states the intent explicitly.
    pass
|
||||
|
||||
def help(self):
    """No help text is provided for this test case."""
    # Idiom fix: replaced the bare `None` no-op statement with `pass`.
    pass
|
||||
|
||||
def init(self, plugin_id, prefix):
    # Resolve the NSCP singletons for this plugin instance.  `prefix`
    # is accepted for interface compatibility but unused here.
    self.reg = Registry.get(plugin_id)
    self.core = Core.get(plugin_id)
    self.conf = Settings.get(plugin_id)
|
||||
|
||||
def shutdown(self):
    """Nothing to tear down for this test case."""
    # Idiom fix: replaced the bare `None` no-op statement with `pass`.
    pass
|
||||
|
||||
# Register the test case with the shared test harness.
setup_singleton(Win32SystemTest)

# All test classes exported by this script.
all_tests = [Win32SystemTest]

def __main__(args):
    # Entry point when the script is run as a command: install the tests.
    install_testcases(all_tests)

def init(plugin_id, plugin_alias, script_alias):
    # Called by NSClient++ when the script is loaded.
    init_testcases(plugin_id, plugin_alias, script_alias, all_tests)

def shutdown():
    # Called by NSClient++ when the script is unloaded.
    shutdown_testcases()
|
113
bibliotheque/files/nsclient/scripts/python/test_w32_wmi.py
Normal file
113
bibliotheque/files/nsclient/scripts/python/test_w32_wmi.py
Normal file
@ -0,0 +1,113 @@
|
||||
from NSCP import Settings, Registry, Core, log, status, log_error, sleep
|
||||
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
|
||||
from types import *
|
||||
import random
|
||||
import subprocess
|
||||
import uuid
|
||||
import os
|
||||
import sys, stat, datetime, time
|
||||
|
||||
class Win32WMITest(BasicTest):
    """Testcase for the w32 CheckWMI module.

    Exercises the ``wmi`` command line (namespace listing, class
    listing, SELECT queries) through the NSCP core.
    """

    # NSCP singletons, resolved in init()/setup().
    reg = None
    conf = None
    core = None

    def desc(self):
        return 'Testcase for w32 check_wmi module'

    def title(self):
        # Fixed copy-paste: this is the WMI suite, not Win32File.
        return 'Win32WMI tests'

    def setup(self, plugin_id, prefix):
        self.reg = Registry.get(plugin_id)

    def teardown(self):
        # Nothing to tear down (idiom fix: was a bare `None` statement).
        pass

    def get_count(self, perf):
        """Parse a ``label=count;warn;crit`` perf string.

        Returns the integer count, or -1 when perf is empty/malformed.
        """
        if not perf:
            return -1
        (title, data) = perf.split('=')
        if not data:
            return -1
        (count, warn, crit) = data.split(';')
        return int(count)

    def check_cli_ns(self):
        """Verify --list-all-ns on the root namespace reports CIMV2."""
        result = TestResult('Checking CLI list-ns')
        (ret, ns_msgs) = self.core.simple_exec('any', 'wmi', ['--list-all-ns', '--namespace', 'root'])
        result.assert_equals(ret, 0, 'Check that --list-all-ns returns ok')
        result.assert_equals(len(ns_msgs), 1, 'Check that --list-all-ns returns one entry')
        if len(ns_msgs) > 0:
            result.assert_contains(ns_msgs[0], 'CIMV2', 'Check that --list-all-ns contains cimv2')
        return result

    def check_cli_ls(self, ns, expected, missing):
        """Verify --list-classes for namespace ``ns`` (None = default).

        Asserts ``expected`` appears in the class list and ``missing``
        does not.
        """
        result = TestResult('Checking CLI list-classes %s'%ns)
        args = ['--list-classes', '--simple']
        if ns != None:
            args.extend(['--namespace', ns])
        (ret, ns_msgs) = self.core.simple_exec('any', 'wmi', args)
        result.assert_equals(ret, 0, 'Check that --list-classes returns ok')
        result.assert_equals(len(ns_msgs), 1, 'Check that --list-classes returns one entry')
        if len(ns_msgs) > 0:
            result.assert_contains(ns_msgs[0], expected, 'Check that --list-classes contains %s'%expected)
            result.assert_not_contains(ns_msgs[0], missing, 'Check that --list-classes does not contains %s'%missing)
        return result

    def check_cli_query(self, query, count, check, ns = None):
        """Run a WQL query via --select and validate its output.

        Args:
            query: WQL SELECT statement.
            count: predicate applied to the list of result rows.
            check: predicate applied to the raw result text.
            ns:    optional namespace.
        """
        result = TestResult('Checking CLI query %s'%query)
        args = ['--select', query, '--simple']
        if ns != None:
            args.extend(['--namespace', ns])
        (ret, ns_msgs) = self.core.simple_exec('any', 'wmi', args)
        result.assert_equals(ret, 0, 'Check that --select returns ok')
        result.assert_equals(len(ns_msgs), 1, 'Check that --select returns one entry')
        if len(ns_msgs) > 0:
            result.add_message(count(ns_msgs[0].splitlines()), 'Check that it contains the right number of rows')
            result.add_message(check(ns_msgs[0]), 'Check that it contains the right data')
        return result

    def run_test(self):
        """Run all WMI checks and aggregate the results."""
        # Fixed copy-paste label (said 'file systems' in the WMI suite).
        result = TestResult('Testing W32 WMI')
        result.add(self.check_cli_ns())
        # Raw string fix: 'root\subscription' relied on '\s' not being a
        # recognized escape, which is deprecated syntax.
        result.add(self.check_cli_ls(None, 'Win32_Processor', 'LogFileEventConsumer'))
        result.add(self.check_cli_ls(r'root\subscription', 'LogFileEventConsumer', 'Win32_Processor'))
        # Bug fix: the count predicate receives a LIST of rows; the old
        # `lambda x: x > 1` compared list-to-int (TypeError on Python 3,
        # vacuously true on Python 2).
        result.add(self.check_cli_query('SELECT DeviceId, AddressWidth, Caption, Name FROM Win32_Processor', lambda x: len(x) > 1, lambda x:'CPU0' in x))
        return result

    def install(self, arguments):
        """Register the modules and script this test case needs, then persist."""
        conf = self.conf
        conf.set_string('/modules', 'test_wmi', 'CheckWMI')
        conf.set_string('/modules', 'pytest', 'PythonScript')
        conf.set_string('/settings/pytest/scripts', 'test_w32wmi', 'test_w32_wmi.py')
        conf.save()

    def uninstall(self):
        """Nothing to clean up for this test case."""
        pass

    def help(self):
        """No help text is provided for this test case."""
        pass

    def init(self, plugin_id, prefix):
        # Resolve the NSCP singletons for this plugin instance.
        self.reg = Registry.get(plugin_id)
        self.core = Core.get(plugin_id)
        self.conf = Settings.get(plugin_id)

    def shutdown(self):
        """Nothing to tear down for this test case."""
        pass
|
||||
|
||||
# Register the test case with the shared test harness.
setup_singleton(Win32WMITest)

# All test classes exported by this script.
all_tests = [Win32WMITest]

def __main__(args):
    # Entry point when the script is run as a command: install the tests.
    install_testcases(all_tests)

def init(plugin_id, plugin_alias, script_alias):
    # Called by NSClient++ when the script is loaded.
    init_testcases(plugin_id, plugin_alias, script_alias, all_tests)

def shutdown():
    # Called by NSClient++ when the script is unloaded.
    shutdown_testcases()
|
21
bibliotheque/files/nsclient/scripts/restart_service.ps1
Normal file
21
bibliotheque/files/nsclient/scripts/restart_service.ps1
Normal file
@ -0,0 +1,21 @@
|
||||
# Restart Service Script
# Please enable external scripts and the external scripts variable before use.
#
# Usage: restart_service.ps1 -serviceName svc1[,svc2,...]
# Restarts each named service; exits 3 (Nagios UNKNOWN/CRITICAL style)
# on the first failure.

param (
    [string[]]$serviceName
)
Foreach ($Service in $ServiceName)
{
    # Bug fix: restart the CURRENT item ($Service), not the whole
    # $ServiceName array — the original restarted every listed service
    # on each loop iteration.
    Restart-Service $Service -ErrorAction SilentlyContinue -ErrorVariable ServiceError
    If (!$ServiceError) {
        $Time = Get-Date
        Write-Host "Restarted service $Service at $Time"
    }
    If ($ServiceError) {
        write-host $error[0]
        exit 3
    }
}
|
||||
|
||||
|
||||
|
45
bibliotheque/files/nsclient/scripts/services.vbs
Normal file
45
bibliotheque/files/nsclient/scripts/services.vbs
Normal file
@ -0,0 +1,45 @@
|
||||
' Services.vbs
' Script to list running autostarted services
' www.computerperformance.co.uk/
' Author Guy Thomas http://computerperformance.co.uk/
' Version 1.5 December 2005
'
' Modified by Per Asberg Dec 2010, op5 AB, http://www.op5.com
' Modified by Peter Ostlin May 2011, op5 AB, http://www.op5.com
' -------------------------------------------------------'
Option Explicit
Dim icnt, cnt, page, start, objWMIService, objItem, objService, strServiceList
Dim colListOfServices, strComputer, strService, Args

'On Error Resume Next

' ---------------------------------------------------------

cnt = 0    ' total number of matching services seen
icnt = 0   ' number of services actually listed (returned)
page = 20  ' nr of services to include (pagination)
start = 0  ' where to start (pagination)

Set Args = WScript.Arguments.Named

' Only /start: is configurable from the command line; note that `page`
' is declared and initialised but has no matching argument.
If Args.Exists("start") Then start = Cint(Args("start"))

strComputer = "."
Set objWMIService = GetObject("winmgmts:" _
    & "{impersonationLevel=impersonate}!\\" _
    & strComputer & "\root\cimv2")
' Auto-start services, excluding the NSClient++ service itself.
Set colListOfServices = objWMIService.ExecQuery _
    ("Select * from Win32_Service WHERE StartMode='auto' AND name != 'NSClientpp'")

' WMI and VBScript loop: collect up to one page of service names,
' starting at index `start`.
For Each objService in colListOfServices
    If icnt < page AND cnt >= start THEN
        strServiceList = strServiceList & objService.name & ","
    icnt = icnt +1
    End if
    cnt = cnt + 1
Next

' NOTE(review): the echoed list carries a trailing comma; confirm the
' consumer tolerates it.
WScript.Echo strServiceList

' End of WMI script to list services
|
48
bibliotheque/files/nsclient/scripts/test.lua
Normal file
48
bibliotheque/files/nsclient/scripts/test.lua
Normal file
@ -0,0 +1,48 @@
|
||||
-- Smoke-test script for the NSClient++ Lua scripting module.
nscp.print('Loading test script...')

nscp.execute('version')
-- Read a setting; 'broken' is the fallback returned when the key is missing.
v = nscp.getSetting('NSCA Agent', 'interval', 'broken')
nscp.print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
nscp.print('value: ' .. v)
nscp.print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')

-- Map NSCP command names onto the Lua handler functions defined below.
nscp.register('check_something', 'something')
nscp.register('lua_debug', 'debug')
nscp.register('foo', 'something')
nscp.register('lua_alias', 'execute_all_alias')
|
||||
|
||||
-- Handler for check_something/foo: runs CheckCPU and wraps its output.
-- Returns (code, message, perf) in NSCP handler convention.
function something (command)
    nscp.print(command)
    -- `local` keeps these from leaking into the global environment
    -- (the original created accidental globals).
    local code, msg, perf = nscp.execute('CheckCPU','time=5','MaxCrit=5')
    print(code .. ': ' .. msg .. ', ' .. perf)
    collectgarbage ()

    return code, 'hello from LUA: ' .. msg, perf
end
|
||||
|
||||
-- Runs every command in the 'External Alias' settings section and
-- summarises how many succeeded.  Returns ('ok', msg) when all pass,
-- otherwise ('error', msg).
function execute_all_alias()
    -- `local` fixes accidental globals in the original.
    local commands = nscp.getSection('External Alias')
    local ok = 0
    local err = 0
    for i,key in pairs(commands) do
        local args = nscp.getSetting('External Alias', key)
        local code, msg, perf = nscp.execute(key,args)
        if code == 'ok' then
            ok = ok + 1
        else
            err = err + 1
            print('[' .. i .. '] ' .. key .. ': ' .. code .. ' <' .. msg ..'>')
        end
    end
    if err == 0 then
        return 'ok', 'All ' .. ok .. ' commands were ok'
    else
        -- Typo fix in the user-facing message: 'successfull' -> 'successful'.
        return 'error', 'Only ' .. ok .. ' commands of the ' .. (ok+err) .. ' were successful'
    end
end
|
||||
|
||||
-- Diagnostic handler: echoes each argument (index and value, exactly as
-- table.foreachi would), then the command name, and reports success.
function debug (command, args)
    for index, value in ipairs(args) do
        print(index, value)
    end
    print ('Command was: ' .. command)
    return 'ok', 'hello'
end
|
58
bibliotheque/files/snmp/etc-snmp-snmpd.conf
Normal file
58
bibliotheque/files/snmp/etc-snmp-snmpd.conf
Normal file
@ -0,0 +1,58 @@
|
||||
################################################################################
|
||||
#
|
||||
# __ __ ______ __
|
||||
# / | / | / \ / |
|
||||
# _$$ |_ $$/ ______ _______ ______ /$$$$$$ | _____ ____ $$/ _______ ______
|
||||
# / $$ | / | / \ / |______ / \ $$ |_ $$/______ / \/ \ / |/ \ / \
|
||||
# $$$$$$/ $$ |/$$$$$$ |/$$$$$$$// |/$$$$$$ |$$ | / |$$$$$$ $$$$ |$$ |$$$$$$$ |/$$$$$$ |
|
||||
# $$ | __ $$ |$$ | $$ |$$ \$$$$$$/ $$ | $$ |$$$$/ $$$$$$/ $$ | $$ | $$ |$$ |$$ | $$ |$$ $$ |
|
||||
# $$ |/ |$$ |$$ |__$$ | $$$$$$ | $$ \__$$ |$$ | $$ | $$ | $$ |$$ |$$ | $$ |$$$$$$$$/
|
||||
# $$ $$/ $$ |$$ $$/ / $$/ $$ $$/ $$ | $$ | $$ | $$ |$$ |$$ | $$ |$$ |
|
||||
# $$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$/ $$/ $$/ $$/ $$/ $$/ $$/ $$$$$$$/
|
||||
# $$ |
|
||||
# $$ |
|
||||
# $$/
|
||||
#
|
||||
################################################################################
|
||||
|
||||
#com2sec paranoid default EXEMPLE-PUB
|
||||
com2sec readonly default EXEMPLE-PRIV
|
||||
#com2sec readwrite default EXEMPLE-PRIV
|
||||
#group MyRWGroup usm readwrite
|
||||
group ROGroup v1 readonly
|
||||
|
||||
informsink 10.59.1.11 EXEMPLE-PRIV
|
||||
|
||||
# incl/excl subtree mask
|
||||
view all included .1 80
|
||||
view system included .iso.org.dod.internet.mgmt.mib-2.system
|
||||
view system included .1.3.6.1.4.1.2021.11
|
||||
view System included .1.3.6.1.2.1.1
|
||||
view System included .1.3.6.1.2.1.25.1.1
|
||||
|
||||
####
|
||||
# Grant the groups access to the views defined above, each with its own
# read/write permissions.  NOTE: only ROGroup has an active group
# definition in this file; the My* groups referenced below have no
# matching (uncommented) group line.
|
||||
|
||||
# context sec.model sec.level match read write notif
|
||||
access MyROSystem "" any noauth exact system none none
|
||||
access MyROGroup "" any noauth exact all none none
|
||||
access MyRWGroup "" any noauth exact all all none
|
||||
access ROGroup "" v1 noauth exact all none none
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
syslocation France (configure /etc/snmp/snmpd.local.conf)
|
||||
syscontact Root <administrateur@tips-of-mine.fr> (configure /etc/snmp/snmpd.local.conf)
|
||||
|
||||
# Check the / partition and make sure it contains at least 10 megs.
|
||||
|
||||
disk / 10000
|
||||
|
||||
# MUCH more can be done with the snmpd.conf than is shown as an
|
||||
# example here.
|
||||
exec .1.3.6.1.4.1.2021.54 hdNum /usr/local/bin/snmpdiskio hdNum
|
||||
exec .1.3.6.1.4.1.2021.55 hdIndex /usr/local/bin/snmpdiskio hdIndex
|
||||
exec .1.3.6.1.4.1.2021.56 hdDescr /usr/local/bin/snmpdiskio hdDescr
|
||||
exec .1.3.6.1.4.1.2021.57 hdInBlocks /usr/local/bin/snmpdiskio hdInBlocks
|
||||
exec .1.3.6.1.4.1.2021.58 hdOutBlocks /usr/local/bin/snmpdiskio hdOutBlocks
|
Reference in New Issue
Block a user