diff --git a/.gitignore b/.gitignore
index 875db1dd6b57..bcb7c7e19da2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,10 @@
build
*.py[co]
pkg/arch/*.tar.xz
+*.swp
+doc/_build
+dist
+MANIFEST
# virtualenv
# - ignores directories of a virtualenv when you create it right on
diff --git a/AUTHORS b/AUTHORS
index d09d9945ebaf..dce530ff988d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -21,10 +21,53 @@ documentation is being maintained by Seth
Developers
----------------------------
-Pedro Algarvio, aka, s0undt3ch
+Aaron Bull Schaefer
+Aaron Toponce
+Antti Kaihola
+archme
+Brad Barden
+Brian Wagner
+Carlo Pires
+Chris Scheller
+Christer Edwards
+Clint Savage
+Corey Quinn
+David Boucha
+Doug Renn
+Eivind Uggedal
+epoelke@gmail.com
+Eric Poelke
+Erik Nolte
+Evan Borgstrom
+Jed Glazner
+Jeffrey C. Ollie
+Jeff Schroeder
+Jonas Buckner
+Joseph Hall
+Kent Tenney
+Markus Gattol
+Martin Schnabel
+Matthew Printz
+Matthias Teege
+Maxim Burgerhout
+Mitch Anderson
+Nathaniel Whiteinge
+Nigel Owen
+Pedro Algarvio
+Pierre Carrier
+Seth House
+Seth Vidal
+Thomas Schreiber
+Thomas S Hatch
+Tor Hveem
+Travis Cline
+Wieland Hoffmann
-Joseph P. Hall
-Erik Nolte
+Growing Community
+--------------------------------
-Matthias Teege
+Salt is a rapidly growing project with a large community, to view all
+contributors please check ohloh, this file can sometimes be out of date:
+
+https://www.ohloh.net/p/salt/contributors
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000000..81c035d98553
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include salt/msgpack/*.h
diff --git a/conf/master b/conf/master
index 830a310ce797..01248da7464f 100644
--- a/conf/master
+++ b/conf/master
@@ -14,7 +14,8 @@
# The port used by the communication interface
#ret_port: 4506
-# The root directory prepended to these options: pki_dir, cachedir, log_file.
+# The root directory prepended to these options: pki_dir, cachedir,
+# sock_dir, log_file.
#root_dir: /
# Directory used to store public key data
@@ -29,16 +30,25 @@
# Set the directory used to hold unix sockets
#sock_dir: /tmp/salt-unix
+# Set the acceptance level for serialization of messages. This should only be
+# set if the master is newer than 0.9.5 and the minions are older, this option
+# allows a 0.9.5 and newer master to communicate with minions 0.9.4 and
+# earlier. It is not recommended to keep this setting on if the minions are
+# all 0.9.5 or higher, as leaving pickle as the serialization medium is slow
+# and opens up security risks
+#
+#serial: msgpack
+
##### Security settings #####
##########################################
# Enable "open mode", this mode still maintains encryption, but turns off
# authentication, this is only intended for highly secure environments or for
-# the situation where your keys end up in a bad state. If you run in open more
+# the situation where your keys end up in a bad state. If you run in open mode
# you do so at your own risk!
#open_mode: False
# Enable auto_accept, this setting will automatically accept all incoming
-# public keys from the minions
+# public keys from the minions. Note that this is insecure.
#auto_accept: False
##### State System settings #####
@@ -46,7 +56,7 @@
# The state system uses a "top" file to tell the minions what environment to
# use and what modules to use. The state_top file is defined relative to the
# root of the base environment
-#state_top: top.yml
+#state_top: top.sls
#
# The renderer to use on the minions to render the state data
#renderer: yaml_jinja
@@ -162,9 +172,19 @@
# Logger levels can be used to tweak specific loggers logging levels.
# Imagine you want to have the salt library at the 'warning' level, but, you
# still wish to have 'salt.modules' at the 'debug' level:
-# log_granular_levels: {
+# log_granular_levels:
# 'salt': 'warning',
# 'salt.modules': 'debug'
-# }
#
#log_granular_levels: {}
+
+
+##### Node Groups #####
+##########################################
+# Node groups allow for logical groupings of minion nodes.
+# A group consists of a group name and a compound target.
+#
+# nodegroups:
+# group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com',
+# group2: 'G@os:Debian and foo.domain.com',
+
diff --git a/conf/minion b/conf/minion
index adba3d17aa6c..ac8ad9d04d1a 100644
--- a/conf/minion
+++ b/conf/minion
@@ -28,6 +28,13 @@
# Where cache data goes
#cachedir: /var/cache/salt
+# When waiting for a master to accept the minion's public key, salt will
+# continuously attempt to reconnect until successful. This is the time, in
+# seconds, between those reconnection attempts.
+# acceptance_wait_time: 10
+
+
+
##### Minion module management #####
##########################################
# Disable specific modules, this will allow the admin to limit the level os
@@ -59,8 +66,22 @@
#
#renderer: yaml_jinja
#
-# Test allows for the state runs to only be test runs
-#test: False
+# state_verbose allows for the data returned from the minion to be more
+# verbose. Normally only states that fail or states that have changes are
+# returned, but setting state_verbose to True will return all states that
+# were checked
+#state_verbose: False
+#
+# autoload_dynamic_modules Turns on automatic loading of modules found in the
+# environments on the master. This is turned on by default, to turn off
+# autoloading modules when states run set this value to False
+#autoload_dynamic_modules: True
+#
+# clean_dynamic_modules keeps the dynamic modules on the minion in sync with
+# the dynamic modules on the master, this means that if a dynamic module is
+# not on the master it will be deleted from the minion. By default this is
+# enabled and can be disabled by changing this value to False
+#clean_dynamic_modules: True
###### Security settings #####
###########################################
@@ -96,7 +117,6 @@
#
#log_granular_levels: {}
-
###### Module configuration #####
###########################################
# Salt allows for modules to be passed arbitrary configuration data, any data
diff --git a/debian/AUTHORS b/debian/AUTHORS
deleted file mode 100644
index d09d9945ebaf..000000000000
--- a/debian/AUTHORS
+++ /dev/null
@@ -1,30 +0,0 @@
-=============
-Salt Authors
-=============
-
-Whos Who in Salt
-============================
-
-The Man With the Plan
-----------------------------
-
-Thomas S Hatch is the main developer of Salt. He is the founder, owner,
-maintainer and lead of the Salt project, as well as author of the majority
-of the Salt code and documentation.
-
-Documentation System
-----------------------------
-
-The documentation system was put together by Seth House, much of the
-documentation is being maintained by Seth
-
-Developers
-----------------------------
-
-Pedro Algarvio, aka, s0undt3ch
-
-Joseph P. Hall
-
-Erik Nolte
-
-Matthias Teege
diff --git a/debian/changelog b/debian/changelog
index 5b25936a4304..52368b3f1704 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,36 +1,6 @@
-salt (0.9.4-1) lucid; urgency=low
+salt (0.9.5+git20111227.g8182e48-1) unstable; urgency=low
- * Build PPA for 0.9.4
+ * First package release. (Closes: #643789)
- -- Seth House Sun, 27 Nov 2011 16:11:36 -0700
+ -- Corey Quinn Mon, 26 Dec 2011 13:55:22 -0800
-salt (0.9.4.pre-d353743-1) lucid; urgency=low
-
- * Build PPA for Ubuntu (fixed?)
-
- -- Seth House Wed, 25 Nov 2011 23:19:00 -0600
-
-salt (0.9.4.pre-6dd76f2-1) lucid; urgency=low
-
- * Build PPA for Ubuntu (fixed?)
-
- -- Seth House Wed, 25 Nov 2011 11:19:00 -0600
-
-salt (0.9.4.pre-1) lucid; urgency=low
-
- * Build PPA for Ubuntu
-
- -- Seth House Wed, 25 Nov 2011 11:19:00 -0600
-
-salt (0.9.2-2) unstable; urgency=low
-
- * Fixed many lintian(1) warnings and errors
-
- -- Aaron Toponce Mon, 03 Oct 2011 21:34:25 -0600
-
-salt (0.9.2-1) unstable; urgency=low
-
- * Initial release. (Closes: #643789)
- * First Debian package
-
- -- Aaron Toponce Wed, 28 Sep 2011 01:21:48 -0600
diff --git a/debian/control b/debian/control
index d0dda9998c95..b353685884b8 100644
--- a/debian/control
+++ b/debian/control
@@ -1,24 +1,81 @@
Source: salt
Section: admin
Priority: optional
-Maintainer: Aaron Toponce
-Build-Depends: debhelper (>= 7), python-support, cython, python-yaml,
- python-setuptools, python-yaml, python-crypto, python-m2crypto,
- python-pyzmq (>= 2.1.9), libzmq1 (>= 2.1.9), libzmq-dev (>= 2.1.9),
- python (>= 2.6), python-jinja2
-Standards-Version: 3.8.3
+Maintainer: Corey Quinn
+Build-Depends: debhelper (>= 7.0.50~),
+ python-support,
+ cython,
+ python-yaml,
+ python-setuptools,
+ python-yaml,
+ python-crypto,
+ python-m2crypto,
+ python-zmq (>= 2.1.9),
+ libzmq1 (>= 2.1.9),
+ libzmq-dev (>= 2.1.9),
+ python (>= 2.6),
+ python-dev (>= 2.6),
+ python-jinja2
+Standards-Version: 3.9.2
Homepage: http://saltstack.org
#Vcs-Git: git://git.debian.org/collab-maint/salt.git
#Vcs-Browser: http://git.debian.org/?p=collab-maint/salt.git;a=summary
-Package: salt
+
+Package: salt-common
+Architecture: any
+Depends: ${python:Depends},
+ ${misc:Depends}
+Description: Shared libraries that salt requires for all packages
+ This package is a powerful remote execution manager that can be used
+ to administer servers in a fast and efficient way.
+ .
+ It allows commands to be executed across large groups of servers. This
+ means systems can be easily managed, but data can also be easily gathered.
+ Quick introspection into running systems becomes a reality.
+ .
+ Remote execution is usually used to set up a certain state on a remote
+ system. Salt addresses this problem as well, the salt state system uses
+ salt state files to define the state a server needs to be in.
+ .
+ Between the remote execution system, and state management Salt addresses
+ the backbone of cloud and data center management.
+ .
+ This particular package provides shared libraries that salt-master, salt-minion,
+ and salt-syndic require to function.
+
+
+Package: salt-master
+Architecture: all
+Depends: ${python:Depends},
+ ${misc:Depends},
+ salt-common
+Description: This package provides a remote manager to administer servers via salt
+ This package is a powerful remote execution manager that can be used
+ to administer servers in a fast and efficient way.
+ .
+ It allows commands to be executed across large groups of servers. This
+ means systems can be easily managed, but data can also be easily gathered.
+ Quick introspection into running systems becomes a reality.
+ .
+ Remote execution is usually used to set up a certain state on a remote
+ system. Salt addresses this problem as well, the salt state system uses
+ salt state files to define the state a server needs to be in.
+ .
+ Between the remote execution system, and state management Salt addresses
+ the backbone of cloud and data center management.
+ .
+ This particular package provides the salt controller.
+
+
+Package: salt-minion
Architecture: all
-Depends: ${python:Depends}, ${misc:Depends}, python-setuptools,
- python-yaml, python-crypto, python-m2crypto, python-pyzmq (>= 2.1.9),
- libzmq1 (>= 2.1.9), libzmq-dev (>= 2.1.9), python (>= 2.6), python-jinja2
-Description: This package provides a remote manager to administer servers.
- This package is a powerful remote execution manager that can be used to
- administer servers in a fast and efficient way.
+Depends: ${python:Depends},
+ ${misc:Depends},
+ salt-common
+Description: This package represents the client package for salt
+ This package is a powerful remote execution manager that can be used
+ to administer servers in a fast and efficient way.
.
It allows commands to be executed across large groups of servers. This
means systems can be easily managed, but data can also be easily gathered.
@@ -30,3 +87,29 @@ Description: This package provides a remote manager to administer servers.
.
Between the remote execution system, and state management Salt addresses
the backbone of cloud and data center management.
+ .
+ This particular package provides the worker / agent for salt.
+
+
+Package: salt-syndic
+Architecture: all
+Depends: ${python:Depends},
+ ${misc:Depends},
+ salt-master
+Description: salt-syndic represents the master-of-masters for salt
+ This package is a powerful remote execution manager that can be used
+ to administer servers in a fast and efficient way.
+ .
+ It allows commands to be executed across large groups of servers. This
+ means systems can be easily managed, but data can also be easily gathered.
+ Quick introspection into running systems becomes a reality.
+ .
+ Remote execution is usually used to set up a certain state on a remote
+ system. Salt addresses this problem as well, the salt state system uses
+ salt state files to define the state a server needs to be in.
+ .
+ Between the remote execution system, and state management Salt addresses
+ the backbone of cloud and data center management.
+ .
+ This particular package provides the master of masters for salt-- it enables the management
+ of multiple masters at a time.
diff --git a/debian/files b/debian/files
deleted file mode 100644
index 69bfff7778d2..000000000000
--- a/debian/files
+++ /dev/null
@@ -1 +0,0 @@
-salt_0.9.4-1_all.deb admin optional
diff --git a/debian/links b/debian/links
new file mode 100644
index 000000000000..e325b02fa1d7
--- /dev/null
+++ b/debian/links
@@ -0,0 +1,8 @@
+usr/share/salt/salt /usr/bin/salt
+usr/share/salt/salt-master /usr/bin/salt-master
+usr/share/salt/salt-syndic /usr/bin/salt-syndic
+usr/share/salt/salt-cp /usr/bin/salt-cp
+usr/share/salt/salt-key /usr/bin/salt-key
+usr/share/salt/salt-run /usr/bin/salt-run
+usr/share/salt/salt-minion /usr/bin/salt-minion
+usr/share/salt/salt-call /usr/bin/salt-call
diff --git a/debian/patches/no-license b/debian/patches/no-license
deleted file mode 100644
index 77ebf370f3f1..000000000000
--- a/debian/patches/no-license
+++ /dev/null
@@ -1,13 +0,0 @@
-Index: salt-0.9.4/setup.py
-===================================================================
---- salt-0.9.4.orig/setup.py 2011-10-03 21:07:32.524520895 -0600
-+++ salt-0.9.4/setup.py 2011-10-03 21:14:33.852854281 -0600
-@@ -92,7 +92,7 @@
- ['salt/modules/cytest.pyx',
- ]),
- (doc_path,
-- ['LICENSE'
-+ [
- ]),
- (template_path,
- ['doc/example/templates/yaml-jinja.yml',
diff --git a/debian/patches/series b/debian/patches/series
deleted file mode 100644
index 7a5b31479f82..000000000000
--- a/debian/patches/series
+++ /dev/null
@@ -1 +0,0 @@
-no-license
diff --git a/debian/rules b/debian/rules
index e129263c6aca..a140040585e6 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,18 +1,15 @@
#!/usr/bin/make -f
-# -*- makefile -*-
-# Sample debian/rules that uses debhelper.
-# This file was originally written by Joey Hess and Craig Small.
-# As a special exception, when this file is copied by dh-make into a
-# dh-make output file, you may use that output file without restriction.
-# This special exception was added by Craig Small in version 0.37 of dh-make.
-
-# Uncomment this to turn on verbose mode.
-#export DH_VERBOSE=1
%:
- dh $@
+ dh $@
+
+#override_dh_installinit:
+# dh_installinit --no-start --name="salt-master"
+# dh_installinit --no-start --name="salt-minion"
+# dh_installinit --no-start --name="salt-syndic"
-override_dh_installinit:
- dh_installinit --no-start --name="salt-master"
- dh_installinit --no-start --name="salt-minion"
- dh_installinit --no-start --name="salt-syndic"
+get-orig-source:
+ git clone https://github.com/saltstack/salt.git
+ mv salt salt-0.9.5
+ tar -zcvf salt_0.9.5.orig.tar.gz --exclude "debian*" --exclude-vcs salt-0.9.5
+ rm -rf salt-0.9.5
diff --git a/debian/salt-common.install b/debian/salt-common.install
new file mode 100644
index 000000000000..8ef4a2454ea8
--- /dev/null
+++ b/debian/salt-common.install
@@ -0,0 +1,9 @@
+usr/share/man/man1/salt-minion.1
+usr/share/man/man1/salt-call.1
+usr/share/man/man1/salt-key.1
+usr/share/man/man1/salt-master.1
+usr/share/man/man1/salt-syndic.1
+usr/share/man/man1/salt-cp.1
+usr/share/man/man1/salt.1
+conf/minion /etc/salt/minion
+salt/* /usr/share/salt/
diff --git a/debian/salt.salt-master.init b/debian/salt-master.init
similarity index 90%
rename from debian/salt.salt-master.init
rename to debian/salt-master.init
index 08399953b841..5171ca90427b 100644
--- a/debian/salt.salt-master.init
+++ b/debian/salt-master.init
@@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
SUSE_RELEASE=/etc/SuSE-release
# Source function library.
if [ -f $DEBIAN_VERSION ]; then
- break
+ break
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
. /etc/rc.status
else
@@ -46,11 +46,11 @@ PROC_LIST=""
RETVAL=0
findproc() {
- PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
+ PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-master' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
}
start() {
- echo -n $"Starting salt-master daemon: "
+ echo -n "Starting salt-master daemon: "
if [ -f $SUSE_RELEASE ]; then
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-master -d $CONFIG_ARGS
rc_status -v
@@ -72,7 +72,7 @@ start() {
}
stop() {
- echo -n $"Stopping salt-master daemon: "
+ echo -n "Stopping salt-master daemon: "
if [ -f $SUSE_RELEASE ]; then
killproc -TERM /usr/bin/salt-master
rc_status -v
@@ -131,7 +131,7 @@ case "$1" in
RETVAL=$?
;;
*)
- echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
+ echo "Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
exit 1
;;
esac
diff --git a/debian/salt-master.install b/debian/salt-master.install
new file mode 100644
index 000000000000..53d4c1340db6
--- /dev/null
+++ b/debian/salt-master.install
@@ -0,0 +1 @@
+conf/master /etc/salt/master
diff --git a/debian/salt-master.manpages b/debian/salt-master.manpages
new file mode 100644
index 000000000000..074f304fb2c4
--- /dev/null
+++ b/debian/salt-master.manpages
@@ -0,0 +1,6 @@
+doc/man/salt.7
+doc/man/salt.1
+doc/man/salt-master.1
+doc/man/salt-key.1
+doc/man/salt-cp.1
+doc/man/salt-run.1
diff --git a/debian/salt.salt-minion.init b/debian/salt-minion.init
similarity index 88%
rename from debian/salt.salt-minion.init
rename to debian/salt-minion.init
index 1248db6c23f0..d70f742e1159 100644
--- a/debian/salt.salt-minion.init
+++ b/debian/salt-minion.init
@@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
SUSE_RELEASE=/etc/SuSE-release
# Source function library.
if [ -f $DEBIAN_VERSION ]; then
- break
+ break
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
. /etc/rc.status
else
@@ -46,18 +46,18 @@ PROC_LIST=""
RETVAL=0
findproc() {
- PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
+ PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-minion' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
}
start() {
- echo -n $"Starting salt-minion daemon: "
+ echo -n "Starting salt-minion daemon: "
if [ -f $SUSE_RELEASE ]; then
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-minion -d $CONFIG_ARGS
rc_status -v
elif [ -e $DEBIAN_VERSION ]; then
findproc
if [ -n "$PROC_LIST" ]; then
- echo -n "already started, lock file found"
+ echo -n "already started, lock file found"
RETVAL=1
elif /usr/bin/python /usr/bin/salt-minion -d; then
echo -n "OK"
@@ -72,7 +72,7 @@ start() {
}
stop() {
- echo -n $"Stopping salt-minion daemon: "
+ echo -n "Stopping salt-minion daemon: "
if [ -f $SUSE_RELEASE ]; then
killproc -TERM /usr/bin/salt-minion
rc_status -v
@@ -131,7 +131,7 @@ case "$1" in
RETVAL=$?
;;
*)
- echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
+ echo "Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
exit 1
;;
esac
diff --git a/debian/salt-minion.install b/debian/salt-minion.install
new file mode 100644
index 000000000000..7ed61cee7ef1
--- /dev/null
+++ b/debian/salt-minion.install
@@ -0,0 +1,4 @@
+scripts/salt-minion /usr/share/salt/salt-minion
+scripts/salt-call /usr/share/salt/salt-call
+modules/* /usr/share/salt/modules/
+conf/minion /etc/salt/minion
diff --git a/debian/salt-minion.manpages b/debian/salt-minion.manpages
new file mode 100644
index 000000000000..d8d924f31a95
--- /dev/null
+++ b/debian/salt-minion.manpages
@@ -0,0 +1,2 @@
+doc/man/salt-call.1
+doc/man/salt-minion.1
diff --git a/debian/salt.salt-syndic.init b/debian/salt-syndic.init
similarity index 88%
rename from debian/salt.salt-syndic.init
rename to debian/salt-syndic.init
index c0d53ddd248e..8d7796b3a826 100644
--- a/debian/salt.salt-syndic.init
+++ b/debian/salt-syndic.init
@@ -17,7 +17,7 @@
# chkconfig header
-# chkconfig: 2345 99 01
+# chkconfig: 2345 99 01
# description: This is a daemon that controls the salt mininons
#
# processname: /usr/bin/salt-syndic
@@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
SUSE_RELEASE=/etc/SuSE-release
# Source function library.
if [ -f $DEBIAN_VERSION ]; then
- break
+ break
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
. /etc/rc.status
else
@@ -46,18 +46,18 @@ PROC_LIST=""
RETVAL=0
findproc() {
- PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
+ PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-syndic' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
}
start() {
- echo -n $"Starting salt-syndic daemon: "
+ echo -n "Starting salt-syndic daemon: "
if [ -f $SUSE_RELEASE ]; then
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-syndic -d $CONFIG_ARGS
rc_status -v
elif [ -e $DEBIAN_VERSION ]; then
findproc
if [ -n "$PROC_LIST" ]; then
- echo -n "already started, lock file found"
+ echo -n "already started, lock file found"
RETVAL=1
elif /usr/bin/python /usr/bin/salt-syndic -d; then
echo -n "OK"
@@ -72,7 +72,7 @@ start() {
}
stop() {
- echo -n $"Stopping salt-syndic daemon: "
+ echo -n "Stopping salt-syndic daemon: "
if [ -f $SUSE_RELEASE ]; then
killproc -TERM /usr/bin/salt-syndic
rc_status -v
@@ -127,7 +127,7 @@ case "$1" in
RETVAL=$?
;;
*)
- echo $"Usage: $0 {start|stop|status|restart|reload|force-reload}"
+ echo "Usage: $0 {start|stop|status|restart|reload|force-reload}"
exit 1
;;
esac
diff --git a/debian/salt-syndic.install b/debian/salt-syndic.install
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/debian/salt-syndic.manpages b/debian/salt-syndic.manpages
new file mode 100644
index 000000000000..09238dc4e1a8
--- /dev/null
+++ b/debian/salt-syndic.manpages
@@ -0,0 +1 @@
+doc/man/salt-syndic.1
diff --git a/debian/salt.debhelper.log b/debian/salt.debhelper.log
deleted file mode 100644
index 8609f659ff84..000000000000
--- a/debian/salt.debhelper.log
+++ /dev/null
@@ -1,48 +0,0 @@
-dh_auto_configure
-dh_auto_build
-dh_auto_test
-dh_prep
-dh_installdirs
-dh_auto_install
-dh_install
-dh_installdocs
-dh_installchangelogs
-dh_installexamples
-dh_installman
-dh_installcatalogs
-dh_installcron
-dh_installdebconf
-dh_installemacsen
-dh_installifupdown
-dh_installinfo
-dh_pysupport
-override_dh_installinit dh_installinit
-override_dh_installinit dh_installinit
-override_dh_installinit dh_installinit
-dh_installinit
-dh_installmenu
-dh_installmime
-dh_installmodules
-dh_installlogcheck
-dh_installlogrotate
-dh_installpam
-dh_installppp
-dh_installudev
-dh_installwm
-dh_installxfonts
-dh_installgsettings
-dh_bugfiles
-dh_ucf
-dh_lintian
-dh_gconf
-dh_icons
-dh_perl
-dh_usrlocal
-dh_link
-dh_compress
-dh_fixperms
-dh_installdeb
-dh_gencontrol
-dh_md5sums
-dh_builddeb
-dh_builddeb
diff --git a/debian/salt.postinst.debhelper b/debian/salt.postinst.debhelper
deleted file mode 100644
index d081710f0ebe..000000000000
--- a/debian/salt.postinst.debhelper
+++ /dev/null
@@ -1,20 +0,0 @@
-# Automatically added by dh_pysupport
-if which update-python-modules >/dev/null 2>&1; then
- update-python-modules salt.public
-fi
-# End automatically added section
-# Automatically added by dh_installinit
-if [ -x "/etc/init.d/salt-master" ]; then
- update-rc.d salt-master defaults >/dev/null || exit $?
-fi
-# End automatically added section
-# Automatically added by dh_installinit
-if [ -x "/etc/init.d/salt-minion" ]; then
- update-rc.d salt-minion defaults >/dev/null || exit $?
-fi
-# End automatically added section
-# Automatically added by dh_installinit
-if [ -x "/etc/init.d/salt-syndic" ]; then
- update-rc.d salt-syndic defaults >/dev/null || exit $?
-fi
-# End automatically added section
diff --git a/debian/salt.postrm.debhelper b/debian/salt.postrm.debhelper
deleted file mode 100644
index dabbbd39cbd9..000000000000
--- a/debian/salt.postrm.debhelper
+++ /dev/null
@@ -1,15 +0,0 @@
-# Automatically added by dh_installinit
-if [ "$1" = "purge" ] ; then
- update-rc.d salt-syndic remove >/dev/null
-fi
-# End automatically added section
-# Automatically added by dh_installinit
-if [ "$1" = "purge" ] ; then
- update-rc.d salt-minion remove >/dev/null
-fi
-# End automatically added section
-# Automatically added by dh_installinit
-if [ "$1" = "purge" ] ; then
- update-rc.d salt-master remove >/dev/null
-fi
-# End automatically added section
diff --git a/debian/salt.prerm.debhelper b/debian/salt.prerm.debhelper
deleted file mode 100644
index b4c5f5ab1d4b..000000000000
--- a/debian/salt.prerm.debhelper
+++ /dev/null
@@ -1,5 +0,0 @@
-# Automatically added by dh_pysupport
-if which update-python-modules >/dev/null 2>&1; then
- update-python-modules -c salt.public
-fi
-# End automatically added section
diff --git a/debian/salt.substvars b/debian/salt.substvars
deleted file mode 100644
index 8665d5cf98ab..000000000000
--- a/debian/salt.substvars
+++ /dev/null
@@ -1,3 +0,0 @@
-python:Versions=2.6, 2.7
-python:Depends=python, python-support (>= 0.90.0)
-misc:Depends=
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index 768575ff4721..8457607eac6b 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -13,8 +13,10 @@
Download
#}
{% endif %}
-
Notice something different? We moved the Salt repository to the new saltstack GitHub organization.
-Read why.
+
Recent updates
+
Our IRC channel is now on the popular Freenode network. See you there!
+
The Salt git repository can now be found at the new saltstack GitHub organization.
+Read why.
diff --git a/doc/conf.py b/doc/conf.py
index 4aa81d70b7e3..6ece48c1e4eb 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -3,11 +3,13 @@
import sys, os
docs_basepath = os.path.abspath(os.path.join(os.path.dirname(__file__)))
+addtl_paths = (
+ os.pardir, # salt directory (for autodoc)
+ '_ext', # custom Sphinx extensions
+)
-sys.path.extend([
- os.path.join(docs_basepath, '..'), # salt directory (for autodoc)
- os.path.join(docs_basepath, '_ext'), # Sphinx extensions
-])
+for path in addtl_paths:
+ sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
from salt import __version__
diff --git a/doc/gen_gource b/doc/gen_gource
new file mode 100644
index 000000000000..5a151d333628
--- /dev/null
+++ b/doc/gen_gource
@@ -0,0 +1,7 @@
+#!/bin/bash
+#
+# This script is used to generate the gource video released with each release of Salt
+#
+#
+gource -1280x720 -s 0.5 --stop-at-end --hide filenames --highlight-all-users --file-filter po -a 5 --camera-mode overview --disable-progress --disable-bloom --output-ppm-stream - --output-framerate 30 -o - | ffmpeg -y -r 60 -f image2pipe -vcodec ppm -i - -vcodec libvpx -b 10000K gource.webm
+
diff --git a/doc/man/salt-call.1 b/doc/man/salt-call.1
index dfebb31eccbd..5a435a76b791 100644
--- a/doc/man/salt-call.1
+++ b/doc/man/salt-call.1
@@ -1,4 +1,4 @@
-.TH "SALT-CALL" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-CALL" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.
diff --git a/doc/man/salt-cp.1 b/doc/man/salt-cp.1
index 38256b765f82..e0196f6f49d8 100644
--- a/doc/man/salt-cp.1
+++ b/doc/man/salt-cp.1
@@ -1,4 +1,4 @@
-.TH "SALT-CP" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-CP" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.
@@ -80,7 +80,7 @@ regular expresion>\(aq; example: \(aqos:Arch.*\(aq
.INDENT 0.0
.TP
.B \-Q, \-\-query
-Execute a salt command query, this can be used to find the results os a
+Execute a salt command query, this can be used to find the results of a
previous function call: \-Q test.echo\(aq)
.UNINDENT
.INDENT 0.0
diff --git a/doc/man/salt-key.1 b/doc/man/salt-key.1
index b6142973adf1..58cba2c6c43d 100644
--- a/doc/man/salt-key.1
+++ b/doc/man/salt-key.1
@@ -1,4 +1,4 @@
-.TH "SALT-KEY" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-KEY" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.
diff --git a/doc/man/salt-master.1 b/doc/man/salt-master.1
index f5b34ded028c..067ad4e9d5cd 100644
--- a/doc/man/salt-master.1
+++ b/doc/man/salt-master.1
@@ -1,4 +1,4 @@
-.TH "SALT-MASTER" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-MASTER" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.
diff --git a/doc/man/salt-minion.1 b/doc/man/salt-minion.1
index 8176a5731d00..ac1ed16d7059 100644
--- a/doc/man/salt-minion.1
+++ b/doc/man/salt-minion.1
@@ -1,4 +1,4 @@
-.TH "SALT-MINION" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-MINION" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.
diff --git a/doc/man/salt-run.1 b/doc/man/salt-run.1
index ceb615024ca2..6c88774014b6 100644
--- a/doc/man/salt-run.1
+++ b/doc/man/salt-run.1
@@ -1,4 +1,4 @@
-.TH "SALT-RUN" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-RUN" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.
diff --git a/doc/man/salt-syndic.1 b/doc/man/salt-syndic.1
index 1d35a184b719..a68f744f3426 100644
--- a/doc/man/salt-syndic.1
+++ b/doc/man/salt-syndic.1
@@ -1,4 +1,4 @@
-.TH "SALT-SYNDIC" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT-SYNDIC" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.
diff --git a/doc/man/salt.1 b/doc/man/salt.1
index da3c461a2bb5..4756ef854d37 100644
--- a/doc/man/salt.1
+++ b/doc/man/salt.1
@@ -1,4 +1,4 @@
-.TH "SALT" "1" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT" "1" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt \- salt
.
@@ -37,7 +37,9 @@ salt \(aq*\(aq [ options ] sys.doc
.sp
salt \-E \(aq.*\(aq [ options ] sys.doc cmd
.sp
-salt \-F \(aqoperatingsystem:Arch.*\(aq [ options ] test.ping
+salt \-G \(aqos:Arch.*\(aq [ options ] test.ping
+.sp
+salt \-C \fI\%'G@os\fP:Arch.* and webserv* or \fI\%G@kernel\fP:FreeBSD\(aq [ options ] test.ping
.sp
salt \-Q test.ping
.UNINDENT
@@ -60,6 +62,11 @@ The timeout in seconds to wait for replies from the salt minions.
.UNINDENT
.INDENT 0.0
.TP
+.B \-\-version
+Print the version of salt that is running.
+.UNINDENT
+.INDENT 0.0
+.TP
.B \-E, \-\-pcre
The target expression will be interpreted as a pcre regular expression
rather than a shell glob.
@@ -79,8 +86,35 @@ regular expression>\(aq; example: \(aqos:Arch.*\(aq
.UNINDENT
.INDENT 0.0
.TP
+.B \-C, \-\-compound
+Utilize many target definitions to make the call very granular. This option
+takes a group of targets separated by and or or. The default matcher is a
+glob as usual, if something other than a glob is used preface it with the
+letter denoting the type, example: \(aqwebserv* and \fI\%G@os\fP:Debian or \fI\%E@db.*\fP\(aq
+make sure that the compound target is encapsulated in quotes.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-X, \-\-exsel
+Instead of using shell globs use the return code of a function.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-N, \-\-nodegroup
+Use a predefined compound target defined in the salt master configuration
+file
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-return
+Choose an alternative returner to call on the minion, if an alternative
+returner is used then the return will not come back to the command line
+but will be sent to the specified return system.
+.UNINDENT
+.INDENT 0.0
+.TP
.B \-Q, \-\-query
-Execute a salt command query, this can be used to find the results os a
+Execute a salt command query, this can be used to find the results of a
previous function call: \-Q test.echo\(aq)
.UNINDENT
.INDENT 0.0
@@ -90,6 +124,29 @@ The location of the salt master configuration file, the salt master
settings are required to know where the connections are;
default=/etc/salt/master
.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-raw\-out
+Print the output from the salt command in raw python
+form, this is suitable for re\-reading the output into
+an executing python script with eval.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-text\-out
+Print the output from the salt command in the same
+form the shell would.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-yaml\-out
+Print the output from the salt command in yaml.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-json\-out
+Print the output from the salt command in json.
+.UNINDENT
.SH SEE ALSO
.sp
\fIsalt(7)\fP
diff --git a/doc/man/salt.7 b/doc/man/salt.7
index 34b6185a97de..7545aabab458 100644
--- a/doc/man/salt.7
+++ b/doc/man/salt.7
@@ -1,4 +1,4 @@
-.TH "SALT" "7" "November 27, 2011" "0.9.4" "Salt"
+.TH "SALT" "7" "January 14, 2012" "0.9.5" "Salt"
.SH NAME
salt \- Salt Documentation
.
@@ -219,53 +219,42 @@ gcc — dynamic \fI\%Cython\fP module compiling
.IP \(bu 2
\fI\%Debian / Ubuntu\fP
.IP \(bu 2
+\fI\%Gentoo\fP
+.IP \(bu 2
+\fI\%FreeBSD\fP
+.IP \(bu 2
\fI\%Installing from source\fP
.UNINDENT
.SS Red Hat
-.SS Fedora
.sp
-Salt is currently being built for Fedora. The latest koji build pages can be
-found here:
+We are working to get Salt packages into EPEL. In the meantime you can
+\fByum install salt\-master salt\-minion\fP via our Fedora People
+repository.
+.SS Red Hat Enterprise Linux 5 & 6 or CentOS 5 & 6
.INDENT 0.0
-.IP \(bu 2
-\fI\%Fedora 14\fP
-.IP \(bu 2
-\fI\%Fedora 15\fP
-.IP \(bu 2
-\fI\%Fedora Rawhide\fP
-.UNINDENT
-.SS Red Hat Enterprise Linux 6
-.sp
-Salt is being built for EPEL6. \fI\%Browse the latest builds.\fP
+.IP 1. 3
+Install the \fI\%EPEL\fP repository:
+.IP 2. 3
+Install our repository on FedoraPeople:
.sp
-The ZeroMQ packages in EPEL6 have been tested with this package, but if you
-still have issues these backports may help:
-.INDENT 0.0
-.IP \(bu 2
-\fI\%ZeroMQ backport\fP
-.IP \(bu 2
-\fI\%pyzmq bindings backport\fP
-.IP \(bu 2
-\fI\%Package to set up EPEL repository\fP
-(provided by the EPEL project)
+.nf
+.ft C
+wget \-O /etc/yum.repos.d/epel\-salt.repo \e
+ http://repos.fedorapeople.org/repos/herlo/salt/epel\-salt.repo
+.ft P
+.fi
.UNINDENT
-.SS Red Hat Enterprise Linux 5
-.sp
-Salt is being built for RHEL5, updates will be available soon!
-.sp
-Red Hat Enterprise Linux 5 requires more backports and the use of the Python
-2.6 stack provided in the EPEL repository. All of the listed packages need to
-be installed and the EPEL repository enabled to bring in the needed
-dependencies:
+.SS Fedora 15 & 16
.INDENT 0.0
-.IP \(bu 2
-\fI\%Salt rpm\fP
-.IP \(bu 2
-\fI\%YAML bindings for Python 2.6\fP
-.IP \(bu 2
-\fI\%ZeroMQ backport\fP
-.IP \(bu 2
-\fI\%pyzmq bindings backport\fP
+.IP 1. 3
+Install our repository on FedoraPeople:
+.sp
+.nf
+.ft C
+wget \-O /etc/yum.repos.d/fedora\-salt.repo \e
+ http://repos.fedorapeople.org/repos/herlo/salt/fedora\-salt.repo
+.ft P
+.fi
.UNINDENT
.SS Arch Linux
.sp
@@ -279,75 +268,94 @@ Salt can be easily installed from the Arch Linux AUR in one of two flavors:
.SS Debian / Ubuntu
.SS Ubuntu
.sp
-A PPA is available until we can get packages into apt:
+We are working to get Salt into apt. In the meantime we have a PPA available
+for Lucid:
.sp
.nf
.ft C
aptitude \-y install python\-software\-properties
+add\-apt\-repository ppa:chris\-lea/libpgm
+add\-apt\-repository ppa:chris\-lea/zeromq
add\-apt\-repository ppa:saltstack/salt
aptitude update
aptitude install salt
.ft P
.fi
-.IP "Installing ZeroMQ on Ubuntu Lucid (10.04 LTS)"
+.SS Debian
+.sp
+\fI\%A deb package is currently in testing\fP for inclusion in apt. Until that is
+accepted you can install Salt by downloading the latest \fB.deb\fP in the
+\fI\%downloads section on GitHub\fP and installing that manually:
+.sp
+.nf
+.ft C
+dpkg \-i salt\-0.9.5\&.deb
+.ft P
+.fi
+.IP "Installing ZeroMQ on Squeeze (Debian 6)"
.sp
-The ZeroMQ package is available starting with Maverick but there are \fI\%PPA
-packages available for Lucid\fP for both ZeroMQ and pyzmq. You will need to
-also enable the following PPAs before running the commands above:
+There is a \fI\%python-zmq\fP package available in Debian "wheezy (testing)".
+If you don\(aqt have that repo enabled the best way to install Salt and pyzmq
+is by using \fBpip\fP (or \fBeasy_install\fP):
.sp
.nf
.ft C
-add\-apt\-repository ppa:chris\-lea/libpgm
-add\-apt\-repository ppa:chris\-lea/zeromq
+pip install pyzmq salt
.ft P
.fi
.RE
-.SS Debian
+.SS Gentoo
.sp
-\fI\%A deb package is currently in testing\fP. Until that is accepted you can
-install Salt via \fBeasy_install\fP or \fBpip\fP:
+Salt can be easily installed on Gentoo:
.sp
.nf
.ft C
-pip install salt
+emerge pyyaml m2crypto pycrypto jinja pyzmq
.ft P
.fi
-.IP "Installing ZeroMQ on Squeeze (Debian 6)"
.sp
-ZeroMQ packages are available in squeeze\-backports.
+Then download and install from source:
.INDENT 0.0
.IP 1. 3
-Add the following line to your \fB/etc/apt/sources.list\fP:
+Download the latest source tarball from the GitHub downloads directory for
+the Salt project: \fI\%https://github.com/downloads/saltstack/salt/salt-0.9.5.tar.gz\fP
+.IP 2. 3
+Untar the tarball and run the \fBsetup.py\fP as root:
+.UNINDENT
.sp
.nf
.ft C
-deb http://backports.debian.org/debian\-backports squeeze\-backports main
+tar xvf salt\-0.9.5\&.tar.gz
+cd salt\-0.9.5
+python2 setup.py install
.ft P
.fi
-.IP 2. 3
-Run:
+.SS FreeBSD
+.sp
+Salt is available in the FreeBSD ports tree:
.sp
.nf
.ft C
-aptitude update
-aptitude install libzmq1 python\-zmq
+cd /usr/ports/sysutils/salt && make install clean
.ft P
.fi
-.UNINDENT
+.IP "See also"
+.sp
+\fBfreebsd installation guide\fP
.RE
.SS Installing from source
.INDENT 0.0
.IP 1. 3
Download the latest source tarball from the GitHub downloads directory for
-the Salt project: \fI\%https://github.com/downloads/saltstack/salt/salt-0.9.4.tar.gz\fP
+the Salt project: \fI\%https://github.com/downloads/saltstack/salt/salt-0.9.5.tar.gz\fP
.IP 2. 3
Untar the tarball and run the \fBsetup.py\fP as root:
.UNINDENT
.sp
.nf
.ft C
-tar xvf salt\-0.9.4\&.tar.gz
-cd salt\-0.9.4
+tar xvf salt\-0.9.5\&.tar.gz
+cd salt\-0.9.5
python2 setup.py install
.ft P
.fi
@@ -743,6 +751,16 @@ You can specify multiple \fIstate declarations\fP under
an \fIID declaration\fP. For example, a quick modification to our
\fBwebserver.sls\fP to also start Apache if it is not running:
.sp
+.nf
+.ft C
+apache:
+ pkg:
+ \- installed
+ service:
+ \- running
+.ft P
+.fi
+.sp
Try stopping Apache before running \fBstate.highstate\fP once again and observe
the output.
.SS Expand the SLS module
@@ -778,6 +796,23 @@ webserver so we don\(aqt want Salt to install our HTML file until Apache is
installed and running. Include the following at the bottom of your
\fBwebserver/init.sls\fP file:
.sp
+.nf
+.ft C
+apache:
+ pkg:
+ \- installed
+ service:
+ \- running
+
+/var/www/index.html: # ID declaration
+ file: # state declaration
+ \- managed # function
+ \- source: salt://webserver/index.html # function arg
+ \- require: # requisite declaration
+ \- pkg: apache # requisite reference
+.ft P
+.fi
+.sp
Again in \fBline 1\fP is the \fIID declaration\fP. In this example it is the
location we want to install our custom HTML file. (\fBNote:\fP the default
location that Apache serves may differ from the above on your OS or distro.
@@ -819,7 +854,7 @@ master using Salt\(aqs File Server:
.sp
.nf
.ft C
-salt \(aq*\(aq salt.highstate
+salt \(aq*\(aq state.highstate
.ft P
.fi
.sp
@@ -835,6 +870,23 @@ For example, if you use Salt to install an Apache virtual host
configuration file and want to restart Apache whenever that file is changed
you could modify our Apache example from earlier as follows:
.sp
+.nf
+.ft C
+/etc/httpd/extra/httpd\-vhosts.conf:
+ file:
+ \- managed
+ \- source: salt://webserver/httpd\-vhosts.conf
+
+apache:
+ pkg:
+ \- installed
+ service:
+ \- running
+ \- watch:
+ \- file: /etc/httpd/extra/httpd\-vhosts.conf
+.ft P
+.fi
+.sp
If the pkg and service names differ on your OS or distro of choice you can
specify each one separately using a \fIname declaration\fP which
explained in \fBPart 3\fP.
@@ -1014,6 +1066,25 @@ declaration\fP. For example the previous example is a bit more maintainable if
rewritten as the following:
.sp
\fBmywebsite.sls\fP:
+.sp
+.nf
+.ft C
+include:
+ \- apache
+
+extend:
+ apache
+ service:
+ \- watch:
+ \- file: mywebsite
+
+mywebsite:
+ file:
+ \- managed
+ \- name: /etc/httpd/extra/httpd\-vhosts.conf
+ \- source: salt://httpd\-vhosts.conf
+.ft P
+.fi
.SS \fINames declaration\fP
.sp
Even more powerful is using a \fInames declaration\fP to override the
@@ -1058,12 +1129,8 @@ is hosted by Google Groups. It is open to new members.
\fI\%http://groups.google.com/group/salt-users\fP
.SS IRC
.sp
-The Salt IRC channel is hosted on the \fI\%OFTC\fP network. Connect to the OFTC
-server:
-.sp
-\fI\%irc://irc.oftc.net:6667\fP
-.sp
-and join us in \fB#salt\fP.
+The \fB#salt\fP IRC channel is hosted on the popular \fI\%Freenode\fP network. You
+can use the \fI\%Freenode webchat client\fP right from your browser.
.SS Follow on Github
.sp
The Salt code is developed via Github. Follow Salt for constant updates on what
@@ -1129,7 +1196,7 @@ executions to manipulating the flow of how data is handled by Salt.
The minion execution modules or just \fBmodules\fP are the core to what salt is
and does. These modules are found in:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules\fP
.sp
These modules are what is called by the salt command line and the salt client
api. Adding modules is done by simply adding additional python modules to the
@@ -1146,7 +1213,7 @@ of execution modules and types to specific salt minions.
.sp
The code used to generate the Salt grains can be found here:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/grains\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/grains\fP
.SS States
.sp
Salt supports state enforcement, this makes Salt a high speed and very efficient
@@ -1154,7 +1221,7 @@ solution for system configuration management.
.sp
States can be easily added to Salt by dropping a new state module in:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/states\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/states\fP
.SS Renderers
.sp
Salt states are controlled by simple data structures, these structures can be
@@ -1165,7 +1232,7 @@ it.
.sp
The existing renderers can be found here:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/renderers\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/renderers\fP
.SS Returners
.sp
The salt commands all produce a return value, that return value is sent to the
@@ -1175,7 +1242,7 @@ from an SQL or NOSQL database, to a custom application made to use Salt.
.sp
The existing returners can be found here:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/returners\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/returners\fP
.SS Runners
.sp
Sometimes a certain application can be made to execute and run from the
@@ -1185,7 +1252,7 @@ act as a generic interface for encapsulating master side executions.
.sp
Existing Salt runners are located here:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/runners\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/runners\fP
.SH MODULES
.sp
Salt modules are the functions called by the \fBsalt\fP command.
@@ -1321,9 +1388,9 @@ regardless of what the actual module is named.
.sp
The package manager modules are the best example of using the \fB__virtual__\fP
function:
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules/pacman.py\fP
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules/yumpkg.py\fP
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules/apt.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules/pacman.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules/yumpkg.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules/apt.py\fP
.SS Documentation
.sp
Salt modules are self documenting, the \fBsys.doc()\fP function will return the
@@ -1397,7 +1464,7 @@ functions for salt, but to stand as examples for building out more Salt
modules.
.sp
The existing modules can be found here:
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules\fP
.sp
The most simple module is the test module, it contains the simplest salt
function, test.ping:
@@ -1447,7 +1514,6 @@ _
T{
\fBcluster\fP
T} T{
-The cluster module is used to distribute and activate salt HA cluster
T}
_
T{
@@ -1459,7 +1525,6 @@ _
T{
\fBcp\fP
T} T{
-Minion side functions for salt\-cp
T}
_
T{
@@ -1565,7 +1630,6 @@ _
T{
\fBpublish\fP
T} T{
-Publish a command from a minion to a target
T}
_
T{
@@ -1595,7 +1659,7 @@ _
T{
\fBservice\fP
T} T{
-Top level package command wrapper, used to translate the os detected by the
+The default service module, if not otherwise specified salt will fall back
T}
_
T{
@@ -1619,7 +1683,6 @@ _
T{
\fBstate\fP
T} T{
-Control the state system on the minion
T}
_
T{
@@ -1774,8 +1837,26 @@ salt \(aq*\(aq pkg.available_version
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.apt.install(pkg, refresh=False)
+.B salt.modules.apt.install(pkg, refresh=False, repo=\(aq\(aq, skip_verify=False)
Install the passed package
+.INDENT 7.0
+.TP
+.B pkg
+The name of the package to be installed
+.TP
+.B refresh
+False
+Update apt before continuing
+.TP
+.B repo
+(default)
+Specify a package repository to install from
+(e.g., \fBapt\-get \-t unstable install somepackage\fP)
+.TP
+.B skip_verify
+False
+Skip the GPG verification check (e.g., \fB\-\-allow\-unauthenticated\fP)
+.UNINDENT
.sp
Return a dict containing the new package names and versions:
.sp
@@ -1816,8 +1897,8 @@ salt \(aq*\(aq pkg.list_pkgs
.INDENT 0.0
.TP
.B salt.modules.apt.purge(pkg)
-Remove a package via aptitude along with all configuration files and
-unused dependencies.
+Remove a package via \fBapt\-get purge\fP along with all configuration
+files and unused dependencies.
.sp
Returns a list containing the names of the removed packages
.sp
@@ -1853,7 +1934,7 @@ salt \(aq*\(aq pkg.refresh_db
.INDENT 0.0
.TP
.B salt.modules.apt.remove(pkg)
-Remove a single package via \fBaptitude remove\fP
+Remove a single package via \fBapt\-get remove\fP
.sp
Returns a list containing the names of the removed packages.
.sp
@@ -1868,7 +1949,7 @@ salt \(aq*\(aq pkg.remove
.INDENT 0.0
.TP
.B salt.modules.apt.upgrade(refresh=True)
-Upgrades all packages via aptitude full\-upgrade
+Upgrades all packages via \fBapt\-get dist\-upgrade\fP
.sp
Returns a list of dicts containing the package names, and the new and old
versions:
@@ -1912,7 +1993,7 @@ A module to wrap archive calls
.INDENT 0.0
.TP
.B salt.modules.archive.gunzip(gzipfile)
-Uses the gzip command to create gzip files
+Uses the gunzip command to unpack gzip files
.sp
CLI Example to create \fB/tmp/sourcefile.txt\fP:
.sp
@@ -2083,16 +2164,6 @@ salt \(aq*\(aq buttervm.local_images
.ft P
.fi
.UNINDENT
-.SS salt.modules.cluster
-.sp
-The cluster module is used to distribute and activate salt HA cluster
-components
-.INDENT 0.0
-.TP
-.B salt.modules.cluster.distrib(minions, master_conf, master_pem, conf_file)
-Set up this minion as a failover master \- only intended for use by the
-cluster interface
-.UNINDENT
.SS salt.modules.cmd
.sp
A module for shelling out
@@ -2123,7 +2194,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq cat
+salt \(aq*\(aq cmd.has_exec cat
.ft P
.fi
.UNINDENT
@@ -2149,7 +2220,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq cmd.run "ls \-l | grep foo | awk \(aq{print $2}\(aq"
+salt \(aq*\(aq cmd.run "ls \-l | awk \(aq/foo/{print $2}\(aq"
.ft P
.fi
.UNINDENT
@@ -2162,7 +2233,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq cmd.run_all "ls \-l | grep foo | awk \(aq{print $2}\(aq"
+salt \(aq*\(aq cmd.run_all "ls \-l | awk \(aq/foo/{print $2}\(aq"
.ft P
.fi
.UNINDENT
@@ -2175,7 +2246,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq cmd.run "ls \-l | grep foo | awk \(aq{print $2}\(aq"
+salt \(aq*\(aq cmd.run_stderr "ls \-l | awk \(aq/foo/{print $2}\(aq"
.ft P
.fi
.UNINDENT
@@ -2188,58 +2259,22 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq cmd.run "ls \-l | grep foo | awk \(aq{print $2}\(aq"
+salt \(aq*\(aq cmd.run_stdout "ls \-l | awk \(aq/foo/{print $2}\(aq"
.ft P
.fi
.UNINDENT
-.SS salt.modules.cp
-.sp
-Minion side functions for salt\-cp
-.INDENT 0.0
-.TP
-.B salt.modules.cp.cache_dir(path, env=\(aqbase\(aq)
-Download and cache everything under a directory from the master
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.cache_file(path, env=\(aqbase\(aq)
-Used to cache a single file in the local salt\-master file cache.
-.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.cp.cache_files(paths, env=\(aqbase\(aq)
-Used to gather many files from the master, the gathered files will be
-saved in the minion cachedir reflective to the paths retrieved from the
-master.
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.cache_master(env=\(aqbase\(aq)
-Retrieve all of the files on the master and cache them locally
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.get_file(path, dest, env=\(aqbase\(aq)
-Used to get a single file from the salt master
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.hash_file(path, env=\(aqbase\(aq)
-Return the hash of a file, to get the hash of a file on the
-salt master file server prepend the path with salt://
-otherwise, prepend the file with / for a local file.
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.list_master(env=\(aqbase\(aq)
-Retrieve all of the files on the master and cache them locally
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.cp.recv(files, dest)
-Used with salt\-cp, pass the files dict, and the destination.
+.B salt.modules.cmd.which(cmd)
+Returns the path of an executable available on the minion, None otherwise
.sp
-This function receives small fast copy files from the master via salt\-cp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq cmd.which cat
+.ft P
+.fi
.UNINDENT
.SS salt.modules.cron
.sp
@@ -2259,11 +2294,29 @@ salt \(aq*\(aq cron.list_tab root
.UNINDENT
.INDENT 0.0
.TP
+.B salt.modules.cron.ls(user)
+Return the contents of the specified user\(aqs crontab
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq cron.ls root
+.ft P
+.fi
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.cron.raw_cron(user)
Return the contents of the user\(aqs crontab
.UNINDENT
.INDENT 0.0
.TP
+.B salt.modules.cron.rm(user, minute, hour, dom, month, dow, cmd)
+Remove a cron job for a specified user.
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.cron.rm_job(user, minute, hour, dom, month, dow, cmd)
Remove a cron job up for a specified user.
.UNINDENT
@@ -2290,6 +2343,19 @@ salt \(aq*\(aq cron.set_special @hourly \(aqecho foobar\(aq
Module for gathering disk information
.INDENT 0.0
.TP
+.B salt.modules.disk.inodeusage()
+Return inode usage information for volumes mounted on this minion
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq disk.inodeusage
+.ft P
+.fi
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.disk.usage()
Return usage information for volumes mounted on this minion
.sp
@@ -2319,7 +2385,7 @@ salt \(aq*\(aq pkg.available_version
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.ebuild.install(pkg, refresh=False)
+.B salt.modules.ebuild.install(pkg, refresh=False, **kwargs)
Install the passed package
.sp
Return a dict containing the new package names and versions:
@@ -2464,6 +2530,23 @@ Manage information about files on the minion, set/read user, group, and mode
data
.INDENT 0.0
.TP
+.B salt.modules.file.append(path, *args)
+Append text to the end of a file
+.sp
+Usage:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq file.append /etc/motd \e
+ "With all thine offerings thou shalt offer salt."\e
+ "Salt is what makes things taste bad when it isn\(aqt in them."
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.file.chgrp(path, group)
Change the group of a file
.sp
@@ -2490,6 +2573,61 @@ salt \(aq*\(aq file.chown /etc/passwd root root
.UNINDENT
.INDENT 0.0
.TP
+.B salt.modules.file.comment(path, regex, char=\(aq#\(aq, backup=\(aq.bak\(aq)
+Comment out specified lines in a file
+.INDENT 7.0
+.TP
+.B path
+The full path to the file to be edited
+.TP
+.B regex
+A regular expression used to find the lines that are to be commented;
+this pattern will be wrapped in parenthesis and will move any
+preceding/trailing \fB^\fP or \fB$\fP characters outside the parenthesis
+(e.g., the pattern \fB^foo$\fP will be rewritten as \fB^(foo)$\fP)
+.TP
+.B char
+\fB#\fP
+The character to be inserted at the beginning of a line in order to
+comment it out
+.TP
+.B backup
+\fB.bak\fP
+The file will be backed up before edit with this file extension
+.IP Warning
+This backup will be overwritten each time \fBsed\fP / \fBcomment\fP /
+\fBuncomment\fP is called, meaning the backup only preserves the file
+as it was before the most recent invocation.
+.RE
+.UNINDENT
+.sp
+Usage:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq file.comment /etc/modules pcspkr
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.modules.file.contains(path, text, limit=\(aq\(aq)
+Return True if the file at \fBpath\fP contains \fBtext\fP
+.sp
+Usage:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq file.contains /etc/crontab \(aqmymaintenance.sh\(aq
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.file.find(path, *opts)
Approximate the Unix find(1) command and return a list of paths that
meet the specified critera.
@@ -2606,16 +2744,16 @@ CLI Examples:
.sp
.nf
.ft C
-salt \(aq*\(aq / type=f name=\e*.bak size=+10m
-salt \(aq*\(aq /var mtime=+30d size=+10m print=path,size,mtime
-salt \(aq*\(aq /var/log name=\e*.[0\-9] mtime=+30d size=+10m delete
+salt \(aq*\(aq file.find / type=f name=\e*.bak size=+10m
+salt \(aq*\(aq file.find /var mtime=+30d size=+10m print=path,size,mtime
+salt \(aq*\(aq file.find /var/log name=\e*.[0\-9] mtime=+30d size=+10m delete
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
.B salt.modules.file.get_gid(path)
-Return the user that owns a given file
+Return the id of the group that owns a given file
.sp
CLI Example:
.sp
@@ -2628,7 +2766,7 @@ salt \(aq*\(aq file.get_gid /etc/passwd
.INDENT 0.0
.TP
.B salt.modules.file.get_group(path)
-Return the user that owns a given file
+Return the group that owns a given file
.sp
CLI Example:
.sp
@@ -2661,14 +2799,14 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq /etc/passwd sha512
+salt \(aq*\(aq file.get_sum /etc/passwd sha512
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
.B salt.modules.file.get_uid(path)
-Return the user that owns a given file
+Return the id of the user that owns a given file
.sp
CLI Example:
.sp
@@ -2719,62 +2857,175 @@ salt \(aq*\(aq file.group_to_gid root
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.file.set_mode(path, mode)
-Set the more of a file
+.B salt.modules.file.sed(path, before, after, limit=\(aq\(aq, backup=\(aq.bak\(aq, options=\(aq\-r \-e\(aq, flags=\(aqg\(aq)
+Make a simple edit to a file
.sp
-CLI Example:
+Equivalent to:
.sp
.nf
.ft C
-salt \(aq*\(aq file.set_mode /etc/passwd 0644
+sed "// s///"
.ft P
.fi
-.UNINDENT
-.INDENT 0.0
+.INDENT 7.0
.TP
-.B salt.modules.file.uid_to_user(uid)
-Convert a uid to a user name
+.B path
+The full path to the file to be edited
+.TP
+.B before
+A pattern to find in order to replace with \fBafter\fP
+.TP
+.B after
+Text that will replace \fBbefore\fP
+.TP
+.B limit
+\fB\(aq\(aq\fP
+An initial pattern to search for before searching for \fBbefore\fP
+.TP
+.B backup
+\fB.bak\fP
+The file will be backed up before edit with this file extension;
+\fBWARNING:\fP each call to \fBsed\fP/\fBcomment\fP/\fBuncomment\fP will
+overwrite this backup
+.TP
+.B options
+\fB\-r \-e\fP
+Options to pass to sed
+.TP
+.B flags
+\fBg\fP
+Flags to modify the sed search; e.g., \fBi\fP for case\-insensitive pattern
+matching
+.UNINDENT
.sp
-CLI Example:
+Forward slashes and single quotes will be escaped automatically in the
+\fBbefore\fP and \fBafter\fP patterns.
+.sp
+Usage:
.sp
.nf
.ft C
-salt \(aq*\(aq file.uid_to_user 0
+salt \(aq*\(aq file.sed /etc/httpd/httpd.conf \(aqLogLevel warn\(aq \(aqLogLevel info\(aq
.ft P
.fi
+.sp
+New in version 0.9.5.
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.file.user_to_uid(user)
-Convert user name to a gid
+.B salt.modules.file.set_mode(path, mode)
+Set the mode of a file
.sp
CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq file.user_to_uid root
+salt \(aq*\(aq file.set_mode /etc/passwd 0644
.ft P
.fi
.UNINDENT
-.SS salt.modules.freebsdpkg
+.INDENT 0.0
+.TP
+.B salt.modules.file.touch(name, atime=None, mtime=None)
+Just like \(aqnix\(aqs "touch" command, create a file if it
+doesn\(aqt exist or simply update the atime and mtime if
+it already does.
+.INDENT 7.0
+.TP
+.B atime:
+Access time in Unix epoch time
+.TP
+.B mtime:
+Last modification in Unix epoch time
+.TP
+.B Usage::
+salt \(aq*\(aq file.touch /var/log/emptyfile
+.UNINDENT
.sp
-Package support for FreeBSD
+New in version 0.9.5.
+.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.freebsdpkg.available_version(name)
-The available version of the package in the repository
+.B salt.modules.file.uid_to_user(uid)
+Convert a uid to a user name
.sp
CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq pkg.available_version
+salt \(aq*\(aq file.uid_to_user 0
+.ft P
+.fi
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.modules.file.uncomment(path, regex, char=\(aq#\(aq, backup=\(aq.bak\(aq)
+Uncomment specified commented lines in a file
+.INDENT 7.0
+.TP
+.B path
+The full path to the file to be edited
+.TP
+.B regex
+A regular expression used to find the lines that are to be uncommented.
+This regex should not include the comment character. A leading \fB^\fP
+character will be stripped for convenience (for easily switching
+between comment() and uncomment()).
+.TP
+.B char
+\fB#\fP
+The character to remove in order to uncomment a line; if a single
+whitespace character follows the comment it will also be removed
+.TP
+.B backup
+\fB.bak\fP
+The file will be backed up before edit with this file extension;
+\fBWARNING:\fP each call to \fBsed\fP/\fBcomment\fP/\fBuncomment\fP will
+overwrite this backup
+.UNINDENT
+.sp
+Usage:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq file.uncomment /etc/hosts.deny \(aqALL: PARANOID\(aq
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.modules.file.user_to_uid(user)
+Convert user name to a uid
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq file.user_to_uid root
+.ft P
+.fi
+.UNINDENT
+.SS salt.modules.freebsdpkg
+.sp
+Package support for FreeBSD
+.INDENT 0.0
+.TP
+.B salt.modules.freebsdpkg.available_version(name)
+The available version of the package in the repository
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq pkg.available_version
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.freebsdpkg.install(name, refresh=False)
+.B salt.modules.freebsdpkg.install(name, **kwargs)
Install the passed package
.sp
Return a dict containing the new package names and versions:
@@ -2831,7 +3082,7 @@ salt \(aq*\(aq pkg.purge
.INDENT 0.0
.TP
.B salt.modules.freebsdpkg.refresh_db()
-Update the ports tree with portsnap. If the ports tre does not exist it
+Update the ports tree with portsnap. If the ports tree does not exist it
will be downloaded and set up.
.sp
CLI Example:
@@ -2897,7 +3148,7 @@ salt \(aq*\(aq pkg.version
Control aspects of the grains data
.INDENT 0.0
.TP
-.B salt.modules.grains.item(key)
+.B salt.modules.grains.item(key=None)
Return a singe component of the grains data
.sp
CLI Example:
@@ -2921,6 +3172,19 @@ salt \(aq*\(aq grains.items
.ft P
.fi
.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.modules.grains.ls()
+Return a list of all available grains
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq grains.ls
+.ft P
+.fi
+.UNINDENT
.SS salt.modules.groupadd
.sp
Manage groups on Linux
@@ -2940,13 +3204,13 @@ salt \(aq*\(aq group.add foo 3456
.INDENT 0.0
.TP
.B salt.modules.groupadd.chgid(name, gid)
-Change the default shell of the user
+Change the gid for a named group
.sp
CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq user.chshell foo /bin/zsh
+salt \(aq*\(aq group.chgid foo 4376
.ft P
.fi
.UNINDENT
@@ -3065,7 +3329,7 @@ salt \(aq*\(aq hosts.rm_host
.INDENT 0.0
.TP
.B salt.modules.hosts.set_host(ip, alias)
-Set the host entry in th hosts file for the given ip, this will overwrite
+Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
.INDENT 7.0
.TP
@@ -3092,7 +3356,7 @@ salt \(aq*\(aq kmod.available
.INDENT 0.0
.TP
.B salt.modules.kmod.check_available(mod)
-Check to see if the speciified kernel module is available
+Check to see if the specified kernel module is available
.sp
CLI Example:
.sp
@@ -3168,7 +3432,7 @@ salt \(aq*\(aq sysctl.get net.ipv4.ip_forward
.INDENT 0.0
.TP
.B salt.modules.linux_sysctl.persist(name, value, config=\(aq/etc/sysctl.conf\(aq)
-Assign and persist a simple sysctl paramater for this minion
+Assign and persist a simple sysctl parameter for this minion
.sp
CLI Example:
.sp
@@ -3459,7 +3723,7 @@ salt \(aq*\(aq pkg.available_version
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.pacman.install(name, refresh=False)
+.B salt.modules.pacman.install(name, refresh=False, **kwargs)
Install the passed package, add refresh=True to install with an \-Sy
.sp
Return a dict containing the new package names and versions:
@@ -3583,36 +3847,6 @@ salt \(aq*\(aq pkg.version
.ft P
.fi
.UNINDENT
-.SS salt.modules.publish
-.sp
-Publish a command from a minion to a target
-.INDENT 0.0
-.TP
-.B salt.modules.publish.publish(tgt, fun, arg=None, expr_form=\(aqglob\(aq, returner=\(aq\(aq)
-Publish a command from the minion out to other minions, publications need
-to be enabled on the Salt master and the minion needs to have permission
-to publish the command. The Salt master will also prevent a recursive
-publication loop, this means that a minion cannot command another minion
-to command another minion as that would create an infinite command loop.
-.sp
-The arguments sent to the minion publish function are separated with
-commas. This means that for a minion executing a command with multiple
-args it will look like this:
-.sp
-.nf
-.ft C
-salt system.example.com publish.publish \(aq*\(aq user.add \(aqfoo,1020,1020\(aq
-.ft P
-.fi
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt system.example.com publish.publish \(aq*\(aq cmd.run \(aqls \-la /tmp\(aq
-.ft P
-.fi
-.UNINDENT
.SS salt.modules.puppet
.sp
Execute puppet routines
@@ -3649,13 +3883,13 @@ salt \(aq*\(aq group.add foo 3456
.INDENT 0.0
.TP
.B salt.modules.pw_group.chgid(name, gid)
-Change the default shell of the user
+Change the gid for a named group
.sp
CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq user.chshell foo /bin/zsh
+salt \(aq*\(aq group.chgid foo 4376
.ft P
.fi
.UNINDENT
@@ -3823,7 +4057,7 @@ salt \(aq*\(aq user.info root
.INDENT 0.0
.TP
.B salt.modules.pw_user.list_groups(name)
-Return a list of groups the named user belings to
+Return a list of groups the named user belongs to
.sp
CLI Example:
.sp
@@ -3856,8 +4090,8 @@ Set the enforcing mode
.UNINDENT
.SS salt.modules.service
.sp
-Top level package command wrapper, used to translate the os detected by the
-grains to the correct service manager
+The default service module, if not otherwise specified salt will fall back
+to this basic module
.INDENT 0.0
.TP
.B salt.modules.service.restart(name)
@@ -3924,7 +4158,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq shadow.user root
+salt \(aq*\(aq shadow.info root
.ft P
.fi
.UNINDENT
@@ -3939,7 +4173,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq root $1$UYCIxa628.9qXjpQCjM4a..
+salt \(aq*\(aq shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a..
.ft P
.fi
.UNINDENT
@@ -3947,12 +4181,12 @@ salt \(aq*\(aq root $1$UYCIxa628.9qXjpQCjM4a..
.SS Apache Solr Salt Module
.sp
Author: Jed Glazner
-Version: 0.2
-Modified: 9/20/2011
+Version: 0.2.1
+Modified: 12/09/2011
.sp
This module uses http requests to talk to the apache solr request handlers
to gather information and report errors. Because of this the minion doesn\(aqt
-nescessarily need to reside on the actual slave. However if you want to
+necessarily need to reside on the actual slave. However if you want to
use the signal function the minion must reside on the physical solr host.
.sp
This module supports multi\-core and standard setups. Certain methods are
@@ -4019,7 +4253,7 @@ Get verbose output
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.abort_import(handler, core_name=None, verbose=False)
+.B salt.modules.solr.abort_import(handler, host=None, core_name=None, verbose=False)
MASTER ONLY
Aborts an existing import command to the specified handler.
This command can only be run if the minion is configured with
@@ -4030,20 +4264,24 @@ solr.type=master
str
The name of the data import handler.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core
str (None)
The core the handler belongs to.
.TP
.B verbose
-bool (False)
+boolean (False)
Run the command with verbose output.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4051,35 +4289,39 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq solr.abort_import dataimport music {\(aqclean\(aq:True}
+salt \(aq*\(aq solr.abort_import dataimport None music {\(aqclean\(aq:True}
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.backup(core_name=None, append_core_to_path=False)
+.B salt.modules.solr.backup(host=None, core_name=None, append_core_to_path=False)
Tell solr to make a backup. This method can be misleading since it uses the
backup api. If an error happens during the backup you are not notified.
The status: \(aqOK\(aq in the response simply means that solr received the
request successfully.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.TP
.B append_core_to_path
-str (False)
+boolean (False)
If True add the name of the core to the backup path. Assumes that
minion backup path is not None.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4093,27 +4335,39 @@ salt \(aq*\(aq solr.backup music
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.core_status(core_name)
+.B salt.modules.solr.core_status(host=None, core_name=None)
MULTI\-CORE HOSTS ONLY
Get the status for a given core or all cores if no core is specified
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str
The name of the core to reload
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+.ft P
+.fi
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq solr.core_status None music
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.delta_import(handler, core_name=None, options={}, extra=[])
+.B salt.modules.solr.delta_import(handler, host=None, core_name=None, options={}, extra=[])
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
solr.type=master
@@ -4123,6 +4377,10 @@ solr.type=master
str
The name of the data import handler.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core
str (None)
The core the handler belongs to.
@@ -4138,11 +4396,11 @@ dict ([])
Extra name value pairs to pass to the handler. eg ["name=value"]
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4150,13 +4408,13 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq solr.delta_import dataimport music {\(aqclean\(aq:True}
+salt \(aq*\(aq solr.delta_import dataimport None music {\(aqclean\(aq:True}
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.full_import(handler, core_name=None, options={}, extra=[])
+.B salt.modules.solr.full_import(handler, host=None, core_name=None, options={}, extra=[])
MASTER ONLY
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
@@ -4167,6 +4425,10 @@ solr.type=master
str
The name of the data import handler.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core
str (None)
The core the handler belongs to.
@@ -4182,11 +4444,11 @@ dict ([])
Extra name value pairs to pass to the handler. e.g. ["name=value"]
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4194,13 +4456,13 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq solr.full_import dataimport music {\(aqclean\(aq:True}
+salt \(aq*\(aq solr.full_import dataimport None music {\(aqclean\(aq:True}
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.import_status(handler, core_name=None, verbose=False)
+.B salt.modules.solr.import_status(handler, host=None, core_name=None, verbose=False)
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
solr.type: \(aqmaster\(aq
@@ -4210,20 +4472,24 @@ solr.type: \(aqmaster\(aq
str
The name of the data import handler.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core
str (None)
The core the handler belongs to.
.TP
.B verbose
-bool (False)
+boolean (False)
Specifies verbose output
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4231,28 +4497,32 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq solr.import_status dataimport music False
+salt \(aq*\(aq solr.import_status dataimport None music False
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.is_replication_enabled(core_name=None)
-USED ONLY BY SLAVES
+.B salt.modules.solr.is_replication_enabled(host=None, core_name=None)
+SLAVE CALL
Check for errors, and determine if a slave is replicating or not.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4278,11 +4548,11 @@ The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return: dict:
+Return: dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4296,24 +4566,30 @@ salt \(aq*\(aq solr.lucene_version
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.match_index_versions(core_name=None)
-SLAVE ONLY
-Verifies that the master and the slave versions are in sync by comparing
-the index version. If you are constantly pushing updates the index the
-master and slave versions will seldom match.
+.B salt.modules.solr.match_index_versions(host=None, core_name=None)
+SLAVE CALL
+Verifies that the master and the slave versions are in sync by
+comparing the index version. If you are constantly pushing updates
+the index the master and slave versions will seldom match. A solution
+to this is pause indexing every so often to allow the slave to replicate
+and then call this method before allowing indexing to resume.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4327,28 +4603,32 @@ salt \(aq*\(aq solr.match_index_versions music
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.optimize(core_name=None)
-Optimize the solr index. Optimizing the index is a good way to keep Search
-queries fast, but it is a very expensive operation. The ideal process is to
-run this with a master/slave configuration. Then you can optimize the
-master, and push the optimized index to the slaves. If you are running a
-single solr instance, or if you are going to run this on a slave be aware
-than search performance will be horrible while this command is being run.
-Additionally it can take a LONG time to run and your http request may
-timeout. If that happens adjust your timeout settings.
+.B salt.modules.solr.optimize(host=None, core_name=None)
+Optimize the solr index. Optimizing the index is a good way to keep
+search queries fast, but it is a very expensive operation. The ideal
+process is to run this with a master/slave configuration. Then you
+can optimize the master, and push the optimized index to the slaves.
+If you are running a single solr instance, or if you are going to run
+this on a slave be aware that search performance will be horrible
+while this command is being run. Additionally it can take a LONG time
+to run and your http request may timeout. If that happens adjust your
+timeout settings.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4362,21 +4642,25 @@ salt \(aq*\(aq solr.optimize music
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.ping(core_name=None)
+.B salt.modules.solr.ping(host=None, core_name=None)
Does a health check on solr, makes sure solr can talk to the indexes.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4390,7 +4674,7 @@ salt \(aq*\(aq solr.ping music
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.reload_core(core_name)
+.B salt.modules.solr.reload_core(host=None, core_name=None)
MULTI\-CORE HOSTS ONLY
Load a new core from the same configuration as an existing registered core.
While the "new" core is initializing, the "old" one will continue to accept
@@ -4398,22 +4682,36 @@ requests. Once it has finished, all new request will go to the "new" core,
and the "old" core will be unloaded.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str
The name of the core to reload
.UNINDENT
.sp
-Return : dict:
+Return : dict:
+.sp
+.nf
+.ft C
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+.ft P
+.fi
+.sp
+CLI Example:
.sp
.nf
.ft C
+salt \(aq*\(aq solr.reload_core None music
+
{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.reload_import_config(handler, core_name=None, verbose=False)
+.B salt.modules.solr.reload_import_config(handler, host=None, core_name=None, verbose=False)
MASTER ONLY
re\-loads the handler config XML file.
This command can only be run if the minion is a \(aqmaster\(aq type
@@ -4423,20 +4721,24 @@ This command can only be run if the minion is a \(aqmaster\(aq type
str
The name of the data import handler.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core
str (None)
The core the handler belongs to.
.TP
.B verbose
-bool (False)
+boolean (False)
Run the command with verbose output.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4444,27 +4746,31 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq solr.reload_import_config dataimport music {\(aqclean\(aq:True}
+salt \(aq*\(aq solr.reload_import_config dataimport None music {\(aqclean\(aq:True}
.ft P
.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.replication_details(core_name=None)
+.B salt.modules.solr.replication_details(host=None, core_name=None)
Get the full replication details.
.INDENT 7.0
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4478,26 +4784,30 @@ salt \(aq*\(aq solr.replication_details music
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.set_is_polling(polling, core_name=None)
-SLAVE ONLY
+.B salt.modules.solr.set_is_polling(polling, host=None, core_name=None)
+SLAVE CALL
Prevent the slaves from polling the master for updates.
.INDENT 7.0
.TP
.B polling
-bool
+boolean
True will enable polling. False will disable it.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4511,7 +4821,7 @@ salt \(aq*\(aq solr.set_is_polling False
.UNINDENT
.INDENT 0.0
.TP
-.B salt.modules.solr.set_replication_enabled(status, core_name)
+.B salt.modules.solr.set_replication_enabled(status, host=None, core_name=None)
MASTER ONLY
Sets the master to ignore poll requests from the slaves. Useful when you
don\(aqt want the slaves replicating during indexing or when clearing the
@@ -4519,20 +4829,32 @@ index.
.INDENT 7.0
.TP
.B status
-bool
+boolean
Sets the replication status to the specified state.
.TP
+.B host
+str (None)
+The solr host to query. __opts__[\(aqhost\(aq] is default.
+.TP
.B core_name
str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to set the status on all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+.ft P
+.fi
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq solr.set_replication_enabled false, None, music
.ft P
.fi
.UNINDENT
@@ -4572,11 +4894,11 @@ The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
.UNINDENT
.sp
-Return : dict:
+Return : dict:
.sp
.nf
.ft C
-{\(aqsuccess\(aq:bool, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
+{\(aqsuccess\(aq:boolean, \(aqdata\(aq:dict, \(aqerrors\(aq:list, \(aqwarnings\(aq:list}
.ft P
.fi
.sp
@@ -4643,102 +4965,6 @@ salt \(aq*\(aq ssh.set_auth_key dsa \(aq[]\(aq .ssh/authorized_keys
.ft P
.fi
.UNINDENT
-.SS salt.modules.state
-.sp
-Control the state system on the minion
-.INDENT 0.0
-.TP
-.B salt.modules.state.high(data)
-Execute the compound calls stored in a single set of high data
-This function is mostly intended for testing the state system
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.high \(aq{"vim": {"pkg": ["installed"]}}\(aq
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.highstate()
-Retrive the state data from the salt master for this minion and execute it
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.highstate
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.low(data)
-Execute a single low data call
-This function is mostly intended for testing the state system
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.low \(aq{"state": "pkg", "fun": "installed", "name": "vi"}\(aq
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.show_highstate()
-Retrive the highstate data from the salt master and display it
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.show_highstate
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.show_lowstate()
-List out the low data that will be applied to this minion
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq show_lowstate
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.template(tem)
-Execute the information stored in a template file on the minion
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.template \(aq\(aq
-.ft P
-.fi
-.UNINDENT
-.INDENT 0.0
-.TP
-.B salt.modules.state.template_str(tem)
-Execute the information stored in a template file on the minion
-.sp
-CLI Example:
-.sp
-.nf
-.ft C
-salt \(aq*\(aq state.template_str \(aq\(aq
-.ft P
-.fi
-.UNINDENT
.SS salt.modules.status
.sp
Module for returning various status data about a minion.
@@ -4746,7 +4972,7 @@ These data can be useful for compiling into stats later.
.INDENT 0.0
.TP
.B salt.modules.status.all_status()
-Return a composite of all status data and info for this minoon.
+Return a composite of all status data and info for this minion.
Warning: There is a LOT here!
.sp
CLI Example:
@@ -5053,6 +5279,19 @@ salt \(aq*\(aq test.ping
.ft P
.fi
.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.modules.test.version()
+Return the version of salt on the minion
+.sp
+CLI Example:
+.sp
+.nf
+.ft C
+salt \(aq*\(aq test.version
+.ft P
+.fi
+.UNINDENT
.SS salt.modules.tomcat
.sp
Support for Tomcat
@@ -5065,7 +5304,7 @@ CLI Example:
.sp
.nf
.ft C
-salt \(aq*\(aq full.fullversion
+salt \(aq*\(aq tomcat.fullversion
.ft P
.fi
.UNINDENT
@@ -5220,7 +5459,7 @@ salt \(aq*\(aq user.info root
.INDENT 0.0
.TP
.B salt.modules.useradd.list_groups(name)
-Return a list of groups the named user belings to
+Return a list of groups the named user belongs to
.sp
CLI Example:
.sp
@@ -5462,7 +5701,7 @@ salt \(aq*\(aq virt.pause
.B salt.modules.virt.purge(vm_, dirs=False)
Recursively destroy and delete a virtual machine, pass True for dir\(aqs to
also delete the directories containing the virtual machine disk images \-
-USE WITH EXTREAME CAUTION!
+USE WITH EXTREME CAUTION!
.sp
CLI Example:
.sp
@@ -5502,6 +5741,17 @@ salt \(aq*\(aq virt.seed_non_shared_migrate
.UNINDENT
.INDENT 0.0
.TP
+.B salt.modules.virt.set_autostart(vm_, state=\(aqon\(aq)
+Set the autostart flag on a VM so that the VM will start with the host
+system on reboot.
+.INDENT 7.0
+.TP
+.B CLI Example::
+salt "*" virt.enable_autostart
+.UNINDENT
+.UNINDENT
+.INDENT 0.0
+.TP
.B salt.modules.virt.shutdown(vm_)
Send a soft shutdown signal to the named vm
.sp
@@ -5594,7 +5844,7 @@ grains need to be static data.
The core module in the grains package is where the main grains are loaded by
the salt minion and the principal example of how to write grains:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/grains/core.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/grains/core.py\fP
.SH RETURNERS
.sp
By default the return values of the commands sent to the salt minions are
@@ -5647,7 +5897,7 @@ serializes the data as json and sets it in redis.
.SS Examples
.sp
The collection of builtin salt returners can be found here:
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/returners\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/returners\fP
.SH FULL LIST OF BUILTIN RETURNERS
.TS
center;
@@ -5662,7 +5912,6 @@ _
T{
\fBmongo_return\fP
T} T{
-Return data to a mongodb server
T}
_
T{
@@ -5680,16 +5929,6 @@ return data to the console to verify that it is being passed properly
.B salt.returners.local.returner(ret)
Print the return data to the terminal to verify functionality
.UNINDENT
-.SS salt.returners.mongo_return
-.sp
-Return data to a mongodb server
-.sp
-This is the default interface for returning data for the butter statd subsytem
-.INDENT 0.0
-.TP
-.B salt.returners.mongo_return.returner(ret)
-Return data to a mongodb server
-.UNINDENT
.SH FAILHARD GLOBAL OPTION
.sp
Normally, when a state fails Salt continues to execute the remainder of the
@@ -6118,7 +6357,7 @@ files.
The available renderers can be found in the renderers directory in the Salt
source code:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/renderers\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/renderers\fP
.sp
By default sls files are rendered using jinja as a templating engine, and yaml
as the serialization format. Since the rendering system can be extended simply
@@ -6170,21 +6409,82 @@ vim:
\- order: last
.ft P
.fi
-.SH FULL LIST OF BUILTIN STATES
-.TS
-center;
-|l|l|.
-_
-T{
-\fBcmd\fP
-T} T{
-Command Executions
-T}
-_
-T{
-\fBcron\fP
-T} T{
-Cron Management
+.SH STATE MODULES
+.sp
+State Modules are the components that map to actual enforcement and management
+of salt states.
+.SS States are \- Easy to Write!
+.sp
+State Modules should be easy to write and straightforward. The information
+passed to the SLS data structures will map directly to the states modules.
+.sp
+Mapping the information from the SLS data is simple, this example should
+illustrate:
+.sp
+SLS file
+.. code\-block:: yaml
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B /etc/salt/master: # maps to "name"
+.INDENT 7.0
+.TP
+.B file: # maps to State module filename eg \fI\%https://github.com/saltstack/salt/blob/develop/salt/states/file.py\fP
+.INDENT 7.0
+.IP \(bu 2
+managed # maps to the managed function in the file State module
+.IP \(bu 2
+user: root # one of many options passed to the manage function
+.IP \(bu 2
+group: root
+.IP \(bu 2
+mode: 644
+.IP \(bu 2
+source: salt://salt/master
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.sp
+Therefore this SLS data can be directly linked to a module, function and
+arguments passed to that function.
+.sp
+This does issue the burden, that function names, state names and function
+arguments should be very human readable inside state modules, since they
+directly define the user interface.
+.SS Cross Calling Modules
+.sp
+As with Execution Modules State Modules can also make use of the \fB__salt__\fP
+and \fB__grains__\fP data.
+.sp
+It is important to note, that the real work of state management should not be
+done in the state module unless it is needed, a good example is the pkg state
+module. This module does not do any package management work, it just calls the
+pkg execution module. This makes the pkg state module completely generic, which
+is why there is only one pkg state module and many backend pkg execution
+modules.
+.sp
+On the other hand some modules will require that the logic be placed in the
+state module, a good example of this is the file module. But in the vast
+majority of cases this is not the best approach, and writing specific
+execution modules to do the backend work will be the optimal solution.
+.SH FULL LIST OF BUILTIN STATES
+.TS
+center;
+|l|l|.
+_
+T{
+\fBcmd\fP
+T} T{
+Command Executions
+T}
+_
+T{
+\fBcron\fP
+T} T{
+Cron Management
T}
_
T{
@@ -6214,7 +6514,7 @@ _
T{
\fBmount\fP
T} T{
-Mount Managment
+Mount Management
T}
_
T{
@@ -6252,16 +6552,16 @@ _
.SS Command Executions
.sp
The cmd state module manages the enforcement of executed commands, this
-state can tell a command to run under certian circumstances.
+state can tell a command to run under certain circumstances.
.SS Available Functions
.sp
The cmd state only has a single function, the \fBrun\fP function
.INDENT 0.0
.TP
.B run
-Execute a command given certian conditions
+Execute a command given certain conditions
.sp
-A simple exampe:
+A simple example:
.sp
.nf
.ft C
@@ -6272,7 +6572,7 @@ cmd:
.fi
.UNINDENT
.sp
-Only run if another execution returns sucessfully, in this case truncate
+Only run if another execution returns successfully, in this case truncate
syslog if there is no disk space:
.sp
.nf
@@ -6286,7 +6586,36 @@ syslog if there is no disk space:
.INDENT 0.0
.TP
.B salt.states.cmd.run(name, onlyif=None, unless=None, cwd=\(aq/root\(aq, user=None, group=None)
-Run a command if certian circumstances are met
+Run a command if certain circumstances are met
+.INDENT 7.0
+.TP
+.B name
+The command to execute, remember that the command will execute with the
+path and permissions of the salt\-minion.
+.TP
+.B onlyif
+A command to run as a check, run the named command only if the command
+passed to the \fBonlyif\fP option returns true
+.TP
+.B unless
+A command to run as a check, only run the named command if the command
+passed to the \fBunless\fP option returns false
+.TP
+.B cwd
+The current working directory to execute the command in, defaults to
+/root
+.TP
+.B user
+The user name to run the command as
+.TP
+.B group
+The group context to run the command as
+.UNINDENT
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.cmd.wait(name, onlyif=None, unless=None, cwd=\(aq/root\(aq, user=None, group=None)
+Run the given command only if the watch statement calls it
.INDENT 7.0
.TP
.B name
@@ -6315,7 +6644,7 @@ The group context to run the command as
.INDENT 0.0
.TP
.B salt.states.cmd.watcher(name, onlyif=None, unless=None, cwd=\(aq/root\(aq, user=None, group=None)
-Run a command if certian circumstances are met
+Run a command if certain circumstances are met
.INDENT 7.0
.TP
.B name
@@ -6346,12 +6675,12 @@ The group context to run the command as
.sp
The cron state module allows for user crontabs to be cleanly managed.
.sp
-Cron declarations require a number of paramaters. The timing paramaters, need
+Cron declarations require a number of parameters. The timing parameters, need
to be declared, minute, hour, daymonth, month and dayweek. The user whose
crontab is to be edited also needs to be defined.
.sp
By default the timing arguments are all \fB*\fP and the user is root. When making
-changes to an existing cron job the name declaraion is the uniqe factor, so if
+changes to an existing cron job the name declaration is the unique factor, so if
an existing cron that looks like this:
.sp
.nf
@@ -6417,7 +6746,7 @@ The information to be set in the day of day of week section. Default is
.B salt.states.cron.present(name, user=\(aqroot\(aq, minute=\(aq*\(aq, hour=\(aq*\(aq, daymonth=\(aq*\(aq, month=\(aq*\(aq, dayweek=\(aq*\(aq)
Verifies that the specified cron job is present for the specified user.
For more advanced information about what exactly can be set in the cron
-timing paramaters check your cron system\(aqs documentation. Most Unix\-like
+timing parameters check your cron system\(aqs documentation. Most Unix\-like
systems\(aq cron documentation can be found via the crontab man page:
\fBman 5 crontab\fP.
.INDENT 7.0
@@ -6451,7 +6780,7 @@ The information to be set in the day of day of week section. Default is
.SS salt.states.file
.SS File Management
.sp
-Salt States can agresively manipulate files on a system. There are a number of
+Salt States can aggressively manipulate files on a system. There are a number of
ways in which files can be managed.
.sp
Regular files can be enforced with the \fBmanaged\fP function. This function
@@ -6470,11 +6799,16 @@ makes use of the jinja templating system would look like this:
\- group: root
\- mode: 644
\- template: jinja
+ \- context:
+ custom_var: "override"
+ \- defaults:
+ custom_var: "default value"
+ other_var: 123
.ft P
.fi
.sp
Directories can be managed via the \fBdirectory\fP function. This function can
-create and enforce the premissions on a directory. A directory statement will
+create and enforce the permissions on a directory. A directory statement will
look like this:
.sp
.nf
@@ -6503,7 +6837,7 @@ takes a few arguments
.sp
Recursive directory management can also be set via the \fBrecurse\fP
function. Recursive directory management allows for a directory on the salt
-master to be recursively coppied down to the minion. This is a great tool for
+master to be recursively copied down to the minion. This is a great tool for
deploying large code and configuration systems. A recurse state would look
something like this:
.sp
@@ -6528,7 +6862,60 @@ The path which should be deleted
.UNINDENT
.INDENT 0.0
.TP
-.B salt.states.file.directory(name, user=None, group=None, mode=None, makedirs=False)
+.B salt.states.file.append(name, text)
+Ensure that some text appears at the end of a file
+.sp
+The text will not be appended again if it already exists in the file. You
+may specify a single line of text or a list of lines to append.
+.sp
+Multi\-line example:
+.sp
+.nf
+.ft C
+/etc/motd:
+ file:
+ \- append
+ \- text: |
+ Thou hadst better eat salt with the Philosophers of Greece,
+ than sugar with the Courtiers of Italy.
+ \- Benjamin Franklin
+.ft P
+.fi
+.sp
+Multiple lines of text:
+.sp
+.nf
+.ft C
+/etc/motd:
+ file:
+ \- append
+ \- text:
+ \- Trust no one unless you have eaten much salt with him.
+ \- Salt is born of the purest of parents: the sun and the sea.
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.file.comment(name, regex, char=\(aq#\(aq, backup=\(aq.bak\(aq)
+Usage:
+.sp
+.nf
+.ft C
+/etc/fstab:
+ file:
+ \- comment
+ \- regex: ^//10.10.20.5
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.file.directory(name, user=None, group=None, mode=None, makedirs=False, clean=False, require=None)
Ensure that a named directory is present and has the right perms
.INDENT 7.0
.TP
@@ -6548,14 +6935,19 @@ The permissions to set on this directory, aka 755
.TP
.B makedirs
If the directory is located in a path without a parent directory, then
-the the state will fail. If makedirs is set to True, then the parent
+the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
file.
+.TP
+.B clean
+Make sure that only files that are set up by salt and required by this
+function are kept. If this option is set then everything in this
+directory will be deleted unless it is required.
.UNINDENT
.UNINDENT
.INDENT 0.0
.TP
-.B salt.states.file.managed(name, source, user=None, group=None, mode=None, template=None, makedirs=False, __env__=\(aqbase\(aq)
+.B salt.states.file.managed(name, source=None, user=None, group=None, mode=None, template=None, makedirs=False, context=None, defaults=None, __env__=\(aqbase\(aq)
Manage a given file, this function allows for a file to be downloaded from
the salt master and potentially run through a templating system.
.INDENT 7.0
@@ -6567,7 +6959,8 @@ The location of the file to manage
The source file, this file is located on the salt master file server
and is specified with the salt:// protocol. If the file is located on
the master in the directory named spam, and is called eggs, the source
-string is salt://spam/eggs
+string is salt://spam/eggs. If source is left blank or None, the file
+will be created as an empty file and the content will not be managed
.TP
.B user
The user to own the file, this defaults to the user salt is running as
@@ -6586,15 +6979,21 @@ used to render the downloaded file, currently jinja and mako are
supported
.TP
.B makedirs
-If the file is located in a path without a parent directory, then the
+If the file is located in a path without a parent directory, then
the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
file.
+.TP
+.B context
+Overrides default context variables passed to the template.
+.TP
+.B defaults
+Default context passed to the template.
.UNINDENT
.UNINDENT
.INDENT 0.0
.TP
-.B salt.states.file.recurse(name, source, __env__=\(aqbase\(aq)
+.B salt.states.file.recurse(name, source, clean=False, require=None, __env__=\(aqbase\(aq)
Recurse through a subdirectory on the master and copy said subdirecory
over to the specified path.
.INDENT 7.0
@@ -6607,7 +7006,33 @@ The source directory, this directory is located on the salt master file
server and is specified with the salt:// protocol. If the directory is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
+.TP
+.B clean
+Make sure that only files that are set up by salt and required by this
+function are kept. If this option is set then everything in this
+directory will be deleted unless it is required.
+.UNINDENT
.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.file.sed(name, before, after, limit=\(aq\(aq, backup=\(aq.bak\(aq, options=\(aq\-r \-e\(aq, flags=\(aqg\(aq)
+Maintain a simple edit to a file
+.sp
+Usage:
+.sp
+.nf
+.ft C
+# Disable the epel repo by default
+/etc/yum.repos.d/epel.repo:
+ file:
+ \- sed
+ \- before: 1
+ \- after: 0
+ \- limit: ^enabled=
+.ft P
+.fi
+.sp
+New in version 0.9.5.
.UNINDENT
.INDENT 0.0
.TP
@@ -6632,6 +7057,40 @@ then the state will fail, setting makedirs to True will allow Salt to
create the parent directory
.UNINDENT
.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.file.touch(name, atime=None, mtime=None)
+Replicate the \(aqnix "touch" command to create a new empty
+file or update the atime and mtime of an existing file.
+.sp
+Usage:
+.sp
+.nf
+.ft C
+/var/log/httpd/logrotate.empty
+ file:
+ \- touch
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B salt.states.file.uncomment(name, regex, char=\(aq#\(aq, backup=\(aq.bak\(aq)
+Usage:
+.sp
+.nf
+.ft C
+/etc/adduser.conf:
+ file:
+ \- uncomment
+ \- regex: EXTRA_GROUPS
+.ft P
+.fi
+.sp
+New in version 0.9.5.
+.UNINDENT
.SS salt.states.group
.SS Group Management
.sp
@@ -6686,7 +7145,7 @@ salt\-master:
.INDENT 0.0
.TP
.B salt.states.host.absent(name, ip)
-Ensure that the the named host is absent
+Ensure that the named host is absent
.INDENT 7.0
.TP
.B name
@@ -6746,7 +7205,7 @@ The name of the kernel module to verify is loaded
.UNINDENT
.UNINDENT
.SS salt.states.mount
-.SS Mount Managment
+.SS Mount Management
.sp
Mount any type of mountable filesystem with the mounted function:
.sp
@@ -6818,7 +7277,7 @@ vim:
.fi
.INDENT 0.0
.TP
-.B salt.states.pkg.installed(name)
+.B salt.states.pkg.installed(name, repo=\(aq\(aq, skip_verify=False)
Verify that the package is installed, and only that it is installed. This
state will not upgrade an existing package and only verify that it is
installed
@@ -6826,11 +7285,30 @@ installed
.TP
.B name
The name of the package to install
+.TP
+.B repo
+Specify a non\-default repository to install from
+.TP
+.B skip_verify
+False
+Skip the GPG verification check for the package to be installed
.UNINDENT
+.sp
+Usage:
+.sp
+.nf
+.ft C
+httpd:
+ \- pkg
+ \- installed
+ \- repo: mycustomrepo
+ \- skip_verify: True
+.ft P
+.fi
.UNINDENT
.INDENT 0.0
.TP
-.B salt.states.pkg.latest(name)
+.B salt.states.pkg.latest(name, repo=\(aq\(aq, skip_verify=False)
Verify that the named package is installed and the latest available
package. If the package can be updated this state function will update
the package. Generally it is better for the installed function to be
@@ -6840,6 +7318,14 @@ package is available
.TP
.B name
The name of the package to maintain at the latest available version
+.TP
+.B repo
+(default)
+Specify a non\-default repository to install from
+.TP
+.B skip_verify
+False
+Skip the GPG verification check for the package to be installed
.UNINDENT
.UNINDENT
.INDENT 0.0
@@ -6879,26 +7365,36 @@ httpd:
.fi
.INDENT 0.0
.TP
-.B salt.states.service.dead(name, sig=None)
+.B salt.states.service.dead(name, enable=None, sig=None)
Ensure that the named service is dead
.INDENT 7.0
.TP
.B name
The name of the init or rc script used to manage the service
.TP
+.B enable
+Set the service to be enabled at boot time, True sets the service to
+be enabled, False sets the named service to be disabled. The default
+is None, which does not enable or disable anything.
+.TP
.B sig
The string to search for when looking for the service process with ps
.UNINDENT
.UNINDENT
.INDENT 0.0
.TP
-.B salt.states.service.running(name, sig=None)
+.B salt.states.service.running(name, enable=None, sig=None)
Verify that the service is running
.INDENT 7.0
.TP
.B name
The name of the init or rc script used to manage the service
.TP
+.B enable
+Set the service to be enabled at boot time, True sets the service to
+be enabled, False sets the named service to be disabled. The default
+is None, which does not enable or disable anything.
+.TP
.B sig
The string to search for when looking for the service process with ps
.UNINDENT
@@ -6941,11 +7437,11 @@ Verifies that the specified ssh key is absent
The ssh key to manage
.TP
.B user
-The user who owns the ssh authorixed keys file to modify
+The user who owns the ssh authorized keys file to modify
.TP
.B config
The location of the authorized keys file relative to the user\(aqs home
-direcotory, defaults to ".ssh/authorized_keys"
+directory, defaults to ".ssh/authorized_keys"
.UNINDENT
.UNINDENT
.INDENT 0.0
@@ -6958,7 +7454,7 @@ Verifies that the specified ssh key is present for the specified user
The ssh key to manage
.TP
.B user
-The user who owns the ssh authorixed keys file to modify
+The user who owns the ssh authorized keys file to modify
.TP
.B enc
Defines what type of key is being used, can be ssh\-rsa or ssh\-dss
@@ -6971,7 +7467,7 @@ The options passed to the key, pass a list object
.TP
.B config
The location of the authorized keys file relative to the user\(aqs home
-direcotory, defaults to ".ssh/authorized_keys"
+directory, defaults to ".ssh/authorized_keys"
.UNINDENT
.UNINDENT
.SS salt.states.sysctl
@@ -7098,7 +7594,7 @@ derived from the file.
The best place to find examples of renderers is in the Salt source code. The
renderers included with Salt can be found here:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/renderers\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/renderers\fP
.sp
Here is a simple jinja + yaml example:
.sp
@@ -7139,7 +7635,6 @@ _
T{
\fBjson_mako\fP
T} T{
-Process json with the Mako templating engine
T}
_
T{
@@ -7151,7 +7646,6 @@ _
T{
\fByaml_mako\fP
T} T{
-Process yaml with the Mako templating engine
T}
_
T{
@@ -7169,18 +7663,7 @@ This renderer will take a json file with the jinja template and render it to a
high data format for salt states.
.INDENT 0.0
.TP
-.B salt.renderers.json_jinja.render(template, env=\(aq\(aq, sls=\(aq\(aq)
-Render the data passing the functions and grains into the rendering system
-.UNINDENT
-.SS salt.renderers.json_mako
-.sp
-Process json with the Mako templating engine
-.sp
-This renderer will take a json file with the Mako template and render it to a
-high data format for salt states.
-.INDENT 0.0
-.TP
-.B salt.renderers.json_mako.render(template)
+.B salt.renderers.json_jinja.render(template_file, env=\(aq\(aq, sls=\(aq\(aq)
Render the data passing the functions and grains into the rendering system
.UNINDENT
.SS salt.renderers.yaml_jinja
@@ -7191,18 +7674,7 @@ This renderer will take a yaml file with the jinja2 template and render it to a
high data format for salt states.
.INDENT 0.0
.TP
-.B salt.renderers.yaml_jinja.render(template, env=\(aq\(aq, sls=\(aq\(aq)
-Render the data passing the functions and grains into the rendering system
-.UNINDENT
-.SS salt.renderers.yaml_mako
-.sp
-Process yaml with the Mako templating engine
-.sp
-This renderer will take a yaml file within a mako template and render it to a
-high data format for salt states.
-.INDENT 0.0
-.TP
-.B salt.renderers.yaml_mako.render(template, env=\(aq\(aq, sls=\(aq\(aq)
+.B salt.renderers.yaml_jinja.render(template_file, env=\(aq\(aq, sls=\(aq\(aq)
Render the data passing the functions and grains into the rendering system
.UNINDENT
.SS salt.renderers.py
@@ -7223,7 +7695,7 @@ Salt runners are convenience applications executed with the salt\-run command.
A Salt runner can be a simple client call, or a complex application.
.sp
The use for a salt running is to build a frontend hook for running sets of
-commands via salt of creating special formatted output.
+commands via salt or creating special formatted output.
.SS Writing Salt Runners
.sp
Salt runners can be easily written, the work in a similar way to Salt modules
@@ -7244,7 +7716,7 @@ contains a function called \fBfoo\fP then the function could be called with:
.sp
The best examples of runners can be found in the Salt source:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/runners\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/runners\fP
.sp
A simple runner that returns a well formated list of the minons that are
responding to salt calls would look like this:
@@ -7585,7 +8057,8 @@ def get_file(path, dest, env=\(aqbase\(aq):
# The port used by the communication interface
#ret_port: 4506
-# The root directory prepended to these options: pki_dir, cachedir, log_file.
+# The root directory prepended to these options: pki_dir, cachedir,
+# sock_dir, log_file.
#root_dir: /
# Directory used to store public key data
@@ -7600,16 +8073,25 @@ def get_file(path, dest, env=\(aqbase\(aq):
# Set the directory used to hold unix sockets
#sock_dir: /tmp/salt\-unix
+# Set the acceptance level for serialization of messages. This should only be
+# set if the master is newer than 0.9.5 and the minions are older, this option
+# allows a 0.9.5 and newer master to communicate with minions 0.9.4 and
+# earlier. It is not recommended to keep this setting on if the minions are
+# all 0.9.5 or higher, as leaving pickle as the serialization medium is slow
+# and opens up security risks
+#
+#serial: msgpack
+
##### Security settings #####
##########################################
# Enable "open mode", this mode still maintains encryption, but turns off
# authentication, this is only intended for highly secure environments or for
-# the situation where your keys end up in a bad state. If you run in open more
+# the situation where your keys end up in a bad state. If you run in open mode
# you do so at your own risk!
#open_mode: False
# Enable auto_accept, this setting will automatically accept all incoming
-# public keys from the minions
+# public keys from the minions. Note that this is insecure.
#auto_accept: False
##### State System settings #####
@@ -7617,7 +8099,7 @@ def get_file(path, dest, env=\(aqbase\(aq):
# The state system uses a "top" file to tell the minions what environment to
# use and what modules to use. The state_top file is defined relative to the
# root of the base environment
-#state_top: top.yml
+#state_top: top.sls
#
# The renderer to use on the minions to render the state data
#renderer: yaml_jinja
@@ -7733,13 +8215,23 @@ def get_file(path, dest, env=\(aqbase\(aq):
# Logger levels can be used to tweak specific loggers logging levels.
# Imagine you want to have the salt library at the \(aqwarning\(aq level, but, you
# still wish to have \(aqsalt.modules\(aq at the \(aqdebug\(aq level:
-# log_granular_levels: {
+# log_granular_levels:
# \(aqsalt\(aq: \(aqwarning\(aq,
# \(aqsalt.modules\(aq: \(aqdebug\(aq
-# }
#
#log_granular_levels: {}
+
+##### Node Groups #####
+##########################################
+# Node groups allow for logical groupings of minion nodes.
+# A group consists of a group name and a compound target.
+#
+# nodegroups:
+# group1: \(aqL@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com\(aq,
+# group2: \(aqG@os:Debian and foo.domain.com\(aq,
+
+
.ft P
.fi
.SS Example minion configuration file
@@ -7776,6 +8268,13 @@ def get_file(path, dest, env=\(aqbase\(aq):
# Where cache data goes
#cachedir: /var/cache/salt
+# When waiting for a master to accept the minion\(aqs public key, salt will
+# contiuously attempt to reconnect until successful. This is the time, in
+# seconds, between those reconnection attempts.
+# acceptance_wait_time = 10
+
+
+
##### Minion module management #####
##########################################
# Disable specific modules, this will allow the admin to limit the level os
@@ -7807,8 +8306,22 @@ def get_file(path, dest, env=\(aqbase\(aq):
#
#renderer: yaml_jinja
#
-# Test allows for the state runs to only be test runs
-#test: False
+# state_verbose allows for the data returned from the minion to be more
+# verbose. Normally only states that fail or states that have changes are
+# returned, but setting state_verbose to True will return all states that
+# were checked
+#state_verbose: False
+#
+# autoload_dynamic_modules Turns on automatic loading of modules found in the
+# environments on the master. This is turned on by default, to turn off
+# autoloading modules when states run set this value to False
+#autoload_dynamic_modules: True
+#
+# clean_dynamic_modules keeps the dynamic modules on the minion in sync with
+# the dynamic modules on the master, this means that if a dynamic module is
+# not on the master it will be deleted from the minion. By default this is
+# enabled and can be disabled by changing this value to False
+#clean_dynamic_modules: True
###### Security settings #####
###########################################
@@ -7844,7 +8357,6 @@ def get_file(path, dest, env=\(aqbase\(aq):
#
#log_granular_levels: {}
-
###### Module configuration #####
###########################################
# Salt allows for modules to be passed arbitrary configuration data, any data
@@ -7900,17 +8412,6 @@ The network port to set up the publication interface
publish_port: 4505
.ft P
.fi
-.SS \fBpublish_pull_port\fP
-.sp
-Default: \fB45055\fP
-.sp
-The port used to communicate to the local publisher
-.sp
-.nf
-.ft C
-publish_pull_port: 45055
-.ft P
-.fi
.SS \fBworker_threads\fP
.sp
Default: \fB5\fP
@@ -7924,18 +8425,6 @@ worker_threads value.
worker_threads: 5
.ft P
.fi
-.SS \fBworker_start_port\fP
-.sp
-Default: \fB5\fP
-.sp
-The port to begin binding workers on, the workers will be created on
-increasingly higher ports
-.sp
-.nf
-.ft C
-worker_start_port: 45056
-.ft P
-.fi
.SS \fBret_port\fP
.sp
Default: \fB4506\fP
@@ -7948,6 +8437,12 @@ execution returns and command executions.
ret_port: 4506
.ft P
.fi
+.SS \fBroot_dir\fP
+.sp
+Default: \fB/\fP
+.sp
+The system root directory to operate from, change this to make Salt run from
+an alternative root
.SS \fBpki_dir\fP
.sp
Default: \fB/etc/salt/pki\fP
@@ -7976,6 +8471,12 @@ cachedir: /var/cache/salt
Default: \fB24\fP
.sp
Set the number of hours to keep old job information
+.SS \fBsock_dir\fP
+.sp
+Default: \fB/tmp/salt\-unix\fP
+.sp
+Set the location to use for creating Unix sockets for master process
+communication
.SS Master Security Settings
.SS \fBopen_mode\fP
.sp
@@ -8009,7 +8510,7 @@ auto_accept: False
.SS Master State System Settings
.SS \fBstate_top\fP
.sp
-Default: \fBtop.yml\fP
+Default: \fBtop.sls\fP
.sp
The state system uses a "top" file to tell the minions what environment to
use and what modules to use. The state_top file is defined relative to the
@@ -8017,7 +8518,7 @@ root of the base environment
.sp
.nf
.ft C
-state_top: top.yml
+state_top: top.sls
.ft P
.fi
.SS \fBrenderer\fP
@@ -8031,6 +8532,17 @@ The renderer to use on the minions to render the state data
renderer: yaml_jinja
.ft P
.fi
+.sp
+Default: \fBFalse\fP
+.sp
+Set the global failhard flag, this informs all states to stop running states
+at the moment a single state fails
+.sp
+.nf
+.ft C
+failhard: False
+.ft P
+.fi
.SS Master File Server Settings
.SS \fBfile_roots\fP
.sp
@@ -8089,65 +8601,147 @@ The buffer size in the file server in bytes
file_buffer_size: 1048576
.ft P
.fi
-.SS Master Logging Settings
-.SS \fBlog_file\fP
+.SS Syndic Server Settings
.sp
-Default: \fB/etc/salt/pki\fP
+The Salt syndic is used to pass commands through a master from a higher
+master. Using the syndic is simple, if this is a master that will have
+syndic server(s) below it set the "order_masters" setting to True, if this
+is a master that will be running a syndic daemon for passthrough the
+"syndic_master" setting needs to be set to the location of the master server
+to receive commands from
+.SS \fBorder_masters\fP
.sp
-The location of the master log file
+Default: \fBFalse\fP
+.sp
+Extra data needs to be sent with publications if the master is controlling a
+lower level master via a syndic minion. If this is the case the order_masters
+value must be set to True
.sp
.nf
.ft C
-log_file: /var/log/salt/master
+order_masters: False
.ft P
.fi
-.SS \fBlog_level\fP
+.SS \fBsyndic_master\fP
.sp
-Default: \fBwarning\fP
+Default: \fBNone\fP
.sp
-The level of messages to send to the log file.
-One of \(aqinfo\(aq, \(aqquiet\(aq, \(aqcritical\(aq, \(aqerror\(aq, \(aqdebug\(aq, \(aqwarning\(aq.
+If this master will be running a salt\-syndic to connect to a higher level
+master specify the higher level master with this configuration value
.sp
.nf
.ft C
-log_level: warning
+syndic_master: masterofmasters
.ft P
.fi
-.SS \fBlog_granular_levels\fP
+.SS Peer Publish Settings
+.sp
+Salt minions can send commands to other minions, but only if the minion is
+allowed to. By default "Peer Publication" is disabled, and when enabled it
+is enabled for specific minions and specific commands. This allows secure
+compartmentalization of commands based on individual minions.
+.SS \fBpeer\fP
.sp
Default: \fB{}\fP
.sp
-Logger levels can be used to tweak specific loggers logging levels.
-Imagine you want to have the salt library at the \(aqwarning\(aq level, but, you
-still wish to have \(aqsalt.modules\(aq at the \(aqdebug\(aq level:
+The configuration uses regular expressions to match minions and then a list
+of regular expressions to match functions, the following will allow the
+minion authenticated as foo.example.com to execute functions from the test
+and pkg modules
.sp
.nf
.ft C
-log_granular_levels: {
- \(aqsalt\(aq: \(aqwarning\(aq,
- \(aqsalt.modules\(aq: \(aqdebug\(aq
-}
+peer:
+ foo.example.com:
+ \- test.*
+ \- pkg.*
.ft P
.fi
-.SH CONFIGURING THE SALT MINION
.sp
-The Salt system is amazingly simple and easy to configure, the two components
-of the Salt system each have a respective configuration file. The
-\fBsalt\-master\fP is configured via the master configuration file, and the
-\fBsalt\-minion\fP is configured via the minion configuration file.
-.IP "See also"
+This will allow all minions to execute all commands:
.sp
-\fIexample minion configuration file\fP
-.RE
+.nf
+.ft C
+peer:
+ .*:
+ \- .*
+.ft P
+.fi
.sp
-The Salt Minion configuration is very simple, typically the only value that
-needs to be set is the master value so the minion can find its master.
-.SS Minion Primary Configuration
-.SS \fBmaster\fP
+This is not recommended, since it would allow anyone who gets root on any
+single minion to instantly have root on all of the minions!
+.SS Node Groups
.sp
-Default: \fBsalt\fP
+Default: \fB{}\fP
.sp
-The hostname or ipv4 of the master.
+Node groups allow for logical groupings of minion nodes.
+A group consists of a group name and a compound target.
+.sp
+.nf
+.ft C
+nodegroups:
+ group1: \(aqL@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com\(aq
+ group2: \(aqG@os:Debian and foo.domain.com\(aq
+.ft P
+.fi
+.SS Master Logging Settings
+.SS \fBlog_file\fP
+.sp
+Default: \fB/var/log/salt/master\fP
+.sp
+The location of the master log file
+.sp
+.nf
+.ft C
+log_file: /var/log/salt/master
+.ft P
+.fi
+.SS \fBlog_level\fP
+.sp
+Default: \fBwarning\fP
+.sp
+The level of messages to send to the log file.
+One of \(aqinfo\(aq, \(aqquiet\(aq, \(aqcritical\(aq, \(aqerror\(aq, \(aqdebug\(aq, \(aqwarning\(aq.
+.sp
+.nf
+.ft C
+log_level: warning
+.ft P
+.fi
+.SS \fBlog_granular_levels\fP
+.sp
+Default: \fB{}\fP
+.sp
+Logger levels can be used to tweak specific loggers logging levels.
+Imagine you want to have the salt library at the \(aqwarning\(aq level, but, you
+still wish to have \(aqsalt.modules\(aq at the \(aqdebug\(aq level:
+.sp
+.nf
+.ft C
+log_granular_levels:
+ \(aqsalt\(aq: \(aqwarning\(aq,
+ \(aqsalt.modules\(aq: \(aqdebug\(aq
+.ft P
+.fi
+.SH CONFIGURING THE SALT MINION
+.sp
+The Salt system is amazingly simple and easy to configure, the two components
+of the Salt system each have a respective configuration file. The
+\fBsalt\-master\fP is configured via the master configuration file, and the
+\fBsalt\-minion\fP is configured via the minion configuration file.
+.IP "See also"
+.sp
+\fIexample minion configuration file\fP
+.RE
+.sp
+The Salt Minion configuration is very simple, typically the only value that
+needs to be set is the master value so the minion can find its master.
+.SS Minion Primary Configuration
+.SS \fBmaster\fP
+.sp
+Default: \fBsalt\fP
+.sp
+The hostname or ipv4 of the master.
.sp
.nf
.ft C
@@ -8192,6 +8786,17 @@ clusters.
id: foo.bar.com
.ft P
.fi
+.SS \fBsub_timeout\fP
+.sp
+The minion connection to the master may be interrupted, the minion will
+verify the connection every so many seconds, to disable connection
+verification set this value to 0
+.sp
+.nf
+.ft C
+sub_timeout: 60
+.ft P
+.fi
.SS \fBcachedir\fP
.sp
Default: \fB/var/cache/salt\fP
@@ -8203,6 +8808,17 @@ The location for minion cache data.
cachedir: /var/cache/salt
.ft P
.fi
+.sp
+Default: \fB10\fP
+.sp
+The number of seconds to wait until attempting to re\-authenticate with the
+master.
+.sp
+.nf
+.ft C
+acceptance_wait_time: 10
+.ft P
+.fi
.SS Minion Module Management
.SS \fBdisable_modules\fP
.sp
@@ -8212,11 +8828,141 @@ The event may occur in which the administrator desires that a minion should not
be able to execute a certain module. The sys module is built into the minion
and cannot be disabled.
.sp
+This setting can also tune the minion, as all modules are loaded into ram
+disabling modules will lower the minion\(aqs ram footprint.
+.sp
+.nf
+.ft C
+disable_modules:
+ \- test
+ \- solr
+.ft P
+.fi
+.SS \fBdisable_returners\fP
+.sp
+Default: \fB[]\fP (all returners are enabled by default)
+.sp
+If certain returners should be disabled, this is the place
+.sp
+.nf
+.ft C
+disable_returners:
+ \- mongo_return
+.ft P
+.fi
+.SS \fBmodule_dirs\fP
+.sp
+Default: \fB[]\fP
+.sp
+A list of extra directories to search for salt modules
+.sp
+.nf
+.ft C
+module_dirs:
+ \- /var/lib/salt/modules
+.ft P
+.fi
+.SS \fBreturner_dirs\fP
+.sp
+Default: \fB[]\fP
+.sp
+A list of extra directories to search for salt returners
+.sp
.nf
.ft C
-disable_modules: [cmd, virt, test]
+returner_dirs:
+ \- /var/lib/salt/returners
.ft P
.fi
+.SS \fBstates_dirs\fP
+.sp
+Default: \fB[]\fP
+.sp
+A list of extra directories to search for salt states
+.sp
+.nf
+.ft C
+states_dirs:
+ \- /var/lib/salt/states
+.ft P
+.fi
+.SS \fBrender_dirs\fP
+.sp
+Default: \fB[]\fP
+.sp
+A list of extra directories to search for salt renderers
+.sp
+.nf
+.ft C
+render_dirs:
+ \- /var/lib/salt/renderers
+.ft P
+.fi
+.SS \fBcython_enable\fP
+.sp
+Default: \fBFalse\fP
+.sp
+Set this value to true to enable auto loading and compiling of .pyx modules,
+This setting requires that gcc and cython are installed on the minion
+.sp
+.nf
+.ft C
+cython_enable: False
+.ft P
+.fi
+.SS State Management Settings
+.SS \fBrenderer\fP
+.sp
+Default: \fByaml_jinja\fP
+.sp
+The default renderer used for local state executions
+.sp
+.nf
+.ft C
+renderer: yaml_jinja
+.ft P
+.fi
+.SS \fBstate_verbose\fP
+.sp
+Default: \fBFalse\fP
+.sp
+state_verbose allows for the data returned from the minion to be more
+verbose. Normally only states that fail or states that have changes are
+returned, but setting state_verbose to True will return all states that
+were checked
+.sp
+.nf
+.ft C
+state_verbose: True
+.ft P
+.fi
+.SS \fBautoload_dynamic_modules\fP
+.sp
+Default: \fBTrue\fP
+.sp
+autoload_dynamic_modules Turns on automatic loading of modules found in the
+environments on the master. This is turned on by default, to turn off
+autoloading modules when states run set this value to False
+.sp
+.nf
+.ft C
+autoload_dynamic_modules: True
+.ft P
+.fi
+.sp
+Default: \fBTrue\fP
+.sp
+clean_dynamic_modules keeps the dynamic modules on the minion in sync with
+the dynamic modules on the master, this means that if a dynamic module is
+not on the master it will be deleted from the minion. By default this is
+enabled and can be disabled by changing this value to False
+.sp
+.nf
+.ft C
+clean_dynamic_modules: True
+.ft P
+.fi
+.SS Security Settings
.SS \fBopen_mode\fP
.sp
Default: \fBFalse\fP
@@ -8230,6 +8976,57 @@ minion to clean the keys.
open_mode: False
.ft P
.fi
+.SS Thread Settings
+.sp
+Default: \fBTrue\fP
+.sp
+Disable multiprocessing support, by default when a minion receives a
+publication a new process is spawned and the command is executed therein.
+.sp
+.nf
+.ft C
+multiprocessing: True
+.ft P
+.fi
+.SS Minion Logging Settings
+.SS \fBlog_file\fP
+.sp
+Default: \fB/var/log/salt/minion\fP
+.sp
+The location of the minion log file
+.sp
+.nf
+.ft C
+log_file: /var/log/salt/minion
+.ft P
+.fi
+.SS \fBlog_level\fP
+.sp
+Default: \fBwarning\fP
+.sp
+The level of messages to send to the log file.
+One of \(aqinfo\(aq, \(aqquiet\(aq, \(aqcritical\(aq, \(aqerror\(aq, \(aqdebug\(aq, \(aqwarning\(aq.
+.sp
+.nf
+.ft C
+log_level: warning
+.ft P
+.fi
+.SS \fBlog_granular_levels\fP
+.sp
+Default: \fB{}\fP
+.sp
+Logger levels can be used to tweak specific loggers logging levels.
+Imagine you want to have the salt library at the \(aqwarning\(aq level, but, you
+still wish to have \(aqsalt.modules\(aq at the \(aqdebug\(aq level:
+.sp
+.nf
+.ft C
+log_granular_levels:
+ \(aqsalt\(aq: \(aqwarning\(aq,
+ \(aqsalt.modules\(aq: \(aqdebug\(aq
+.ft P
+.fi
.SH COMMAND LINE REFERENCE
.sp
Salt can be controlled by a command line client by the root user on the Salt
@@ -8260,7 +9057,7 @@ glob:
.sp
.nf
.ft C
-salt \(aq*foo.com\(aq sys.doc
+salt \e*foo.com sys.doc
.ft P
.fi
.sp
@@ -8319,6 +9116,43 @@ on the return of the primary function the main function is executed.
.sp
Execution matching allows for matching minions based on any arbitrairy running
data on tne minions.
+.SS Compound Targeting
+.sp
+New in version 0.9.5.
+.sp
+Multiple target interfaces can be used in conjunction to determine the command
+targets. These targets can then be combined using and or or statements. This
+is well defined with an example:
+.sp
+.nf
+.ft C
+salt \-C \(aqG@os:Debian and webser* or E@db.*\(aq test.ping
+.ft P
+.fi
+.sp
+in this example any minion whose id starts with webser and is running Debian,
+or any minion whose id starts with db will be matched.
+.sp
+The type of matcher defaults to glob, but can be specified with the
+corresponding letter followed by the @ symbol. In the above example a grain is
+used with G@ as well as a regular expression with E@. The webser* target does
+not need to be prefaced with a target type specifier because it is a glob.
+.SS Node Group Targeting
+.sp
+New in version 0.9.5.
+.sp
+Often the convenience of having a predefined group of minions to execute
+targets on is desired. This can be accomplished with the new nodegroups
+feature. Nodegroups allow for predefined compound targets to be declared in
+the master configuration file:
+.sp
+.nf
+.ft C
+nodegroups:
+ \ group1: \(aqL@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com\(aq
+ \ group2: \(aqG@os:Debian and foo.domain.com\(aq
+.ft P
+.fi
.SS Calling the Function
.sp
The function to call on the specified target is placed after the target
@@ -8369,7 +9203,9 @@ salt \(aq*\(aq [ options ] sys.doc
.sp
salt \-E \(aq.*\(aq [ options ] sys.doc cmd
.sp
-salt \-F \(aqoperatingsystem:Arch.*\(aq [ options ] test.ping
+salt \-G \(aqos:Arch.*\(aq [ options ] test.ping
+.sp
+salt \-C \fI\%'G@os\fP:Arch.* and webserv* or \fI\%G@kernel\fP:FreeBSD\(aq [ options ] test.ping
.sp
salt \-Q test.ping
.UNINDENT
@@ -8392,6 +9228,11 @@ The timeout in seconds to wait for replies from the salt minions.
.UNINDENT
.INDENT 0.0
.TP
+.B \-\-version
+Print the version of salt that is running.
+.UNINDENT
+.INDENT 0.0
+.TP
.B \-E, \-\-pcre
The target expression will be interpreted as a pcre regular expression
rather than a shell glob.
@@ -8411,8 +9252,35 @@ regular expression>\(aq; example: \(aqos:Arch.*\(aq
.UNINDENT
.INDENT 0.0
.TP
+.B \-C, \-\-compound
+Utilize many target definitions to make the call very granular. This option
+takes a group of targets separated by and or or. The default matcher is a
+glob as usual, if something other than a glob is used preface it with the
+letter denoting the type, example: \(aqwebserv* and \fI\%G@os\fP:Debian or \fI\%E@db.*\fP\(aq
+make sure that the compound target is encapsulated in quotes.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-X, \-\-exsel
+Instead of using shell globs use the return code of a function.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-N, \-\-nodegroup
+Use a predefined compound target defined in the salt master configuration
+file
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-return
+Choose an alternative returner to call on the minion, if an alternative
+returner is used then the return will not come back to the command line
+but will be sent to the specified return system.
+.UNINDENT
+.INDENT 0.0
+.TP
.B \-Q, \-\-query
-Execute a salt command query, this can be used to find the results os a
+Execute a salt command query, this can be used to find the results of a
previous function call: \-Q test.echo\(aq)
.UNINDENT
.INDENT 0.0
@@ -8422,6 +9290,29 @@ The location of the salt master configuration file, the salt master
settings are required to know where the connections are;
default=/etc/salt/master
.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-raw\-out
+Print the output from the salt command in raw python
+form, this is suitable for re\-reading the output into
+an executing python script with eval.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-text\-out
+Print the output from the salt command in the same
+form the shell would.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-yaml\-out
+Print the output from the salt command in yaml.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-\-json\-out
+Print the output from the salt command in json.
+.UNINDENT
.SS See also
.sp
\fIsalt(7)\fP
@@ -8571,7 +9462,7 @@ regular expresion>\(aq; example: \(aqos:Arch.*\(aq
.INDENT 0.0
.TP
.B \-Q, \-\-query
-Execute a salt command query, this can be used to find the results os a
+Execute a salt command query, this can be used to find the results of a
previous function call: \-Q test.echo\(aq)
.UNINDENT
.INDENT 0.0
@@ -9049,7 +9940,7 @@ use the file extension “.pyx” and the minion module will be compiled when
the minion is started. An example cython module is included in the main
distribution called cytest.pyx:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules/cytest.pyx\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules/cytest.pyx\fP
.SS Dynamic Returners \-
.sp
By default salt returns command data back to the salt master, but now salt can
@@ -9063,7 +9954,7 @@ data so anything from MySQL, redis, mongodb and more!
.sp
There are 2 simple stock returners in the returners directory:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/returners\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/returners\fP
.sp
The documentation on writing returners will be added to the wiki shortly, and
returners can be written in pure python, or in cython.
@@ -9079,7 +9970,7 @@ Information on how to use this simple addition has been added to the wiki:
The test module has an example of using the __opts__ dict, and how to set
default options:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/modules/test.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/modules/test.py\fP
.SS Advanced Minion Threading:
.sp
In 0.7.0 the minion would block after receiving a command from the master, now
@@ -9091,7 +9982,7 @@ exploit the negative aspects of the Python GIL to run faster and more reliably,
but simple calls will still be faster with python threading.
The configuration option can be found in the minion configuration file:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/conf/minion\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/conf/minion\fP
.sp
Lowered Supported Python to 2.6 \-
.sp
@@ -9169,7 +10060,7 @@ The system for loading salt modules has been pulled out of the minion class to
be a standalone module, this has enabled more dynamic loading of Salt modules
and enables many of the updates in 0.8.7 –
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/loader.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/loader.py\fP
.sp
Salt Job ids are now microsecond precise, this was needed to repair a race
condition unveiled by the speed improvements in the new ZeroMQ topology.
@@ -9445,7 +10336,7 @@ The minion and master classes have been redesigned to allow for specialized
minion and master servers to be easily created. An example on how this is done
for the master can be found in the \fBmaster.py\fP salt module:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/master.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/master.py\fP
.sp
The \fBMaster\fP class extends the \fBSMaster\fP class and set up the main master
server.
@@ -9453,7 +10344,7 @@ server.
The minion functions can now also be easily added to another application via
the \fBSMinion\fP class, this class can be found in the \fBminion.py\fP module:
.sp
-\fI\%https://github.com/saltstack/salt/blob/v0.9.4/salt/minion.py\fP
+\fI\%https://github.com/saltstack/salt/blob/v0.9.5/salt/minion.py\fP
.SS Cleaner Key Management
.sp
This release changes some of the key naming to allow for multiple master keys
@@ -9839,6 +10730,463 @@ AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyYvlRBsJdDOo49CNfhlWHWXQRqul6rwL4KIuPrhY7hBw0tV
\- comment: \(aqFrank\(aqs key\(aq
.ft P
.fi
+.SS Salt 0.9.4 Release Notes
+.sp
+Salt 0.9.4 has arrived. This is a critical update that repairs a number of
+key bugs found in 0.9.3. But this update is not without feature additions
+as well! 0.9.4 adds support for Gentoo portage to the pkg module and state
+system. Also there are 2 major new state additions, the failhard option and
+the ability to set up finite state ordering with the \fBorder\fP option.
+.sp
+This release also sees our largest increase in community contributions.
+These contributors have and continue to be the life blood of the Salt
+project, and the team continues to grow. I want to put out a big thanks to
+our new and existing contributors.
+.SS Download!
+.sp
+The Salt source can be downloaded from the salt github site:
+.sp
+\fI\%https://github.com/downloads/saltstack/salt/salt-0.9.4.tar.gz\fP
+.sp
+Or from PyPI:
+.sp
+\fI\%http://pypi.python.org/packages/source/s/salt/salt-0.9.4.tar.gz\fP
+.sp
+For instructions on how to set up Salt please see the \fBinstallation
+instructions\fP.
+.SS New Features
+.SS Failhard State Option
+.sp
+Normally, when a state fails Salt continues to execute the remainder of the
+defined states and will only refuse to execute states that require the failed
+state.
+.sp
+But the situation may exist, where you would want all state execution to stop
+if a single state execution fails. The capability to do this is called
+\fBfailing hard\fP.
+.SS State Level Failhard
+.sp
+A single state can have a failhard set, this means that if this individual
+state fails that all state execution will immediately stop. This is a great
+thing to do if there is a state that sets up a critical config file and
+setting a require for each state that reads the config would be cumbersome.
+A good example of this would be setting up a package manager early on:
+.sp
+.nf
+.ft C
+/etc/yum.repos.d/company.repo:
+ file:
+ \- managed
+ \- source: salt://company/yumrepo.conf
+ \- user: root
+ \- group: root
+ \- mode: 644
+ \- order: 1
+ \- failhard: True
+.ft P
+.fi
+.sp
+In this situation, the yum repo is going to be configured before other states,
+and if it fails to lay down the config file, then no other states will be
+executed.
+.SS Global Failhard
+.sp
+It may be desired to have failhard be applied to every state that is executed,
+if this is the case, then failhard can be set in the master configuration
+file. Setting failhard in the master configuration file will result in failing
+hard when any minion gathering states from the master have a state fail.
+.sp
+This is NOT the default behavior, normally Salt will only fail states that
+require a failed state.
+.sp
+Using the global failhard is generally not recommended, since it can result
+in states not being executed or even checked. It can also be confusing to
+see states failhard if an admin is not actively aware that the failhard has
+been set.
+.sp
+To use the global failhard set failhard: True in the master configuration
+.SS Finite Ordering of State Execution
+.sp
+When creating salt sls files, it is often important to ensure that they run in
+a specific order. While states will always execute in the same order, that
+order is not necessarily defined the way you want it.
+.sp
+A few tools exist in Salt to set up the correct state ordering, these tools
+consist of requisite declarations and order options.
+.SS The Order Option
+.sp
+Before using the order option, remember that the majority of state ordering
+should be done with requisite statements, and that a requisite statement
+will override an order option.
+.sp
+The order option is used by adding an order number to a state declaration
+with the option \fIorder\fP:
+.sp
+.nf
+.ft C
+vim:
+ pkg:
+ \- installed
+ \- order: 1
+.ft P
+.fi
+.sp
+By adding the order option to \fI1\fP this ensures that the vim package will be
+installed in tandem with any other state declaration set to the order \fI1\fP.
+.sp
+Any state declared without an order option will be executed after all states
+with order options are executed.
+.sp
+But this construct can only handle ordering states from the beginning.
+Sometimes you may want to send a state to the end of the line, to do this
+set the order to last:
+.sp
+.nf
+.ft C
+vim:
+ pkg:
+ \- installed
+ \- order: last
+.ft P
+.fi
+.sp
+Substantial testing has gone into the state system and it is ready for real
+world usage. A great deal has been added to the documentation for states and
+the modules and functions available to states have been cleanly documented.
+.sp
+A number of State System bugs have also been found and repaired, the output
+from the state system has also been refined to be extremely clear and concise.
+.sp
+Error reporting has also been introduced, issues found in sls files will now
+be clearly reported when executing Salt States.
+.SS Gentoo Support
+.sp
+Additional experimental support has been added for Gentoo. This is found in
+the contribution from Doug Renn, aka nestegg.
+.SS Salt 0.9.5 Release Notes
+.sp
+Salt 0.9.5 is one of the largest steps forward in the development of Salt.
+.sp
+0.9.5 comes with many milestones, this release has seen the community of
+developers grow out to an international team of 46 code contributors and has
+many feature additions, feature enhancements, bug fixes and speed improvements.
+.SS Community
+.sp
+Nothing has proven to have more value to the development of Salt than the
+outstanding community that has been growing at such a great pace around Salt.
+This has proven not only that Salt has great value, but also the
+expandability of Salt is as exponential as I originally intended.
+.sp
+0.9.5 has received over 600 additional commits since 0.9.4 with a swath of new
+committers. The following individuals have contributed to the development of
+0.9.5:
+.sp
+Aaron Bull Schaefer
+Antti Kaihola
+Bas Tichelaar
+Brad Barden
+Brian Wagner
+Byron Clark
+Chris Scheller
+Christer Edwards
+Clint Savage
+Corey Quinn
+David Boucha
+Eivind Uggedal
+Eric Poelke
+Evan Borgstrom
+Jed Glazner
+Jeff Schroeder
+Jeffrey C. Ollie
+Jonas Buckner
+Kent Tenney
+Martin Schnabel
+Maxim Burgerhout
+Mitch Anderson
+Nathaniel Whiteinge
+Seth House
+Thomas S Hatch
+Thomas Schreiber
+Tor Hveem
+lzyeval
+syphernl
+.sp
+This makes 21 new developers since 0.9.4 was released!
+.sp
+To keep up with the growing community follow Salt on Ohloh
+(\fI\%http://www.ohloh.net/p/salt\fP), to join the Salt development community, fork
+Salt on Github, and get coding (\fI\%https://github.com/saltstack/salt\fP)!
+.SS Major Features
+.SS SPEED! Pickle to msgpack
+.sp
+For a few months now we have been talking about moving away from python
+pickles for network serialization, but a preferred serialization format
+had not yet been found. After an extensive performance testing period
+involving everything from JSON to protocol buffers, a clear winner emerged.
+Message Pack (\fI\%http://msgpack.org/\fP) proved to not only be the fastest and most
+compact, but also the most "salt like". Message Pack is simple, and the code
+involved is very small. The msgpack library for python has been added directly
+to Salt.
+.sp
+This move introduces a few changes to Salt. First off, Salt is no longer a
+"noarch" package, since the msgpack lib is written in C. Salt 0.9.5 will also
+have compatibility issues with 0.9.4 with the default configuration.
+.sp
+We have gone through great lengths to avoid backwards compatibility issues
+with Salt, but changing the serialization medium was going to create issues
+regardless. Salt 0.9.5 is somewhat backwards compatible with earlier minions.
+A 0.9.5 master can command older minions, but only if the \(aqserial\(aq config
+value in the master is set to \(aqpickle\(aq. This will tell the master to publish
+messages in pickle format and will allow the master to receive messages in
+both msgpack and pickle formats.
+.sp
+Therefore the suggested methods for upgrading are either to just upgrade
+everything at once, or to upgrade the master to 0.9.5, set "serial: pickle" in
+the master config, upgrade the minions, and then remove the serial option from
+the config. Since pickles can be used as a security exploit the ability for a
+master to accept pickles from minions at all will be removed in a future
+release.
+.SS C Bindings for YAML
+.sp
+All of the YAML rendering is now done with the YAML C bindings. This speeds up
+all of the sls files when running states.
+.SS Experimental Windows Support
+.sp
+David Boucha has worked tirelessly to bring initial support to Salt for
+Microsoft Windows operating systems. Right now the Salt Minion can run as a
+native Windows service and accept commands.
+.sp
+In the weeks and months to come Windows will receive the full treatment and
+will have support for Salt States and more robust support for managing Windows
+systems. This is a big step forward for Salt to move entirely outside of the
+Unix world, and proves Salt is a viable cross platform solution. Big Thanks
+to Dave for his contribution here!
+.SS Dynamic Module Distribution
+.sp
+Many Salt users have expressed the desire to have Salt distribute in\-house
+modules, states, renderers, returners, and grains. This support has been added
+in a number of ways:
+.SS Modules via States
+.sp
+Now when salt modules are deployed to a minion via the state system as a file,
+then the modules will be automatically loaded into the active running minion
+\- no restart required \- and into the active running state. So custom state
+.INDENT 0.0
+.INDENT 3.5
+modules can be deployed and used in the same state run.
+.UNINDENT
+.UNINDENT
+.SS Modules via Module Environment Directories
+.sp
+Under the file_roots each environment can now have directories that are used
+to deploy large groups of modules. These directories sync modules at the
+beginning of a state run on the minion, or can be manually synced via the Salt
+module "saltutil.sync_all".
+.sp
+The directories are named:
+_modules
+_states
+_grains
+_renderers
+_returners
+.sp
+The modules are pushed to their respective scopes on the minions.
+.SS Module Reloading
+.sp
+Modules can now be reloaded without restarting the minion, this is done by
+calling the sys.reload_modules function.
+.sp
+But wait, there\(aqs more! Now when a salt module of any type is added via
+states the modules will be automatically reloaded, allowing for modules to be
+laid down with states and then immediately used.
+.sp
+Finally, all modules are reloaded when modules are dynamically distributed
+from the salt master.
+.SS Enable / Disable Added to Service
+.sp
+A great deal of demand has existed for adding the capability to set services
+to be started at boot in the service module. This feature also comes with an
+overhaul of the service modules and initial systemd support.
+.sp
+This means that the service state can now accept "\- enable: True" to make sure
+a service is enabled at boot, and "\- enable: False" to make sure it is
+disabled.
+.SS Compound Target
+.sp
+A new target type has been added to the lineup, the compound target. In
+previous versions the desired minions could only be targeted via a single
+specific target type, but now many target specifications can be declared.
+.sp
+These targets can also be separated by and/or operators, so certain properties
+can be used to omit a node:
+.sp
+salt \-C \(aqwebserv* and \fI\%G@os\fP:Debian or \fI\%E@db.*\fP\(aq test.ping
+.sp
+will match all minions with ids starting with webserv via a glob and minions
+matching the os:Debian grain. Or minions that match the "db.*" regular
+expression.
+.SS Node Groups
+.sp
+Often the convenience of having a predefined group of minions to execute
+targets on is desired. This can be accomplished with the new nodegroups
+feature. Nodegroups allow for predefined compound targets to be declared in
+the master configuration file:
+.INDENT 0.0
+.TP
+.B nodegroups:
+group1: \fI\%'L@foo.domain.com\fP,bar.domain.com,baz.domain.com and bl*.domain.com\(aq
+group2: \fI\%'G@os\fP:Debian and foo.domain.com\(aq
+.UNINDENT
+.sp
+And then used via the \-N option:
+.sp
+salt \-N group1 test.ping
+.SS Minion Side Data Store
+.sp
+The data module introduces the initial approach into storing persistent data on
+the minions, specific to the minions. This allows for data to be stored on
+minions that can be accessed from the master or from the minion.
+.sp
+The Minion datastore is young, and will eventually provide an interface similar
+to a more mature key/value pair server.
+.SS Major Grains Improvement
+.sp
+The Salt grains have been overhauled to include a massive amount of extra data.
+This includes hardware data, os data and salt specific data.
+.SS Salt \-Q is Useful Now
+.sp
+In the past the salt query system, which displays the data from recent
+executions, would display it in pure python, and it was unreadable.
+.sp
+0.9.5 has added the outputter system to the \-Q option, thus enabling the salt
+query system to return readable output.
+.SS Packaging Updates
+.sp
+Huge strides have been made in packaging Salt for distributions. These
+additions are thanks to our wonderful community where the work to set up
+packages has proceeded tirelessly.
+.SS FreeBSD
+.sp
+Salt on FreeBSD? There\(aqs a port for that:
+.sp
+\fI\%http://www.freebsd.org/cgi/cvsweb.cgi/ports/sysutils/salt/pkg-descr\fP
+.sp
+This port was developed and added by Christer Edwards. This also marks the
+first time Salt has been included in an upstream packaging system!
+.SS Fedora and Red Hat Enterprise
+.sp
+Salt packages have been prepared for inclusion in the Fedora Project and in
+EPEL for Red Hat Enterprise 5 and 6. These packages are the result of the
+efforts made by Clint Savage (herlo).
+.SS Debian/Ubuntu
+.sp
+A team of many contributors have assisted in developing packages for Debian
+and Ubuntu. Salt is still actively seeking inclusion in upstream Debian and
+Ubuntu and the package data that has been prepared is being pushed through
+the needed channels for inclusion.
+.sp
+These packages have been prepared with the help of:
+Corey Quinn
+Aaron Toponce
+.SS More to Come
+.sp
+We are actively seeking inclusion in more distributions. Primarily getting
+Salt into Gentoo, Suse, OpenBSD and preparing Solaris support are all turning
+into higher priorities.
+.SS Refinement
+.sp
+Salt continues to be refined into a faster, more stable and more usable
+application. 0.9.5 comes with more debug logging, more bug fixes and more
+complete support.
+.SS More Testing, More BugFixes
+.sp
+0.9.5 comes with more bugfixes due to more testing than any previous release.
+The growing community and the introduction of a dedicated QA environment have
+unearthed many issues that were hiding under the covers. This has further
+refined and cleaned the state interface, taking care of things from minor
+visual issues to repairing misleading data.
+.SS Custom Exceptions
+.sp
+A custom exception module has been added to throw salt specific exceptions.
+This allows Salt to give much more granular error information.
+.SS New Modules
+.SS data
+.sp
+The new data module manages a persistent datastore on the minion.
+Big thanks to bastichelaar for his help refining this module
+.SS freebsdkmod
+.sp
+FreeBSD kernel modules can now be managed in the same way Salt handles Linux
+kernel modules.
+This module was contributed thanks to the efforts of Christer Edwards
+.SS gentoo_service
+.sp
+Support has been added for managing services in Gentoo. Now Gentoo services
+can be started, stopped, restarted, enabled, disabled and viewed.
+.SS pip
+.sp
+The pip module introduces management for pip installed applications.
+Thanks goes to whitinge for the addition of the pip module
+.SS rh_service
+.sp
+The rh_service module enables Red Hat and Fedora specific service management.
+Now Red Hat like systems come with extensive management of the classic init
+system used by Red Hat
+.SS saltutil
+.sp
+The saltutil module has been added as a place to hold functions used in the
+maintenance and management of salt itself. Saltutil is used to salt the salt
+minion. The saltutil module is presently used only to sync extension modules
+from the master server.
+.SS systemd
+.sp
+Systemd support has been added to Salt, now systems using this next generation
+init system are supported on systems running systemd.
+.SS virtualenv
+.sp
+The virtualenv module has been added to allow salt to create virtual python
+environments.
+Thanks goes to whitinge for the addition of the virtualenv module
+.SS win_disk
+.sp
+Support for gathering disk information on Microsoft Windows minions
+The windows modules come courtesy of Utah_Dave
+.SS win_service
+.sp
+The win_service module adds service support to Salt for Microsoft Windows
+services
+.SS win_useradd
+.sp
+Salt can now manage local users on Microsoft Windows Systems
+.SS yumpkg5
+.sp
+The yumpkg module introduced in 0.9.4 uses the yum api to interact with the
+yum package manager. Unfortunately, on Red Hat 5 systems salt does not have
+access to the yum api because the yum api is running under python 2.4 and Salt
+needs to run under python 2.6.
+.sp
+The yumpkg5 module bypasses this issue by shelling out to yum on systems where
+the yum api is not available.
+.SS New States
+.SS mysql_database
+.sp
+The new mysql_database state adds the ability to systems running a mysql
+server to manage the existence of mysql databases.
+.sp
+The mysql states are thanks to syphernl
+.SS mysql_user
+.sp
+The mysql_user state enables mysql user management.
+.SS virtualenv
+.sp
+The virtualenv state can manage the state of python virtual environments.
+Thanks to Whitinge for the virtualenv state
+.SS New Returners
+.SS cassandra_returner
+.sp
+A returner allowing Salt to send data to a cassandra server.
+Thanks to Byron Clark for contributing this returner
.SH AUTHOR
Thomas S. Hatch and many others, please see the Authors file
.SH COPYRIGHT
diff --git a/doc/ref/cli/index.rst b/doc/ref/cli/index.rst
index eab76d7c1902..060c7b612feb 100644
--- a/doc/ref/cli/index.rst
+++ b/doc/ref/cli/index.rst
@@ -34,7 +34,7 @@ glob:
.. code-block:: bash
- salt '*foo.com' sys.doc
+ salt \*foo.com sys.doc
Salt can also define the target minions with regular expressions:
@@ -91,6 +91,43 @@ on the return of the primary function the main function is executed.
Execution matching allows for matching minions based on any arbitrairy running
data on tne minions.
+Compound Targeting
+``````````````````
+
+.. versionadded:: 0.9.5
+
+Multiple target interfaces can be used in conjunction to determine the command
+targets. These targets can then be combined using and or or statements. This
+is well defined with an example:
+
+.. code-block:: bash
+
+ salt -C 'G@os:Debian and webser* or E@db.*' test.ping
+
+In this example any minion whose id starts with webser and is running Debian,
+or any minion whose id starts with db will be matched.
+
+The type of matcher defaults to glob, but can be specified with the
+corresponding letter followed by the @ symbol. In the above example a grain is
+used with G@ as well as a regular expression with E@. The webser* target does
+not need to be prefaced with a target type specifier because it is a glob.
+
+Node Group Targeting
+````````````````````
+
+.. versionadded:: 0.9.5
+
+Often the convenience of having a predefined group of minions to execute
+targets on is desired. This can be accomplished with the new nodegroups
+feature. Nodegroups allow for predefined compound targets to be declared in
+the master configuration file:
+
+.. code-block:: yaml
+
+ nodegroups:
+ group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
+ group2: 'G@os:Debian and foo.domain.com'
+
Calling the Function
--------------------
diff --git a/doc/ref/cli/salt-cp.rst b/doc/ref/cli/salt-cp.rst
index dfef3289acba..977aec2dd48d 100644
--- a/doc/ref/cli/salt-cp.rst
+++ b/doc/ref/cli/salt-cp.rst
@@ -52,7 +52,7 @@ Options
.. option:: -Q, --query
- Execute a salt command query, this can be used to find the results os a
+ Execute a salt command query, this can be used to find the results of a
previous function call: -Q test.echo')
.. option:: -c CONFIG, --config=CONFIG
diff --git a/doc/ref/cli/salt.rst b/doc/ref/cli/salt.rst
index f5058c94419b..cf1af8db46c5 100644
--- a/doc/ref/cli/salt.rst
+++ b/doc/ref/cli/salt.rst
@@ -9,7 +9,9 @@ Synopsis
salt -E '.*' [ options ] sys.doc cmd
- salt -F 'operatingsystem:Arch.*' [ options ] test.ping
+ salt -G 'os:Arch.*' [ options ] test.ping
+
+ salt -C 'G@os:Arch.* and webserv* or G@kernel:FreeBSD' [ options ] test.ping
salt -Q test.ping
@@ -33,6 +35,10 @@ Options
The timeout in seconds to wait for replies from the salt minions.
+.. option:: --version
+
+ Print the version of salt that is running.
+
.. option:: -E, --pcre
The target expression will be interpreted as a pcre regular expression
@@ -49,9 +55,32 @@ Options
the minions. The target expression is in the format of ':'; example: 'os:Arch.*'
+.. option:: -C, --compound
+
+ Utilize many target definitions to make the call very granular. This option
+ takes a group of targets separated by and or or. The default matcher is a
+ glob as usual, if something other than a glob is used preface it with the
+ letter denoting the type, example: 'webserv* and G@os:Debian or E@db.*'
+ make sure that the compound target is encapsulated in quotes.
+
+.. option:: -X, --exsel
+
+ Instead of using shell globs use the return code of a function.
+
+.. option:: -N, --nodegroup
+
+ Use a predefined compound target defined in the salt master configuration
+ file
+
+.. option:: --return
+
+ Choose an alternative returner to call on the minion, if an alternative
+ returner is used then the return will not come back to the command line
+ but will be sent to the specified return system.
+
.. option:: -Q, --query
- Execute a salt command query, this can be used to find the results os a
+ Execute a salt command query, this can be used to find the results of a
previous function call: -Q test.echo')
.. option:: -c CONFIG, --config=CONFIG
@@ -60,6 +89,25 @@ Options
settings are required to know where the connections are;
default=/etc/salt/master
+.. option:: --raw-out
+
+ Print the output from the salt command in raw python
+ form, this is suitable for re-reading the output into
+ an executing python script with eval.
+
+.. option:: --text-out
+
+ Print the output from the salt command in the same
+ form the shell would.
+
+.. option:: --yaml-out
+
+ Print the output from the salt command in yaml.
+
+.. option:: --json-out
+
+ Print the output from the salt command in json.
+
See also
========
diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst
index 50785340bfa3..c9908c5d7de9 100644
--- a/doc/ref/configuration/master.rst
+++ b/doc/ref/configuration/master.rst
@@ -42,19 +42,6 @@ The network port to set up the publication interface
publish_port: 4505
-.. conf_master:: publish_pull_port
-
-``publish_pull_port``
----------------------
-
-Default: ``45055``
-
-The port used to communicate to the local publisher
-
-.. code-block:: yaml
-
- publish_pull_port: 45055
-
.. conf_master:: worker_threads
``worker_threads``
@@ -70,19 +57,6 @@ worker_threads value.
worker_threads: 5
-``worker_start_port``
----------------------
-
-Default: ``5``
-
-The port to begin binding workers on, the workers will be created on
-increasingly higher ports
-
-
-.. code-block:: yaml
-
- worker_start_port: 45056
-
.. conf_master:: ret_port
``ret_port``
@@ -97,6 +71,20 @@ execution returns and command executions.
ret_port: 4506
+.. conf_master:: root_dir
+
+``root_dir``
+------------
+
+Default: :file:`/`
+
+The system root directory to operate from, change this to make Salt run from
+an alternative root
+
+.. code-block:: yaml
+
+ root_dir: /
+
.. conf_master:: pki_dir
``pki_dir``
@@ -133,6 +121,16 @@ Default: ``24``
Set the number of hours to keep old job information
+.. conf_master:: sock_dir
+
+``sock_dir``
+------------
+
+Default: :file:`/tmp/salt-unix`
+
+Set the location to use for creating Unix sockets for master process
+communication
+
Master Security Settings
------------------------
@@ -177,7 +175,7 @@ Master State System Settings
``state_top``
-------------
-Default: ``top.yml``
+Default: ``top.sls``
The state system uses a "top" file to tell the minions what environment to
use and what modules to use. The state_top file is defined relative to the
@@ -185,7 +183,7 @@ root of the base environment
.. code-block:: yaml
- state_top: top.yml
+ state_top: top.sls
.. conf_master:: renderer
@@ -200,6 +198,17 @@ The renderer to use on the minions to render the state data
renderer: yaml_jinja
+.. conf_master:: failhard
+
+Default: ``False``
+
+Set the global failhard flag, this informs all states to stop running states
+at the moment a single state fails
+
+.. code-block:: yaml
+
+ failhard: False
+
Master File Server Settings
---------------------------
@@ -264,6 +273,99 @@ The buffer size in the file server in bytes
file_buffer_size: 1048576
+Syndic Server Settings
+----------------------
+
+The Salt syndic is used to pass commands through a master from a higher
+master. Using the syndic is simple, if this is a master that will have
+syndic server(s) below it set the "order_masters" setting to True, if this
+is a master that will be running a syndic daemon for passthrough the
+"syndic_master" setting needs to be set to the location of the master server
+to receive commands from
+
+.. conf_master:: order_masters
+
+``order_masters``
+-----------------
+
+Default: ``False``
+
+Extra data needs to be sent with publications if the master is controlling a
+lower level master via a syndic minion. If this is the case the order_masters
+value must be set to True
+
+.. code-block:: yaml
+
+ order_masters: False
+
+.. conf_master:: syndic_master
+
+``syndic_master``
+-----------------
+
+Default: ``None``
+
+If this master will be running a salt-syndic to connect to a higher level
+master specify the higher level master with this configuration value
+
+.. code-block:: yaml
+
+ syndic_master: masterofmasters
+
+Peer Publish Settings
+---------------------
+
+Salt minions can send commands to other minions, but only if the minion is
+allowed to. By default "Peer Publication" is disabled, and when enabled it
+is enabled for specific minions and specific commands. This allows secure
+compartmentalization of commands based on individual minions.
+
+.. conf_master:: peer
+
+``peer``
+--------
+
+Default: ``{}``
+
+The configuration uses regular expressions to match minions and then a list
+of regular expressions to match functions, the following will allow the
+minion authenticated as foo.example.com to execute functions from the test
+and pkg modules
+
+.. code-block:: yaml
+
+ peer:
+ foo.example.com:
+ - test.*
+ - pkg.*
+
+This will allow all minions to execute all commands:
+
+.. code-block:: yaml
+
+ peer:
+ .*:
+ - .*
+
+This is not recommended, since it would allow anyone who gets root on any
+single minion to instantly have root on all of the minions!
+
+Node Groups
+-----------
+
+.. conf_master:: nodegroups
+
+Default: ``{}``
+
+Node groups allow for logical groupings of minion nodes.
+A group consists of a group name and a compound target.
+
+.. code-block:: yaml
+
+ nodegroups:
+ group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
+ group2: 'G@os:Debian and foo.domain.com'
+
Master Logging Settings
-----------------------
@@ -272,7 +374,7 @@ Master Logging Settings
``log_file``
------------
-Default: :file:`/etc/salt/pki`
+Default: :file:`/var/log/salt/master`
The location of the master log file
@@ -307,7 +409,6 @@ still wish to have 'salt.modules' at the 'debug' level:
.. code-block:: yaml
- log_granular_levels: {
+ log_granular_levels:
'salt': 'warning',
'salt.modules': 'debug'
- }
diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst
index 38c30c688f80..c4eb45ee9c4f 100644
--- a/doc/ref/configuration/minion.rst
+++ b/doc/ref/configuration/minion.rst
@@ -73,6 +73,19 @@ clusters.
id: foo.bar.com
+.. conf_minion:: sub_timeout
+
+``sub_timeout``
+---------------
+
+The minion connection to the master may be interrupted, the minion will
+verify the connection every so many seconds, to disable connection
+verification set this value to 0
+
+.. code-block:: yaml
+
+ sub_timeout: 60
+
.. conf_minion:: cachedir
``cachedir``
@@ -86,6 +99,17 @@ The location for minion cache data.
cachedir: /var/cache/salt
+.. conf_minion:: acceptance_wait_time
+
+Default: ``10``
+
+The number of seconds to wait until attempting to re-authenticate with the
+master.
+
+.. code-block:: yaml
+
+ acceptance_wait_time: 10
+
Minion Module Management
------------------------
@@ -100,9 +124,163 @@ The event may occur in which the administrator desires that a minion should not
be able to execute a certain module. The sys module is built into the minion
and cannot be disabled.
+This setting can also tune the minion, as all modules are loaded into RAM;
+disabling modules will lower the minion's RAM footprint.
+
+.. code-block:: yaml
+
+ disable_modules:
+ - test
+ - solr
+
+.. conf_minion:: disable_returners
+
+``disable_returners``
+---------------------
+
+Default: ``[]`` (all returners are enabled by default)
+
+If certain returners should be disabled, this is the place
+
+.. code-block:: yaml
+
+ disable_returners:
+ - mongo_return
+
+.. conf_minion:: module_dirs
+
+``module_dirs``
+---------------
+
+Default: ``[]``
+
+A list of extra directories to search for salt modules
+
+.. code-block:: yaml
+
+ module_dirs:
+ - /var/lib/salt/modules
+
+.. conf_minion:: returner_dirs
+
+``returner_dirs``
+-----------------
+
+Default: ``[]``
+
+A list of extra directories to search for salt returners
+
+.. code-block:: yaml
+
+    returner_dirs:
+ - /var/lib/salt/returners
+
+.. conf_minion:: states_dirs
+
+``states_dirs``
+---------------
+
+Default: ``[]``
+
+A list of extra directories to search for salt states
+
+.. code-block:: yaml
+
+ states_dirs:
+ - /var/lib/salt/states
+
+
+.. conf_minion:: render_dirs
+
+``render_dirs``
+---------------
+
+Default: ``[]``
+
+A list of extra directories to search for salt renderers
+
+.. code-block:: yaml
+
+ render_dirs:
+ - /var/lib/salt/renderers
+
+.. conf_minion:: cython_enable
+
+``cython_enable``
+-----------------
+
+Default: ``False``
+
+Set this value to true to enable auto loading and compiling of .pyx modules,
+This setting requires that gcc and cython are installed on the minion
+
.. code-block:: yaml
- disable_modules: [cmd, virt, test]
+ cython_enable: False
+
+State Management Settings
+-------------------------
+
+.. conf_minion:: renderer
+
+``renderer``
+------------
+
+Default: ``yaml_jinja``
+
+The default renderer used for local state executions
+
+.. code-block:: yaml
+
+ renderer: yaml_jinja
+
+.. conf_minion:: state_verbose
+
+``state_verbose``
+-----------------
+
+Default: ``False``
+
+state_verbose allows for the data returned from the minion to be more
+verbose. Normally only states that fail or states that have changes are
+returned, but setting state_verbose to True will return all states that
+were checked
+
+.. code-block:: yaml
+
+ state_verbose: True
+
+.. conf_minion:: autoload_dynamic_modules
+
+``autoload_dynamic_modules``
+----------------------------
+
+Default: ``True``
+
+autoload_dynamic_modules turns on automatic loading of modules found in the
+environments on the master. This is turned on by default; to turn off
+autoloading modules when states run set this value to False.
+
+.. code-block:: yaml
+
+ autoload_dynamic_modules: True
+
+.. conf_minion:: clean_dynamic_modules
+
+Default: ``True``
+
+clean_dynamic_modules keeps the dynamic modules on the minion in sync with
+the dynamic modules on the master, this means that if a dynamic module is
+not on the master it will be deleted from the minion. By default this is
+enabled and can be disabled by changing this value to False
+
+.. code-block:: yaml
+
+ clean_dynamic_modules: True
+
+
+Security Settings
+------------------
.. conf_minion:: open_mode
@@ -118,3 +296,64 @@ minion to clean the keys.
.. code-block:: yaml
open_mode: False
+
+Thread Settings
+---------------
+
+.. conf_minion:: multiprocessing
+
+Default: ``True``
+
+Disable multiprocessing support, by default when a minion receives a
+publication a new process is spawned and the command is executed therein.
+
+.. code-block:: yaml
+
+ multiprocessing: True
+
+Minion Logging Settings
+-----------------------
+
+.. conf_minion:: log_file
+
+``log_file``
+------------
+
+Default: :file:`/var/log/salt/minion`
+
+The location of the minion log file
+
+.. code-block:: yaml
+
+ log_file: /var/log/salt/minion
+
+.. conf_minion:: log_level
+
+``log_level``
+-------------
+
+Default: ``warning``
+
+The level of messages to send to the log file.
+One of 'info', 'quiet', 'critical', 'error', 'debug', 'warning'.
+
+.. code-block:: yaml
+
+ log_level: warning
+
+.. conf_minion:: log_granular_levels
+
+``log_granular_levels``
+-----------------------
+
+Default: ``{}``
+
+Logger levels can be used to tweak specific loggers logging levels.
+Imagine you want to have the salt library at the 'warning' level, but, you
+still wish to have 'salt.modules' at the 'debug' level:
+
+.. code-block:: yaml
+
+ log_granular_levels:
+ 'salt': 'warning',
+ 'salt.modules': 'debug'
diff --git a/doc/ref/file_server/dynamic-modules.rst b/doc/ref/file_server/dynamic-modules.rst
new file mode 100644
index 000000000000..fffff4acc44c
--- /dev/null
+++ b/doc/ref/file_server/dynamic-modules.rst
@@ -0,0 +1,6 @@
+Dynamic Module Distribution
+===========================
+
+.. versionadded:: 0.9.5
+
+
diff --git a/doc/ref/runners.rst b/doc/ref/runners.rst
index d4b804f280a7..138866813db1 100644
--- a/doc/ref/runners.rst
+++ b/doc/ref/runners.rst
@@ -7,7 +7,7 @@ Salt runners are convenience applications executed with the salt-run command.
A Salt runner can be a simple client call, or a complex application.
The use for a salt running is to build a frontend hook for running sets of
-commands via salt of creating special formatted output.
+commands via salt or creating special formatted output.
Writing Salt Runners
--------------------
diff --git a/doc/ref/states/writing.rst b/doc/ref/states/writing.rst
new file mode 100644
index 000000000000..3326b2de95dc
--- /dev/null
+++ b/doc/ref/states/writing.rst
@@ -0,0 +1,51 @@
+=============
+State Modules
+=============
+
+State Modules are the components that map to actual enforcement and management
+of salt states.
+
+States are - Easy to Write!
+============================
+
+State Modules should be easy to write and straightforward. The information
+passed to the SLS data structures will map directly to the states modules.
+
+Mapping the information from the SLS data is simple, this example should
+illustrate:
+
+SLS file
+.. code-block:: yaml
+
+ /etc/salt/master: # maps to "name"
+ file: # maps to State module filename eg https://github.com/saltstack/salt/blob/develop/salt/states/file.py
+ - managed # maps to the managed function in the file State module
+ - user: root # one of many options passed to the manage function
+ - group: root
+ - mode: 644
+ - source: salt://salt/master
+
+Therefore this SLS data can be directly linked to a module, function and
+arguments passed to that function.
+
+This does issue the burden, that function names, state names and function
+arguments should be very human readable inside state modules, since they
+directly define the user interface.
+
+Cross Calling Modules
+=====================
+
+As with Execution Modules State Modules can also make use of the ``__salt__``
+and ``__grains__`` data.
+
+It is important to note, that the real work of state management should not be
+done in the state module unless it is needed, a good example is the pkg state
+module. This module does not do any package management work, it just calls the
+pkg execution module. This makes the pkg state module completely generic, which
+is why there is only one pkg state module and many backend pkg execution
+modules.
+
+On the other hand some modules will require that the logic be placed in the
+state module, a good example of this is the file module. But in the vast
+majority of cases this is not the best approach, and writing specific
+execution modules to do the backend work will be the optimal solution.
diff --git a/doc/topics/community.rst b/doc/topics/community.rst
index feea6de6e58f..f9a4faf5b5e1 100644
--- a/doc/topics/community.rst
+++ b/doc/topics/community.rst
@@ -26,14 +26,11 @@ http://groups.google.com/group/salt-users
IRC
===
-The Salt IRC channel is hosted on the `OFTC`_ network. Connect to the OFTC
-server:
+The ``#salt`` IRC channel is hosted on the popular `Freenode`__ network. You
+can use the `Freenode webchat client`__ right from your browser.
-irc://irc.oftc.net:6667
-
-and join us in ``#salt``.
-
-.. _`OFTC`: http://www.oftc.net/oftc/
+.. __: http://freenode.net/irc_servers.shtml
+.. __: http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83
.. _community-github:
diff --git a/doc/topics/installation.rst b/doc/topics/installation.rst
index 1fb6cf479029..8c5129f91051 100644
--- a/doc/topics/installation.rst
+++ b/doc/topics/installation.rst
@@ -40,45 +40,29 @@ running and the Salt :term:`minions ` point to the master.
Red Hat
=======
-Fedora
-------
-
-Salt is currently being built for Fedora. The latest koji build pages can be
-found here:
+We are working to get Salt packages into EPEL. In the meantime you can
+:command:`yum install salt-master salt-minion` via our Fedora People
+repository.
-* `Fedora 14 `_
-* `Fedora 15 `_
-* `Fedora Rawhide `_
+Red Hat Enterprise Linux 5 & 6 or CentOS 5 & 6
+----------------------------------------------
-Red Hat Enterprise Linux 6
---------------------------
+1. Install the `EPEL`__ repository::
-Salt is being built for EPEL6. `Browse the latest builds.
-`_
+2. Install our repository on FedoraPeople::
-The ZeroMQ packages in EPEL6 have been tested with this package, but if you
-still have issues these backports may help:
+ wget -O /etc/yum.repos.d/epel-salt.repo \
+ http://repos.fedorapeople.org/repos/herlo/salt/epel-salt.repo
-* :download:`ZeroMQ backport `
-* :download:`pyzmq bindings backport `
-* `Package to set up EPEL repository
- `_
- (provided by the EPEL project)
-
-Red Hat Enterprise Linux 5
---------------------------
+.. __: http://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
-Salt is being built for RHEL5, updates will be available soon!
+Fedora 15 & 16
+--------------
-Red Hat Enterprise Linux 5 requires more backports and the use of the Python
-2.6 stack provided in the EPEL repository. All of the listed packages need to
-be installed and the EPEL repository enabled to bring in the needed
-dependencies:
+1. Install our repository on FedoraPeople::
-* :download:`Salt rpm `
-* :download:`YAML bindings for Python 2.6 `
-* :download:`ZeroMQ backport `
-* :download:`pyzmq bindings backport `
+ wget -O /etc/yum.repos.d/fedora-salt.repo \
+ http://repos.fedorapeople.org/repos/herlo/salt/fedora-salt.repo
Arch Linux
==========
@@ -94,46 +78,68 @@ Debian / Ubuntu
Ubuntu
------
-A PPA is available until we can get packages into apt::
+We are working to get Salt into apt. In the meantime we have a PPA available
+for Lucid::
aptitude -y install python-software-properties
+ add-apt-repository ppa:chris-lea/libpgm
+ add-apt-repository ppa:chris-lea/zeromq
add-apt-repository ppa:saltstack/salt
aptitude update
aptitude install salt
-.. admonition:: Installing ZeroMQ on Ubuntu Lucid (10.04 LTS)
-
- The ZeroMQ package is available starting with Maverick but there are `PPA
- packages available for Lucid`_ for both ZeroMQ and pyzmq. You will need to
- also enable the following PPAs before running the commands above::
-
- add-apt-repository ppa:chris-lea/libpgm
- add-apt-repository ppa:chris-lea/zeromq
-
-.. _`PPA packages available for Lucid`: https://launchpad.net/~chris-lea/+archive/zeromq
-
Debian
------
-`A deb package is currently in testing`__. Until that is accepted you can
-install Salt via :command:`easy_install` or :command:`pip`::
+`A deb package is currently in testing`__ for inclusion in apt. Until that is
+accepted you can install Salt by downloading the latest ``.deb`` in the
+`downloads section on GitHub`__ and installing that manually:
- pip install salt
+.. parsed-literal::
+
+ dpkg -i salt-|version|.deb
.. __: http://mentors.debian.net/package/salt
+.. __: https://github.com/saltstack/salt/downloads
.. admonition:: Installing ZeroMQ on Squeeze (Debian 6)
- ZeroMQ packages are available in squeeze-backports.
+ There is a `python-zmq`__ package available in Debian "wheezy (testing)".
+ If you don't have that repo enabled the best way to install Salt and pyzmq
+ is by using :command:`pip` (or :command:`easy_install`)::
+
+ pip install pyzmq salt
+
+.. __: http://packages.debian.org/search?keywords=python-zmq
+
+Gentoo
+======
+
+Salt can be easily installed on Gentoo::
+
+ emerge pyyaml m2crypto pycrypto jinja pyzmq
- 1. Add the following line to your :file:`/etc/apt/sources.list`::
+Then download and install from source:
+
+1. Download the latest source tarball from the GitHub downloads directory for
+ the Salt project: |latest|
+
+2. Untar the tarball and run the :file:`setup.py` as root:
+
+.. parsed-literal::
+
+ tar xvf salt-|version|.tar.gz
+ cd salt-|version|
+ python2 setup.py install
+
+FreeBSD
+=======
- deb http://backports.debian.org/debian-backports squeeze-backports main
+Salt is available in the FreeBSD ports tree::
- 2. Run::
+ cd /usr/ports/sysutils/salt && make install clean
- aptitude update
- aptitude install libzmq1 python-zmq
+.. seealso:: :doc:`freebsd installation guide `
Installing from source
======================
diff --git a/doc/topics/releases/0.9.4.rst b/doc/topics/releases/0.9.4.rst
index 8433e5005909..cd6244eea3b9 100644
--- a/doc/topics/releases/0.9.4.rst
+++ b/doc/topics/releases/0.9.4.rst
@@ -127,6 +127,7 @@ set the order to last:
pkg:
- installed
- order: last
+
Substantial testing has gone into the state system and it is ready for real
world usage. A great deal has been added to the documentation for states and
the modules and functions available to states have been cleanly documented.
diff --git a/doc/topics/releases/0.9.5.rst b/doc/topics/releases/0.9.5.rst
new file mode 100644
index 000000000000..816876213d52
--- /dev/null
+++ b/doc/topics/releases/0.9.5.rst
@@ -0,0 +1,388 @@
+========================
+Salt 0.9.5 Release Notes
+========================
+
+Salt 0.9.5 is one of the largest steps forward in the development of Salt.
+
+0.9.5 comes with many milestones, this release has seen the community of
+developers grow out to an international team of 46 code contributors and has
+many feature additions, feature enhancements, bug fixes and speed improvements.
+
+Community
+=========
+
+Nothing has proven to have more value to the development of Salt than the
+outstanding community that has been growing at such a great pace around Salt.
+This has proven not only that Salt has great value, but also the
+expandability of Salt is as exponential as I originally intended.
+
+0.9.5 has received over 600 additional commits since 0.9.4 with a swath of new
+committers. The following individuals have contributed to the development of
+0.9.5:
+
+Aaron Bull Schaefer
+Antti Kaihola
+Bas Tichelaar
+Brad Barden
+Brian Wagner
+Byron Clark
+Chris Scheller
+Christer Edwards
+Clint Savage
+Corey Quinn
+David Boucha
+Eivind Uggedal
+Eric Poelke
+Evan Borgstrom
+Jed Glazner
+Jeff Schroeder
+Jeffrey C. Ollie
+Jonas Buckner
+Kent Tenney
+Martin Schnabel
+Maxim Burgerhout
+Mitch Anderson
+Nathaniel Whiteinge
+Seth House
+Thomas S Hatch
+Thomas Schreiber
+Tor Hveem
+lzyeval
+syphernl
+
+This makes 21 new developers since 0.9.4 was released!
+
+To keep up with the growing community follow Salt on Ohloh
+(http://www.ohloh.net/p/salt), to join the Salt development community, fork
+Salt on Github, and get coding (https://github.com/saltstack/salt)!
+
+Major Features
+==============
+
+SPEED! Pickle to msgpack
+------------------------
+
+For a few months now we have been talking about moving away from python
+pickles for network serialization, but a preferred serialization format
+had not yet been found. After an extensive performance testing period
+involving everything from JSON to protocol buffers, a clear winner emerged.
+Message Pack (http://msgpack.org/) proved to not only be the fastest and most
+compact, but also the most "salt like". Message Pack is simple, and the code
+involved is very small. The msgpack library for python has been added directly
+to Salt.
+
+This move introduces a few changes to Salt. First off, Salt is no longer a
+"noarch" package, since the msgpack lib is written in C. Salt 0.9.5 will also
+have compatibility issues with 0.9.4 with the default configuration.
+
+We have gone through great lengths to avoid backwards compatibility issues
+with Salt, but changing the serialization medium was going to create issues
+regardless. Salt 0.9.5 is somewhat backwards compatible with earlier minions.
+A 0.9.5 master can command older minions, but only if the "serial" config
+value in the master is set to "pickle". This will tell the master to publish
+messages in pickle format and will allow the master to receive messages in
+both msgpack and pickle formats.
+
+Therefore the suggested methods for upgrading are either to just upgrade
+everything at once, or to upgrade the master to 0.9.5, set "serial: pickle" in
+the master config, upgrade the minions, and then remove the serial option from
+the config. Since pickles can be used as a security exploit the ability for a
+master to accept pickles from minions at all will be removed in a future
+release.
+
+C Bindings for YAML
+--------------------
+
+All of the YAML rendering is now done with the YAML C bindings. This speeds up
+all of the sls files when running states.
+
+Experimental Windows Support
+----------------------------
+
+David Boucha has worked tirelessly to bring initial support to Salt for
+Microsoft Windows operating systems. Right now the Salt Minion can run as a
+native Windows service and accept commands.
+
+In the weeks and months to come Windows will receive the full treatment and
+will have support for Salt States and more robust support for managing Windows
+systems. This is a big step forward for Salt to move entirely outside of the
+Unix world, and proves Salt is a viable cross platform solution. Big Thanks
+to Dave for his contribution here!
+
+Dynamic Module Distribution
+---------------------------
+
+Many Salt users have expressed the desire to have Salt distribute in-house
+modules, states, renderers, returners, and grains. This support has been added
+in a number of ways:
+
+Modules via States
+```````````````````
+
+Now when salt modules are deployed to a minion via the state system as a file,
+then the modules will be automatically loaded into the active running minion
+- no restart required - and into the active running state. So custom state
+ modules can be deployed and used in the same state run.
+
+Modules via Module Environment Directories
+```````````````````````````````````````````
+Under the file_roots each environment can now have directories that are used
+to deploy large groups of modules. These directories sync modules at the
+beginning of a state run on the minion, or can be manually synced via the Salt
+module "saltutil.sync_all".
+
+The directories are named:
+_modules
+_states
+_grains
+_renderers
+_returners
+
+The modules are pushed to their respective scopes on the minions.
+
+Module Reloading
+----------------
+
+Modules can now be reloaded without restarting the minion, this is done by
+calling the sys.reload_modules function.
+
+But wait, there's more! Now when a salt module of any type is added via
+states the modules will be automatically reloaded, allowing for modules to be
+laid down with states and then immediately used.
+
+Finally, all modules are reloaded when modules are dynamically distributed
+from the salt master.
+
+Enable / Disable Added to Service
+---------------------------------
+
+A great deal of demand has existed for adding the capability to set services
+to be started at boot in the service module. This feature also comes with an
+overhaul of the service modules and initial systemd support.
+
+This means that the service state can now accept "- enable: True" to make sure
+a service is enabled at boot, and "- enable: False" to make sure it is
+disabled.
+
+Compound Target
+---------------
+
+A new target type has been added to the lineup, the compound target. In
+previous versions the desired minions could only be targeted via a single
+specific target type, but now many target specifications can be declared.
+
+These targets can also be separated by and/or operators, so certain properties
+can be used to omit a node:
+
+salt -C 'webserv* and G@os:Debian or E@db.*' test.ping
+
+will match all minions with ids starting with webserv via a glob and minions
+matching the os:Debian grain. Or minions that match the "db.*" regular
+expression.
+
+
+Node Groups
+-----------
+
+Often the convenience of having a predefined group of minions to execute
+targets on is desired. This can be accomplished with the new nodegroups
+feature. Nodegroups allow for predefined compound targets to be declared in
+the master configuration file:
+
+nodegroups:
+ group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
+ group2: 'G@os:Debian and foo.domain.com'
+
+And then used via the -N option:
+
+salt -N group1 test.ping
+
+Minion Side Data Store
+-----------------------
+
+The data module introduces the initial approach into storing persistent data on
+the minions, specific to the minions. This allows for data to be stored on
+minions that can be accessed from the master or from the minion.
+
+The Minion datastore is young, and will eventually provide an interface similar
+to a more mature key/value pair server.
+
+Major Grains Improvement
+-------------------------
+
+The Salt grains have been overhauled to include a massive amount of extra data.
+This includes hardware data, OS data and Salt-specific data.
+
+Salt -Q is Useful Now
+---------------------
+
+In the past the salt query system, which would display the data from recent
+executions would be displayed in pure python, and it was unreadable.
+
+0.9.5 has added the outputter system to the -Q option, thus enabling the salt
+query system to return readable output.
+
+Packaging Updates
+=================
+
+Huge strides have been made in packaging Salt for distributions. These
+additions are thanks to our wonderful community where the work to set up
+packages has proceeded tirelessly.
+
+FreeBSD
+-------
+
+Salt on FreeBSD? There's a port for that:
+
+http://www.freebsd.org/cgi/cvsweb.cgi/ports/sysutils/salt/pkg-descr
+
+This port was developed and added by Christer Edwards. This also marks the
+first time Salt has been included in an upstream packaging system!
+
+Fedora and Red Hat Enterprise
+------------------------------
+
+Salt packages have been prepared for inclusion in the Fedora Project and in
+EPEL for Red Hat Enterprise 5 and 6. These packages are the result of the
+efforts made by Clint Savage (herlo).
+
+Debian/Ubuntu
+-------------
+
+A team of many contributors have assisted in developing packages for Debian
+and Ubuntu. Salt is still actively seeking inclusion in upstream Debian and
+Ubuntu and the package data that has been prepared is being pushed through
+the needed channels for inclusion.
+
+These packages have been prepared with the help of:
+Corey
+Aaron Toponce
+and others.
+
+More to Come
+------------
+
+We are actively seeking inclusion in more distributions. Primarily getting
+Salt into Gentoo, Suse, OpenBSD and preparing Solaris support are all turning
+into higher priorities.
+
+Refinement
+==========
+
+Salt continues to be refined into a faster, more stable and more usable
+application. 0.9.5 comes with more debug logging, more bug fixes and more
+complete support.
+
+More Testing, More BugFixes
+---------------------------
+
+0.9.5 comes with more bugfixes due to more testing than any previous release.
+The growing community and the introduction of a dedicated QA environment have
+unearthed many issues that were hiding under the covers. This has further
+refined and cleaned the state interface, taking care of things from minor
+visual issues to repairing misleading data.
+
+Custom Exceptions
+-----------------
+
+A custom exception module has been added to throw salt specific exceptions.
+This allows Salt to give much more granular error information.
+
+New Modules
+-----------
+
+data
+````
+The new data module manages a persistent datastore on the minion.
+Big thanks to bastichelaar for his help refining this module
+
+freebsdkmod
+````````````
+FreeBSD kernel modules can now be managed in the same way Salt handles Linux
+kernel modules.
+This module was contributed thanks to the efforts of Christer Edwards
+
+gentoo_service
+``````````````
+Support has been added for managing services in Gentoo. Now Gentoo services
+can be started, stopped, restarted, enabled, disabled and viewed.
+
+pip
+````
+The pip module introduces management for pip installed applications.
+Thanks goes to whitinge for the addition of the pip module
+
+rh_service
+``````````
+The rh_service module enables Red Hat and Fedora specific service management.
+Now Red Hat like systems come with extensive management of the classic init
+system used by Red Hat
+
+saltutil
+````````
+The saltutil module has been added as a place to hold functions used in the
+maintenance and management of salt itself. Saltutil is used to salt the salt
+minion. The saltutil module is presently used only to sync extension modules
+from the master server.
+
+systemd
+````````
+Systemd support has been added to Salt, now systems using this next generation
+init system are supported on systems running systemd.
+
+virtualenv
+``````````
+The virtualenv module has been added to allow salt to create virtual python
+environments.
+Thanks goes to whitinge for the addition of the virtualenv module
+
+win_disk
+````````
+Support for gathering disk information on Microsoft Windows minions
+The windows modules come courtesy of Utah_Dave
+
+win_service
+```````````
+The win_service module adds service support to Salt for Microsoft Windows
+services
+
+win_useradd
+````````````
+Salt can now manage local users on Microsoft Windows Systems
+
+yumpkg5
+```````
+The yumpkg module introduced in 0.9.4 uses the yum api to interact with the
+yum package manager. Unfortunately, on Red Hat 5 systems salt does not have
+access to the yum api because the yum api is running under python 2.4 and Salt
+needs to run under python 2.6.
+
+The yumpkg5 module bypasses this issue by shelling out to yum on systems where
+the yum api is not available.
+
+New States
+-----------
+
+mysql_database
+``````````````
+The new mysql_database state adds the ability to systems running a mysql
+server to manage the existence of mysql databases.
+
+The mysql states are thanks to syphernl
+
+mysql_user
+```````````
+The mysql_user state enables mysql user management.
+
+virtualenv
+``````````
+The virtualenv state can manage the state of python virtual environments.
+Thanks to Whitinge for the virtualenv state
+
+New Returners
+-------------
+
+cassandra_returner
+``````````````````
+A returner allowing Salt to send data to a cassandra server.
+Thanks to Byron Clark for contributing this returner
diff --git a/doc/topics/tutorials/freebsd.rst b/doc/topics/tutorials/freebsd.rst
new file mode 100644
index 000000000000..067509a5bb4a
--- /dev/null
+++ b/doc/topics/tutorials/freebsd.rst
@@ -0,0 +1,245 @@
+.. _introduction:
+
+Introduction
+============
+
+Salt was added to the FreeBSD ports tree Dec 26th, 2011 by Christer Edwards
+. It has been tested on FreeBSD 8.2 and 9.0RC
+releases.
+
+Salt is dependent on the following additional ports. These will be installed as
+dependencies of the ``sysutils/salt`` port::
+
+ /devel/py-yaml
+ /devel/py-pyzmq
+ /devel/py-Jinja2
+ /security/py-pycrypto
+ /security/py-m2crypto
+
+.. _installation:
+
+Installation
+============
+
+To install Salt from the FreeBSD ports tree, use the command::
+
+ cd /usr/ports/sysutils/salt && make install clean
+
+Once the port is installed you'll need to make a few configuration changes.
+These include defining the IP to bind to (optional), and some configuration
+path changes to make salt fit more natively into the FreeBSD filesystem tree.
+
+.. _configuration:
+
+Configuration
+=============
+
+In the sections below I'll outline configuration options for both the Salt
+Master and Salt Minions.
+
+The Salt port installs two sample configuration files, salt/master.sample and
+salt/minion.sample (these should be installed in /usr/local/etc/, unless you use a
+different %%PREFIX%%). You'll need to copy these .sample files into place and
+make a few edits. First, copy them into place as seen here::
+
+ cp /usr/local/etc/salt/master.sample /usr/local/etc/salt/master
+ cp /usr/local/etc/salt/minion.sample /usr/local/etc/salt/minion
+
+Note: You'll only need to copy the config for the service you're going to run.
+
+Once you've copied the config into place you'll need to make changes specific
+to your setup. Below I'll outline suggested configuration changes to the
+Master, after which I'll outline configuring the Minion.
+
+.. _master_configuration:
+
+Master Configuration
+====================
+
+This section outlines configuration of a Salt Master, which is used to control
+other machines known as "minions" (see "Minion Configuration" for instructions
+on configuring a minion). This will outline IP configuration, and a few key
+configuration paths.
+
+**Interface**
+
+By default the Salt master listens on ports 4505 and 4506 on all interfaces
+(0.0.0.0). If you have a need to bind Salt to a specific IP, redefine the
+"interface" directive as seen here::
+
+ - #interface: 0.0.0.0
+ + interface: 10.0.0.1
+
+**pki_dir**
+
+Salt is primarily developed on Linux, and as such carries some Linux-isms in
+its development and configuration. These are all very easily remedied to more
+seamlessly fit into FreeBSD. One such configuration option is the *pki_dir:*
+directive. To ensure all of Salts files end up where you expect, you'll likely
+want to update this line as seen here::
+
+ - #pki_dir: /etc/salt/pki
+ + pki_dir: /usr/local/etc/salt/pki
+
+**file_roots**
+
+Finally, if you plan on using Salts state-management features, you'll want to
+update the *file_roots:* directive. This directive defines the location of the
+state files. I suggest updating this directive as seen here::
+
+ - #file_roots:
+ - # base:
+ - # - /srv/salt
+ + file_roots:
+ + base:
+ + - /usr/local/etc/salt/states
+
+**rc.conf**
+
+Last but not least you'll need to activate the Salt Master in your rc.conf
+file. Using your favorite editor, open /etc/rc.conf or /etc/rc.conf.local and
+add this line::
+
+ + salt_master_enable="YES"
+
+Once you've completed all of these steps you're ready to start your Salt
+Master. The Salt port installs an rc script which should be used to manage your
+Salt Master. You should be able to start your Salt Master now using the command
+seen here::
+
+ service salt_master start
+
+If your Salt Master doesn't start successfully, go back through each step and
+see if anything was missed. Salt doesn't take much configuration (part of its
+beauty!), and errors are usually simple mistakes.
+
+.. _ minion_configuration:
+
+Minion Configuration
+====================
+
+Configuring a Salt Minion is surprisingly simple. Unless you have a real need
+for customizing your minion configuration (which there are plenty of options if
+you are so inclined!), there is one simple directive that needs to be updated.
+That option is the location of the master.
+
+By default a Salt Minion will try to connect to the dns name "salt". If you
+have the ability to update DNS records for your domain you might create an A or
+CNAME record for "salt" that points to your Salt Master. If you are able to do
+this you likely can do without any minion configuration at all.
+
+If you are not able to update DNS, you'll simply need to update one entry in
+the configuration file. Using your favorite editor, open the minion
+configuration file and update the "master" entry as seen here::
+
+ - #master: salt
+ + master: 10.0.0.1
+
+Simply update the master directive to the IP or hostname of your Salt Master.
+Save your changes and you're ready to start your Salt Minion. Advanced
+configuration options are covered in another chapter.
+
+**pki_dir**
+
+Salt is primarily developed on Linux, and as such carries some Linux-isms in
+its development and configuration. These are all very easily remedied to more
+seamlessly fit into FreeBSD. One such configuration option is the *pki_dir:*
+directive. To ensure all of Salt's files end up where you expect, you'll likely
+want to update this line as seen here::
+
+ - #pki_dir: /etc/salt/pki
+ + pki_dir: /usr/local/etc/salt/pki
+
+**rc.conf**
+
+Before you're able to start the Salt Minion you'll need to update your rc.conf
+file. Using your favorite editor open /etc/rc.conf or /etc/rc.conf.local and
+add this line::
+
+ + salt_minion_enable="YES"
+
+Once you've completed all of these steps you're ready to start your Salt
+Minion. The Salt port installs an rc script which should be used to manage your
+Salt Minion. You should be able to start your Salt Minion now using the command
+seen here::
+
+ service salt_minion start
+
+If your Salt Minion doesn't start successfully, go back through each step and
+see if anything was missed. Salt doesn't take much configuration (part of its
+beauty!), and errors are usually simple mistakes.
+
+.. _tying_it_all_together:
+
+Tying It All Together
+======================
+
+If you've successfully completed each of the steps above you should have a
+running Salt Master and a running Salt Minion. The Minion should be configured
+to point to the Master. To verify that there is communication flowing between
+the Minion and Master we'll run a few initial ``salt`` commands. These commands
+will validate the Minion's RSA encryption key, and then send a test command to
+the Minion to ensure that commands and responses are flowing as expected.
+
+**Key Management**
+
+Salt uses AES encryption for all communication between the Master and the
+Minion. This ensures that the commands you send to your Minions (your cloud)
+can not be tampered with, and that communication between Master and Minion is
+only done through trusted, accepted keys.
+
+Before you'll be able to do any remote execution or state management you'll
+need to accept any pending keys on the Master. Run the ``salt-key`` command to
+list the keys known to the Salt Master::
+
+ [root@master ~]# salt-key -L
+ Unaccepted Keys:
+ avon
+ bodie
+ bubbles
+ marlo
+ Accepted Keys:
+
+This example shows that the Salt Master is aware of four Minions, but none of
+the keys have been accepted. To accept the keys and allow the Minions to be
+controlled by the Master, again use the ``salt-key`` command::
+
+ [root@master ~]# salt-key -A
+ [root@master ~]# salt-key -L
+ Unaccepted Keys:
+ Accepted Keys:
+ avon
+ bodie
+ bubbles
+ marlo
+
+The ``salt-key`` command allows for signing keys individually or in bulk. The
+example above, using ``-A`` bulk-accepts all pending keys. To accept keys
+individually use the lowercase of the same option, ``-a keyname``.
+
+.. _sending_commands:
+
+Sending Commands
+================
+
+Everything should be set for you to begin remote management of your Minions.
+Whether you have a few or a few-dozen, Salt can help you manage them easily!
+
+For final verification, send a test function from your Salt Master to your
+minions. If all of your minions are properly communicating with your Master,
+you should see "True" responses from each of them. See the example below to send
+the ``test.ping`` remote command::
+
+ [root@avon ~]# salt '*' test.ping
+ {'avon': True}
+
+.. _where_do_i_go_from_here:
+
+Where Do I Go From Here
+========================
+
+Congratulations! You've successfully configured your first Salt Minions and are
+able to send remote commands. I'm sure you're eager to learn more about what
+Salt can do. Depending on the primary way you want to manage your machines you
+may either want to visit the section regarding Salt States, or the section on
+Modules.
diff --git a/doc/topics/tutorials/starting_states.rst b/doc/topics/tutorials/starting_states.rst
new file mode 100644
index 000000000000..d1361e8340ef
--- /dev/null
+++ b/doc/topics/tutorials/starting_states.rst
@@ -0,0 +1,457 @@
+=========================
+How Do I Use Salt States?
+=========================
+
+Simplicity, Simplicity, Simplicity
+
+Many of the most powerful and useful engineering solutions are founded on
+simple principles, the Salt SLS system strives to do just that.
+
+The core of the Salt State system is the SLS, or the SaLt State file. The SLS
+is a representation of the state in which a system should be, and is set up
+to contain this data in the most simple way possible.
+
+It is All Just Data
+===================
+
+Before delving into the particulars, it will help to understand that the SLS
+is just a data structure under the hood. While understanding that the SLS is
+just a data structure is not at all critical to understand to make use of Salt States,
+it should help bolster the understanding of where the real power is.
+
+SLS files are therefore, in reality, just dictionaries, lists, strings and
+numbers. By using this approach Salt can be much more flexible, and as someone
+writes more SLS files it becomes clear exactly what is being written. The result
+is a system that is easy to understand, yet grows with the needs of the admin
+or developer, offering simple constructs that grow to encompass the most
+complicated needs.
+
+In the section titled "State Data Structures" a reference exists, explaining
+in depth how the data is laid out.
+
+Default Data - YAML
+===================
+
+By default Salt represents the SLS data in what is one of the simplest
+serialization formats available - YAML.
+
+A typical, small SLS file will often look like this in YAML:
+
+.. code-block:: yaml
+
+ apache:
+ pkg:
+ - installed
+ service:
+ - running
+ - require:
+ - pkg: apache
+
+This SLS data will ensure that the package named apache is installed, and
+that the apache service is running. The components can be explained in a
+simple way.
+
+The first line is the ID for a set of data, and it is called the ID
+Declaration. This ID sets the name of the thing that needs to be manipulated.
+
+The second and fourth lines are the start of the State Declarations, so they
+are using the pkg and service states respectively. The pkg state manages a
+software package to get installed via the system's native package manager,
+and the service state manages a system daemon. Below the pkg and service
+lines are the function to run. This function defines what state the named
+package and service should be in. Here the package is to be installed, and
+the service should be running.
+
+Finally, on line 6, is the word ``require``, this is called a Requisite
+Statement, and it makes sure that the apache service is only started after
+the successful installation of the apache package.
+
+Adding Configs and Users
+========================
+
+When setting up a service like an apache server many more components may
+need to be added. The apache configuration file will most likely be managed,
+and a user and group may need to be set up.
+
+.. code-block:: yaml
+
+ apache:
+ pkg:
+ - installed
+ service:
+ - running
+ - watch:
+ - pkg: apache
+ - file: /etc/httpd/conf/httpd.conf
+ - user: apache
+ user:
+ - present
+ - uid: 87
+ - gid: 87
+ - home: /var/www/html
+ - shell: /bin/nologin
+ - require:
+ - group: apache
+ group:
+ - present
+ - gid: 87
+ - require:
+ - pkg: apache
+
+ /etc/httpd/conf/httpd.conf:
+ file:
+ - managed
+ - source: salt://apache/httpd.conf
+ - user: root
+ - group: root
+ - mode: 644
+
+This SLS data greatly extends the first example, and includes a config file,
+a user, a group and new requisite statement: ``watch``.
+
+Adding more states is easy, since the new user and group states are under
+the apache ID, the user and group will be the apache user and group. The
+``require`` statements will make sure that the user will only be made after
+the group, and that the group will be made only after the apache package is
+installed.
+
+Next, the ``require`` statement under service was changed to watch, and is
+now watching 3 states instead of just one. The watch statement does the same
+thing as require, making sure that the other states run before running the
+state with a watch, but it adds an extra component. The ``watch`` statement
+will run the state's watcher function if any of the watched states changed
+anything. So if the package was updated, the config file changed, or the user
+uid modified, then the service state's watcher will be run. The service
+state's watcher just restarts the service, so in this case, a change in the
+config file will also trigger a restart of the respective service.
+
+Moving Beyond a Single SLS
+==========================
+
+When setting up Salt States more than one SLS will need to be used, the above
+examples were just in a single SLS file, but more than one SLS file can be
+combined to build out a State Tree. The above example also references a file
+with a strange source - salt://apache/httpd.conf, that file will need to be
+available as well.
+
+The SLS files are laid out in a directory on the salt master. Files are laid
+out as just files, an sls is just a file and files to download are just files.
+
+The apache example would be laid out in the root of the salt file server like
+this:
+
+/apache/init.sls
+/apache/httpd.conf
+
+So the httpd.conf is just a file in the apache directory, and is referenced
+directly.
+
+But with more than a single SLS file, more components can be added to the
+toolkit, consider this ssh example:
+
+/ssh/init.sls
+.. code-block:: yaml
+
+ openssh-client:
+ pkg:
+ - installed
+
+ /etc/ssh/ssh_config
+ file:
+ - managed
+ - user: root
+ - group: root
+ - mode: 644
+ - source: salt://ssh/ssh_config
+ - require:
+ - pkg: openssh-client
+
+/ssh/server.sls
+.. code-block:: yaml
+
+ include:
+ - ssh
+
+ openssh-server:
+ pkg:
+ - installed
+
+ sshd:
+ service:
+ - running
+ - require:
+ - pkg: openssh-client
+ - pkg: openssh-server
+ - file: /etc/ssh/banner
+ - file: /etc/ssh/sshd_config
+
+ /etc/ssh/sshd_config:
+ - managed
+ - user: root
+ - group: root
+ - mode: 644
+ - source: salt://ssh/sshd_config
+ - require:
+ - pkg: openssh-server
+
+ /etc/ssh/banner:
+ file:
+ - managed
+ - user: root
+ - group: root
+ - mode: 644
+ - source: salt://ssh/banner
+ - require:
+ - pkg: openssh-server
+
+Now our State Tree looks like this:
+
+/apache/init.sls
+/apache/httpd.conf
+/ssh/init.sls
+/ssh/server.sls
+/ssh/banner
+/ssh/ssh_config
+/ssh/sshd_config
+
+This example now introduces the ``include`` statement. The include statement
+includes another SLS file so that components found in it can be required,
+watched or as will soon be demonstrated - extended.
+
+The include statement allows for states to be cross linked. When an SLS
+has an include statement it is literally extended to include the contents of
+the included SLS files.
+
+Extending Included SLS Data
+===========================
+
+Sometimes SLS data needs to be extended. Perhaps the apache service needs to
+watch additional resources, or under certain circumstances a different file
+needs to be placed.
+
+These examples will add more watchers to apache and change the ssh banner.
+
+/ssh/custom-server.sls
+.. code-block:: yaml
+
+ include:
+ - ssh.server
+
+ extend:
+ /etc/ssh/banner:
+ file:
+ - source: salt://ssh/custom-banner
+
+/python/mod_python.sls
+.. code-block:: yaml
+
+ include:
+ - apache
+
+ extend:
+ apache:
+ service:
+ - watch:
+ - pkg: mod_python
+
+ mod_python:
+ pkg:
+ - installed
+
+The custom-server.sls file uses the extend statement to overwrite where the
+banner is being downloaded from, and therefore changing what file is being used
+to configure the banner.
+
+In the new mod_python SLS the mod_python package is added, but more importantly
+the apache service was extended to also watch the mod_python package.
+
+There is a bit of a trick here, in the extend statement Requisite Statements
+are extended, so the ``- pkg: mod_python`` is appended to the watch list. But
+all other statements are overwritten.
+
+Understanding the Render System
+===============================
+
+Since the SLS data is just plain old data, it does not need to be represented
+with YAML. Salt defaults to YAML because it is very straightforward and easy
+to learn and use. But the SLS files can be rendered from almost any imaginable
+medium, so long as a renderer module is provided.
+
+The default rendering system is the ``yaml_jinja`` renderer. The
+``yaml_jinja`` renderer will first pass the template through the jinja
+templating system, and then through the YAML parser. The benefit here is that
+full programming constructs are available when creating SLS files.
+
+Other renderers available are ``yaml_mako`` which uses the mako templating
+system rather than the jinja templating system, and more notably, the pure
+python or ``py`` renderer. The ``py`` renderer allows for SLS files to be
+written in pure python, allowing for the utmost level of flexibility and
+power when preparing SLS data.
+
+Getting to Know the Default - yaml_jinja
+----------------------------------------
+
+The default renderer - ``yaml_jinja``, allows for the use of the jinja
+templating system. A guide to the jinja templating system can be found here:
+http://jinja.pocoo.org/docs/templates/.
+
+When working with renderers a few very useful bits of data are passed in. In
+the case of templating engine based renderers two critical components are
+available, ``salt`` and ``grains``. The salt object allows for any salt
+function to be called from within the template, and grains allows for the
+grains to be accessed from within the template. A few examples are in order:
+
+/apache/init.sls
+.. code-block:: yaml
+
+ apache:
+ pkg:
+ - installed
+ {% if grains['os'] == 'RedHat'%}
+ - name: httpd
+ {% endif %}
+ service:
+ - running
+ {% if grains['os'] == 'RedHat'%}
+ - name: httpd
+ {% endif %}
+ - watch:
+ - pkg: apache
+ - file: /etc/httpd/conf/httpd.conf
+ - user: apache
+ user:
+ - present
+ - uid: 87
+ - gid: 87
+ - home: /var/www/html
+ - shell: /bin/nologin
+ - require:
+ - group: apache
+ group:
+ - present
+ - gid: 87
+ - require:
+ - pkg: apache
+
+ /etc/httpd/conf/httpd.conf:
+ file:
+ - managed
+ - source: salt://apache/httpd.conf
+ - user: root
+ - group: root
+ - mode: 644
+
+This example is simple, if the os grain states that the operating system is
+Red Hat, then the name of the apache package and service needs to be httpd.
+
+A more aggressive way to use Jinja can be found here, in a module to set up
+a MooseFS distributed filesystem chunkserver:
+
+/moosefs/chunk.sls
+.. code-block:: yaml
+
+ include:
+ - moosefs
+
+ {% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %}
+ /mnt/moose{{ mnt[-1] }}:
+ mount:
+ - mounted
+ - device: {{ mnt }}
+ - fstype: xfs
+ - mkmnt: True
+ file:
+ - directory
+ - user: mfs
+ - group: mfs
+ - require:
+ - user: mfs
+ - group: mfs
+ {% endfor %}
+
+ '/etc/mfshdd.cfg':
+ file:
+ - managed
+ - source: salt://moosefs/mfshdd.cfg
+ - user: root
+ - group: root
+ - mode: 644
+ - template: jinja
+ - require:
+ - pkg: mfs-chunkserver
+
+ '/etc/mfschunkserver.cfg':
+ file:
+ - managed
+ - source: salt://moosefs/mfschunkserver.cfg
+ - user: root
+ - group: root
+ - mode: 644
+ - template: jinja
+ - require:
+ - pkg: mfs-chunkserver
+
+ mfs-chunkserver:
+ pkg:
+ - installed
+ mfschunkserver:
+ service:
+ - running
+ - require:
+ {% for mnt in salt['cmd.run']('ls /dev/data/moose*') %}
+ - mount: /mnt/moose{{ mnt[-1] }}
+ - file: /mnt/moose{{ mnt[-1] }}
+ {% endfor %}
+ - file: /etc/mfschunkserver.cfg
+ - file: /etc/mfshdd.cfg
+ - file: /var/lib/mfs
+
+This example shows much more of the available power provided by Jinja.
+Multiple for loops are used to dynamically detect available hard drives
+and set them up to be mounted, and the ``salt`` object is used multiple
+times to call shell commands to gather data.
+
+Introducing the Python Renderer
+-------------------------------
+
+Sometimes the chosen default renderer might not have enough logical power to
+accomplish the needed task. When this happens, the python renderer can be
+used. Normally a yaml renderer should be used for the majority of SLS files,
+but a SLS file set to use another renderer can be easily added to the tree.
+
+This example shows a very basic python SLS file:
+
+/python/django.sls
+
+.. code-block:: python
+
+ #!py
+
+ def run():
+ '''
+ Install the django package
+ '''
+ return {'include': ['python'],
+ 'django': {'pkg': ['installed']}}
+
+This is a very simple example, the first line has a SLS shebang line that
+tells Salt to not use the default renderer, but to use the ``py`` renderer.
+Then the run function is defined, the return value from the run function
+must be a Salt friendly data structure, or better known as a Salt
+``HighState`` data structure.
+
+This python example would look like this if it were written in YAML:
+
+.. code-block:: yaml
+
+ include:
+ - python
+
+ django:
+ pkg:
+ - installed
+
+This clearly illustrates, that not only is using the YAML renderer a wise
+decision as the default, but that unbridled power can be obtained where
+needed by using a pure python SLS.
+
diff --git a/doc/topics/tutorials/states_pt2.rst b/doc/topics/tutorials/states_pt2.rst
index 8ded93a61ce6..4a43811a2e44 100644
--- a/doc/topics/tutorials/states_pt2.rst
+++ b/doc/topics/tutorials/states_pt2.rst
@@ -119,7 +119,7 @@ Last, call :func:`state.highstate ` again and the
minion will fetch and execute the highstate as well as our HTML file from the
master using Salt's File Server::
- salt '*' salt.highstate
+ salt '*' state.highstate
Verify that Apache is now serving your custom HTML.
diff --git a/pkg/arch/PKGBUILD b/pkg/arch/PKGBUILD
index cc176b57704f..a6b40add4580 100755
--- a/pkg/arch/PKGBUILD
+++ b/pkg/arch/PKGBUILD
@@ -19,12 +19,14 @@ backup=('etc/salt/master'
makedepends=()
optdepends=()
options=()
-source=("https://github.com/downloads/thatch45/salt/$pkgname-$pkgver.tar.gz"
+source=("https://github.com/downloads/saltstack/salt/$pkgname-$pkgver.tar.gz"
"salt-master"
"salt-syndic"
"salt-minion")
-md5sums=('26456860e89f53deaf75193da50b449a'
- '4baf45d1610d771b742de2cbd8951b9f')
+md5sums=('c27837bac06dadfdb51b4a2b63fe6d35'
+ '1594591acb0a266854186a694da21103'
+ '09683ef4966e401761f7d2db6ad4b692'
+ '21ab2eac231e9f61bf002ba5f16f8a3d')
package() {
cd $srcdir/$pkgname-$pkgver
@@ -32,8 +34,8 @@ package() {
python2 setup.py install --root=$pkgdir/ --optimize=1
mkdir -p $pkgdir/etc/rc.d/
- cp $srcdir/pkg/arch/salt-master $pkgdir/etc/rc.d/
- cp $srcdir/pkg/arch/salt-minion $pkgdir/etc/rc.d/
- cp $srcdir/pkg/arch/salt-syndic $pkgdir/etc/rc.d/
+ cp $srcdir/salt-master $pkgdir/etc/rc.d/
+ cp $srcdir/salt-minion $pkgdir/etc/rc.d/
+ cp $srcdir/salt-syndic $pkgdir/etc/rc.d/
chmod +x $pkgdir/etc/rc.d/*
}
diff --git a/pkg/arch/PKGBUILD-git b/pkg/arch/PKGBUILD-git
index f2ff5bfa1dd8..93ca36e1dc40 100755
--- a/pkg/arch/PKGBUILD-git
+++ b/pkg/arch/PKGBUILD-git
@@ -1,10 +1,10 @@
# Maintainer: Thomas S Hatch
-pkgname=salt
+pkgname=salt-git
pkgver=$(date +%Y%m%d)
pkgrel=1
pkgdesc="A remote execution and communication system built on zeromq"
arch=('any')
-url="https://github.com/thatch45/salt"
+url="https://github.com/saltstack/salt"
license=('APACHE')
groups=()
depends=('python2'
@@ -16,9 +16,10 @@ depends=('python2'
'python2-jinja')
makedepends=('git')
provides=()
-backup=('etc/salt/master'
+backup=('etc/salt/master'
'etc/salt/minion')
options=()
+conflicts=('salt')
source=("salt-master"
"salt-minion"
"salt-syndic")
@@ -52,10 +53,10 @@ package() {
cd "$srcdir/$_gitname-build"
python2 setup.py install --root=$pkgdir/ --optimize=1
-
+
mkdir -p $pkgdir/etc/rc.d/
cp $srcdir/salt-master $pkgdir/etc/rc.d/
cp $srcdir/salt-minion $pkgdir/etc/rc.d/
cp $srcdir/salt-syndic $pkgdir/etc/rc.d/
chmod +x $pkgdir/etc/rc.d/*
-}
+}
diff --git a/pkg/rpm/salt-master b/pkg/rpm/salt-master
index eaebe93ae6d3..3e47ccb6b29f 100755
--- a/pkg/rpm/salt-master
+++ b/pkg/rpm/salt-master
@@ -6,17 +6,16 @@
# LSB header
### BEGIN INIT INFO
+# Description: This is a daemon that controls the salt minions
# Provides: salt-master
# Required-Start: network
-# Default-Start: 3 4 5
# Short-Description: salt master control daemon
-# Description: This is a daemon that controls the salt minions
### END INIT INFO
# chkconfig header
# chkconfig: 345 99 99
-# description: This is a daemon that controls the salt mininons
+# description: This is a daemon that controls the salt minions
#
# processname: /usr/bin/salt-master
diff --git a/pkg/rpm/salt-master.service b/pkg/rpm/salt-master.service
new file mode 100644
index 000000000000..4f3827ede6df
--- /dev/null
+++ b/pkg/rpm/salt-master.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=The Salt Master Server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/salt-master
+
+[Install]
+WantedBy=multi-user.target
diff --git a/pkg/rpm/salt-minion b/pkg/rpm/salt-minion
index 43ec1153d3e0..399c840a7cdf 100755
--- a/pkg/rpm/salt-minion
+++ b/pkg/rpm/salt-minion
@@ -8,7 +8,6 @@
### BEGIN INIT INFO
# Provides: salt-minion
# Required-Start: network
-# Default-Start: 3 4 5
# Short-Description: salt minion control daemon
# Description: This is a daemon that controls the salt minions
### END INIT INFO
diff --git a/pkg/rpm/salt-minion.service b/pkg/rpm/salt-minion.service
new file mode 100644
index 000000000000..2f199f998eca
--- /dev/null
+++ b/pkg/rpm/salt-minion.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=The Salt Minion
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/salt-minion
+
+[Install]
+WantedBy=multi-user.target
diff --git a/pkg/rpm/salt-syndic b/pkg/rpm/salt-syndic
index 724c3b92c98e..44a4a8d87419 100755
--- a/pkg/rpm/salt-syndic
+++ b/pkg/rpm/salt-syndic
@@ -8,7 +8,6 @@
### BEGIN INIT INFO
# Provides: salt-syndic
# Required-Start: network
-# Default-Start: 3 4 5
# Short-Description: salt syndic master-minion passthrough daemon
# Description: This is a daemon that controls the salt syndic
### END INIT INFO
diff --git a/pkg/rpm/salt-syndic.service b/pkg/rpm/salt-syndic.service
new file mode 100644
index 000000000000..922eef35af8b
--- /dev/null
+++ b/pkg/rpm/salt-syndic.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=The Salt Master Server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/salt-syndic
+
+[Install]
+WantedBy=multi-user.target
diff --git a/pkg/rpm/salt.spec b/pkg/rpm/salt.spec
index 45b0bf4ffaa8..778e774094a0 100644
--- a/pkg/rpm/salt.spec
+++ b/pkg/rpm/salt.spec
@@ -1,30 +1,77 @@
+%if ! (0%{?rhel} >= 6 || 0%{?fedora} > 12)
+%global with_python26 1
+%define pybasever 2.6
+%define __python_ver 26
+%define __python %{_bindir}/python%{?pybasever}
+%endif
+
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
Name: salt
-Version: 0.9.2
-Release: 1%{?dist}
+Version: 0.9.4
+Release: 6%{?dist}
Summary: A parallel remote execution system
-Group: System/Utilities
+Group: System Environment/Daemons
License: ASL 2.0
-URL: https://github.com/thatch45/salt
-Source0: %{name}-%{version}.tar.gz
+URL: http://saltstack.org/
+Source0: https://github.com/downloads/saltstack/%{name}/%{name}-%{version}.tar.gz
Source1: %{name}-master
Source2: %{name}-syndic
Source3: %{name}-minion
+Source4: %{name}-master.service
+Source5: %{name}-syndic.service
+Source6: %{name}-minion.service
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-Requires: python
-Requires: PyYAML
+BuildArch: noarch
+
+%if 0%{?with_python26}
+BuildRequires: python26-zmq
+BuildRequires: python26-crypto
+BuildRequires: python26-devel
+BuildRequires: python26-PyYAML
+BuildRequires: python26-m2crypto
+
+Requires: python26-crypto
+Requires: python26-zmq
+Requires: python26-jinja2
+Requires: python26-PyYAML
+Requires: python26-m2crypto
+Requires: python26-PyXML
+
+%else
+
+BuildRequires: python-zmq
+BuildRequires: python-crypto
+BuildRequires: python-devel
+BuildRequires: PyYAML
+BuildRequires: m2crypto
+
Requires: python-crypto
-Requires: m2crypto
Requires: python-zmq
+Requires: python-jinja2
+Requires: PyYAML
+Requires: m2crypto
+Requires: PyXML
-BuildArch: noarch
+%endif
-BuildRequires: python-devel
-BuildRequires: Cython
+%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
+
+Requires(post): chkconfig
+Requires(preun): chkconfig
+Requires(preun): initscripts
+Requires(postun): initscripts
+
+%else
+
+BuildRequires: systemd-units
+
+%endif
+
+Requires: MySQL-python libvirt-python yum
%description
Salt is a distributed remote execution system used to execute commands and
@@ -35,33 +82,21 @@ information, and not just dozens, but hundreds or even thousands of individual
servers, handle them quickly and through a simple and manageable interface.
%package -n salt-master
-Group: System/Utilities
Summary: Management component for salt, a parallel remote execution system
-Requires: salt
+Group: System Environment/Daemons
+Requires: salt = %{version}-%{release}
%description -n salt-master
-Salt is a distributed remote execution system used to execute commands and
-query data. It was developed in order to bring the best solutions found in
-the world of remote execution together and make them better, faster and more
-malleable. Salt accomplishes this via its ability to handle larger loads of
-information, and not just dozens, but hundreds or even thousands of individual
-servers, handle them quickly and through a simple and manageable interface.
-Summary: A parallel remote execution system
+The Salt master is the central server to which all minions connect.
+Summary:
%package -n salt-minion
-Requires: salt
-Group: System/Utilities
-Summary: Client tools for salt, a parallel remote execution system
-Requires: salt
+Summary: Client component for salt, a parallel remote execution system
+Group: System Environment/Daemons
+Requires: salt = %{version}-%{release}
%description -n salt-minion
-Salt is a distributed remote execution system used to execute commands and
-query data. It was developed in order to bring the best solutions found in
-the world of remote execution together and make them better, faster and more
-malleable. Salt accomplishes this via its ability to handle larger loads of
-information, and not just dozens, but hundreds or even thousands of individual
-servers, handle them quickly and through a simple and manageable interface.
-Summary: Client utilities for Salt, a parallel remote execution system
+Salt minion is queried and controlled from the master.
%prep
%setup -q
@@ -72,49 +107,161 @@ Summary: Client utilities for Salt, a parallel remote execution system
%install
rm -rf $RPM_BUILD_ROOT
%{__python} setup.py install -O1 --root $RPM_BUILD_ROOT
+
+%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
mkdir -p $RPM_BUILD_ROOT%{_initrddir}
-install -p -m 0775 %{SOURCE1} $RPM_BUILD_ROOT%{_initrddir}/
-install -p -m 0775 %{SOURCE2} $RPM_BUILD_ROOT%{_initrddir}/
-install -p -m 0775 %{SOURCE3} $RPM_BUILD_ROOT%{_initrddir}/
+install -p %{SOURCE1} $RPM_BUILD_ROOT%{_initrddir}/
+install -p %{SOURCE2} $RPM_BUILD_ROOT%{_initrddir}/
+install -p %{SOURCE3} $RPM_BUILD_ROOT%{_initrddir}/
+%else
+mkdir -p $RPM_BUILD_ROOT%{_unitdir}
+install -p -m 0644 %{SOURCE4} $RPM_BUILD_ROOT%{_unitdir}/
+install -p -m 0644 %{SOURCE5} $RPM_BUILD_ROOT%{_unitdir}/
+install -p -m 0644 %{SOURCE6} $RPM_BUILD_ROOT%{_unitdir}/
+%endif
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root,-)
-%doc %{_defaultdocdir}/salt*
-%{python_sitelib}/*
-%doc %{_mandir}/man7/salt.7.gz
-#{_initrddir}/*
+%doc LICENSE
+%{python_sitelib}/%{name}/*
+%{python_sitelib}/%{name}-%{version}-py?.?.egg-info
+%doc %{_mandir}/man7/salt.7.*
%files -n salt-minion
%defattr(-,root,root)
-%doc %{_mandir}/man1/salt-call.1.gz
-%doc %{_mandir}/man1/salt-minion.1.gz
+%doc %{_mandir}/man1/salt-call.1.*
+%doc %{_mandir}/man1/salt-minion.1.*
%{_bindir}/salt-minion
%{_bindir}/salt-call
-%{_initrddir}/salt-minion
+
+%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
+%attr(0755, root, root) %{_initrddir}/salt-minion
+%else
+%{_unitdir}/salt-minion.service
+%endif
+
%config(noreplace) /etc/salt/minion
%files -n salt-master
%defattr(-,root,root)
-%doc %{_mandir}/man1/salt-master.1.gz
-%doc %{_mandir}/man1/salt.1.gz
-%doc %{_mandir}/man1/salt-cp.1.gz
-%doc %{_mandir}/man1/salt-key.1.gz
-%doc %{_mandir}/man1/salt-run.1.gz
-%doc %{_mandir}/man1/salt-syndic.1.gz
+%doc %{_mandir}/man1/salt-master.1.*
+%doc %{_mandir}/man1/salt.1.*
+%doc %{_mandir}/man1/salt-cp.1.*
+%doc %{_mandir}/man1/salt-key.1.*
+%doc %{_mandir}/man1/salt-run.1.*
+%doc %{_mandir}/man1/salt-syndic.1.*
%{_bindir}/salt
%{_bindir}/salt-master
%{_bindir}/salt-syndic
%{_bindir}/salt-cp
%{_bindir}/salt-key
%{_bindir}/salt-run
-%{_initrddir}/salt-master
-%{_initrddir}/salt-syndic
+%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
+%attr(0755, root, root) %{_initrddir}/salt-master
+%attr(0755, root, root) %{_initrddir}/salt-syndic
+%else
+%{_unitdir}/salt-master.service
+%{_unitdir}/salt-syndic.service
+%endif
%config(noreplace) /etc/salt/master
+%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
+
+%preun -n salt-master
+if [ $1 -eq 0 ] ; then
+ /sbin/service salt-master stop >/dev/null 2>&1
+ /sbin/service salt-syndic stop >/dev/null 2>&1
+ /sbin/chkconfig --del salt-master
+ /sbin/chkconfig --del salt-syndic
+fi
+
+%preun -n salt-minion
+if [ $1 -eq 0 ] ; then
+ /sbin/service salt-minion stop >/dev/null 2>&1
+ /sbin/chkconfig --del salt-minion
+fi
+
+%post -n salt-master
+/sbin/chkconfig --add salt-master
+/sbin/chkconfig --add salt-syndic
+
+%post -n salt-minion
+/sbin/chkconfig --add salt-minion
+
+%postun -n salt-master
+if [ "$1" -ge "1" ] ; then
+ /sbin/service salt-master condrestart >/dev/null 2>&1 || :
+ /sbin/service salt-syndic condrestart >/dev/null 2>&1 || :
+fi
+
+%postun -n salt-minion
+if [ "$1" -ge "1" ] ; then
+ /sbin/service salt-master condrestart >/dev/null 2>&1 || :
+ /sbin/service salt-syndic condrestart >/dev/null 2>&1 || :
+fi
+
+%else
+
+%preun -n salt-master
+if [ $1 -eq 0 ] ; then
+ # Package removal, not upgrade
+ /bin/systemctl --no-reload disable salt-master.service > /dev/null 2>&1 || :
+ /bin/systemctl stop salt-master.service > /dev/null 2>&1 || :
+
+ /bin/systemctl --no-reload disable salt-syndic.service > /dev/null 2>&1 || :
+ /bin/systemctl stop salt-syndic.service > /dev/null 2>&1 || :
+fi
+
+%preun -n salt-minion
+if [ $1 -eq 0 ] ; then
+ # Package removal, not upgrade
+ /bin/systemctl --no-reload disable salt-master.service > /dev/null 2>&1 || :
+ /bin/systemctl stop salt-master.service > /dev/null 2>&1 || :
+
+fi
+
+%post -n salt-master
+/bin/systemctl daemon-reload &>/dev/null || :
+
+%post -n salt-minion
+/bin/systemctl daemon-reload &>/dev/null || :
+
+%postun -n salt-master
+/bin/systemctl daemon-reload &>/dev/null
+[ $1 -gt 0 ] && /bin/systemctl try-restart salt-master.service &>/dev/null || :
+[ $1 -gt 0 ] && /bin/systemctl try-restart salt-syndic.service &>/dev/null || :
+
+%postun -n salt-minion
+/bin/systemctl daemon-reload &>/dev/null
+[ $1 -gt 0 ] && /bin/systemctl try-restart salt-master.service &>/dev/null || :
+[ $1 -gt 0 ] && /bin/systemctl try-restart salt-syndic.service &>/dev/null || :
+
+%endif
+
%changelog
+* Sun Jan 8 2012 Clint Savage - 0.9.4-6
+- Missed some critical elements for SysV and rpmlint cleanup
+
+* Sun Jan 8 2012 Clint Savage - 0.9.4-5
+- SysV clean up in post
+
+* Sat Jan 7 2012 Clint Savage - 0.9.4-4
+- Cleaning up perms, group and descriptions, adding post scripts for systemd
+
+* Thu Jan 5 2012 Clint Savage - 0.9.4-3
+- Updating for systemd on Fedora 15+
+
+* Thu Dec 1 2011 Clint Savage - 0.9.4-2
+- Removing requirement for Cython. Optional only for salt-minion
+
+* Thu Nov 30 2011 Clint Savage - 0.9.4-1
+- New upstream release with new features and bugfixes
+
+* Thu Nov 17 2011 Clint Savage - 0.9.3-1
+- New upstream release with new features and bugfixes
* Sat Sep 17 2011 Clint Savage - 0.9.2-1
- Bugfix release from upstream to fix python2.6 issues
diff --git a/salt/__init__.py b/salt/__init__.py
index 69b64d73dea3..909b430390b9 100755
--- a/salt/__init__.py
+++ b/salt/__init__.py
@@ -1,18 +1,22 @@
'''
Make me some salt!
'''
-
-__version_info__ = (0, 9, 4)
-__version__ = '.'.join(map(str, __version_info__))
+from salt.version import __version__
# Import python libs
-import optparse
import os
import sys
+import stat
+import optparse
-# Import salt libs
-import salt.config
-
+# Import salt libs, the try block bypasses an issue at build time so that c
+# modules don't cause the build to fail
+try:
+ import salt.config
+ import salt.utils.verify
+except ImportError as e:
+ if e.message != 'No module named _msgpack':
+ raise
def verify_env(dirs):
'''
@@ -22,9 +26,20 @@ def verify_env(dirs):
for dir_ in dirs:
if not os.path.isdir(dir_):
try:
+ cumask = os.umask(191)
os.makedirs(dir_)
+ os.umask(cumask)
except OSError, e:
- print 'Failed to create directory path "%s" - %s' % (dir_, e)
+ sys.stderr.write('Failed to create directory path "{0}" - {1}\n'.format(dir_, e))
+
+ mode = os.stat(dir_)
+ # TODO: Should this log if it can't set the permissions
+ # to very secure for these PKI cert directories?
+ if not stat.S_IMODE(mode.st_mode) == 448:
+ if os.access(dir_, os.W_OK):
+ os.chmod(dir_, 448)
+ # Run the extra verification checks
+ salt.utils.verify.run()
class Master(object):
@@ -61,9 +76,9 @@ def __parse_cli(self):
'see the config file. Default: \'%%default\'.' %
', '.join([repr(l) for l in salt.log.LOG_LEVELS.keys()])
)
-
+ log_format = '%(asctime)s,%(msecs)03.0f [%(name)-15s][%(levelname)-8s] %(message)s'
options, args = parser.parse_args()
- salt.log.setup_console_logger(options.log_level)
+ salt.log.setup_console_logger(options.log_level, log_format=log_format)
cli = {'daemon': options.daemon,
'config': options.config}
@@ -87,6 +102,7 @@ def start(self):
for name, level in self.opts['log_granular_levels'].iteritems():
salt.log.set_logger_level(name, level)
import logging
+ log = logging.getLogger(__name__)
# Late import so logging works correctly
import salt.master
master = salt.master.Master(self.opts)
@@ -132,7 +148,8 @@ def __parse_cli(self):
', '.join([repr(l) for l in salt.log.LOG_LEVELS.keys()]))
options, args = parser.parse_args()
- salt.log.setup_console_logger(options.log_level)
+ log_format = '%(asctime)s,%(msecs)03.0f [%(name)-15s][%(levelname)-8s] %(message)s'
+ salt.log.setup_console_logger(options.log_level, log_format=log_format)
cli = {'daemon': options.daemon,
'config': options.config}
@@ -142,8 +159,10 @@ def start(self):
'''
Execute this method to start up a minion.
'''
- verify_env([self.opts['pki_dir'], self.opts['cachedir'],
- os.path.dirname(self.opts['log_file']),
+ verify_env([self.opts['pki_dir'],
+ self.opts['cachedir'],
+ self.opts['extension_modules'],
+ os.path.dirname(self.opts['log_file']),
])
import salt.log
salt.log.setup_logfile_logger(
@@ -156,12 +175,17 @@ def start(self):
# Late import so logging works correctly
import salt.minion
- if self.cli['daemon']:
- # Late import so logging works correctly
- import salt.utils
- salt.utils.daemonize()
- minion = salt.minion.Minion(self.opts)
- minion.tune_in()
+ log = logging.getLogger(__name__)
+ try:
+ if self.cli['daemon']:
+ # Late import so logging works correctly
+ import salt.utils
+ salt.utils.daemonize()
+ minion = salt.minion.Minion(self.opts)
+ minion.tune_in()
+ except KeyboardInterrupt:
+ log.warn('Stopping the Salt Minion')
+ raise SystemExit('\nExiting on Ctrl-c')
class Syndic(object):
@@ -253,9 +277,14 @@ def start(self):
# Late import so logging works correctly
import salt.minion
- syndic = salt.minion.Syndic(self.opts)
- if self.cli['daemon']:
- # Late import so logging works correctly
- import salt.utils
- salt.utils.daemonize()
- syndic.tune_in()
+ log = logging.getLogger(__name__)
+ try:
+ syndic = salt.minion.Syndic(self.opts)
+ if self.cli['daemon']:
+ # Late import so logging works correctly
+ import salt.utils
+ salt.utils.daemonize()
+ syndic.tune_in()
+ except KeyboardInterrupt:
+ log.warn('Stopping the Salt Syndic Minion')
+ raise SystemExit('\nExiting on Ctrl-c')
diff --git a/salt/cli/__init__.py b/salt/cli/__init__.py
index 688a590a19dd..af1a99966314 100644
--- a/salt/cli/__init__.py
+++ b/salt/cli/__init__.py
@@ -7,12 +7,7 @@
import os
import sys
import yaml
-JSON = False
-try:
- import json
- JSON = True
-except:
- pass
+import json
# Import salt components
import salt.cli.caller
@@ -23,6 +18,8 @@
import salt.runner
from salt import __version__ as VERSION
+from salt.exceptions import SaltInvocationError, SaltClientError, \
+ SaltException
class SaltCMD(object):
@@ -79,9 +76,30 @@ def __parse(self):
action='store_true',
help=('Instead of using shell globs use the return code '
'of a function.'))
+ parser.add_option('-N',
+ '--nodegroup',
+ default=False,
+ dest='nodegroup',
+ action='store_true',
+ help=('Instead of using shell globs to evaluate the target '
+ 'use one of the predefined nodegroups to identify a '
+ 'list of targets.'))
+ parser.add_option('-C',
+ '--compound',
+ default=False,
+ dest='compound',
+ action='store_true',
+ help=('The compound target option allows for multiple '
+ 'target types to be evaluated, allowing for greater '
+ 'granularity in target matching. The compound target '
+ 'is space delimited, targets other than globs are '
+                              'preceded with an identifier matching the specific '
+ 'targets argument type: salt \'G@os:RedHat and '
+ 'webser* or E@database.*\''))
parser.add_option('--return',
default='',
dest='return_',
+ metavar='RETURNER',
help=('Set an alternative return method. By default salt will '
'send the return data from the command back to the '
'master, but the return data can be redirected into '
@@ -118,12 +136,11 @@ def __parse(self):
action='store_true',
dest='yaml_out',
help='Print the output from the salt command in yaml.')
- if JSON:
- parser.add_option('--json-out',
- default=False,
- action='store_true',
- dest='json_out',
- help='Print the output from the salt command in json.')
+ parser.add_option('--json-out',
+ default=False,
+ action='store_true',
+ dest='json_out',
+ help='Print the output from the salt command in json.')
options, args = parser.parse_args()
@@ -134,15 +151,14 @@ def __parse(self):
opts['list'] = options.list_
opts['grain'] = options.grain
opts['exsel'] = options.exsel
+ opts['nodegroup'] = options.nodegroup
+ opts['compound'] = options.compound
opts['return'] = options.return_
opts['conf_file'] = options.conf_file
opts['raw_out'] = options.raw_out
opts['txt_out'] = options.txt_out
opts['yaml_out'] = options.yaml_out
- if JSON:
- opts['json_out'] = options.json_out
- else:
- opts['json_out'] = False
+ opts['json_out'] = options.json_out
if opts['return']:
if opts['timeout'] == 5:
@@ -187,9 +203,33 @@ def run(self):
'''
Execute the salt command line
'''
- local = salt.client.LocalClient(self.opts['conf_file'])
+ try:
+ local = salt.client.LocalClient(self.opts['conf_file'])
+ except SaltClientError as exc:
+ local = None
+ ret = exc
+ out = ''
if 'query' in self.opts:
- print local.find_cmd(self.opts['cmd'])
+ ret = local.find_cmd(self.opts['cmd'])
+ for jid in ret:
+ if isinstance(ret, list) or isinstance(ret, dict):
+ # Determine the proper output method and run it
+ get_outputter = salt.output.get_outputter
+ if self.opts['raw_out']:
+ printout = get_outputter('raw')
+ elif self.opts['json_out']:
+ printout = get_outputter('json')
+ elif self.opts['txt_out']:
+ printout = get_outputter('txt')
+ elif self.opts['yaml_out']:
+ printout = get_outputter('yaml')
+ else:
+ printout = get_outputter(None)
+
+ print 'Return data for job {0}:'.format(jid)
+ printout(ret[jid])
+ print ''
+
else:
args = [self.opts['tgt'],
self.opts['fun'],
@@ -204,21 +244,31 @@ def run(self):
args.append('grain')
elif self.opts['exsel']:
args.append('exsel')
+ elif self.opts['nodegroup']:
+ args.append('nodegroup')
+ elif self.opts['compound']:
+ args.append('compound')
else:
args.append('glob')
if self.opts['return']:
args.append(self.opts['return'])
- full_ret = local.cmd_full_return(*args)
- ret, out = self._format_ret(full_ret)
+ try:
+ # local will be None when there was an error
+ if local:
+ full_ret = local.cmd_full_return(*args)
+ ret, out = self._format_ret(full_ret)
+ except SaltInvocationError as exc:
+ ret = exc
+ out = ''
# Handle special case commands
if self.opts['fun'] == 'sys.doc':
self._print_docs(ret)
else:
+ # Determine the proper output method and run it
+ get_outputter = salt.output.get_outputter
if isinstance(ret, list) or isinstance(ret, dict):
- # Determine the proper output method and run it
- get_outputter = salt.output.get_outputter
if self.opts['raw_out']:
printout = get_outputter('raw')
elif self.opts['json_out']:
@@ -231,8 +281,14 @@ def run(self):
printout = get_outputter(out)
else:
printout = get_outputter(None)
+ # Pretty print any salt exceptions
+ elif isinstance(ret, SaltException):
+ printout = get_outputter("txt")
+ printout(ret)
- printout(ret)
+ # Always exit with a return code of 1 on issues
+ if isinstance(ret, Exception):
+ sys.exit(1)
def _format_ret(self, full_ret):
'''
@@ -307,6 +363,14 @@ def __parse(self):
'use a grain value to identify targets, the syntax '
'for the target is the grains key followed by a pcre '
'regular expression:\n"os:Arch.*"'))
+ parser.add_option('-N',
+ '--nodegroup',
+ default=False,
+ dest='nodegroup',
+ action='store_true',
+ help=('Instead of using shell globs to evaluate the target '
+ 'use one of the predefined nodegroups to identify a '
+ 'list of targets.'))
parser.add_option('-c',
'--config',
default='/etc/salt/master',
@@ -323,6 +387,7 @@ def __parse(self):
opts['pcre'] = options.pcre
opts['list'] = options.list_
opts['grain'] = options.grain
+ opts['nodegroup'] = options.nodegroup
opts['conf_file'] = options.conf_file
if opts['list']:
@@ -417,7 +482,9 @@ def __parse(self):
default=2048,
type=int,
help=('Set the keysize for the generated key, only works with '
- 'the "--gen-keys" option; default=2048'))
+ 'the "--gen-keys" option, the key size must be 2048 or '
+ 'higher, otherwise it will be rounded up to 2048'
+ '; default=2048'))
parser.add_option('-c',
'--config',
@@ -438,7 +505,10 @@ def __parse(self):
opts['delete'] = options.delete
opts['gen_keys'] = options.gen_keys
opts['gen_keys_dir'] = options.gen_keys_dir
- opts['keysize'] = options.keysize
+ if options.keysize < 2048:
+ opts['keysize'] = 2048
+ else:
+ opts['keysize'] = options.keysize
opts.update(salt.config.master_config(options.config))
@@ -512,12 +582,11 @@ def __parse(self):
action='store_true',
dest='yaml_out',
help='Print the output from the salt command in yaml.')
- if JSON:
- parser.add_option('--json-out',
- default=False,
- action='store_true',
- dest='json_out',
- help='Print the output from the salt command in json.')
+ parser.add_option('--json-out',
+ default=False,
+ action='store_true',
+ dest='json_out',
+ help='Print the output from the salt command in json.')
parser.add_option('--no-color',
default=False,
dest='no_color',
@@ -535,10 +604,7 @@ def __parse(self):
opts['txt_out'] = options.txt_out
opts['yaml_out'] = options.yaml_out
opts['color'] = not options.no_color
- if JSON:
- opts['json_out'] = options.json_out
- else:
- opts['json_out'] = False
+ opts['json_out'] = options.json_out
opts.update(salt.config.minion_config(options.config))
opts['log_level'] = options.log_level
if len(args) >= 1:
diff --git a/salt/cli/caller.py b/salt/cli/caller.py
index 382eb56b9f54..5165b52582f6 100644
--- a/salt/cli/caller.py
+++ b/salt/cli/caller.py
@@ -4,13 +4,15 @@
'''
# Import python modules
-import pprint
+import sys
# Import salt libs
import salt
import salt.loader
import salt.minion
+# Custom exceptions
+from salt.exceptions import CommandExecutionError
class Caller(object):
'''
@@ -30,10 +32,15 @@ def call(self):
'''
ret = {}
if self.opts['fun'] not in self.minion.functions:
- print 'Function {0} is not available'.format(self.opts['fun'])
- ret['return'] = self.minion.functions[self.opts['fun']](
- *self.opts['arg']
- )
+ sys.stderr.write('Function {0} is not available\n'.format(self.opts['fun']))
+ sys.exit(1)
+ try:
+ ret['return'] = self.minion.functions[self.opts['fun']](
+ *self.opts['arg']
+ )
+ except (TypeError, CommandExecutionError) as exc:
+ sys.stderr.write('Error running \'{0}\': {1}\n'.format(self.opts['fun'], str(exc)))
+ sys.exit(1)
if hasattr(self.minion.functions[self.opts['fun']], '__outputter__'):
oput = self.minion.functions[self.opts['fun']].__outputter__
if isinstance(oput, str):
@@ -58,7 +65,27 @@ def print_grains(self):
Print out the grains
'''
grains = salt.loader.grains(self.opts)
- pprint.pprint(grains)
+ printout = self._get_outputter(out='yaml')
+ # If --json-out is specified, pretty print it
+ if 'json_out' in self.opts and self.opts['json_out']:
+ printout.indent = 2
+ printout(grains)
+
+ def _get_outputter(self, out=None):
+ get_outputter = salt.output.get_outputter
+ if self.opts['raw_out']:
+ printout = get_outputter('raw')
+ elif self.opts['json_out']:
+ printout = get_outputter('json')
+ elif self.opts['txt_out']:
+ printout = get_outputter('txt')
+ elif self.opts['yaml_out']:
+ printout = get_outputter('yaml')
+ elif out:
+ printout = get_outputter(out)
+ else:
+ printout = get_outputter(None)
+ return printout
def run(self):
'''
@@ -71,18 +98,11 @@ def run(self):
else:
ret = self.call()
# Determine the proper output method and run it
- get_outputter = salt.output.get_outputter
- if self.opts['raw_out']:
- printout = get_outputter('raw')
- elif self.opts['json_out']:
- printout = get_outputter('json')
- elif self.opts['txt_out']:
- printout = get_outputter('txt')
- elif self.opts['yaml_out']:
- printout = get_outputter('yaml')
- elif 'out' in ret:
- printout = get_outputter(ret['out'])
+ if 'out' in ret:
+ printout = self._get_outputter(ret['out'])
else:
- printout = get_outputter(None)
+ printout = self._get_outputter()
+ if 'json_out' in self.opts and self.opts['json_out']:
+ printout.indent = 2
printout({'local': ret['return']}, color=self.opts['color'])
diff --git a/salt/cli/cp.py b/salt/cli/cp.py
index cbf32107a815..8a9316aa8610 100644
--- a/salt/cli/cp.py
+++ b/salt/cli/cp.py
@@ -8,9 +8,7 @@
# Import python modules
import os
import sys
-
-# Import third party libs
-import yaml
+import pprint
# Import salt modules
import salt.client
@@ -76,7 +74,9 @@ def run(self):
args.append('list')
elif self.opts['grain']:
args.append('grain')
+ elif self.opts['nodegroup']:
+ args.append('nodegroup')
ret = local.cmd(*args)
- print yaml.dump(ret)
+ pprint.pprint(ret)
diff --git a/salt/client.py b/salt/client.py
index 38d74bdd1d9a..aeb9a1de714f 100644
--- a/salt/client.py
+++ b/salt/client.py
@@ -26,13 +26,12 @@
# small, and only start with the ability to execute salt commands locally.
# This means that the primary client to build is, the LocalClient
-import cPickle as pickle
-import datetime
-import glob
import os
import re
import sys
+import glob
import time
+import datetime
# Import zmq modules
import zmq
@@ -40,6 +39,7 @@
# Import salt modules
import salt.config
import salt.payload
+from salt.exceptions import SaltClientError, SaltInvocationError
def prep_jid(cachedir):
@@ -59,19 +59,13 @@ def prep_jid(cachedir):
return jid
-class SaltClientError(Exception):
- '''
- Custom exception class.
- '''
- pass
-
-
class LocalClient(object):
'''
Connect to the salt master via the local server and via root
'''
def __init__(self, c_path='/etc/salt/master'):
self.opts = salt.config.master_config(c_path)
+ self.serial = salt.payload.Serial(self.opts)
self.key = self.__read_master_key()
def __read_master_key(self):
@@ -82,8 +76,8 @@ def __read_master_key(self):
keyfile = os.path.join(self.opts['cachedir'], '.root_key')
key = open(keyfile, 'r').read()
return key
- except:
- raise SaltClientError('Failed to read in the salt root key')
+ except (OSError, IOError):
+ raise SaltClientError('Problem reading the salt root key. Are you root?')
def _check_glob_minions(self, expr):
'''
@@ -200,7 +194,7 @@ def get_returns(self, jid, minions, timeout=5):
continue
while fn_ not in ret:
try:
- ret[fn_] = pickle.load(open(retp, 'r'))
+ ret[fn_] = self.serial.load(open(retp, 'r'))
except:
pass
if ret and start == 999999999999:
@@ -239,10 +233,10 @@ def get_full_returns(self, jid, minions, timeout=5):
continue
while fn_ not in ret:
try:
- ret_data = pickle.load(open(retp, 'r'))
+ ret_data = self.serial.load(open(retp, 'r'))
ret[fn_] = {'ret': ret_data}
if os.path.isfile(outp):
- ret[fn_]['out'] = pickle.load(open(outp, 'r'))
+ ret[fn_]['out'] = self.serial.load(open(outp, 'r'))
except:
pass
if ret and start == 999999999999:
@@ -269,7 +263,7 @@ def find_cmd(self, cmd):
loadp = os.path.join(jid_dir, '.load.p')
if os.path.isfile(loadp):
try:
- load = pickle.load(open(loadp, 'r'))
+ load = self.serial.load(open(loadp, 'r'))
if load['fun'] == cmd:
# We found a match! Add the return values
ret[jid] = {}
@@ -278,7 +272,7 @@ def find_cmd(self, cmd):
retp = os.path.join(host_dir, 'return.p')
if not os.path.isfile(retp):
continue
- ret[jid][host] = pickle.load(open(retp))
+ ret[jid][host] = self.serial.load(open(retp))
except:
continue
else:
@@ -297,6 +291,7 @@ def check_minions(self, expr, expr_form='glob'):
'list': self._check_list_minions,
'grain': self._check_grain_minions,
'exsel': self._check_grain_minions,
+ 'compound': self._check_grain_minions,
}[expr_form](expr)
def pub(self, tgt, fun, arg=(), expr_form='glob',
@@ -322,6 +317,14 @@ def pub(self, tgt, fun, arg=(), expr_form='glob',
minions:
A set, the targets that the tgt passed should match.
'''
+ if expr_form == 'nodegroup':
+ if tgt not in self.opts['nodegroups']:
+ conf_file = self.opts.get('conf_file', 'the master config file')
+ err = 'Node group {0} unavailable in {1}'.format(tgt, conf_file)
+ raise SaltInvocationError(err)
+ tgt = self.opts['nodegroups'][tgt]
+ expr_form = 'compound'
+
# Run a check_minions, if no minions match return False
# format the payload - make a function that does this in the payload
# module
@@ -330,6 +333,7 @@ def pub(self, tgt, fun, arg=(), expr_form='glob',
# send!
# return what we get back
minions = self.check_minions(tgt, expr_form)
+
if not minions:
return {'jid': '',
'minions': minions}
@@ -369,7 +373,7 @@ def pub(self, tgt, fun, arg=(), expr_form='glob',
payload = None
for ind in range(100):
try:
- payload = salt.payload.unpackage(
+ payload = self.serial.loads(
socket.recv(
zmq.NOBLOCK
)
diff --git a/salt/config.py b/salt/config.py
index d0be9411b987..731c4191dcac 100644
--- a/salt/config.py
+++ b/salt/config.py
@@ -4,11 +4,18 @@
# Import python modules
import os
+import tempfile
import socket
import sys
# import third party libs
import yaml
+try:
+ yaml.Loader = yaml.CLoader
+ yaml.Dumper = yaml.CDumper
+except:
+ pass
+
# Import salt libs
import salt.crypt
@@ -28,9 +35,13 @@ def load_config(opts, path, env_var):
if os.path.isfile(path):
try:
conf_opts = yaml.safe_load(open(path, 'r'))
- if conf_opts == None:
+ if conf_opts is None:
# The config file is empty and the yaml.load returned None
conf_opts = {}
+ else:
+ # allow using numeric ids: convert int to string
+ if 'id' in conf_opts:
+ conf_opts['id'] = str(conf_opts['id'])
opts.update(conf_opts)
opts['conf_file'] = path
except Exception, e:
@@ -45,8 +56,9 @@ def prepend_root_dir(opts, path_options):
'root_dir' option.
'''
for path_option in path_options:
- opts[path_option] = os.path.normpath(
- os.sep.join([opts['root_dir'], opts[path_option]]))
+ if path_option in opts:
+ opts[path_option] = os.path.normpath(
+ os.sep.join([opts['root_dir'], opts[path_option]]))
def minion_config(path):
@@ -62,12 +74,14 @@ def minion_config(path):
'conf_file': path,
'renderer': 'yaml_jinja',
'failhard': False,
+ 'autoload_dynamic_modules': True,
'disable_modules': [],
'disable_returners': [],
'module_dirs': [],
'returner_dirs': [],
'states_dirs': [],
'render_dirs': [],
+ 'clean_dynamic_modules': True,
'open_mode': False,
'multiprocessing': True,
'sub_timeout': 60,
@@ -76,6 +90,8 @@ def minion_config(path):
'log_granular_levels': {},
'test': False,
'cython_enable': False,
+ 'state_verbose': False,
+ 'acceptance_wait_time': 10,
}
load_config(opts, path, 'SALT_MINION_CONFIG')
@@ -87,17 +103,16 @@ def minion_config(path):
# Enabling open mode requires that the value be set to True, and nothing
# else!
- if opts['open_mode']:
- if opts['open_mode'] == True:
- opts['open_mode'] = True
- else:
- opts['open_mode'] = False
+ opts['open_mode'] = opts['open_mode'] is True
opts['grains'] = salt.loader.grains(opts)
# Prepend root_dir to other paths
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
+ # set up the extension_modules location from the cachedir
+ opts['extension_modules'] = os.path.join(opts['cachedir'], 'extmods')
+
return opts
@@ -108,7 +123,7 @@ def master_config(path):
opts = {'interface': '0.0.0.0',
'publish_port': '4505',
'worker_threads': 5,
- 'sock_dir': '/tmp/.salt-unix',
+ 'sock_dir': os.path.join(tempfile.gettempdir(), '.salt-unix'),
'ret_port': '4506',
'keep_jobs': 24,
'root_dir': '/',
@@ -116,7 +131,7 @@ def master_config(path):
'cachedir': '/var/cache/salt',
'file_roots': {
'base': ['/srv/salt'],
- },
+ },
'file_buffer_size': 1048576,
'hash_type': 'md5',
'conf_file': path,
@@ -131,27 +146,21 @@ def master_config(path):
'log_granular_levels': {},
'cluster_masters': [],
'cluster_mode': 'paranoid',
- }
+ 'serial': 'msgpack',
+ 'nodegroups': {},
+ }
load_config(opts, path, 'SALT_MASTER_CONFIG')
opts['aes'] = salt.crypt.Crypticle.generate_key_string()
# Prepend root_dir to other paths
- prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
+ prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file', 'sock_dir'])
# Enabling open mode requires that the value be set to True, and nothing
# else!
- if opts['open_mode']:
- if opts['open_mode'] == True:
- opts['open_mode'] = True
- else:
- opts['open_mode'] = False
- if opts['auto_accept']:
- if opts['auto_accept'] == True:
- opts['auto_accept'] = True
- else:
- opts['auto_accept'] = False
+ opts['open_mode'] = opts['open_mode'] is True
+ opts['auto_accept'] = opts['auto_accept'] is True
return opts
diff --git a/salt/crypt.py b/salt/crypt.py
index 859e0515d8c1..aab35afe6d06 100644
--- a/salt/crypt.py
+++ b/salt/crypt.py
@@ -1,11 +1,10 @@
'''
-The crypt module manages all of the cyptogophy functions for minions and
+The crypt module manages all of the cryptography functions for minions and
masters, encrypting and decrypting payloads, preparing messages, and
authenticating peers
'''
# Import python libs
-import cPickle as pickle
import hashlib
import hmac
import logging
@@ -23,6 +22,7 @@
# Import salt utils
import salt.payload
import salt.utils
+from salt.exceptions import AuthenticationError
log = logging.getLogger(__name__)
@@ -42,7 +42,9 @@ def gen_keys(keydir, keyname, keysize):
priv = '{0}.pem'.format(base)
pub = '{0}.pub'.format(base)
gen = RSA.gen_key(keysize, 1)
+ cumask = os.umask(191)
gen.save_key(priv, callback=foo_pass)
+ os.umask(cumask)
gen.save_pub_key(pub)
key = RSA.load_key(priv, callback=foo_pass)
os.chmod(priv, 256)
@@ -69,9 +71,9 @@ def __get_priv_key(self):
key = None
try:
key = RSA.load_key(self.rsa_path, callback=foo_pass)
- log.debug('Loaded master key: %s', self.rsa_path)
+ log.debug('Loaded master key: {0}'.format(self.rsa_path))
except:
- log.info('Generating master key: %s', self.rsa_path)
+ log.info('Generating master key: {0}'.format(self.rsa_path))
key = gen_keys(self.opts['pki_dir'], 'master', 4096)
return key
@@ -98,6 +100,7 @@ class Auth(object):
'''
def __init__(self, opts):
self.opts = opts
+ self.serial = salt.payload.Serial(self.opts)
self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
if 'syndic_master' in self.opts:
self.mpub = 'syndic_master.pub'
@@ -113,9 +116,9 @@ def get_priv_key(self):
key = None
try:
key = RSA.load_key(self.rsa_path, callback=foo_pass)
- log.debug('Loaded minion key: %s', self.rsa_path)
+ log.debug('Loaded minion key: {0}'.format(self.rsa_path))
except:
- log.info('Generating minion key: %s', self.rsa_path)
+ log.info('Generating minion key: {0}'.format(self.rsa_path))
key = gen_keys(self.opts['pki_dir'], 'minion', 4096)
return key
@@ -188,9 +191,9 @@ def sign_in(self):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(self.opts['master_uri'])
- payload = salt.payload.package(self.minion_sign_in_payload())
+ payload = self.serial.dumps(self.minion_sign_in_payload())
socket.send(payload)
- payload = salt.payload.unpackage(socket.recv())
+ payload = self.serial.loads(socket.recv())
if 'load' in payload:
if 'ret' in payload['load']:
if not payload['load']['ret']:
@@ -205,8 +208,9 @@ def sign_in(self):
else:
log.error(
'The Salt Master has cached the public key for this '
- 'node, this salt minion will wait for 10 seconds '
- 'before attempting to re-authenticate'
+ 'node, this salt minion will wait for %s seconds '
+ 'before attempting to re-authenticate',
+ self.opts['acceptance_wait_time']
)
return 'retry'
if not self.verify_master(payload['pub_key'], payload['token']):
@@ -224,14 +228,6 @@ def sign_in(self):
return auth
-class AuthenticationError(Exception):
- '''
- Custom exception class.
- '''
-
- pass
-
-
class Crypticle(object):
'''
Authenticated encryption class
@@ -244,9 +240,10 @@ class Crypticle(object):
AES_BLOCK_SIZE = 16
SIG_SIZE = hashlib.sha256().digest_size
- def __init__(self, key_string, key_size=192):
+ def __init__(self, opts, key_string, key_size=192):
self.keys = self.extract_keys(key_string, key_size)
self.key_size = key_size
+ self.serial = salt.payload.Serial(opts)
@classmethod
def generate_key_string(cls, key_size=192):
@@ -288,21 +285,21 @@ def decrypt(self, data):
data = cypher.decrypt(data)
return data[:-ord(data[-1])]
- def dumps(self, obj, pickler=pickle):
+ def dumps(self, obj):
'''
- pickle and encrypt a python object
+ Serialize and encrypt a python object
'''
- return self.encrypt(self.PICKLE_PAD + pickler.dumps(obj))
+ return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj))
- def loads(self, data, pickler=pickle):
+ def loads(self, data):
'''
- decrypt and un-pickle a python object
+ Decrypt and un-serialize a python object
'''
data = self.decrypt(data)
# simple integrity check to verify that we got meaningful data
if not data.startswith(self.PICKLE_PAD):
return {}
- return pickler.loads(data[len(self.PICKLE_PAD):])
+ return self.serial.loads(data[len(self.PICKLE_PAD):])
class SAuth(Auth):
@@ -326,7 +323,7 @@ def __authenticate(self):
print 'Failed to authenticate with the master, verify that this'\
+ ' minion\'s public key has been accepted on the salt master'
sys.exit(2)
- return Crypticle(creds['aes'])
+ return Crypticle(self.opts, creds['aes'])
def gen_token(self, clear_tok):
'''
diff --git a/salt/exceptions.py b/salt/exceptions.py
new file mode 100644
index 000000000000..341b90b2369b
--- /dev/null
+++ b/salt/exceptions.py
@@ -0,0 +1,60 @@
+'''
+This module is a central location for all salt exceptions
+'''
+
+class SaltException(Exception):
+ '''
+ Base exception class; all Salt-specific exceptions should subclass this
+ '''
+ pass
+
+class SaltClientError(SaltException):
+ '''
+ Problem reading the master root key
+ '''
+ pass
+
+class AuthenticationError(SaltException):
+ '''
+ If sha256 signature fails during decryption
+ '''
+ pass
+
+class CommandNotFoundError(SaltException):
+ '''
+ Used in modules or grains when a required binary is not available
+ '''
+ pass
+
+class CommandExecutionError(SaltException):
+ '''
+ Used when a module runs a command which returns an error and
+ wants to show the user the output gracefully instead of dying
+ '''
+ pass
+
+class LoaderError(SaltException):
+ '''
+ Problems loading the right renderer
+ '''
+ pass
+
+class MinionError(SaltException):
+ '''
+ Minion problems reading uris such as salt:// or http://
+ '''
+ pass
+
+class SaltInvocationError(SaltException):
+ '''
+ Used when the wrong number of arguments are sent to modules
+ or invalid arguments are specified on the command line
+ '''
+ pass
+
+class PkgParseError(SaltException):
+ '''
+    Used when one of the pkg modules cannot correctly parse the output from the CLI
+ tool (pacman, yum, apt, aptitude, etc)
+ '''
+ pass
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 6986c6bf6120..32416c9282e8 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -15,8 +15,15 @@
import os
import socket
-import subprocess
import sys
+import re
+import platform
+import salt.utils
+
+# Solve the Chicken and egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmd
+__salt__ = {'cmd.run': salt.modules.cmd._run_quiet}
def _kernel():
@@ -26,20 +33,32 @@ def _kernel():
# Provides:
# kernel
grains = {}
- grains['kernel'] = subprocess.Popen(['uname', '-s'],
- stdout=subprocess.PIPE).communicate()[0].strip()
+ grains['kernel'] = __salt__['cmd.run']('uname -s').strip()
+
if grains['kernel'] == 'aix':
- grains['kernelrelease'] = subprocess.Popen(['oslevel', '-s'],
- stdout=subprocess.PIPE).communicate()[0].strip()
+ grains['kernelrelease'] = __salt__['cmd.run']('oslevel -s').strip()
else:
- grains['kernelrelease'] = subprocess.Popen(['uname', '-r'],
- stdout=subprocess.PIPE).communicate()[0].strip()
+ grains['kernelrelease'] = __salt__['cmd.run']('uname -r').strip()
if 'kernel' not in grains:
grains['kernel'] = 'Unknown'
if not grains['kernel']:
grains['kernel'] = 'Unknown'
return grains
+def _windows_cpudata():
+ '''
+ Return the cpu information for Windows systems architecture
+ '''
+ # Provides:
+ # cpuarch
+ # num_cpus
+ # cpu_model
+ grains = {}
+ grains['cpuarch'] = platform.machine()
+ if 'NUMBER_OF_PROCESSORS' in os.environ:
+ grains['num_cpus'] = os.environ['NUMBER_OF_PROCESSORS']
+ grains['cpu_model'] = platform.processor()
+ return grains
def _linux_cpudata():
'''
@@ -53,10 +72,17 @@ def _linux_cpudata():
grains = {}
cpuinfo = '/proc/cpuinfo'
# Grab the Arch
- arch = subprocess.Popen(['uname', '-m'],
- stdout=subprocess.PIPE).communicate()[0].strip()
+ arch = __salt__['cmd.run']('uname -m').strip()
grains['cpuarch'] = arch
- if not grains['cpuarch']:
+ # Some systems such as Debian don't like uname -m
+ # so fallback gracefully to the processor type
+ if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
+ arch = __salt__['cmd.run']('uname -p')
+ grains['cpuarch'] = arch
+ if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
+ arch = __salt__['cmd.run']('uname -i')
+ grains['cpuarch'] = arch
+ if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
grains['cpuarch'] = 'Unknown'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
@@ -84,22 +110,16 @@ def _freebsd_cpudata():
Return cpu information for FreeBSD systems
'''
grains = {}
- grains['cpuarch'] = subprocess.Popen(
- '/sbin/sysctl hw.machine',
- shell=True,
- stdout=subprocess.PIPE
- ).communicate()[0].split(':')[1].strip()
- grains['num_cpus'] = subprocess.Popen(
- '/sbin/sysctl hw.ncpu',
- shell=True,
- stdout=subprocess.PIPE
- ).communicate()[0].split(':')[1].strip()
- grains['cpu_model'] = subprocess.Popen(
- '/sbin/sysctl hw.model',
- shell=True,
- stdout=subprocess.PIPE
- ).communicate()[0].split(':')[1].strip()
- grains['cpu_flags'] = []
+ sysctl = salt.utils.which('sysctl')
+
+ if sysctl:
+ machine_cmd = '{0} -n hw.machine'.format(sysctl)
+ ncpu_cmd = '{0} -n hw.ncpu'.format(sysctl)
+ model_cpu = '{0} -n hw.model'.format(sysctl)
+ grains['num_cpus'] = __salt__['cmd.run'](ncpu_cmd).strip()
+ grains['cpu_model'] = __salt__['cmd.run'](model_cpu).strip()
+ grains['cpuarch'] = __salt__['cmd.run'](machine_cmd).strip()
+ grains['cpu_flags'] = []
return grains
@@ -112,6 +132,7 @@ def _memdata(osdata):
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
meminfo = '/proc/meminfo'
+
if os.path.isfile(meminfo):
for line in open(meminfo, 'r').readlines():
comps = line.split(':')
@@ -119,13 +140,20 @@ def _memdata(osdata):
continue
if comps[0].strip() == 'MemTotal':
grains['mem_total'] = int(comps[1].split()[0]) / 1024
- elif osdata['kernel'] == 'FreeBSD':
- mem = subprocess.Popen(
- '/sbin/sysctl hw.physmem',
- shell=True,
- stdout=subprocess.PIPE
- ).communicate()[0].split(':')[1].strip()
- grains['mem_total'] = str(int(mem) / 1024 / 1024)
+ elif osdata['kernel'] in ('FreeBSD','OpenBSD'):
+ sysctl = salt.utils.which('sysctl')
+ if sysctl:
+ mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl)).strip()
+ grains['mem_total'] = str(int(mem) / 1024 / 1024)
+ elif osdata['kernel'] == 'Windows':
+ for line in __salt__['cmd.run']('SYSTEMINFO /FO LIST').split('\n'):
+ comps = line.split(':')
+ if not len(comps) > 1:
+ continue
+ if comps[0].strip() == 'Total Physical Memory':
+ grains['mem_total'] = int(comps[1].split()[0].replace(',', ''))
+ break
+
return grains
@@ -138,24 +166,82 @@ def _virtual(osdata):
# Provides:
# virtual
grains = {'virtual': 'physical'}
- if 'Linux OpenBSD SunOS HP-UX'.count(osdata['kernel']):
- if os.path.isdir('/proc/vz'):
+ lspci = salt.utils.which('lspci')
+ dmidecode = salt.utils.which('dmidecode')
+
+ if dmidecode:
+ output = __salt__['cmd.run']('dmidecode')
+ # Product Name: VirtualBox
+ if 'Vendor: QEMU' in output:
+ # FIXME: Make this detect between kvm or qemu
+ grains['virtual'] = 'kvm'
+ elif 'VirtualBox' in output:
+ grains['virtual'] = 'VirtualBox'
+ # Product Name: VMware Virtual Platform
+ elif 'VMware' in output:
+ grains['virtual'] = 'VMware'
+ # Manufacturer: Microsoft Corporation
+ # Product Name: Virtual Machine
+ elif 'Manufacturer: Microsoft' in output and 'Virtual Machine' in output:
+ grains['virtual'] = 'VirtualPC'
+ # Fall back to lspci if dmidecode isn't available
+ elif lspci:
+ model = __salt__['cmd.run']('lspci').lower()
+ if 'vmware' in model:
+ grains['virtual'] = 'VMware'
+ # 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
+ elif 'virtualbox' in model:
+ grains['virtual'] = 'VirtualBox'
+ elif 'qemu' in model:
+ grains['virtual'] = 'kvm'
+ choices = ('Linux', 'OpenBSD', 'SunOS', 'HP-UX')
+ isdir = os.path.isdir
+ if osdata['kernel'] in choices:
+ if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
else:
grains['virtual'] = 'openvzve'
- if os.path.isdir('/.SUNWnative'):
+ elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
+ if os.path.isfile('/proc/xen/xsd_kva'):
+ # Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
+ # Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
+ grains['virtual_subtype'] = 'Xen Dom0'
+ else:
+ if grains.get('productname', '') == 'HVM domU':
+ # Requires dmidecode!
+ grains['virtual_subtype'] = 'Xen HVM DomU'
+ elif os.path.isfile('/proc/xen/capabilities'):
+ caps = open('/proc/xen/capabilities')
+ if 'control_d' not in caps.read():
+ # Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
+ grains['virtual_subtype'] = 'Xen PV DomU'
+ else:
+ # Shouldn't get to this, but just in case
+ grains['virtual_subtype'] = 'Xen Dom0'
+ caps.close()
+ # Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
+ # Tested on Fedora 15 / 2.6.41.4-1 without running xen
+ elif isdir('/sys/bus/xen'):
+ if 'xen' in __salt__['cmd.run']('dmesg').lower():
+ grains['virtual_subtype'] = 'Xen PV DomU'
+ elif os.listdir('/sys/bus/xen/drivers'):
+ # An actual DomU will have several drivers
+ # whereas a paravirt ops kernel will not.
+ grains['virtual_subtype'] = 'Xen PV DomU'
+ # If a Dom0 or DomU was detected, obviously this is xen
+ if 'dom' in grains.get('virtual_subtype', '').lower():
+ grains['virtual'] = 'xen'
+ elif isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
- if os.path.isfile('/proc/cpuinfo'):
- if open('/proc/cpuinfo', 'r').read().count('QEMU Virtual CPU'):
+ elif os.path.isfile('/proc/cpuinfo'):
+ if 'QEMU Virtual CPU' in open('/proc/cpuinfo', 'r').read():
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'FreeBSD':
- model = subprocess.Popen(
- '/sbin/sysctl hw.model',
- shell=True,
- stdout=subprocess.PIPE
- ).communicate()[0].split(':')[1].strip()
- if model.count('QEMU Virtual CPU'):
+ sysctl = salt.utils.which('sysctl')
+ if sysctl:
+ model = __salt__['cmd.run']('{0} hw.model'.format(sysctl)).strip()
+ if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
return grains
@@ -165,30 +251,128 @@ def _ps(osdata):
Return the ps grain
'''
grains = {}
- grains['ps'] = 'ps auxwww' if\
- 'FreeBSD NetBSD OpenBSD Darwin'.count(osdata['os']) else 'ps -ef'
+ bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'Darwin')
+ if osdata['os'] in bsd_choices:
+ grains['ps'] = 'ps auxwww'
+ else:
+ grains['ps'] = 'ps -efH'
return grains
+def _linux_platform_data(osdata):
+ '''
+ The platform module is very smart about figuring out linux distro
+ information. Instead of re-inventing the wheel, lets use it!
+ '''
+ # Provides:
+ # osrelease
+ # oscodename
+ grains = {}
+ (osname, osrelease, oscodename) = platform.dist()
+ if 'os' not in osdata and osname:
+ grains['os'] = osname
+ if osrelease:
+ grains['osrelease'] = osrelease
+ if oscodename:
+ grains['oscodename'] = oscodename
+ return grains
+
+def _windows_platform_data(osdata):
+ '''
+ Use the platform module for as much as we can.
+ '''
+ # Provides:
+ # osrelease
+ # osversion
+ # osmanufacturer
+ # manufacturer
+ # productname
+ # biosversion
+ # osfullname
+ # inputlocale
+ # timezone
+ # windowsdomain
+
+ grains = {}
+ (osname, hostname, osrelease, osversion, machine, processor) = platform.uname()
+ if 'os' not in osdata and osname:
+ grains['os'] = osname
+ if osrelease:
+ grains['osrelease'] = osrelease
+ if osversion:
+ grains['osversion'] = osversion
+ get_these_grains = {
+ 'OS Manufacturer': 'osmanufacturer',
+ 'System Manufacturer': 'manufacturer',
+ 'System Model': 'productname',
+ 'BIOS Version': 'biosversion',
+ 'OS Name': 'osfullname',
+ 'Input Locale': 'inputlocale',
+ 'Time Zone': 'timezone',
+ 'Domain': 'windowsdomain',
+ }
+ systeminfo = __salt__['cmd.run']('SYSTEMINFO')
+ for line in systeminfo.split('\n'):
+ comps = line.split(':', 1)
+ if not len(comps) > 1:
+ continue
+ item = comps[0].strip()
+ value = comps[1].strip()
+ if item in get_these_grains:
+ grains[get_these_grains[item]] = value
+ return grains
+
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {}
+ if 'os' in os.environ:
+ if os.environ['os'].startswith('Windows'):
+ grains['os'] = 'Windows'
+ grains['kernel'] = 'Windows'
+ grains.update(_memdata(grains))
+ grains.update(_windows_platform_data(grains))
+ grains.update(_windows_cpudata())
+ return grains
grains.update(_kernel())
+
if grains['kernel'] == 'Linux':
+ # Add lsb grains on any distro with lsb-release
+ if os.path.isfile('/etc/lsb-release'):
+ for line in open('/etc/lsb-release').readlines():
+ # Matches any possible format:
+ # DISTRIB_ID="Ubuntu"
+ # DISTRIB_ID='Mageia'
+ # DISTRIB_ID=Fedora
+ # DISTRIB_RELEASE='10.10'
+ # DISTRIB_CODENAME='squeeze'
+ # DISTRIB_DESCRIPTION='Ubuntu 10.10'
+ regex = re.compile('^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\w\s.\-_]+)(?:\'|")?')
+ match = regex.match(line)
+ if match:
+ # Adds: lsb_distrib_{id,release,codename,description}
+ grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
if os.path.isfile('/etc/arch-release'):
grains['os'] = 'Arch'
elif os.path.isfile('/etc/debian_version'):
grains['os'] = 'Debian'
+ if 'lsb_distrib_id' in grains:
+ if 'Ubuntu' in grains['lsb_distrib_id']:
+ grains['os'] = 'Ubuntu'
+ elif os.path.isfile('/etc/issue.net') and \
+ 'Ubuntu' in open('/etc/issue.net').readline():
+ grains['os'] = 'Ubuntu'
elif os.path.isfile('/etc/gentoo-release'):
grains['os'] = 'Gentoo'
- elif os.path.isfile('/etc/fedora-version'):
+ elif os.path.isfile('/etc/fedora-release'):
grains['os'] = 'Fedora'
elif os.path.isfile('/etc/mandriva-version'):
grains['os'] = 'Mandriva'
elif os.path.isfile('/etc/mandrake-version'):
grains['os'] = 'Mandrake'
+ elif os.path.isfile('/etc/mageia-version'):
+ grains['os'] = 'Mageia'
elif os.path.isfile('/etc/meego-version'):
grains['os'] = 'MeeGo'
elif os.path.isfile('/etc/vmware-version'):
@@ -206,22 +390,27 @@ def os_data():
grains['os'] = 'OEL'
elif os.path.isfile('/etc/redhat-release'):
data = open('/etc/redhat-release', 'r').read()
- if data.count('centos'):
+ if 'centos' in data.lower():
grains['os'] = 'CentOS'
- elif data.count('scientific'):
+ elif 'scientific' in data.lower():
grains['os'] = 'Scientific'
else:
grains['os'] = 'RedHat'
elif os.path.isfile('/etc/SuSE-release'):
data = open('/etc/SuSE-release', 'r').read()
- if data.count('SUSE LINUX Enterprise Server'):
+ if 'SUSE LINUX Enterprise Server' in data:
grains['os'] = 'SLES'
- elif data.count('SUSE LINUX Enterprise Desktop'):
+ elif 'SUSE LINUX Enterprise Desktop' in data:
grains['os'] = 'SLED'
- elif data.count('openSUSE'):
+ elif 'openSUSE' in data:
grains['os'] = 'openSUSE'
else:
grains['os'] = 'SUSE'
+ # Use the already intelligent platform module to get distro info
+ grains.update(_linux_platform_data(grains))
+ # If the Linux version can not be determined
+ if not 'os' in grains:
+ grains['os'] = 'Unknown {0}'.format(grains['kernel'])
elif grains['kernel'] == 'sunos':
grains['os'] = 'Solaris'
elif grains['kernel'] == 'VMkernel':
@@ -230,18 +419,21 @@ def os_data():
grains['os'] = 'MacOS'
else:
grains['os'] = grains['kernel']
-
if grains['kernel'] == 'Linux':
grains.update(_linux_cpudata())
- elif grains['kernel'] == 'FreeBSD':
+ elif grains['kernel'] in ('FreeBSD', 'OpenBSD'):
+ # _freebsd_cpudata works on OpenBSD as well.
grains.update(_freebsd_cpudata())
grains.update(_memdata(grains))
- # Load the virtual machine info
+ # Get the hardware and bios data
+ grains.update(_hw_data(grains))
+ # Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_ps(grains))
+
return grains
@@ -253,11 +445,13 @@ def hostname():
# Provides:
# fqdn
# host
+ # localhost
# domain
grains = {}
grains['fqdn'] = socket.getfqdn()
comps = grains['fqdn'].split('.')
grains['host'] = comps[0]
+ grains['localhost'] = socket.gethostname()
if len(comps) > 1:
grains['domain'] = '.'.join(comps[1:])
else:
@@ -297,3 +491,84 @@ def saltpath():
# saltpath
path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(path)}
+
+
+# Relatively complex mini-algorithm to iterate over the various
+# sections of dmidecode output and return matches for specific
+# lines containing data we want, but only in the right section.
+def _dmidecode_data(regex_dict):
+ '''
+ Parse the output of dmidecode in a generic fashion that can
+ be used for the multiple system types which have dmidecode.
+ '''
+ # NOTE: This function might gain support for smbios instead
+ # of dmidecode when salt gets working Solaris support
+ ret = {}
+
+ # No use running if dmidecode isn't in the path
+ if not salt.utils.which('dmidecode'):
+ return ret
+
+ out = __salt__['cmd.run']('dmidecode')
+
+ for section in regex_dict:
+ section_found = False
+
+ # Look at every line for the right section
+ for line in out.split('\n'):
+ if not line: continue
+ # We've found it, woohoo!
+ if re.match(section, line):
+ section_found = True
+ continue
+ if not section_found:
+ continue
+
+ # Now that a section has been found, find the data
+ for item in regex_dict[section]:
+ # Examples:
+ # Product Name: 64639SU
+ # Version: 7LETC1WW (2.21 )
+ regex = re.compile('\s+{0}\s+(.*)$'.format(item))
+ grain = regex_dict[section][item]
+ # Skip to the next iteration if this grain
+ # has been found in the dmidecode output.
+ if grain in ret: continue
+
+ match = regex.match(line)
+
+ # Finally, add the matched data to the grains returned
+ if match:
+ ret[grain] = match.group(1).strip()
+ return ret
+
+
+def _hw_data(osdata):
+ '''
+ Get system specific hardware data from dmidecode
+
+ Provides
+ biosversion
+ productname
+ manufacturer
+ serialnumber
+ biosreleasedate
+
+ .. versionadded:: 0.9.5
+ '''
+ grains = {}
+ # TODO: *BSD dmidecode output
+ if osdata['kernel'] == 'Linux':
+ linux_dmi_regex = {
+ 'BIOS [Ii]nformation': {
+ '[Vv]ersion:': 'biosversion',
+ '[Rr]elease [Dd]ate:': 'biosreleasedate',
+ },
+ '[Ss]ystem [Ii]nformation': {
+ 'Manufacturer:': 'manufacturer',
+ 'Product(?: Name)?:': 'productname',
+ 'Serial Number:': 'serialnumber',
+ },
+ }
+ grains.update(_dmidecode_data(linux_dmi_regex))
+ return grains
diff --git a/salt/loader.py b/salt/loader.py
index 5ab601cf019d..0a2ab46c635f 100644
--- a/salt/loader.py
+++ b/salt/loader.py
@@ -11,26 +11,22 @@
import logging
import os
import salt
+from salt.exceptions import LoaderError
log = logging.getLogger(__name__)
salt_base_path = os.path.dirname(salt.__file__)
-class LoaderError(Exception):
- '''
- Custom exception class.
- '''
-
- pass
-
-
def minion_mods(opts):
'''
Returns the minion modules
'''
- extra_dirs = []
+ extra_dirs = [
+ os.path.join(opts['extension_modules'],
+ 'modules')
+ ]
if 'module_dirs' in opts:
- extra_dirs = opts['module_dirs']
+ extra_dirs.extend(opts['module_dirs'])
module_dirs = [
os.path.join(salt_base_path, 'modules'),
] + extra_dirs
@@ -42,9 +38,12 @@ def returners(opts):
'''
Returns the returner modules
'''
- extra_dirs = []
+ extra_dirs = [
+ os.path.join(opts['extension_modules'],
+ 'returners')
+ ]
if 'returner_dirs' in opts:
- extra_dirs = opts['returner_dirs']
+ extra_dirs.extend(opts['returner_dirs'])
module_dirs = [
os.path.join(salt_base_path, 'returners'),
] + extra_dirs
@@ -56,9 +55,12 @@ def states(opts, functions):
'''
Returns the returner modules
'''
- extra_dirs = []
+ extra_dirs = [
+ os.path.join(opts['extension_modules'],
+ 'states')
+ ]
if 'states_dirs' in opts:
- extra_dirs = opts['states_dirs']
+ extra_dirs.extend(opts['states_dirs'])
module_dirs = [
os.path.join(salt_base_path, 'states'),
] + extra_dirs
@@ -72,9 +74,12 @@ def render(opts, functions):
'''
Returns the render modules
'''
- extra_dirs = []
+ extra_dirs = [
+ os.path.join(opts['extension_modules'],
+ 'renderers')
+ ]
if 'render_dirs' in opts:
- extra_dirs = opts['render_dirs']
+ extra_dirs.extend(opts['render_dirs'])
module_dirs = [
os.path.join(salt_base_path, 'renderers'),
] + extra_dirs
@@ -84,7 +89,7 @@ def render(opts, functions):
rend = load.filter_func('render', pack)
if opts['renderer'] not in rend:
err = ('The renderer {0} is unavailable, this error is often because '
- 'the needed software is unavailabe'.format(opts['renderer']))
+ 'the needed software is unavailable'.format(opts['renderer']))
log.critical(err)
raise LoaderError(err)
return rend
@@ -135,7 +140,7 @@ class Loader(object):
used to only load specific functions from a directory, or to call modules
in an arbitrary directory directly.
'''
- def __init__(self, module_dirs, opts={}):
+ def __init__(self, module_dirs, opts=dict()):
self.module_dirs = module_dirs
if 'grains' in opts:
self.grains = opts['grains']
@@ -164,7 +169,7 @@ def get_docs(self, funcs, module=''):
docs[fun] = funcs[fun].__doc__
return docs
- def call(self, fun, arg=[]):
+ def call(self, fun, arg=list()):
'''
Call a function in the load path.
'''
@@ -212,7 +217,7 @@ def gen_functions(self, pack=None):
log.info('Cython is enabled in options put not present '
'on the system path. Skipping Cython modules.')
for mod_dir in self.module_dirs:
- if not mod_dir.startswith('/'):
+ if not os.path.isabs(mod_dir):
continue
if not os.path.isdir(mod_dir):
continue
@@ -234,7 +239,9 @@ def gen_functions(self, pack=None):
else:
fn_, path, desc = imp.find_module(name, self.module_dirs)
mod = imp.load_module(name, fn_, path, desc)
- except ImportError:
+ except ImportError as exc:
+ log.debug(('Failed to import module {0}, this is most likely'
+ ' NOT a problem: {1}').format(name, exc))
continue
modules.append(mod)
for mod in modules:
@@ -247,12 +254,20 @@ def gen_functions(self, pack=None):
mod.__grains__ = self.grains
if pack:
- if type(pack) == type(list()):
+ if isinstance(pack, list):
for chunk in pack:
setattr(mod, chunk['name'], chunk['value'])
else:
setattr(mod, pack['name'], pack['value'])
+ # Call a module's initialization method if it exists
+ if hasattr(mod, '__init__'):
+ if callable(mod.__init__):
+ try:
+ mod.__init__()
+ except TypeError:
+ pass
+
if hasattr(mod, '__virtual__'):
if callable(mod.__virtual__):
virtual = mod.__virtual__()
@@ -265,7 +280,7 @@ def gen_functions(self, pack=None):
func = getattr(mod, attr)
funcs[virtual + '.' + attr] = func
self._apply_outputter(func, mod)
- elif virtual == False:
+ elif virtual is False:
pass
else:
func = getattr(mod, attr)
@@ -293,13 +308,14 @@ def apply_introspection(self, funcs):
funcs['sys.list_functions'] = lambda: self.list_funcs(funcs)
funcs['sys.list_modules'] = lambda: self.list_modules(funcs)
funcs['sys.doc'] = lambda module = '': self.get_docs(funcs, module)
+ funcs['sys.reload_modules'] = lambda: True
return funcs
def list_funcs(self, funcs):
'''
List the functions
'''
- return funcs.keys()
+ return sorted(funcs.keys())
def list_modules(self, funcs):
'''
@@ -338,7 +354,7 @@ def chop_mods(self):
def gen_grains(self):
'''
Read the grains directory and execute all of the public callable
- members. then verify that the returns are python dict's and return a
+ members. Then verify that the returns are python dict's and return a
dict containing all of the returned values.
'''
grains = {}
@@ -347,14 +363,14 @@ def gen_grains(self):
if not key[key.index('.') + 1:] == 'core':
continue
ret = fun()
- if not type(ret) == type(dict()):
+ if not isinstance(ret, dict):
continue
grains.update(ret)
for key, fun in funcs.items():
if key[key.index('.') + 1:] == 'core':
continue
ret = fun()
- if not type(ret) == type(dict()):
+ if not isinstance(ret, dict):
continue
grains.update(ret)
return grains
diff --git a/salt/log.py b/salt/log.py
index 5f5a76e68560..c41474196495 100755
--- a/salt/log.py
+++ b/salt/log.py
@@ -2,7 +2,7 @@
salt.log
~~~~~~~~
- This is were Salt's logging get's setup.
+ This is where Salt's logging gets set up.
:copyright: 2011 :email:`Pedro Algarvio (pedro@algarvio.me)`
@@ -54,7 +54,7 @@ def init():
logging.getLogger().setLevel(1)
-def setup_console_logger(log_level):
+def setup_console_logger(log_level='error', log_format=None, date_format=None):
'''
Setup the console logger
'''
@@ -65,16 +65,23 @@ def setup_console_logger(log_level):
handler = logging.StreamHandler()
handler.setLevel(level)
+
+ # Set the default console formatter config
+ if not log_format:
+ log_format = '[%(levelname)-8s] %(message)s'
+ if not date_format:
+ date_format = '%H:%M:%S'
+
formatter = logging.Formatter(
- '%(asctime)s,%(msecs)03.0f [%(name)-15s][%(levelname)-8s] %(message)s',
- datefmt="%H:%M:%S"
+ log_format,
+ datefmt = date_format
)
handler.setFormatter(formatter)
rootLogger.addHandler(handler)
-def setup_logfile_logger(log_path, log_level):
+def setup_logfile_logger(log_path, log_level='error'):
'''
Setup the logfile logger
'''
@@ -96,7 +103,7 @@ def setup_logfile_logger(log_path, log_level):
rootLogger.addHandler(handler)
-def set_logger_level(logger_name, log_level):
+def set_logger_level(logger_name, log_level='error'):
'''
Tweak a specific logger's logging level
'''
diff --git a/salt/master.py b/salt/master.py
index 1dc71aecb872..1e69fd272545 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -1,48 +1,48 @@
'''
-This module contains all fo the routines needed to set up a master server, this
+This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python modules
-import cPickle as pickle
-import datetime
-import hashlib
-import logging
-import multiprocessing
import os
import re
+import time
import shutil
+import logging
+import hashlib
import tempfile
-import time
+import datetime
+import multiprocessing
# Import zeromq
-from M2Crypto import RSA
import zmq
+from M2Crypto import RSA
# Import salt modules
-import salt.client
import salt.crypt
-import salt.payload
import salt.utils
+import salt.client
+import salt.payload
log = logging.getLogger(__name__)
-def prep_jid(cachedir, load):
+def prep_jid(opts, load):
'''
Parses the job return directory, generates a job id and sets up the
job id directory.
'''
- jid_root = os.path.join(cachedir, 'jobs')
+ serial = salt.payload.Serial(opts)
+ jid_root = os.path.join(opts['cachedir'], 'jobs')
jid = "{0:%Y%m%d%H%M%S%f}".format(datetime.datetime.now())
jid_dir = os.path.join(jid_root, jid)
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
- pickle.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
+ serial.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
else:
- return prep_jid(load)
+ return prep_jid(opts, load)
return jid
@@ -63,7 +63,7 @@ def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
- return salt.crypt.Crypticle(self.opts['aes'])
+ return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
@@ -76,7 +76,9 @@ def __prep_key(self):
return open(keyfile, 'r').read()
else:
key = salt.crypt.Crypticle.generate_key_string()
+ cumask = os.umask(191)
open(keyfile, 'w+').write(key)
+ os.umask(cumask)
os.chmod(keyfile, 256)
return key
@@ -104,13 +106,16 @@ def _clear_old_jobs(self):
for jid in os.listdir(jid_root):
if int(cur) - int(jid[:10]) > self.opts['keep_jobs']:
shutil.rmtree(os.path.join(jid_root, jid))
- time.sleep(60)
+ try:
+ time.sleep(60)
+ except KeyboardInterrupt:
+ break
def start(self):
'''
Turn on the master server components
'''
- log.info('Starting the Salt Master')
+ log.warn('Starting the Salt Master')
multiprocessing.Process(target=self._clear_old_jobs).start()
aes_funcs = AESFuncs(self.opts, self.crypticle)
clear_funcs = ClearFuncs(
@@ -126,7 +131,13 @@ def start(self):
aes_funcs,
clear_funcs)
reqserv.start_publisher()
- reqserv.run()
+
+ try:
+ reqserv.run()
+ except KeyboardInterrupt:
+ # Shut the master down gracefully on SIGINT
+ log.warn('Stopping the Salt Master')
+ raise SystemExit('\nExiting on Ctrl-c')
class Publisher(multiprocessing.Process):
@@ -135,7 +146,7 @@ class Publisher(multiprocessing.Process):
commands.
'''
def __init__(self, opts):
- multiprocessing.Process.__init__(self)
+ super(Publisher, self).__init__()
self.opts = opts
def run(self):
@@ -153,10 +164,14 @@ def run(self):
pub_sock.bind(pub_uri)
pull_sock.bind(pull_uri)
- while True:
- package = pull_sock.recv()
- log.info('Publishing command')
- pub_sock.send(package)
+ try:
+ while True:
+ package = pull_sock.recv()
+ log.info('Publishing command')
+ pub_sock.send(package)
+ except KeyboardInterrupt:
+ pub_sock.close()
+ pull_sock.close()
class ReqServer(object):
@@ -230,6 +245,7 @@ def __init__(self,
clear_funcs):
multiprocessing.Process.__init__(self)
self.opts = opts
+ self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.aes_funcs = aes_funcs
self.clear_funcs = clear_funcs
@@ -244,19 +260,27 @@ def __bind(self):
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
- socket.connect(w_uri)
+ try:
+ socket.connect(w_uri)
- while True:
- package = socket.recv()
- payload = salt.payload.unpackage(package)
- ret = salt.payload.package(self._handle_payload(payload))
- socket.send(ret)
+ while True:
+ package = socket.recv()
+ payload = self.serial.loads(package)
+ ret = self.serial.dumps(self._handle_payload(payload))
+ socket.send(ret)
+ except KeyboardInterrupt:
+ socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
+ try:
+ key = payload['enc']
+ load = payload['load']
+ except KeyError:
+ return ''
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'])
@@ -283,7 +307,7 @@ def _handle_aes(self, load):
except:
return ''
if 'cmd' not in data:
- log.error('Recieved malformed command {0}'.format(data))
+ log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
return self.aes_funcs.run_func(data['cmd'], data)
@@ -303,6 +327,7 @@ class AESFuncs(object):
#
def __init__(self, opts, crypticle):
self.opts = opts
+ self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
@@ -425,15 +450,15 @@ def _return(self, load):
hn_dir = os.path.join(jid_dir, load['id'])
if not os.path.isdir(hn_dir):
os.makedirs(hn_dir)
- pickle.dump(load['return'],
+ self.serial.dump(load['return'],
open(os.path.join(hn_dir, 'return.p'), 'w+'))
if 'out' in load:
- pickle.dump(load['out'],
+ self.serial.dump(load['out'],
open(os.path.join(hn_dir, 'out.p'), 'w+'))
def _syndic_return(self, load):
'''
- Recieve a syndic minion return and format it to look like returns from
+ Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
@@ -480,10 +505,13 @@ def minion_publish(self, clear_load):
# If the command will make a recursive publish don't run
if re.match('publish.*', clear_load['fun']):
return {}
- # Check the permisions for this minion
+ # Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
+ jid = clear_load['jid']
+ msg = 'Minion id {0} is not who it says it is!'.format(jid)
+ log.warn(msg)
return {}
perms = set()
for match in self.opts['peer']:
@@ -498,7 +526,7 @@ def minion_publish(self, clear_load):
if not good:
return {}
# Set up the publication payload
- jid = prep_jid(self.opts['cachedir'], clear_load)
+ jid = prep_jid(self.opts, clear_load)
payload = {'enc': 'aes'}
load = {
'fun': clear_load['fun'],
@@ -523,7 +551,7 @@ def minion_publish(self, clear_load):
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
- pub_sock.send(salt.payload.package(payload))
+ pub_sock.send(self.serial.dumps(payload))
# Run the client get_returns method
return self.local.get_returns(
jid,
@@ -556,12 +584,13 @@ class ClearFuncs(object):
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
- # The ClearFuncs object encasulates the functions that can be executed in
+ # The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
+ self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
@@ -608,7 +637,7 @@ def _auth(self, load):
# 1. Verify that the key we are receiving matches the stored key
# 2. Store the key if it is not there
# 3. make an rsa key with the pub key
- # 4. encrypt the aes key as an encrypted pickle
+ # 4. encrypt the aes key as an encrypted salt.payload
# 5. package the return and return it
log.info('Authentication request from %(id)s', load)
pubfn = os.path.join(self.opts['pki_dir'],
@@ -655,7 +684,7 @@ def _auth(self, load):
else:
log.info(
'Authentication failed from host %(id)s, the key is in '
- 'pending and needs to be accepted with saltkey -a %(id)s',
+ 'pending and needs to be accepted with salt-key -a %(id)s',
load
)
return {'enc': 'clear',
@@ -666,6 +695,7 @@ def _auth(self, load):
pass
else:
# Something happened that I have not accounted for, FAIL!
+ log.warn('Unaccounted for authentication failure')
return {'enc': 'clear',
'load': {'ret': False}}
@@ -696,7 +726,7 @@ def publish(self, clear_load):
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
# Save the invocation information
- pickle.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
+ self.serial.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
# Set up the payload
payload = {'enc': 'aes'}
load = {
@@ -718,6 +748,6 @@ def publish(self, clear_load):
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
- pub_sock.send(salt.payload.package(payload))
+ pub_sock.send(self.serial.dumps(payload))
return {'enc': 'clear',
'load': {'jid': clear_load['jid']}}
diff --git a/salt/minion.py b/salt/minion.py
index a4ff39758b85..ce53e1071771 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -3,9 +3,12 @@
'''
# Import python libs
+import BaseHTTPServer
+import contextlib
import glob
import logging
import multiprocessing
+import hashlib
import os
import re
import shutil
@@ -13,18 +16,22 @@
import threading
import time
import traceback
+import urllib2
+import urlparse
# Import zeromq libs
import zmq
# Import salt libs
-from salt.crypt import AuthenticationError
+from salt.exceptions import AuthenticationError, MinionError, \
+ CommandExecutionError, SaltInvocationError
import salt.client
import salt.crypt
import salt.loader
import salt.modules
import salt.returners
import salt.utils
+import salt.payload
log = logging.getLogger(__name__)
@@ -37,13 +44,6 @@
# 6. handle publications
-class MinionError(Exception):
- '''
- Custom exception class.
- '''
- pass
-
-
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
@@ -54,12 +54,18 @@ class SMinion(object):
def __init__(self, opts):
# Generate all of the minion side components
self.opts = opts
+ self.gen_modules()
+
+ def gen_modules(self):
+ '''
+ Load all of the modules for the minion
+ '''
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
-
+ self.functions['sys.reload_modules'] = self.gen_modules
class Minion(object):
'''
@@ -71,9 +77,14 @@ def __init__(self, opts):
Pass in the options dict
'''
self.opts = opts
+ self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self.__prep_mod_opts()
self.functions, self.returners = self.__load_modules()
self.matcher = Matcher(self.opts, self.functions)
+ if hasattr(self,'_syndic') and self._syndic:
+ log.warn('Starting the Salt Syndic Minion')
+ else:
+ log.warn('Starting the Salt Minion')
self.authenticate()
def __prep_mod_opts(self):
@@ -122,7 +133,7 @@ def _handle_aes(self, load):
# Verify that the publication applies to this minion
if 'tgt_type' in data:
if not getattr(self.matcher,
- data['tgt_type'] + '_match')(data['tgt']):
+ '{0}_match'.format(data['tgt_type']))(data['tgt']):
return
else:
if not self.matcher.glob_match(data['tgt']):
@@ -152,8 +163,12 @@ def _handle_decoded_payload(self, data):
Override this method if you wish to handle the decoded
data differently.
'''
+ if isinstance(data['fun'], str):
+ if data['fun'] == 'sys.reload_modules':
+ self.functions, self.returners = self.__load_modules()
+
if self.opts['multiprocessing']:
- if type(data['fun']) == type(list()):
+ if isinstance(data['fun'], list):
multiprocessing.Process(
target=lambda: self._thread_multi_return(data)
).start()
@@ -162,7 +177,7 @@ def _handle_decoded_payload(self, data):
target=lambda: self._thread_return(data)
).start()
else:
- if type(data['fun']) == type(list()):
+ if isinstance(data['fun'], list):
threading.Thread(
target=lambda: self._thread_multi_return(data)
).start()
@@ -180,10 +195,9 @@ def _thread_return(self, data):
for ind in range(0, len(data['arg'])):
try:
arg = eval(data['arg'][ind])
- if isinstance(arg, str) \
- or isinstance(arg, list) \
- or isinstance(arg, int) \
- or isinstance(arg, dict):
+ if isinstance(arg, bool):
+ data['arg'][ind] = str(data['arg'][ind])
+ elif isinstance(arg, (dict, int, list, str)):
data['arg'][ind] = arg
else:
data['arg'][ind] = str(data['arg'][ind])
@@ -194,12 +208,21 @@ def _thread_return(self, data):
if function_name in self.functions:
try:
ret['return'] = self.functions[data['fun']](*data['arg'])
+ except CommandExecutionError as exc:
+ msg = 'A command in {0} had a problem: {1}'
+ log.error(msg.format(function_name, str(exc)))
+ ret['return'] = 'ERROR: {0}'.format(str(exc))
+ except SaltInvocationError as exc:
+ msg = 'Problem executing "{0}": {1}'
+ log.error(msg.format(function_name, str(exc)))
+ ret['return'] = 'ERROR executing {0}: {1}'.format(function_name, str(exc))
except Exception as exc:
trb = traceback.format_exc()
- log.warning('The minion function caused an exception: %s', exc)
+ msg = 'The minion function caused an exception: {0}'
+ log.warning(msg.format(trb))
ret['return'] = trb
else:
- ret['return'] = '"%s" is not available.' % function_name
+ ret['return'] = '"{0}" is not available.'.format(function_name)
ret['jid'] = data['jid']
ret['fun'] = data['fun']
@@ -222,11 +245,9 @@ def _thread_multi_return(self, data):
for index in range(0, len(data['arg'][ind])):
try:
arg = eval(data['arg'][ind][index])
- # FIXME: do away the ugly here...
- if isinstance(arg, str) \
- or isinstance(arg, list) \
- or isinstance(arg, int) \
- or isinstance(arg, dict):
+ if isinstance(arg, bool):
+ data['arg'][ind][index] = str(data['arg'][ind][index])
+ elif isinstance(arg, (dict, int, list, str)):
data['arg'][ind][index] = arg
else:
data['arg'][ind][index] = str(data['arg'][ind][index])
@@ -280,17 +301,9 @@ def _return_pub(self, ret, ret_cmd='_return'):
except KeyError:
pass
payload['load'] = self.crypticle.dumps(load)
- socket.send_pyobj(payload)
+ socket.send(self.serial.dumps(payload))
return socket.recv()
- def reload_functions(self):
- '''
- Reload the functions dict for this minion, reading in any new functions
- '''
- self.functions = self.__load_functions()
- log.debug('Refreshed functions, loaded functions: %s', self.functions)
- return True
-
def authenticate(self):
'''
Authenticate with the master, this method breaks the functional
@@ -306,17 +319,38 @@ def authenticate(self):
log.info('Authentication with master successful!')
break
log.info('Waiting for minion key to be accepted by the master.')
- time.sleep(10)
+ time.sleep(self.opts['acceptance_wait_time'])
self.aes = creds['aes']
self.publish_port = creds['publish_port']
- self.crypticle = salt.crypt.Crypticle(self.aes)
+ self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
+
+ def passive_refresh(self):
+ '''
+ Check to see if the salt refresh file has been laid down, if it has,
+ refresh the functions and returners.
+ '''
+ if os.path.isfile(
+ os.path.join(
+ self.opts['cachedir'],
+ '.module_refresh'
+ )
+ ):
+ self.functions, self.returners = self.__load_modules()
+ os.remove(
+ os.path.join(
+ self.opts['cachedir'],
+ '.module_refresh'
+ )
+ )
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
'''
- master_pub = ('tcp://' + self.opts['master_ip'] +
- ':' + str(self.publish_port))
+ master_pub = 'tcp://{0}:{1}'.format(
+ self.opts['master_ip'],
+ str(self.publish_port)
+ )
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, '')
@@ -326,7 +360,7 @@ def tune_in(self):
while True:
payload = None
try:
- payload = socket.recv_pyobj(1)
+ payload = self.serial.loads(socket.recv(1))
self._handle_payload(payload)
last = time.time()
except:
@@ -341,15 +375,18 @@ def tune_in(self):
last = time.time()
time.sleep(0.05)
multiprocessing.active_children()
- while True:
- payload = None
- try:
- payload = socket.recv_pyobj(1)
- self._handle_payload(payload)
- except:
- pass
- time.sleep(0.05)
- multiprocessing.active_children()
+ self.passive_refresh()
+ else:
+ while True:
+ payload = None
+ try:
+                        payload = self.serial.loads(socket.recv(1))
+ self._handle_payload(payload)
+ except:
+ pass
+ time.sleep(0.05)
+ multiprocessing.active_children()
+ self.passive_refresh()
class Syndic(salt.client.LocalClient, Minion):
@@ -358,6 +395,7 @@ class Syndic(salt.client.LocalClient, Minion):
authenticate with a higher level master.
'''
def __init__(self, opts):
+ self._syndic = True
salt.client.LocalClient.__init__(self, opts['_master_conf_file'])
Minion.__init__(self, opts)
@@ -443,11 +481,11 @@ def confirm_top(self, match, data):
'''
matcher = 'glob'
for item in data:
- if type(item) == type(dict()):
+ if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
- return getattr(self, matcher + '_match')(match)
+ return getattr(self, '{0}_match'.format(matcher))(match)
else:
log.error('Attempting to match with unknown matcher: %s', matcher)
return False
@@ -475,7 +513,7 @@ def list_match(self, tgt):
'''
Determines if this host is on the list
'''
- return bool(tgt.count(self.opts['id']))
+        return bool(self.opts['id'] in tgt)
def grain_match(self, tgt):
'''
@@ -486,7 +524,7 @@ def grain_match(self, tgt):
log.error('Got insufficient arguments for grains from master')
return False
if comps[0] not in self.opts['grains']:
- log.error('Got unknown grain from master: %s', comps[0])
+ log.error('Got unknown grain from master: {0}'.format(comps[0]))
return False
return bool(re.match(comps[1], self.opts['grains'][comps[0]]))
@@ -498,6 +536,54 @@ def exsel_match(self, tgt):
return False
return(self.functions[tgt]())
+ def compound_match(self, tgt):
+ '''
+ Runs the compound target check
+ '''
+ if not isinstance(tgt, str):
+ log.debug('Compound target received that is not a string')
+ return False
+ ref = {'G': 'grain',
+ 'X': 'exsel',
+ 'L': 'list',
+ 'E': 'pcre'}
+ results = []
+ for match in tgt.split():
+ # Attach the boolean operator
+ if match == 'and':
+ results.append('and')
+ continue
+ elif match == 'or':
+ results.append('or')
+ continue
+ # If we are here then it is not a boolean operator, check if the
+ # last member of the result list is a boolean, if no, append and
+ if results:
+                if results[-1] != 'and' and results[-1] != 'or':
+ results.append('and')
+ if match[1] == '@':
+ comps = match.split('@')
+ matcher = ref.get(comps[0])
+ if not matcher:
+ # If an unknown matcher is called at any time, fail out
+ return False
+                log.debug(comps)
+ results.append(
+ str(getattr(
+ self,
+ '{0}_match'.format(matcher)
+ )('@'.join(comps[1:]))
+ ))
+ else:
+ results.append(
+ str(getattr(
+ self,
+                        'glob_match'
+                    )(match)
+ ))
+
+        log.debug(' '.join(results))
+ return eval(' '.join(results))
class FileClient(object):
'''
@@ -505,6 +591,7 @@ class FileClient(object):
'''
def __init__(self, opts):
self.opts = opts
+ self.serial = salt.payload.Serial(self.opts)
self.auth = salt.crypt.SAuth(opts)
self.socket = self.__get_socket()
@@ -522,9 +609,27 @@ def _check_proto(self, path):
Make sure that this path is intended for the salt master and trim it
'''
if not path.startswith('salt://'):
- raise MinionError('Unsupported path')
+ raise MinionError('Unsupported path: {0}'.format(path))
return path[7:]
+ def _file_local_list(self, dest):
+ '''
+ Helper util to return a list of files in a directory
+ '''
+ if os.path.isdir(dest):
+ destdir = dest
+ else:
+ destdir = os.path.dirname(dest)
+
+ filelist = []
+
+ for root, dirs, files in os.walk(destdir):
+ for name in files:
+ path = os.path.join(root, name)
+ filelist.append(path)
+
+ return filelist
+
def get_file(self, path, dest='', makedirs=False, env='base'):
'''
Get a single file from the salt-master
@@ -549,9 +654,25 @@ def get_file(self, path, dest='', makedirs=False, env='base'):
else:
load['loc'] = fn_.tell()
payload['load'] = self.auth.crypticle.dumps(load)
- self.socket.send_pyobj(payload)
- data = self.auth.crypticle.loads(self.socket.recv_pyobj())
+ self.socket.send(self.serial.dumps(payload))
+ data = self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
if not data['data']:
+ if not fn_ and data['dest']:
+ # This is a 0 byte file on the master
+ dest = os.path.join(
+ self.opts['cachedir'],
+ 'files',
+ env,
+ data['dest']
+ )
+ destdir = os.path.dirname(dest)
+ cumask = os.umask(191)
+ if not os.path.isdir(destdir):
+ os.makedirs(destdir)
+ if not os.path.exists(dest):
+ open(dest, 'w+').write(data['data'])
+ os.chmod(dest, 384)
+ os.umask(cumask)
break
if not fn_:
dest = os.path.join(
@@ -561,12 +682,41 @@ def get_file(self, path, dest='', makedirs=False, env='base'):
data['dest']
)
destdir = os.path.dirname(dest)
+ cumask = os.umask(191)
if not os.path.isdir(destdir):
os.makedirs(destdir)
fn_ = open(dest, 'w+')
+ os.chmod(dest, 384)
+ os.umask(cumask)
fn_.write(data['data'])
return dest
+ def get_url(self, url, dest, makedirs=False, env='base'):
+ '''
+ Get a single file from a URL.
+ '''
+ if urlparse.urlparse(url).scheme == 'salt':
+ return self.get_file(url, dest, makedirs, env)
+ destdir = os.path.dirname(dest)
+ if not os.path.isdir(destdir):
+ if makedirs:
+ os.makedirs(destdir)
+ else:
+ return False
+ try:
+ with contextlib.closing(urllib2.urlopen(url)) as srcfp:
+ with open(dest, 'wb') as destfp:
+ shutil.copyfileobj(srcfp, destfp)
+ return dest
+ except urllib2.HTTPError, ex:
+ raise MinionError('HTTP error {0} reading {1}: {3}'.format(
+ ex.code,
+ url,
+ *BaseHTTPServer.BaseHTTPRequestHandler.responses[ex.code]))
+ except urllib2.URLError, ex:
+ raise MinionError('Error reading {0}: {1}'.format(url, ex.reason))
+ return False
+
def cache_file(self, path, env='base'):
'''
Pull a file down from the file server and store it in the minion file
@@ -604,6 +754,20 @@ def cache_dir(self, path, env='base'):
ret.append(self.cache_file('salt://{0}'.format(fn_), env))
return ret
+ def cache_local_file(self, path, **kwargs):
+ '''
+ Cache a local file on the minion in the localfiles cache
+ '''
+ dest = os.path.join(self.opts['cachedir'], 'localfiles',
+ path.lstrip('/'))
+ destdir = os.path.dirname(dest)
+
+ if not os.path.isdir(destdir):
+ os.makedirs(destdir)
+
+ shutil.copyfile(path, dest)
+ return dest
+
def file_list(self, env='base'):
'''
List the files on the master
@@ -612,8 +776,35 @@ def file_list(self, env='base'):
load = {'env': env,
'cmd': '_file_list'}
payload['load'] = self.auth.crypticle.dumps(load)
- self.socket.send_pyobj(payload)
- return self.auth.crypticle.loads(self.socket.recv_pyobj())
+ self.socket.send(self.serial.dumps(payload))
+ return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
+
+ def file_local_list(self, env='base'):
+ '''
+ List files in the local minion files and localfiles caches
+ '''
+ filesdest = os.path.join(self.opts['cachedir'], 'files', env)
+ localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles')
+
+ return sorted(self._file_local_list(filesdest) +
+ self._file_local_list(localfilesdest))
+
+ def is_cached(self, path, env='base'):
+ '''
+ Returns the full path to a file if it is cached locally on the minion
+ otherwise returns a blank string
+ '''
+ localsfilesdest = os.path.join(
+ self.opts['cachedir'], 'localfiles', path.lstrip('/'))
+ filesdest = os.path.join(
+            self.opts['cachedir'], 'files', env, path.partition('salt://')[2] or path)
+
+ if os.path.exists(filesdest):
+ return filesdest
+ elif os.path.exists(localsfilesdest):
+ return localsfilesdest
+
+ return ''
def hash_file(self, path, env='base'):
'''
@@ -621,14 +812,26 @@ def hash_file(self, path, env='base'):
salt master file server prepend the path with salt://
otherwise, prepend the file with / for a local file.
'''
- path = self._check_proto(path)
+ try:
+ path = self._check_proto(path)
+ except MinionError:
+ if not os.path.isfile(path):
+ err = ('Specified file {0} is not present to generate '
+ 'hash').format(path)
+ log.warning(err)
+ return {}
+ else:
+ ret = {}
+ ret['hsum'] = hashlib.md5(open(path, 'rb').read()).hexdigest()
+ ret['hash_type'] = 'md5'
+ return ret
payload = {'enc': 'aes'}
load = {'path': path,
'env': env,
'cmd': '_file_hash'}
payload['load'] = self.auth.crypticle.dumps(load)
- self.socket.send_pyobj(payload)
- return self.auth.crypticle.loads(self.socket.recv_pyobj())
+ self.socket.send(self.serial.dumps(payload))
+ return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
def list_env(self, path, env='base'):
'''
@@ -638,8 +841,8 @@ def list_env(self, path, env='base'):
load = {'env': env,
'cmd': '_file_list'}
payload['load'] = self.auth.crypticle.dumps(load)
- self.socket.send_pyobj(payload)
- return self.auth.crypticle.loads(self.socket.recv_pyobj())
+ self.socket.send(self.serial.dumps(payload))
+ return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
def get_state(self, sls, env):
'''
@@ -662,5 +865,5 @@ def master_opts(self):
payload = {'enc': 'aes'}
load = {'cmd': '_master_opts'}
payload['load'] = self.auth.crypticle.dumps(load)
- self.socket.send_pyobj(payload)
- return self.auth.crypticle.loads(self.socket.recv_pyobj())
+ self.socket.send(self.serial.dumps(payload))
+ return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
diff --git a/salt/modules/apache.py b/salt/modules/apache.py
index 40eb33b05051..d506cbe61363 100644
--- a/salt/modules/apache.py
+++ b/salt/modules/apache.py
@@ -2,18 +2,22 @@
Support for Apache
'''
-from re import sub
+import re
+
+__outputter__ = {
+ 'signal': 'txt',
+}
def __detect_os():
'''
Apache commands and paths differ depending on packaging
'''
- httpd = 'CentOS Scientific RedHat Fedora'
- apache2 = 'Ubuntu'
- if httpd.count(__grains__['os']):
+ httpd = ('CentOS', 'Scientific', 'RedHat', 'Fedora')
+ apache2 = ('Ubuntu',)
+ if __grains__['os'] in httpd:
return 'apachectl'
- elif apache2.count(__grains__['os']):
+ elif __grains__['os'] in apache2:
return 'apache2ctl'
else:
return 'apachectl'
@@ -46,10 +50,10 @@ def fullversion():
ret['compiled_with'] = []
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
- continue
if ': ' in line:
comps = line.split(': ')
+ if not comps:
+ continue
ret[comps[0].strip().lower().replace(' ', '_')] = comps[1].strip()
elif ' -D' in line:
cw = line.strip(' -D ')
@@ -71,9 +75,9 @@ def modules():
ret['shared'] = []
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
- continue
comps = line.split()
+ if not comps:
+ continue
if '(static)' in line:
ret['static'].append(comps[0])
if '(shared)' in line:
@@ -93,7 +97,7 @@ def servermods():
ret = []
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
+ if not line:
continue
if '.c' in line:
ret.append(line.strip())
@@ -114,7 +118,7 @@ def directives():
out = __salt__['cmd.run'](cmd)
out = out.replace('\n\t', '\t')
for line in out.split('\n'):
- if not line.count(' '):
+ if not line:
continue
comps = line.split('\t')
desc = '\n'.join(comps[1:])
@@ -138,7 +142,7 @@ def vhosts():
namevhost = ''
out = __salt__['cmd.run'](cmd)
for line in out.split('\n'):
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
if 'is a NameVirtualHost' in line:
@@ -148,11 +152,11 @@ def vhosts():
if comps[0] == 'default':
ret[namevhost]['default'] = {}
ret[namevhost]['default']['vhost'] = comps[2]
- ret[namevhost]['default']['conf'] = sub(r'\(|\)', '', comps[3])
+ ret[namevhost]['default']['conf'] = re.sub(r'\(|\)', '', comps[3])
if comps[0] == 'port':
ret[namevhost][comps[3]] = {}
ret[namevhost][comps[3]]['vhost'] = comps[3]
- ret[namevhost][comps[3]]['conf'] = sub(r'\(|\)', '', comps[4])
+ ret[namevhost][comps[3]]['conf'] = re.sub(r'\(|\)', '', comps[4])
ret[namevhost][comps[3]]['port'] = comps[1]
return ret
@@ -165,8 +169,28 @@ def signal(signal=None):
salt '*' apache.signal restart
'''
- valid_signals = 'start stop restart graceful graceful-stop'
- if not valid_signals.count(signal):
+ no_extra_args = ('configtest', 'status', 'fullstatus')
+ valid_signals = ('start', 'stop', 'restart', 'graceful', 'graceful-stop')
+
+ if signal not in valid_signals and signal not in no_extra_args:
return
- cmd = __detect_os() + ' -k %s' % signal
- out = __salt__['cmd.run'](cmd)
+ # Make sure you use the right arguments
+ if signal in valid_signals:
+ arguments = ' -k {0}'.format(signal)
+ else:
+ arguments = ' {0}'.format(signal)
+ cmd = __detect_os() + arguments
+ out = __salt__['cmd.run_all'](cmd)
+
+ # A non-zero return code means fail
+ if out['retcode'] and out['stderr']:
+ ret = out['stderr'].strip()
+ # 'apachectl configtest' returns 'Syntax OK' to stderr
+ elif out['stderr']:
+ ret = out['stderr'].strip()
+ elif out['stdout']:
+ ret = out['stdout'].strip()
+ # No output for something like: apachectl graceful
+ else:
+ ret = 'Command: "{0}" completed successfully!'.format(cmd)
+ return ret
diff --git a/salt/modules/apt.py b/salt/modules/apt.py
index 22719800deff..d115e3066b52 100644
--- a/salt/modules/apt.py
+++ b/salt/modules/apt.py
@@ -8,7 +8,7 @@ def __virtual__():
Confirm this module is on a Debian based system
'''
- return 'pkg' if __grains__['os'] == 'Debian' else False
+ return 'pkg' if __grains__['os'] in [ 'Debian', 'Ubuntu' ] else False
def available_version(name):
@@ -20,7 +20,7 @@ def available_version(name):
salt '*' pkg.available_version
'''
version = ''
- cmd = 'apt-cache show {0} | grep Version'.format(name)
+ cmd = 'apt-cache -q show {0} | grep Version'.format(name)
out = __salt__['cmd.run_stdout'](cmd)
@@ -59,7 +59,7 @@ def refresh_db():
salt '*' pkg.refresh_db
'''
- cmd = 'apt-get update'
+ cmd = 'apt-get -q update'
out = __salt__['cmd.run_stdout'](cmd)
@@ -69,7 +69,7 @@ def refresh_db():
if not len(cols):
continue
ident = " ".join(cols[1:4])
- if cols[0].count('Get'):
+ if 'Get' in cols[0]:
servers[ident] = True
else:
servers[ident] = False
@@ -77,10 +77,20 @@ def refresh_db():
return servers
-def install(pkg, refresh=False):
+def install(pkg, refresh=False, repo='', skip_verify=False):
'''
Install the passed package
+ pkg
+ The name of the package to be installed
+ refresh : False
+ Update apt before continuing
+ repo : (default)
+ Specify a package repository to install from
+ (e.g., ``apt-get -t unstable install somepackage``)
+ skip_verify : False
+ Skip the GPG verification check (e.g., ``--allow-unauthenticated``)
+
Return a dict containing the new package names and versions::
{'': {'old': '',
@@ -95,8 +105,15 @@ def install(pkg, refresh=False):
ret_pkgs = {}
old_pkgs = list_pkgs()
- cmd = 'apt-get -y install {0}'.format(pkg)
- __salt__['cmd.retcode'](cmd)
+
+ cmd = '{nonint} apt-get -q -y {confold}{verify}{target} install {pkg}'.format(
+ nonint='DEBIAN_FRONTEND=noninteractive',
+ confold='-o DPkg::Options::=--force-confold',
+ verify='--allow-unauthenticated' if skip_verify else '',
+ target=' -t {0}'.format(repo) if repo else '',
+ pkg=pkg)
+
+ __salt__['cmd.run'](cmd)
new_pkgs = list_pkgs()
for pkg in new_pkgs:
@@ -115,7 +132,7 @@ def install(pkg, refresh=False):
def remove(pkg):
'''
- Remove a single package via ``aptitude remove``
+ Remove a single package via ``apt-get remove``
Returns a list containing the names of the removed packages.
@@ -126,8 +143,8 @@ def remove(pkg):
ret_pkgs = []
old_pkgs = list_pkgs()
- cmd = 'apt-get -y remove {0}'.format(pkg)
- __salt__['cmd.retcode'](cmd)
+ cmd = 'DEBIAN_FRONTEND=noninteractive apt-get -q -y remove {0}'.format(pkg)
+ __salt__['cmd.run'](cmd)
new_pkgs = list_pkgs()
for pkg in old_pkgs:
if pkg not in new_pkgs:
@@ -138,8 +155,8 @@ def remove(pkg):
def purge(pkg):
'''
- Remove a package via aptitude along with all configuration files and
- unused dependencies.
+ Remove a package via ``apt-get purge`` along with all configuration
+ files and unused dependencies.
Returns a list containing the names of the removed packages
@@ -151,11 +168,11 @@ def purge(pkg):
old_pkgs = list_pkgs()
# Remove inital package
- purge_cmd = 'apt-get -y purge {0}'.format(pkg)
- __salt__['cmd.retcode'](purge_cmd)
-
+ purge_cmd = 'DEBIAN_FRONTEND=noninteractive apt-get -q -y purge {0}'.format(pkg)
+ __salt__['cmd.run'](purge_cmd)
+
new_pkgs = list_pkgs()
-
+
for pkg in old_pkgs:
if pkg not in new_pkgs:
ret_pkgs.append(pkg)
@@ -165,7 +182,7 @@ def purge(pkg):
def upgrade(refresh=True):
'''
- Upgrades all packages via aptitude full-upgrade
+ Upgrades all packages via ``apt-get dist-upgrade``
Returns a list of dicts containing the package names, and the new and old
versions::
@@ -187,8 +204,8 @@ def upgrade(refresh=True):
ret_pkgs = {}
old_pkgs = list_pkgs()
- cmd = 'apt-get -y dist-upgrade'
- __salt__['cmd.retcode'](cmd)
+ cmd = 'DEBIAN_FRONTEND=noninteractive apt-get -q -y -o DPkg::Options::=--force-confold dist-upgrade'
+ __salt__['cmd.run'](cmd)
new_pkgs = list_pkgs()
for pkg in new_pkgs:
@@ -222,7 +239,7 @@ def list_pkgs(regex_string=""):
for line in out.split('\n'):
cols = line.split()
- if len(cols) and cols[0].count('ii'):
+ if len(cols) and 'ii' in cols[0]:
ret[cols[1]] = cols[2]
return ret
diff --git a/salt/modules/archive.py b/salt/modules/archive.py
index c224a865dda1..cbc7fb763807 100644
--- a/salt/modules/archive.py
+++ b/salt/modules/archive.py
@@ -32,7 +32,7 @@ def gzip(sourcefile):
def gunzip(gzipfile):
'''
- Uses the gzip command to create gzip files
+ Uses the gunzip command to unpack gzip files
CLI Example to create ``/tmp/sourcefile.txt``::
diff --git a/salt/modules/cmd.py b/salt/modules/cmd.py
index f2a60aaab7c0..4586386d50f0 100644
--- a/salt/modules/cmd.py
+++ b/salt/modules/cmd.py
@@ -9,6 +9,7 @@
import os
import subprocess
import tempfile
+import salt.utils
# Set up logging
log = logging.getLogger(__name__)
@@ -19,15 +20,39 @@
# Set up the default outputters
__outputter__ = {
- 'run': 'txt'
- }
+ 'run': 'txt',
+}
+def _run(cmd,
+ cwd=DEFAULT_CWD,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ quiet=False):
+ '''
+ Do the DRY thing and only call subprocess.Popen() once
+ '''
+ ret = {}
+ if not quiet:
+ log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
+ proc = subprocess.Popen(cmd,
+ cwd=cwd,
+ shell=True,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ out = proc.communicate()
+ ret['stdout'] = out[0]
+ ret['stderr'] = out[1]
+ ret['retcode'] = proc.returncode
+ ret['pid'] = proc.pid
+
+ return ret
-def _is_exec(path):
+def _run_quiet(cmd, cwd=DEFAULT_CWD):
'''
- Return true if the passed path exists and is execuatable
+ Helper for running commands quietly for minion startup
'''
- return os.path.exists(path) and os.access(path, os.X_OK)
+ return _run(cmd, cwd, stderr=subprocess.STDOUT, quiet=True)['stdout']
def run(cmd, cwd=DEFAULT_CWD):
@@ -36,14 +61,9 @@ def run(cmd, cwd=DEFAULT_CWD):
CLI Example::
- salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
+ salt '*' cmd.run "ls -l | awk '/foo/{print $2}'"
'''
- log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
- out = subprocess.Popen(cmd,
- shell=True,
- cwd=cwd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT).communicate()[0]
+ out = _run(cmd, cwd=cwd, stderr=subprocess.STDOUT)['stdout']
log.debug(out)
return out
@@ -54,13 +74,9 @@ def run_stdout(cmd, cwd=DEFAULT_CWD):
CLI Example::
- salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
+ salt '*' cmd.run_stdout "ls -l | awk '/foo/{print $2}'"
'''
- log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
- stdout = subprocess.Popen(cmd,
- shell=True,
- cwd=cwd,
- stdout=subprocess.PIPE).communicate()[0]
+ stdout = _run(cmd, cwd=cwd)["stdout"]
log.debug(stdout)
return stdout
@@ -71,13 +87,9 @@ def run_stderr(cmd, cwd=DEFAULT_CWD):
CLI Example::
- salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
+ salt '*' cmd.run_stderr "ls -l | awk '/foo/{print $2}'"
'''
- log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
- stderr = subprocess.Popen(cmd,
- shell=True,
- cwd=cwd,
- stderr=subprocess.PIPE).communicate()[0]
+ stderr = _run(cmd, cwd=cwd)["stderr"]
log.debug(stderr)
return stderr
@@ -88,22 +100,12 @@ def run_all(cmd, cwd=DEFAULT_CWD):
CLI Example::
- salt '*' cmd.run_all "ls -l | grep foo | awk '{print $2}'"
+ salt '*' cmd.run_all "ls -l | awk '/foo/{print $2}'"
'''
- log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
- ret = {}
- proc = subprocess.Popen(cmd,
- shell=True,
- cwd=cwd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out = proc.communicate()
- ret['stdout'] = out[0]
- ret['stderr'] = out[1]
- ret['retcode'] = proc.returncode
- ret['pid'] = proc.pid
- if not ret['retcode']:
+ ret = _run(cmd, cwd=cwd)
+ if ret['retcode'] != 0:
log.error('Command {0} failed'.format(cmd))
+ log.error('retcode: {0}'.format(ret['retcode']))
log.error('stdout: {0}'.format(ret['stdout']))
log.error('stderr: {0}'.format(ret['stderr']))
else:
@@ -130,16 +132,19 @@ def has_exec(cmd):
CLI Example::
- salt '*' cat
+ salt '*' cmd.has_exec cat
'''
- if cmd.startswith('/'):
- return _is_exec(cmd)
- for path in os.environ['PATH'].split(os.pathsep):
- fn_ = os.path.join(path, cmd)
- if _is_exec(fn_):
- return True
- return False
+ return bool(salt.utils.which(cmd))
+def which(cmd):
+ '''
+ Returns the path of an executable available on the minion, None otherwise
+
+ CLI Example::
+
+ salt '*' cmd.which cat
+ '''
+ return salt.utils.which(cmd)
def exec_code(lang, code, cwd=DEFAULT_CWD):
'''
@@ -151,10 +156,10 @@ def exec_code(lang, code, cwd=DEFAULT_CWD):
salt '*' cmd.exec_code ruby 'puts "cheese"'
'''
- fd, cfn = tempfile.mkstemp()
- open(cfn, 'w+').write(code)
- return subprocess.Popen(lang + ' ' + cfn,
- shell=True,
- cwd=cwd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT).communicate()[0]
+ fd, codefile = tempfile.mkstemp()
+ open(codefile, 'w+').write(code)
+
+ cmd = '{0} {1}'.format(lang, codefile)
+ ret = run(cmd, cwd=cwd)
+ os.remove(codefile)
+ return ret
diff --git a/salt/modules/cp.py b/salt/modules/cp.py
index c6d13832856a..62213ce95e45 100644
--- a/salt/modules/cp.py
+++ b/salt/modules/cp.py
@@ -43,6 +43,17 @@ def get_file(path, dest, env='base'):
return client.get_file(path, dest, False, env)
+def get_url(path, dest, env='base'):
+ '''
+ Used to get a single file from a URL.
+ For example,
+ cp.get_url salt://my/file /tmp/mine
+ cp.get_url http://www.slashdot.org /tmp/index.html
+ '''
+ client = salt.minion.FileClient(__opts__)
+ return client.get_url(path, dest, False, env)
+
+
def cache_file(path, env='base'):
'''
Used to cache a single file in the local salt-master file cache.
@@ -77,14 +88,53 @@ def cache_master(env='base'):
return client.cache_master(env)
+def cache_local_file(path):
+ '''
+ Cache a local file on the minion in the localfiles cache
+ '''
+ if not os.path.exists(path):
+ return ''
+
+ path_cached = is_cached(path)
+
+ # If the file has already been cached, return the path
+ if path_cached:
+ path_hash = hash_file(path)
+ path_cached_hash = hash_file(path_cached)
+
+ if path_hash['hsum'] == path_cached_hash['hsum']:
+ return path_cached
+
+ # The file hasn't been cached or has changed; cache it
+ client = salt.minion.FileClient(__opts__)
+ return client.cache_local_file(path)
+
+
def list_master(env='base'):
'''
- Retrieve all of the files on the master and cache them locally
+ List all of the files stored on the master
'''
client = salt.minion.FileClient(__opts__)
return client.file_list(env)
+def list_minion(env='base'):
+ '''
+ List all of the files cached on the minion
+ '''
+ client = salt.minion.FileClient(__opts__)
+ return client.file_local_list(env)
+
+
+def is_cached(path, env='base'):
+ '''
+ Return a boolean if the given path on the master has been cached on the
+ minion
+ '''
+ client = salt.minion.FileClient(__opts__)
+ return client.is_cached(path, env)
+
+
def hash_file(path, env='base'):
'''
Return the hash of a file, to get the hash of a file on the
diff --git a/salt/modules/cron.py b/salt/modules/cron.py
index 348cd37180d5..e64c7b1270be 100644
--- a/salt/modules/cron.py
+++ b/salt/modules/cron.py
@@ -3,6 +3,7 @@
'''
import tempfile
+import os
TAG = '# Lines below here are managed by Salt, do not edit\n'
@@ -41,12 +42,14 @@ def _render_tab(lst):
def _write_cron(user, lines):
'''
- Takes a list of lines to be commited to a user's crontab and writes it
+ Takes a list of lines to be committed to a user's crontab and writes it
'''
tmpd, path = tempfile.mkstemp()
open(path, 'w+').writelines(lines)
cmd = 'crontab -u {0} {1}'.format(user, path)
- return __salt__['cmd.run_all'](cmd)
+ ret = __salt__['cmd.run_all'](cmd)
+ os.remove(path)
+ return ret
def raw_cron(user):
@@ -100,6 +103,9 @@ def list_tab(user):
ret['pre'].append(line)
return ret
+# For consistency's sake
+ls = list_tab
+
def set_special(user, special, cmd):
'''
@@ -180,7 +186,7 @@ def rm_job(user, minute, hour, dom, month, dow, cmd):
for ind in range(len(lst['crons'])):
if cmd == lst['crons'][ind]['cmd']:
rm_ = ind
- if rm_ != None:
+ if rm_ is not None:
lst['crons'].pop(rm_)
ret = 'removed'
comdat = _write_cron(user, _render_tab(lst))
@@ -188,3 +194,5 @@ def rm_job(user, minute, hour, dom, month, dow, cmd):
# Failed to commit, return the error
return comdat['stderr']
return ret
+
+rm = rm_job
diff --git a/salt/modules/data.py b/salt/modules/data.py
new file mode 100644
index 000000000000..a70c6ffb215b
--- /dev/null
+++ b/salt/modules/data.py
@@ -0,0 +1,87 @@
+'''
+Manage a local persistent data structure that can hold any arbitrary data
+specific to the minion
+'''
+
+import os
+import salt.payload
+import ast
+
+def load():
+ '''
+ Return all of the data in the minion datastore
+
+ CLI Example::
+
+ salt '*' data.load
+ '''
+ serial = salt.payload.Serial(__opts__)
+
+ try:
+ fn_ = open(os.path.join(__opts__['cachedir'], 'datastore'), "r")
+ return serial.load(fn_)
+ except (IOError, OSError):
+ return {}
+
+def dump(new_data):
+ '''
+ Replace the entire datastore with a passed data structure
+
+ CLI Example::
+
+        salt '*' data.dump "{'eggs': 'spam'}"
+ '''
+ if not isinstance(new_data, dict):
+ if isinstance(ast.literal_eval(new_data), dict):
+ new_data = ast.literal_eval(new_data)
+ else:
+ return False
+
+ try:
+ fn_ = open(os.path.join(__opts__['cachedir'], 'datastore'), "w")
+
+ serial = salt.payload.Serial(__opts__)
+ serial.dump(new_data, fn_)
+
+ return True
+
+ except (IOError, OSError):
+ return False
+
+def update(key, value):
+ '''
+ Update a key with a value in the minion datastore
+
+ CLI Example::
+
+ salt '*' data.update
+ '''
+ store = load()
+ store[key] = value
+ dump(store)
+ return True
+
+def getval(key):
+ '''
+ Get a value from the minion datastore
+
+ CLI Example::
+
+ salt '*' data.getval
+ '''
+ store = load()
+ return store[key]
+
+def getvals(keys):
+ '''
+ Get values from the minion datastore
+
+ CLI Example::
+
+ salt '*' data.getvals
+ '''
+ store = load()
+ ret = []
+ for key in keys:
+ ret.append(store[key])
+ return ret
diff --git a/salt/modules/disk.py b/salt/modules/disk.py
index f1c6ecce903f..3f3ea717b776 100644
--- a/salt/modules/disk.py
+++ b/salt/modules/disk.py
@@ -2,9 +2,17 @@
Module for gathering disk information
'''
-# FIXME: we want module internal calls rather than using subprocess directly
-import subprocess
-
+def __virtual__():
+ '''
+ Only work on posix-like systems
+ '''
+ # Disable on these platorms, specific service modules exist:
+ disable = [
+ 'Windows',
+ ]
+ if __grains__['os'] in disable:
+ return False
+ return 'disk'
def usage():
'''
@@ -14,22 +22,54 @@ def usage():
salt '*' disk.usage
'''
- cmd = 'df -P'
+ if __grains__['kernel'] == 'Linux':
+ cmd = 'df -P'
+ else:
+ cmd = 'df'
ret = {}
- out = subprocess.Popen(cmd,
- shell=True,
- stdout=subprocess.PIPE).communicate()[0].split('\n')
+ out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
+ if not line:
continue
if line.startswith('Filesystem'):
continue
comps = line.split()
- ret[comps[0]] = {
- '1K-blocks': comps[1],
- 'available': comps[3],
- 'capacity': comps[4],
- 'mountpoint': comps[5],
- 'used': comps[2]
+ ret[comps[5]] = {
+ 'filesystem': comps[0],
+ '1K-blocks': comps[1],
+ 'used': comps[2],
+ 'available': comps[3],
+ 'capacity': comps[4],
}
return ret
+
+def inodeusage():
+ '''
+ Return inode usage information for volumes mounted on this minion
+
+ CLI Example::
+
+ salt '*' disk.inodeusage
+ '''
+ cmd = 'df -i'
+ ret = {}
+ out = __salt__['cmd.run'](cmd).split('\n')
+ for line in out:
+ if line.startswith('Filesystem'):
+ continue
+ comps = line.split()
+ # Don't choke on empty lines
+ if not comps:
+ continue
+
+ try:
+ ret[comps[5]] = {
+ 'inodes': comps[1],
+ 'used': comps[2],
+ 'free': comps[3],
+ 'use': comps[4],
+ 'filesystem': comps[0],
+ }
+ except IndexError:
+            pass  # skip lines that do not match the expected 'df -i' column layout
+ return ret
diff --git a/salt/modules/ebuild.py b/salt/modules/ebuild.py
index 5f2bc3436688..bca005942560 100644
--- a/salt/modules/ebuild.py
+++ b/salt/modules/ebuild.py
@@ -5,7 +5,7 @@
try:
import portage
except ImportError:
- None
+ pass
def __virtual__():
'''
@@ -78,7 +78,7 @@ def refresh_db():
else:
return True
-def install(pkg, refresh=False):
+def install(pkg, refresh=False, **kwargs):
'''
Install the passed package
@@ -101,7 +101,7 @@ def install(pkg, refresh=False):
new_pkgs = list_pkgs()
for pkg in new_pkgs:
- if old_pkgs.has_key(pkg):
+ if pkg in old_pkgs:
if old_pkgs[pkg] == new_pkgs[pkg]:
continue
else:
@@ -136,7 +136,7 @@ def update(pkg, refresh=False):
new_pkgs = list_pkgs()
for pkg in new_pkgs:
- if old_pkgs.has_key(pkg):
+ if pkg in old_pkgs:
if old_pkgs[pkg] == new_pkgs[pkg]:
continue
else:
@@ -170,7 +170,7 @@ def upgrade(refresh=False):
new_pkgs = list_pkgs()
for pkg in new_pkgs:
- if old_pkgs.has_key(pkg):
+ if pkg in old_pkgs:
if old_pkgs[pkg] == new_pkgs[pkg]:
continue
else:
@@ -200,7 +200,7 @@ def remove(pkg):
new_pkgs = list_pkgs()
for pkg in old_pkgs:
- if not new_pkgs.has_key(pkg):
+ if not pkg in new_pkgs:
ret_pkgs.append(pkg)
return ret_pkgs
diff --git a/salt/modules/file.py b/salt/modules/file.py
index a97e0189df03..2f1b4d620b50 100644
--- a/salt/modules/file.py
+++ b/salt/modules/file.py
@@ -6,12 +6,19 @@
# TODO: We should add the capability to do u+r type operations here
# some time in the future
-import grp
-import hashlib
import os
+import grp
import pwd
+import time
+import hashlib
import salt.utils.find
+from salt.exceptions import SaltInvocationError
+
+__outputter__ = {
+ 'touch': 'txt',
+ 'append': 'txt',
+}
def gid_to_group(gid):
@@ -44,7 +51,7 @@ def group_to_gid(group):
def get_gid(path):
'''
- Return the user that owns a given file
+ Return the id of the group that owns a given file
CLI Example::
@@ -57,7 +64,7 @@ def get_gid(path):
def get_group(path):
'''
- Return the user that owns a given file
+ Return the group that owns a given file
CLI Example::
@@ -85,7 +92,7 @@ def uid_to_user(uid):
def user_to_uid(user):
'''
- Convert user name to a gid
+ Convert user name to a uid
CLI Example::
@@ -99,7 +106,7 @@ def user_to_uid(user):
def get_uid(path):
'''
- Return the user that owns a given file
+ Return the id of the user that owns a given file
CLI Example::
@@ -142,7 +149,7 @@ def get_mode(path):
def set_mode(path, mode):
'''
- Set the more of a file
+ Set the mode of a file
CLI Example::
@@ -208,7 +215,7 @@ def get_sum(path, form='md5'):
CLI Example::
- salt '*' /etc/passwd sha512
+ salt '*' file.get_sum /etc/passwd sha512
'''
if not os.path.isfile(path):
return 'File not found'
@@ -311,9 +318,9 @@ def find(path, *opts):
CLI Examples::
- salt '*' / type=f name=\*.bak size=+10m
- salt '*' /var mtime=+30d size=+10m print=path,size,mtime
- salt '*' /var/log name=\*.[0-9] mtime=+30d size=+10m delete
+ salt '*' file.find / type=f name=\*.bak size=+10m
+ salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
+ salt '*' file.find /var/log name=\*.[0-9] mtime=+30d size=+10m delete
'''
opts_dict = {}
for opt in opts:
@@ -327,3 +334,215 @@ def find(path, *opts):
ret = [p for p in f.find(path)]
ret.sort()
return ret
+
+def _sed_esc(s):
+ '''
+ Escape single quotes and forward slashes
+ '''
+ return '{0}'.format(s).replace("'", "'\"'\"'").replace("/", "\/")
+
+def sed(path, before, after, limit='', backup='.bak', options='-r -e',
+ flags='g'):
+ '''
+ Make a simple edit to a file
+
+ Equivalent to::
+
+ sed "// s///"
+
+ path
+ The full path to the file to be edited
+ before
+ A pattern to find in order to replace with ``after``
+ after
+ Text that will replace ``before``
+ limit : ``''``
+ An initial pattern to search for before searching for ``before``
+ backup : ``.bak``
+ The file will be backed up before edit with this file extension;
+ **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
+ overwrite this backup
+ options : ``-r -e``
+ Options to pass to sed
+ flags : ``g``
+        Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
+ matching
+
+ Forward slashes and single quotes will be escaped automatically in the
+ ``before`` and ``after`` patterns.
+
+ Usage::
+
+ salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
+
+ .. versionadded:: 0.9.5
+ '''
+ # Largely inspired by Fabric's contrib.files.sed()
+
+ before = _sed_esc(before)
+ after = _sed_esc(after)
+
+ cmd = r"sed {backup}{options} '{limit}s/{before}/{after}/{flags}' {path}".format(
+ backup = '-i{0} '.format(backup) if backup else '',
+ options = options,
+ limit = '/{0}/ '.format(limit) if limit else '',
+ before = before,
+ after = after,
+ flags = flags,
+ path = path)
+
+ return __salt__['cmd.run'](cmd)
+
+def uncomment(path, regex, char='#', backup='.bak'):
+ '''
+ Uncomment specified commented lines in a file
+
+ path
+ The full path to the file to be edited
+ regex
+ A regular expression used to find the lines that are to be uncommented.
+ This regex should not include the comment character. A leading ``^``
+ character will be stripped for convenience (for easily switching
+ between comment() and uncomment()).
+ char : ``#``
+ The character to remove in order to uncomment a line; if a single
+ whitespace character follows the comment it will also be removed
+ backup : ``.bak``
+ The file will be backed up before edit with this file extension;
+ **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
+ overwrite this backup
+
+ Usage::
+
+ salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
+
+ .. versionadded:: 0.9.5
+ '''
+ # Largely inspired by Fabric's contrib.files.uncomment()
+
+ return __salt__['file.sed'](path,
+ before=r'^([[:space:]]*){0}[[:space:]]?'.format(char),
+ after=r'\1',
+ limit=regex.lstrip('^'),
+ backup=backup)
+
+def comment(path, regex, char='#', backup='.bak'):
+ '''
+ Comment out specified lines in a file
+
+ path
+ The full path to the file to be edited
+ regex
+ A regular expression used to find the lines that are to be commented;
+ this pattern will be wrapped in parenthesis and will move any
+ preceding/trailing ``^`` or ``$`` characters outside the parenthesis
+ (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
+ char : ``#``
+ The character to be inserted at the beginning of a line in order to
+ comment it out
+ backup : ``.bak``
+ The file will be backed up before edit with this file extension
+
+ .. warning::
+
+ This backup will be overwritten each time ``sed`` / ``comment`` /
+ ``uncomment`` is called. Meaning the backup will only be useful
+ after the first invocation.
+
+ Usage::
+
+ salt '*' file.comment /etc/modules pcspkr
+
+ .. versionadded:: 0.9.5
+ '''
+ # Largely inspired by Fabric's contrib.files.comment()
+
+ regex = "{0}({1}){2}".format(
+ '^' if regex.startswith('^') else '',
+ regex.lstrip('^').rstrip('$'),
+ '$' if regex.endswith('$') else '')
+
+ return __salt__['file.sed'](
+ path,
+ before=regex,
+ after=r'{0}\1'.format(char),
+ backup=backup)
+
+def contains(path, text, limit=''):
+ '''
+ Return True if the file at ``path`` contains ``text``
+
+ Usage::
+
+ salt '*' file.contains /etc/crontab 'mymaintenance.sh'
+
+ .. versionadded:: 0.9.5
+ '''
+ # Largely inspired by Fabric's contrib.files.contains()
+
+ if not os.path.exists(path):
+ return False
+
+ result = __salt__['file.sed'](path, text, '&', limit=limit, backup='',
+ options='-n -r -e', flags='gp')
+
+ return bool(result)
+
+def append(path, *args):
+ '''
+ Append text to the end of a file
+
+ Usage::
+
+ salt '*' file.append /etc/motd \\
+ "With all thine offerings thou shalt offer salt."\\
+ "Salt is what makes things taste bad when it isn't in them."
+
+ .. versionadded:: 0.9.5
+ '''
+ # Largely inspired by Fabric's contrib.files.append()
+
+ with open(path, "a") as f:
+ for line in args:
+ f.write('{0}\n'.format(line))
+
+ return "Wrote {0} lines to '{1}'".format(len(args), path)
+
+def touch(name, atime=None, mtime=None):
+ '''
+ Just like 'nix's "touch" command, create a file if it
+ doesn't exist or simply update the atime and mtime if
+ it already does.
+
+ atime:
+ Access time in Unix epoch time
+ mtime:
+ Last modification in Unix epoch time
+
+ Usage::
+ salt '*' file.touch /var/log/emptyfile
+
+ .. versionadded:: 0.9.5
+ '''
+ if atime and atime.isdigit():
+ atime = int(atime)
+ if mtime and mtime.isdigit():
+ mtime = int(mtime)
+ try:
+ with open(name, "a"):
+ if not atime and not mtime:
+ times = None
+ elif not mtime and atime:
+ times = (atime, time.time())
+ elif not atime and mtime:
+ times = (time.time(), mtime)
+ else:
+ times = (atime, mtime)
+ os.utime(name, times)
+ except TypeError as exc:
+ msg = "atime and mtime must be integers"
+ raise SaltInvocationError(msg)
+ except (IOError, OSError) as exc:
+ return False
+
+ return os.path.exists(name)
diff --git a/salt/modules/freebsdkmod.py b/salt/modules/freebsdkmod.py
new file mode 100644
index 000000000000..6fb2e9f4b0b3
--- /dev/null
+++ b/salt/modules/freebsdkmod.py
@@ -0,0 +1,127 @@
+'''
+Module to manage FreeBSD kernel modules
+'''
+
+import os
+
+
+def __virtual__():
+ '''
+ Only runs on FreeBSD systems
+ '''
+ return 'kmod' if __grains__['kernel'] == 'FreeBSD' else False
+
+
+def _new_mods(pre_mods, post_mods):
+ '''
+    Return a list of the new modules; pass a kldstat dict from before running
+    kldload and one from after kldload has run
+ '''
+ pre = set()
+ post = set()
+ for mod in pre_mods:
+ pre.add(mod['module'])
+ for mod in post_mods:
+ post.add(mod['module'])
+ return list(post.difference(pre))
+
+
+def _rm_mods(pre_mods, post_mods):
+ '''
+    Return a list of the removed modules; pass a kldstat dict from before
+    running kldunload and one from after kldunload has run
+ '''
+ pre = set()
+ post = set()
+ for mod in pre_mods:
+ pre.add(mod['module'])
+ for mod in post_mods:
+ post.add(mod['module'])
+ return list(pre.difference(post))
+
+
+def available():
+ '''
+ Return a list of all available kernel modules
+
+ CLI Example::
+
+ salt '*' kmod.available
+ '''
+ ret = []
+ for path in __salt__['cmd.run']('ls /boot/kernel | grep .ko$').split('\n'):
+ bpath = os.path.basename(path)
+ comps = bpath.split('.')
+ if 'ko' in comps:
+ # This is a kernel module, return it without the .ko extension
+ ret.append('.'.join(comps[:comps.index('ko')]))
+ return ret
+
+
+def check_available(mod):
+ '''
+ Check to see if the specified kernel module is available
+
+ CLI Example::
+
+ salt '*' kmod.check_available kvm
+ '''
+ if mod in available():
+ # the module is available, return True
+ return True
+ return False
+
+
+def lsmod():
+ '''
+ Return a dict containing information about currently loaded modules
+
+ CLI Example::
+
+ salt '*' kmod.lsmod
+ '''
+ ret = []
+ for line in __salt__['cmd.run']('kldstat').split('\n'):
+ comps = line.split()
+ if not len(comps) > 2:
+ continue
+ if comps[0] == 'Module':
+ continue
+ mdat = {}
+ mdat['module'] = comps[0]
+ mdat['size'] = comps[1]
+ mdat['depcount'] = comps[2]
+ if len(comps) > 3:
+ mdat['deps'] = comps[3].split(',')
+ else:
+ mdat['deps'] = []
+ ret.append(mdat)
+ return ret
+
+
+def load(mod):
+ '''
+ Load the specified kernel module
+
+ CLI Example::
+
+ salt '*' kmod.load kvm
+ '''
+ pre_mods = kldstat()
+ data = __salt__['cmd.run_all']('kldload {0}'.format(mod))
+ post_mods = kldstat()
+ return _new_mods(pre_mods, post_mods)
+
+
+def remove(mod):
+ '''
+ Remove the specified kernel module
+
+ CLI Example::
+
+ salt '*' kmod.remove kvm
+ '''
+ pre_mods = kldstat()
+ data = __salt__['cmd.run_all']('kldunload {0}'.format(mod))
+ post_mods = kldstat()
+ return _rm_mods(pre_mods, post_mods)
diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py
index 716562204b99..b3bfe1a3d9e5 100644
--- a/salt/modules/freebsdpkg.py
+++ b/salt/modules/freebsdpkg.py
@@ -71,7 +71,7 @@ def list_pkgs():
def refresh_db():
'''
- Update the ports tree with portsnap. If the ports tre does not exist it
+ Update the ports tree with portsnap. If the ports tree does not exist it
will be downloaded and set up.
CLI Example::
@@ -85,8 +85,7 @@ def refresh_db():
__salt__['cmd.run']('portsnap update')
-# FIXME: Unused argument 'refresh'
-def install(name, refresh=False):
+def install(name, **kwargs):
'''
Install the passed package
diff --git a/salt/modules/gentoo_service.py b/salt/modules/gentoo_service.py
new file mode 100644
index 000000000000..8b4b7fc4494e
--- /dev/null
+++ b/salt/modules/gentoo_service.py
@@ -0,0 +1,163 @@
+'''
+Service support for Gentoo systems, wrapping the OpenRC ``rc-update`` and
+``/etc/init.d`` commands
+'''
+
+import os
+
+
+def __virtual__():
+ '''
+ Only work on systems which default to systemd
+ '''
+ if __grains__['os'] == 'Gentoo':
+ return 'service'
+ return False
+
+
+def get_enabled():
+ '''
+    Return a list of services that are enabled on boot
+
+ CLI Example::
+
+ salt '*' service.get_enabled
+ '''
+ ret = set()
+ lines = __salt__['cmd.run']('rc-update show').strip().split('\n')
+ for line in lines:
+ if not '|' in line:
+ continue
+ if 'shutdown' in line:
+ continue
+ ret.add(line.split('|')[0].strip())
+ return sorted(ret)
+
+
+def get_disabled():
+ '''
+ Return a set of services that are installed but disabled
+
+ CLI Example::
+
+        salt '*' service.get_disabled
+ '''
+ ret = set()
+ lines = __salt__['cmd.run']('rc-update -v show').strip().split('\n')
+ for line in lines:
+ if not '|' in line:
+ continue
+ elif 'shutdown' in line:
+ continue
+ comps = line.split()
+ if len(comps) < 3:
+ ret.add(comps[0])
+ return sorted(ret)
+
+def get_all():
+ '''
+ Return all available boot services
+
+ CLI Example::
+
+        salt '*' service.get_all
+ '''
+ return sorted(get_enabled() + get_disabled())
+
+
+def start(name):
+ '''
+ Start the specified service
+
+ CLI Example::
+
+ salt '*' service.start
+ '''
+ cmd = '/etc/init.d/{0} start'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def stop(name):
+ '''
+ Stop the specified service
+
+ CLI Example::
+
+ salt '*' service.stop
+ '''
+ cmd = '/etc/init.d/{0} stop'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def restart(name):
+ '''
+ Restart the named service
+
+ CLI Example::
+
+ salt '*' service.restart
+ '''
+ cmd = '/etc/init.d/{0} restart'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def status(name, sig=None):
+ '''
+    Return the status for a service: the PID if the service is running, or an
+    empty string if it is not. Pass a signature to use to find the service via
+    ps
+
+ CLI Example::
+
+ salt '*' service.status [service signature]
+ '''
+ sig = name if not sig else sig
+ cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
+ __grains__, sig)
+ return __salt__['cmd.run'](cmd).strip()
+
+def enable(name):
+ '''
+ Enable the named service to start at boot
+
+ CLI Example::
+
+ salt '*' service.enable
+ '''
+ cmd = 'rc-update add {0} default'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+def disable(name):
+ '''
+ Disable the named service to start at boot
+
+ CLI Example::
+
+ salt '*' service.disable
+ '''
+ cmd = 'rc-update delete {0} default'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+def enabled(name):
+ '''
+    Return True if the named service is enabled, False otherwise
+
+ CLI Example::
+
+ salt '*' service.enabled
+ '''
+ if name in get_enabled():
+ return True
+ return False
+
+def disabled(name):
+ '''
+    Return True if the named service is disabled, False otherwise
+
+ CLI Example::
+
+        salt '*' service.disabled
+ '''
+ if name in get_disabled():
+ return True
+ return False
diff --git a/salt/modules/grains.py b/salt/modules/grains.py
index b8b8fa477940..9c4a26952dbc 100644
--- a/salt/modules/grains.py
+++ b/salt/modules/grains.py
@@ -5,6 +5,12 @@
# Seed the grains dict so cython will build
__grains__ = {}
+# Change the default outputter to make it more readable
+__outputter__ = {
+ 'item' : 'txt',
+ 'items': 'yaml',
+}
+
def items():
'''
@@ -17,7 +23,7 @@ def items():
return __grains__
-def item(key):
+def item(key=None):
'''
Return a singe component of the grains data
@@ -25,6 +31,14 @@ def item(key):
salt '*' grains.item os
'''
- if key in __grains__:
- return __grains__[key]
- return ''
+ return __grains__.get(key, '')
+
+def ls():
+ '''
+ Return a list of all available grains
+
+ CLI Example::
+
+ salt '*' grains.ls
+ '''
+ return sorted(__grains__)
diff --git a/salt/modules/groupadd.py b/salt/modules/groupadd.py
index f1e6167e3e6f..530172716f87 100644
--- a/salt/modules/groupadd.py
+++ b/salt/modules/groupadd.py
@@ -74,11 +74,11 @@ def getent():
def chgid(name, gid):
'''
- Change the default shell of the user
+ Change the gid for a named group
CLI Example::
- salt '*' user.chshell foo /bin/zsh
+ salt '*' group.chgid foo 4376
'''
pre_gid = __salt__['file.group_to_gid'](name)
if gid == pre_gid:
diff --git a/salt/modules/hosts.py b/salt/modules/hosts.py
index bf2fd804c642..4e8d4f814a6c 100644
--- a/salt/modules/hosts.py
+++ b/salt/modules/hosts.py
@@ -4,6 +4,11 @@
import os
+def __get_hosts_filename():
+ if __grains__['kernel'].startswith('Windows'):
+ return 'C:\Windows\System32\drivers\etc\hosts'
+ else:
+ return '/etc/hosts'
def list_hosts():
'''
@@ -15,7 +20,7 @@ def list_hosts():
salt '*' hosts.list_hosts
'''
- hfn = '/etc/hosts'
+ hfn = __get_hosts_filename()
ret = {}
if not os.path.isfile(hfn):
return ret
@@ -26,7 +31,11 @@ def list_hosts():
if line.startswith('#'):
continue
comps = line.split()
- ret[comps[0]] = comps[1:]
+ if comps[0] in ret:
+ # maybe log a warning ?
+ ret[comps[0]].extend(comps[1:])
+ else:
+ ret[comps[0]] = comps[1:]
return ret
@@ -42,7 +51,7 @@ def get_ip(host):
return ''
# Look for the op
for addr in hosts:
- if hosts[addr].count(host):
+ if host in hosts[addr]:
return addr
# ip not found
return ''
@@ -71,20 +80,20 @@ def has_pair(ip, alias):
hosts = list_hosts()
if ip not in hosts:
return False
- if hosts[ip].count(alias):
+ if alias in hosts[ip]:
return True
return False
def set_host(ip, alias):
'''
- Set the host entry in th hosts file for the given ip, this will overwrite
+ Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
CLI Example::
salt '*' hosts.set_host
'''
- hfn = '/etc/hosts'
+ hfn = __get_hosts_filename()
ovr = False
if not os.path.isfile(hfn):
return False
@@ -97,9 +106,15 @@ def set_host(ip, alias):
continue
comps = tmpline.split()
if comps[0] == ip:
- lines[ind] = ip + '\t\t' + alias + '\n'
- ovr = True
+ if not ovr:
+ lines[ind] = ip + '\t\t' + alias + '\n'
+ ovr = True
+ else: # remove other entries
+ lines[ind] = ''
if not ovr:
+ # make sure there is a newline
+ if lines and not lines[-1].endswith(('\n', '\r')):
+ lines[-1] = '%s\n' % lines[-1]
line = ip + '\t\t' + alias + '\n'
lines.append(line)
open(hfn, 'w+').writelines(lines)
@@ -115,7 +130,7 @@ def rm_host(ip, alias):
'''
if not has_pair(ip, alias):
return True
- hfn = '/etc/hosts'
+ hfn = __get_hosts_filename()
lines = open(hfn).readlines()
for ind in range(len(lines)):
tmpline = lines[ind].strip()
@@ -146,7 +161,7 @@ def add_host(ip, alias):
CLI Example::
salt '*' hosts.add_host
'''
- hfn = '/etc/hosts'
+ hfn = __get_hosts_filename()
ovr = False
if not os.path.isfile(hfn):
return False
@@ -165,7 +180,12 @@ def add_host(ip, alias):
newline += '\t' + alias
lines.append(newline)
ovr = True
+ # leave any other matching entries alone
+ break
if not ovr:
+ # make sure there is a newline
+ if lines and not lines[-1].endswith(('\n', '\r')):
+ lines[-1] = '%s\n' % lines[-1]
line = ip + '\t\t' + alias + '\n'
lines.append(line)
open(hfn, 'w+').writelines(lines)
diff --git a/salt/modules/kmod.py b/salt/modules/kmod.py
index 5d52cf5f84db..a6dfd77d46eb 100644
--- a/salt/modules/kmod.py
+++ b/salt/modules/kmod.py
@@ -52,24 +52,21 @@ def available():
for path in __salt__['cmd.run']('modprobe -l').split('\n'):
bpath = os.path.basename(path)
comps = bpath.split('.')
- if comps.count('ko'):
+ if 'ko' in comps:
# This is a kernel module, return it without the .ko extension
ret.append('.'.join(comps[:comps.index('ko')]))
- return ret
+ return sorted(list(ret))
def check_available(mod):
'''
- Check to see if the speciified kernel module is available
+ Check to see if the specified kernel module is available
CLI Example::
salt '*' kmod.check_available kvm
'''
- if available().count(mod):
- # the module is available, return True
- return True
- return False
+ return mod in available()
def lsmod():
@@ -87,10 +84,11 @@ def lsmod():
continue
if comps[0] == 'Module':
continue
- mdat = {}
- mdat['module'] = comps[0]
- mdat['size'] = comps[1]
- mdat['depcount'] = comps[2]
+ mdat = {
+ 'size': comps[1],
+ 'module': comps[0],
+ 'depcount': comps[2],
+ }
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
diff --git a/salt/modules/linux_sysctl.py b/salt/modules/linux_sysctl.py
index 4317c32b1e64..64918851c613 100644
--- a/salt/modules/linux_sysctl.py
+++ b/salt/modules/linux_sysctl.py
@@ -3,6 +3,11 @@
'''
import os
+from salt.exceptions import CommandExecutionError
+
+__outputter__ = {
+ 'assign': 'txt',
+}
def __virtual__():
@@ -24,9 +29,9 @@ def show():
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
+ if not line:
continue
- if not line.count(' = '):
+ if ' = ' not in line:
continue
comps = line.split(' = ')
ret[comps[0]] = comps[1]
@@ -53,6 +58,10 @@ def assign(name, value):
CLI Example:
salt '*' sysctl.assign net.ipv4.ip_forward 1
'''
+ sysctl_file = '/proc/sys/{0}'.format(name.replace('.', '/'))
+ if not os.path.exists(sysctl_file):
+ raise CommandExecutionError('sysctl {0} does not exist'.format(name))
+
cmd = 'sysctl -w {0}={1}'.format(name, value)
ret = {}
out = __salt__['cmd.run'](cmd).strip()
@@ -63,7 +72,7 @@ def assign(name, value):
def persist(name, value, config='/etc/sysctl.conf'):
'''
- Assign and persist a simple sysctl paramater for this minion
+ Assign and persist a simple sysctl parameter for this minion
CLI Example::
@@ -80,7 +89,7 @@ def persist(name, value, config='/etc/sysctl.conf'):
if line.startswith('#'):
nlines.append(line)
continue
- if not line.count('='):
+ if '=' not in line:
nlines.append(line)
continue
comps = line.split('=')
diff --git a/salt/modules/moosefs.py b/salt/modules/moosefs.py
index d02a945c802e..a3f098d6ee4d 100644
--- a/salt/modules/moosefs.py
+++ b/salt/modules/moosefs.py
@@ -20,7 +20,7 @@ def dirinfo(path, opts=None):
output = out['stdout'].split('\n')
for line in output:
- if not line.count(' '):
+ if not line:
continue
comps = line.split(':')
ret[comps[0].strip()] = comps[1].strip()
@@ -42,7 +42,7 @@ def fileinfo(path):
output = out['stdout'].split('\n')
for line in output:
- if not line.count(' '):
+ if not line:
continue
if '/' in line:
comps = line.split('/')
@@ -85,7 +85,7 @@ def mounts():
output = out['stdout'].split('\n')
for line in output:
- if not line.count(' '):
+ if not line:
continue
if 'fuse.mfs' in line:
comps = line.split(' ')
@@ -130,7 +130,7 @@ def getgoal(path, opts=None):
}
else:
for line in output:
- if not line.count(' '):
+ if not line:
continue
if path in line:
continue
diff --git a/salt/modules/mount.py b/salt/modules/mount.py
index 32e430320f04..ee183fc3d43b 100644
--- a/salt/modules/mount.py
+++ b/salt/modules/mount.py
@@ -113,7 +113,7 @@ def set_fstab(
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
- if type(opts) == type(list()):
+ if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
@@ -189,7 +189,7 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults'):
salt '*' mount.mount /mnt/foo /dev/sdz1 True
'''
- if type(opts) == type(str()):
+ if isinstance(opts, basestring):
opts = opts.split(',')
if not os.path.exists(name) and mkmnt:
os.makedirs(name)
@@ -212,7 +212,7 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults'):
salt '*' mount.remount /mnt/foo /dev/sdz1 True
'''
- if type(opts) == type(str()):
+ if isinstance(opts, basestring):
opts = opts.split(',')
mnts = active()
if name in mnts:
diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py
old mode 100644
new mode 100755
index 75ce0c850306..73d9b58c8bee
--- a/salt/modules/mysql.py
+++ b/salt/modules/mysql.py
@@ -12,21 +12,52 @@
mysql.db: 'mysql'
'''
+import logging
import MySQLdb
+import MySQLdb.cursors
+log = logging.getLogger(__name__)
__opts__ = {}
+def __check_table(name, table):
+ db = connect()
+ cur = db.cursor(MySQLdb.cursors.DictCursor)
+ query = "CHECK TABLE `%s`.`%s`" % (name,table,)
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute(query)
+ results = cur.fetchall()
+ log.debug(results)
+ return results
+
+def __repair_table(name, table):
+ db = connect()
+ cur = db.cursor(MySQLdb.cursors.DictCursor)
+ query = "REPAIR TABLE `%s`.`%s`" % (name,table,)
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute(query)
+ results = cur.fetchall()
+ log.debug(results)
+ return results
+
+def __optimize_table(name, table):
+ db = connect()
+ cur = db.cursor(MySQLdb.cursors.DictCursor)
+ query = "OPTIMIZE TABLE `%s`.`%s`" % (name,table,)
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute(query)
+ results = cur.fetchall()
+ log.debug(results)
+ return results
-def connect():
+def connect(**kwargs):
'''
wrap authentication credentials here
'''
-
- hostname = __opts__['mysql.host']
- username = __opts__['mysql.user']
- password = __opts__['mysql.pass']
- dbport = __opts__['mysql.port']
- dbname = __opts__['mysql.db']
+ hostname = kwargs.get('host', __opts__['mysql.host'])
+ username = kwargs.get('user', __opts__['mysql.user'])
+ password = kwargs.get('pass', __opts__['mysql.pass'])
+ dbport = kwargs.get('port', __opts__['mysql.port'])
+ dbname = kwargs.get('db', __opts__['mysql.db'])
db = MySQLdb.connect(
hostname,
@@ -73,3 +104,384 @@ def version():
cur.execute('SELECT VERSION()')
row = cur.fetchone()
return row
+
+def slave_lag():
+ '''
+ Return the number of seconds that a slave SQL server is lagging behind the
+ master, if the host is not a slave it will return -1. If the server is
+    configured to be a slave but slave IO is not running then
+ -2 will be returned.
+
+ CLI Example::
+
+ salt '*' mysql.slave_lag
+ '''
+ db = connect()
+ cur = db.cursor(MySQLdb.cursors.DictCursor)
+ cur.execute("show slave status")
+ results = cur.fetchone()
+ if cur.rowcount == 0:
+ # Server is not a slave if master is not defined. Return empty tuple
+ # in this case. Could probably check to see if Slave_IO_Running and
+ # Slave_SQL_Running are both set to 'Yes' as well to be really really
+ # sure that it is a slave.
+ return -1
+ else:
+ if results['Slave_IO_Running'] == 'Yes':
+ return results['Seconds_Behind_Master']
+ else:
+ # Replication is broken if you get here.
+ return -2
+
+
+def free_slave():
+ '''
+ Frees a slave from its master. This is a WIP, do not use.
+ '''
+ slave_db = connect()
+ slave_cur = slave_db.cursor(MySQLdb.cursors.DictCursor)
+ slave_cur.execute("show slave status")
+ slave_status = slave_cur.fetchone()
+ master = {'host': slave_status['Master_Host']}
+
+ try:
+ # Try to connect to the master and flush logs before promoting to
+ # master. This may fail if the master is no longer available.
+ # I am also assuming that the admin password is the same on both
+ # servers here, and only overriding the host option in the connect
+ # function.
+ master_db = connect(**master)
+ master_cur = master_db.cursor()
+ master_cur.execute("flush logs")
+ master_db.close()
+ except MySQLdb.OperationalError:
+ pass
+
+ slave_cur.execute("stop slave")
+ slave_cur.execute("reset master")
+ slave_cur.execute("change master to MASTER_HOST=''")
+ slave_cur.execute("show slave status")
+ results = slave_cur.fetchone()
+
+ if results is None:
+ return 'promoted'
+ else:
+ return 'failed'
+
+'''
+Database related actions
+'''
+def db_list():
+ '''
+ Return a list of databases of a MySQL server using the output
+ from the ``SHOW DATABASES`` query.
+
+ CLI Example::
+
+ salt '*' mysqldb.db_list
+ '''
+ ret = []
+ db = connect()
+ cur = db.cursor()
+ cur.execute('SHOW DATABASES')
+ results = cur.fetchall()
+ for dbs in results:
+ ret.append(dbs[0])
+
+ log.debug(ret)
+ return ret
+
+def db_tables(name):
+ '''
+ Shows the tables in the given MySQL database (if exists)
+
+ CLI Example::
+
+ salt '*' mysqldb.db_tables 'database'
+ '''
+ if not db_exists(name):
+ log.info("Database '{0}' does not exist".format(name,))
+ return False
+
+ ret = []
+ db = connect()
+ cur = db.cursor()
+ query = "SHOW TABLES IN %s" % name
+ log.debug("Doing query: {0}".format(query,))
+
+ cur.execute(query)
+ results = cur.fetchall()
+ for table in results:
+ ret.append(table[0])
+ log.debug(ret)
+ return ret
+
+def db_exists(name):
+ '''
+ Checks if a database exists on the MySQL server.
+
+ CLI Example::
+
+ salt '*' mysqldb.db_exists 'dbname'
+ '''
+ db = connect()
+ cur = db.cursor()
+ query = "SHOW DATABASES LIKE '%s'" % name
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute( query )
+ result_set = cur.fetchall()
+ if cur.rowcount == 1:
+ return True
+ return False
+
+
+def db_create(name):
+ '''
+ Adds a databases to the MySQL server.
+
+ CLI Example::
+
+ salt '*' mysqldb.db_create 'dbname'
+ '''
+ # check if db exists
+ if db_exists(name):
+ log.info("DB '{0}' already exists".format(name,))
+ return False
+
+    # db doesn't exist yet, proceed to create it
+ db = connect()
+ cur = db.cursor()
+ query = "CREATE DATABASE %s;" % name
+ log.debug("Query: {0}".format(query,))
+ if cur.execute( query ):
+ log.info("DB '{0}' created".format(name,))
+ return True
+ return False
+
+def db_remove(name):
+ '''
+ Removes a databases from the MySQL server.
+
+ CLI Example::
+
+ salt '*' mysqldb.db_remove 'dbname'
+ '''
+ # check if db exists
+ if not db_exists(name):
+ log.info("DB '{0}' does not exist".format(name,))
+ return False
+
+ if name in ('mysql','information_scheme'):
+ log.info("DB '{0}' may not be removed".format(name,))
+ return False
+
+    # db exists and is removable, proceed to drop it
+ db = connect()
+ cur = db.cursor()
+ query = "DROP DATABASE %s;" % name
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute( query )
+
+ if not db_exists(name):
+ log.info("Database '{0}' has been removed".format(name,))
+ return True
+
+ log.info("Database '{0}' has not been removed".format(name,))
+ return False
+
+'''
+User related actions
+'''
+def user_list():
+ '''
+ Return a list of users on a MySQL server
+
+ CLI Example::
+
+ salt '*' mysqldb.user_list
+ '''
+ db = connect()
+ cur = db.cursor(MySQLdb.cursors.DictCursor)
+ cur.execute('SELECT User,Host FROM mysql.user')
+ results = cur.fetchall()
+ log.debug(results)
+ return results
+
+def user_exists(user,
+ host='localhost'):
+ '''
+ Checks if a user exists on the MySQL server.
+
+ CLI Example::
+
+ salt '*' mysqldb.user_exists 'username' 'hostname'
+ '''
+ db = connect()
+ cur = db.cursor()
+ query = "SELECT User,Host FROM mysql.user WHERE User = '%s' AND Host = '%s'" % (user, host,)
+ log.debug("Doing query: {0}".format(query,))
+ cur.execute( query )
+ if cur.rowcount == 1:
+ return True
+ return False
+
+def user_info(user,
+ host='localhost'):
+ '''
+ Get full info on a MySQL user
+
+ CLI Example::
+
+ salt '*' mysqldb.user_info root localhost
+ '''
+ db = connect()
+ cur = db.cursor (MySQLdb.cursors.DictCursor)
+ query = "SELECT * FROM mysql.user WHERE User = '%s' AND Host = '%s'" % (user, host,)
+ log.debug("Query: {0}".format(query,))
+ cur.execute(query)
+ result = cur.fetchone()
+ log.debug( result )
+ return result
+
+def user_create(user,
+ host='localhost',
+ password=None):
+ '''
+ Creates a MySQL user.
+
+ CLI Example::
+
+ salt '*' mysqldb.user_create 'username' 'hostname' 'password'
+ '''
+ if user_exists(user,host):
+ log.info("User '{0}'@'{1}' already exists".format(user,host,))
+ return False
+
+ db = connect()
+ cur = db.cursor ()
+ query = "CREATE USER '%s'@'%s'" % (user, host,)
+ if password is not None:
+ query = query + " IDENTIFIED BY '%s'" % password
+
+ log.debug("Query: {0}".format(query,))
+ cur.execute( query )
+
+ if user_exists(user,host):
+ log.info("User '{0}'@'{1}' has been created".format(user,host,))
+ return True
+
+ log.info("User '{0}'@'{1}' is not created".format(user,host,))
+ return False
+
+def user_chpass(user,
+ host='localhost',
+ password=None):
+ '''
+ Change password for MySQL user
+
+ CLI Example::
+
+ salt '*' mysqldb.user_chpass frank localhost newpassword
+ '''
+ if password is None:
+ log.error('No password provided')
+ return False
+
+ db = connect()
+ cur = db.cursor ()
+ query = "UPDATE mysql.user SET password=PASSWORD(\"%s\") WHERE User='%s' AND Host = '%s';" % (password,user,host,)
+ log.debug("Query: {0}".format(query,))
+ if cur.execute( query ):
+ log.info("Password for user '{0}'@'{1}' has been changed".format(user,host,))
+ return True
+
+ log.info("Password for user '{0}'@'{1}' is not changed".format(user,host,))
+ return False
+
+def user_remove(user,
+ host='localhost'):
+ '''
+ Delete MySQL user
+
+ CLI Example::
+
+ salt '*' mysqldb.user_remove frank localhost
+ '''
+ db = connect()
+ cur = db.cursor ()
+ query = "DROP USER '%s'@'%s'" % (user, host,)
+ log.debug("Query: {0}".format(query,))
+ cur.execute(query)
+ result = cur.fetchone()
+ if not user_exists(user,host):
+ log.info("User '{0}'@'{1}' has been removed".format(user,host,))
+ return True
+
+ log.info("User '{0}'@'{1}' has NOT been removed".format(user,host,))
+ return False
+
+'''
+Maintenance
+'''
+def db_check(name,
+ table=None):
+ '''
+    Checks the full database or just a given table
+
+ CLI Example::
+
+ salt '*' mysqldb.db_check dbname
+ '''
+ ret = []
+ if table is None:
+ # we need to check all tables
+ tables = db_tables(name)
+ for table in tables:
+ log.info("Checking table '%s' in db '%s..'".format(name,table,))
+ ret.append( __check_table(name,table) )
+ else:
+ log.info("Checking table '%s' in db '%s'..".format(name,table,))
+ ret = __check_table(name,table)
+ return ret
+
+def db_repair(name,
+ table=None):
+ '''
+ Repairs the full database or just a given table
+
+ CLI Example::
+
+ salt '*' mysqldb.db_repair dbname
+ '''
+ ret = []
+ if table is None:
+ # we need to repair all tables
+ tables = db_tables(name)
+ for table in tables:
+ log.info("Repairing table '%s' in db '%s..'".format(name,table,))
+ ret.append( __repair_table(name,table) )
+ else:
+ log.info("Repairing table '%s' in db '%s'..".format(name,table,))
+ ret = __repair_table(name,table)
+ return ret
+
+def db_optimize(name,
+ table=None):
+ '''
+ Optimizes the full database or just a given table
+
+ CLI Example::
+
+ salt '*' mysqldb.db_optimize dbname
+ '''
+ ret = []
+ if table is None:
+ # we need to optimize all tables
+ tables = db_tables(name)
+ for table in tables:
+ log.info("Optimizing table '%s' in db '%s..'".format(name,table,))
+ ret.append( __optimize_table(name,table) )
+ else:
+ log.info("Optimizing table '%s' in db '%s'..".format(name,table,))
+ ret = __optimize_table(name,table)
+ return ret
\ No newline at end of file
diff --git a/salt/modules/network.py b/salt/modules/network.py
index c4eac866d1fc..55bd07f03c67 100644
--- a/salt/modules/network.py
+++ b/salt/modules/network.py
@@ -4,7 +4,13 @@
from string import ascii_letters, digits
import socket
-import subprocess
+import salt.utils
+
+__outputter__ = {
+ 'dig': 'txt',
+ 'ping': 'txt',
+ 'netstat': 'txt',
+}
def _sanitize_host(host):
@@ -25,13 +31,10 @@ def ping(host):
salt '*' network.ping archlinux.org -c 4
'''
cmd = 'ping -c 4 %s' % _sanitize_host(host)
-
- out = subprocess.Popen(cmd,
- shell=True,
- stdout=subprocess.PIPE).communicate()[0]
- return out
+ return __salt__['cmd.run'](cmd)
+# FIXME: Does not work with: netstat 1.42 (2001-04-15) from net-tools 1.6.0 (Ubuntu 10.10)
def netstat():
'''
Return information on open ports and states
@@ -40,14 +43,10 @@ def netstat():
salt '*' network.netstat
'''
- cmd = 'netstat -tulpnea'
ret = []
- out = subprocess.Popen(cmd,
- shell=True,
- stdout=subprocess.PIPE).communicate()[0].split('\n')
+ cmd = 'netstat -tulpnea'
+    out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
- continue
comps = line.split()
if line.startswith('tcp'):
ret.append({
@@ -73,6 +72,8 @@ def netstat():
return ret
+# FIXME: This is broken on: Modern traceroute for Linux, version 2.0.14, May 10 2010 (Ubuntu 10.10)
+# FIXME: traceroute is deprecated, make this fall back to tracepath
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
@@ -81,13 +82,12 @@ def traceroute(host):
salt '*' network.traceroute archlinux.org
'''
- cmd = 'traceroute %s' % _sanitize_host(host)
ret = []
- out = subprocess.Popen(cmd,
- shell=True,
- stdout=subprocess.PIPE).communicate()[0].split('\n')
+ cmd = 'traceroute %s' % _sanitize_host(host)
+    out = __salt__['cmd.run'](cmd).split('\n')
+
for line in out:
- if not line.count(' '):
+ if not ' ' in line:
continue
if line.startswith('traceroute'):
continue
@@ -115,11 +115,7 @@ def dig(host):
salt '*' network.dig archlinux.org
'''
cmd = 'dig %s' % _sanitize_host(host)
-
- out = subprocess.Popen(cmd,
- shell=True,
- stdout=subprocess.PIPE).communicate()[0]
- return out
+ return __salt__['cmd.run'](cmd)
def isportopen(host, port):
@@ -138,4 +134,3 @@ def isportopen(host, port):
out = sock.connect_ex((_sanitize_host(host), int(port)))
return out
-
diff --git a/salt/modules/pacman.py b/salt/modules/pacman.py
index e636fa5fc9f3..163f776dd9aa 100644
--- a/salt/modules/pacman.py
+++ b/salt/modules/pacman.py
@@ -62,7 +62,7 @@ def list_pkgs():
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
ret[comps[0]] = comps[1]
@@ -88,14 +88,14 @@ def refresh_db():
if not line:
continue
key = line.strip().split()[0]
- if line.count('is up to date'):
+ if 'is up to date' in line:
ret[key] = False
- elif line.count('downloading'):
+ elif 'downloading' in line:
ret[key] = True
return ret
-def install(name, refresh=False):
+def install(name, refresh=False, **kwargs):
'''
Install the passed package, add refresh=True to install with an -Sy
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
new file mode 100644
index 000000000000..d5aafa1c03c7
--- /dev/null
+++ b/salt/modules/pip.py
@@ -0,0 +1,71 @@
+'''
+Install Python packages with pip to either the system or a virtualenv
+'''
+__opts__ = {
+ 'pip_bin': 'pip',
+}
+
+import os
+
+def _get_pip_bin(pip, env):
+ '''
+ Return the pip command to call, either from a virtualenv, an argument
+ passed in, or from the global modules options
+ '''
+ if env:
+ return os.path.join(env, 'bin', 'pip')
+ else:
+ return pip if pip else __opts__['pip_bin']
+
+def install(env='', requirements='', pkgs='', pip_bin=''):
+ '''
+ Install packages with pip
+
+ Install packages individually or from a pip requirements file. Install
+ packages globally or to a virtualenv.
+
+ env : None
+ The path to a virtualenv that pip should install to. This option takes
+        precedence over the ``pip_bin`` argument.
+ requirements : None
+ The path to a pip requirements file to install from
+ pkgs : None
+ A list of space-separated packages to install
+ pip_bin : 'pip'
+ The name (and optionally path) of the pip command to call. This option
+ will be ignored if the ``env`` argument is given since it will default
+ to the pip that is installed in the virtualenv. This option can also be
+ set in the minion config file as ``pip.pip_bin``.
+
+ CLI Example::
+
+ salt '*' pip.install /var/www/myvirtualenv.com \\
+ /path/to/requirements.txt
+ '''
+ cmd = '{pip_bin} install {env} {reqs} {pkgs}'.format(
+ pip_bin=_get_pip_bin(pip_bin, env),
+ env='-E {0}'.format(env if env else ''),
+ reqs='-r {0}'.format(requirements if requirements else ''),
+ pkgs=pkgs)
+
+ return __salt__['cmd.run'](cmd)
+
+def freeze(env='', pip_bin=''):
+ '''
+ Return a list of installed packages either globally or in the specified
+ virtualenv
+
+ env : None
+ The path to a virtualenv that pip should install to. This option takes
+        precedence over the ``pip_bin`` argument.
+ pip_bin : 'pip'
+ The name (and optionally path) of the pip command to call. This option
+ will be ignored if the ``env`` argument is given since it will default
+ to the pip that is installed in the virtualenv. This option can also be
+ set in the minion config file as ``pip.pip_bin``.
+ '''
+ # Using freeze with -E seems to be twitchy on older pips so call the pip
+ # inside the venv if using a venv
+ cmd = '{0} freeze'.format(_get_pip_bin(pip_bin, env))
+
+ return __salt__['cmd.run'](cmd).split('\n')
diff --git a/salt/modules/ps.py b/salt/modules/ps.py
index 25dd9f8bae19..bd56864e9b3f 100644
--- a/salt/modules/ps.py
+++ b/salt/modules/ps.py
@@ -49,6 +49,10 @@ def top(num_processes=5, interval=3):
def get_pid_list():
'''
Return a list of process ids (PIDs) for all running processes.
+
+ CLI Example::
+
+ salt '*' ps.get_pid_list
'''
return psutil.get_pid_list()
@@ -61,7 +65,11 @@ def cpu_percent(interval=0.1, per_cpu=False):
the number of seconds to sample CPU usage over
per_cpu
if True return an array of CPU percent busy for each CPU, otherwise
- aggregate all precents into one number
+ aggregate all percents into one number
+
+ CLI Example::
+
+ salt '*' ps.cpu_percent
'''
if per_cpu:
result = []
@@ -79,7 +87,11 @@ def cpu_times(per_cpu=False):
per_cpu
if True return an array of percents for each CPU, otherwise aggregate
- all precents into one number
+ all percents into one number
+
+ CLI Example::
+
+ salt '*' ps.cpu_times
'''
if per_cpu:
result = []
@@ -90,31 +102,47 @@ def cpu_times(per_cpu=False):
return result
-def phymem_usage():
+def physical_memory_usage():
'''
Return a dict that describes free and available physical memory.
+
+ CLI Examples::
+
+ salt '*' ps.physical_memory_usage
'''
return dict(psutil.phymem_usage()._asdict())
-def virtmem_usage():
+def virtual_memory_usage():
'''
Return a dict that describes free and available memory, both physical
and virtual.
+
+ CLI Example::
+
+        salt '*' ps.virtual_memory_usage
'''
return dict(psutil.virtmem_usage()._asdict())
-def cached_phymem():
+def cached_physical_memory():
'''
+    Return the amount of cached memory.
+
+ CLI Example::
+
+ salt '*' ps.cached_physical_memory
'''
return psutil.cached_phymem()
-def phymem_buffers():
+def physical_memory_buffers():
'''
Return the amount of physical memory buffers.
+
+ CLI Example::
+
+ salt '*' ps.physical_memory_buffers
'''
return psutil.phymem_buffers()
@@ -127,6 +155,10 @@ def disk_partitions(all=False):
all
if set to False, only return local, physical partitions (hard disk,
USB, CD/DVD partitions). If True, return all filesystems.
+
+ CLI Example::
+
+ salt '*' ps.disk_partitions
'''
result = []
for partition in psutil.disk_partitions(all):
@@ -138,6 +170,10 @@ def disk_usage(path):
'''
Given a path, return a dict listing the total available space as well as
the free space, and used space.
+
+ CLI Example::
+
+ salt '*' ps.disk_usage /home
'''
return dict(psutil.disk_usage(path)._asdict())
@@ -146,6 +182,10 @@ def disk_partition_usage(all=False):
'''
Return a list of disk partitions plus the mount point, filesystem and usage
statistics.
+
+ CLI Example::
+
+ salt '*' ps.disk_partition_usage
'''
result = disk_partitions(all)
for partition in result:
@@ -153,22 +193,34 @@ def disk_partition_usage(all=False):
return result
-def TOTAL_PHYMEM():
+def total_physical_memory():
'''
Return the total number of bytes of physical memory.
+
+ CLI Example::
+
+ salt '*' ps.total_physical_memory
'''
return psutil.TOTAL_PHYMEM
-def NUM_CPUS():
+def num_cpus():
'''
Return the number of CPUs.
+
+ CLI Example::
+
+ salt '*' ps.num_cpus
'''
return psutil.NUM_CPUS
-def BOOT_TIME():
+def boot_time():
'''
Return the boot time in number of seconds since the epoch began.
+
+ CLI Example::
+
+ salt '*' ps.boot_time
'''
return psutil.BOOT_TIME
diff --git a/salt/modules/publish.py b/salt/modules/publish.py
index d48369c0419a..1175bbc8567f 100644
--- a/salt/modules/publish.py
+++ b/salt/modules/publish.py
@@ -3,9 +3,10 @@
'''
import zmq
+import ast
import salt.crypt
-
+import salt.payload
def _get_socket():
'''
@@ -35,13 +36,21 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
salt system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
'''
+ serial = salt.payload.Serial(__opts__)
if fun == 'publish.publish':
# Need to log something here
return {}
+
if not arg:
arg = []
- else:
- arg = arg.split(',')
+
+ try:
+ if isinstance(ast.literal_eval(arg), dict):
+ arg = [arg,]
+    except Exception:
+ if isinstance(arg, str):
+ arg = arg.split(',')
+
auth = salt.crypt.SAuth(__opts__)
tok = auth.gen_token('salt')
payload = {'enc': 'aes'}
@@ -55,5 +64,5 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
'id': __opts__['id']}
payload['load'] = auth.crypticle.dumps(load)
socket = _get_socket()
- socket.send_pyobj(payload)
- return auth.crypticle.loads(socket.recv_pyobj())
+ socket.send(serial.dumps(payload))
+ return auth.crypticle.loads(serial.loads(socket.recv()))
diff --git a/salt/modules/pw_group.py b/salt/modules/pw_group.py
index 0bf550157c52..1a05af1a6504 100644
--- a/salt/modules/pw_group.py
+++ b/salt/modules/pw_group.py
@@ -73,11 +73,11 @@ def getent():
def chgid(name, gid):
'''
- Change the default shell of the user
+ Change the gid for a named group
CLI Example::
- salt '*' user.chshell foo /bin/zsh
+ salt '*' group.chgid foo 4376
'''
pre_gid = __salt__['file.group_to_gid'](name)
if gid == pre_gid:
diff --git a/salt/modules/pw_user.py b/salt/modules/pw_user.py
index 05964a37e4cc..6a1d0a0e84ac 100644
--- a/salt/modules/pw_user.py
+++ b/salt/modules/pw_user.py
@@ -27,7 +27,7 @@ def add(name,
salt '*' user.add name
'''
- if type(groups) == type(str()):
+ if isinstance(groups, basestring):
groups = groups.split(',')
cmd = 'pw useradd -s {0} '.format(shell)
if uid:
@@ -169,7 +169,7 @@ def chgroups(name, groups, append=False):
salt '*' user.chgroups foo wheel,root True
'''
- if type(groups) == type(str()):
+ if isinstance(groups, basestring):
groups = groups.split(',')
ugrps = set(list_groups(name))
if ugrps == set(groups):
@@ -206,7 +206,7 @@ def info(name):
def list_groups(name):
'''
- Return a list of groups the named user belings to
+ Return a list of groups the named user belongs to
CLI Example::
diff --git a/salt/modules/rh_service.py b/salt/modules/rh_service.py
new file mode 100644
index 000000000000..74c717ad431d
--- /dev/null
+++ b/salt/modules/rh_service.py
@@ -0,0 +1,183 @@
+'''
+Top level package command wrapper, used to translate the os detected by the
+grains to the correct service manager
+'''
+
+import os
+
+
+def __virtual__():
+ '''
+    Only work on RHEL-style systems (RedHat, CentOS, pre-systemd Fedora)
+ '''
+ # Disable on these platforms, specific service modules exist:
+ enable = [
+ 'RedHat',
+ 'CentOS',
+ 'Fedora',
+ ]
+ if __grains__['os'] in enable:
+ if __grains__['os'] == 'Fedora':
+ if __grains__['osrelease'] > 15:
+ return False
+ return 'service'
+ return False
+
+def _runlevel():
+ '''
+ Return the current runlevel
+ '''
+ out = __salt__['cmd.run']('runlevel').strip()
+ return out.split()[1]
+
+
+def get_enabled():
+ '''
+ Return the enabled services
+
+ CLI Example::
+
+ salt '*' service.get_enabled
+ '''
+ rlevel = _runlevel()
+ ret = set()
+ cmd = 'chkconfig --list'
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ comps = line.split()
+ if not comps:
+ continue
+ if '{0}:on'.format(rlevel) in line:
+ ret.add(comps[0])
+ return sorted(ret)
+
+def get_disabled():
+ '''
+ Return the disabled services
+
+ CLI Example::
+
+        salt '*' service.get_disabled
+ '''
+ rlevel = _runlevel()
+ ret = set()
+ cmd = 'chkconfig --list'
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ comps = line.split()
+ if not comps:
+ continue
+ if not '{0}:on'.format(rlevel) in line:
+ ret.add(comps[0])
+ return sorted(ret)
+
+def get_all():
+ '''
+ Return all installed services
+
+ CLI Example::
+
+        salt '*' service.get_all
+ '''
+ return sorted(get_enabled() + get_disabled())
+
+def start(name):
+ '''
+ Start the specified service
+
+ CLI Example::
+
+ salt '*' service.start
+ '''
+ cmd = 'service {0} start'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def stop(name):
+ '''
+ Stop the specified service
+
+ CLI Example::
+
+ salt '*' service.stop
+ '''
+ cmd = 'service {0} stop'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def restart(name):
+ '''
+ Restart the named service
+
+ CLI Example::
+
+ salt '*' service.restart
+ '''
+ cmd = 'service {0} restart'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def status(name, sig=None):
+ '''
+ Return the status for a service, returns the PID or an empty string if the
+ service is running or not, pass a signature to use to find the service via
+ ps
+
+ CLI Example::
+
+ salt '*' service.status [service signature]
+ '''
+ sig = name if not sig else sig
+ cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
+ __grains__, sig)
+ return __salt__['cmd.run'](cmd).strip()
+
+
+def enable(name):
+ '''
+ Enable the named service to start at boot
+
+ CLI Example::
+
+ salt '*' service.enable
+ '''
+ cmd = 'chkconfig {0} on'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def disable(name):
+ '''
+ Disable the named service to start at boot
+
+ CLI Example::
+
+ salt '*' service.disable
+ '''
+ cmd = 'chkconfig {0} off'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def enabled(name):
+ '''
+ Check to see if the named service is enabled to start on boot
+
+ CLI Example::
+
+ salt '*' service.enabled
+ '''
+ if name in get_enabled():
+ return True
+ return False
+
+
+def disabled(name):
+ '''
+ Check to see if the named service is disabled to start on boot
+
+ CLI Example::
+
+ salt '*' service.disabled
+ '''
+ if name in get_disabled():
+ return True
+ return False
diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py
new file mode 100644
index 000000000000..5f24f7e91294
--- /dev/null
+++ b/salt/modules/saltutil.py
@@ -0,0 +1,140 @@
+'''
+The Saltutil module is used to manage the state of the salt minion itself. It is
+used to manage minion modules as well as automate updates to the salt minion
+'''
+
+import os
+import hashlib
+import shutil
+import logging
+
+log = logging.getLogger(__name__)
+
+def _sync(form, env):
+ '''
+ Sync the given directory in the given environment
+ '''
+ if isinstance(env, str):
+ env = env.split(',')
+ ret = []
+ remote = set()
+ source = os.path.join('salt://_{0}'.format(form))
+ mod_dir = os.path.join(__opts__['extension_modules'], '{0}'.format(form))
+ if not os.path.isdir(mod_dir):
+ os.makedirs(mod_dir)
+ cache = []
+ for sub_env in env:
+ cache.extend(__salt__['cp.cache_dir'](source, sub_env))
+ for fn_ in cache:
+ remote.add(os.path.basename(fn_))
+ dest = os.path.join(mod_dir,
+ os.path.basename(fn_)
+ )
+ if os.path.isfile(dest):
+            # The file is present, if the sum differs replace it
+ srch = hashlib.md5(open(fn_, 'r').read()).hexdigest()
+ dsth = hashlib.md5(open(dest, 'r').read()).hexdigest()
+ if srch != dsth:
+                # The downloaded file differs, replace!
+ shutil.copyfile(fn_, dest)
+ ret.append('{0}.{1}'.format(form, os.path.basename(fn_)))
+ else:
+ shutil.copyfile(fn_, dest)
+ ret.append('{0}.{1}'.format(form, os.path.basename(fn_)))
+ if ret:
+ open(os.path.join(__opts__['cachedir'], '.module_refresh'), 'w+').write('')
+ if __opts__.get('clean_dynamic_modules', True):
+ current = set(os.listdir(mod_dir))
+ for fn_ in current.difference(remote):
+ full = os.path.join(mod_dir, fn_)
+ if os.path.isfile(full):
+ os.remove(full)
+ return ret
+
+
+def sync_modules(env='base'):
+ '''
+ Sync the modules from the _modules directory on the salt master file
+ server. This function is environment aware, pass the desired environment
+ to grab the contents of the _modules directory, base is the default
+ environment.
+
+ CLI Example::
+
+ salt '*' saltutil.sync_modules
+ '''
+ return _sync('modules', env)
+
+
+def sync_states(env='base'):
+ '''
+ Sync the states from the _states directory on the salt master file
+ server. This function is environment aware, pass the desired environment
+ to grab the contents of the _states directory, base is the default
+ environment.
+
+ CLI Example::
+
+ salt '*' saltutil.sync_states
+ '''
+ return _sync('states', env)
+
+
+def sync_grains(env='base'):
+ '''
+ Sync the grains from the _grains directory on the salt master file
+ server. This function is environment aware, pass the desired environment
+ to grab the contents of the _grains directory, base is the default
+ environment.
+
+ CLI Example::
+
+ salt '*' saltutil.sync_grains
+ '''
+ return _sync('grains', env)
+
+
+def sync_renderers(env='base'):
+ '''
+ Sync the renderers from the _renderers directory on the salt master file
+ server. This function is environment aware, pass the desired environment
+ to grab the contents of the _renderers directory, base is the default
+ environment.
+
+ CLI Example::
+
+ salt '*' saltutil.sync_renderers
+ '''
+ return _sync('renderers', env)
+
+
+def sync_returners(env='base'):
+ '''
+ Sync the returners from the _returners directory on the salt master file
+ server. This function is environment aware, pass the desired environment
+ to grab the contents of the _returners directory, base is the default
+ environment.
+
+ CLI Example::
+
+ salt '*' saltutil.sync_returners
+ '''
+ return _sync('returners', env)
+
+
+def sync_all(env='base'):
+ '''
+ Sync down all of the dynamic modules from the file server for a specific
+ environment
+
+ CLI Example::
+
+ salt '*' saltutil.sync_all
+ '''
+ ret = []
+ ret.append(sync_modules(env))
+ ret.append(sync_states(env))
+ ret.append(sync_grains(env))
+ ret.append(sync_renderers(env))
+ ret.append(sync_returners(env))
+ return ret
diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py
index dfbd14fec08c..e1c3be646e47 100644
--- a/salt/modules/selinux.py
+++ b/salt/modules/selinux.py
@@ -4,18 +4,26 @@
import os
+__selinux_fs_path__ = None
def __virtual__():
'''
Check if the os is Linux, and then if selinux is running in permissive or
enforcing mode.
'''
+ global __selinux_fs_path__
if __grains__['kernel'] == 'Linux':
- if os.path.isdir('/selinux'):
- if os.path.isfile('/selinux/enforce'):
- return 'selinux'
+ # systems running systemd (e.g. Fedora 15 and newer)
+ # have the selinux filesystem in a different location
+ for directory in ['/sys/fs/selinux', '/selinux']:
+ if os.path.isdir(directory):
+ if os.path.isfile(os.path.join(directory, 'enforce')):
+ __selinux_fs_path__ = directory
+ return 'selinux'
return False
+def selinux_fs_path():
+ return __selinux_fs_path__
def getenforce():
'''
@@ -25,7 +33,7 @@ def getenforce():
salt '*' selinux.getenforce
'''
- if open('/selinux/enforce', 'r').read() == '0':
+ if open(os.path.join(__selinux_fs_path__, 'enforce'), 'r').read() == '0':
return 'Permissive'
else:
return 'Enforcing'
@@ -38,7 +46,7 @@ def setenforce(mode):
if isinstance(mode, str):
if mode.lower() == 'enforcing':
mode = '1'
- elif mode.lower() == 'Permissive':
+ elif mode.lower() == 'permissive':
mode = '0'
else:
return 'Invalid mode {0}'.format(mode)
diff --git a/salt/modules/service.py b/salt/modules/service.py
index 9e7016248106..7378b6e08a77 100644
--- a/salt/modules/service.py
+++ b/salt/modules/service.py
@@ -1,6 +1,6 @@
'''
-Top level package command wrapper, used to translate the os detected by the
-grains to the correct service manager
+The default service module, if not otherwise specified salt will fall back
+to this basic module
'''
import os
@@ -12,8 +12,25 @@
'RedHat': '/etc/init.d',
'Ubuntu': '/etc/init.d',
'Gentoo': '/etc/init.d',
+ 'CentOS': '/etc/init.d',
}
+def __virtual__():
+ '''
+    Only work on systems without a specific service module of their own
+ '''
+ # Disable on these platforms, specific service modules exist:
+ disable = [
+ 'RedHat',
+ 'CentOS',
+ 'Fedora',
+ 'Gentoo',
+ 'Windows',
+ ]
+ if __grains__['os'] in disable:
+ return False
+ return 'service'
+
def start(name):
'''
@@ -68,3 +85,4 @@ def status(name, sig=None):
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
__grains__, sig)
return __salt__['cmd.run'](cmd).strip()
+
diff --git a/salt/modules/shadow.py b/salt/modules/shadow.py
index cd683a0aea43..0ff3907f92ac 100644
--- a/salt/modules/shadow.py
+++ b/salt/modules/shadow.py
@@ -12,7 +12,7 @@ def info(name):
CLI Example::
- salt '*' shadow.user root
+ salt '*' shadow.info root
'''
try:
data = spwd.getspnam(name)
@@ -46,7 +46,7 @@ def set_password(name, password):
CLI Example::
- salt '*' root $1$UYCIxa628.9qXjpQCjM4a..
+ salt '*' shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a..
'''
s_file = '/etc/shadow'
ret = {}
diff --git a/salt/modules/solr.py b/salt/modules/solr.py
index 9d8ff5c67fc8..7d9af8ba28b1 100644
--- a/salt/modules/solr.py
+++ b/salt/modules/solr.py
@@ -3,12 +3,12 @@
=======================
Author: Jed Glazner
-Version: 0.2
-Modified: 9/20/2011
+Version: 0.2.1
+Modified: 12/09/2011
This module uses http requests to talk to the apache solr request handlers
to gather information and report errors. Because of this the minion doesn't
-nescessarily need to reside on the actual slave. However if you want to
+necessarily need to reside on the actual slave. However if you want to
use the signal function the minion must reside on the physical solr host.
This module supports multi-core and standard setups. Certain methods are
@@ -66,7 +66,9 @@
#sane defaults
__opts__ = {'solr.cores': [],
- 'solr.baseurl': 'http://localhost:8983/solr',
+ 'solr.host': 'localhost',
+ 'solr.port': '8983',
+ 'solr.baseurl':'/solr',
'solr.type':'master',
'solr.request_timeout': None,
'solr.init_script': '/etc/rc.d/solr',
@@ -83,9 +85,9 @@ def __virtual__():
PRIVATE METHOD
Solr needs to be installed to use this.
- Return: str/bool Indicates weather solr is present or not
+ Return: str/bool::
- TODO:// currently __salt__ is not available to call in this method because
+ TODO:// currently __salt__ is not available to call in this method because
all the salt modules have not been loaded yet. Use a grains module?
'''
return 'solr'
@@ -96,13 +98,40 @@ def __virtual__():
return False
+def _get_none_or_value(value):
+ '''
+ PRIVATE METHOD
+ Checks to see if the value of a primitive or built-in container such as
+ a list, dict, set, tuple etc is empty or none. None type is returned if the
+ value is empty/None/False. Number data types that are 0 will return None.
+
+ value : obj
+ The primitive or built-in container to evaluate.
+
+ Return: None or value
+ '''
+ if value is None:
+ return None
+ elif not value:
+ return value
+ # if it's a string, and it's not empty check for none
+ elif isinstance(value, basestring):
+ if value.lower() == 'none':
+ return None
+ return value
+ # return None
+ else:
+ return None
+
def _check_for_cores():
'''
PRIVATE METHOD
Checks to see if using_cores has been set or not. if it's been set
return it, otherwise figure it out and set it. Then return it
- Return: bool True indicates that cores are used.
+ Return: boolean::
+
+ True if one or more cores defined in __opts__['solr.cores']
'''
if len(__opts__['solr.cores']) > 0:
return True
@@ -114,115 +143,166 @@ def _get_return_dict(success=True, data={}, errors=[], warnings=[]):
PRIVATE METHOD
Creates a new return dict with default values. Defaults may be overwritten.
- Param: bool success Default = True
- Param: dict data Default = {}
- Param: list errors Default = []
- Param: list warnings Default= []
+ success : boolean (True)
+ True indicates a successful result.
+ data : dict ({})
+ Data to be returned to the caller.
+ errors : list ([()])
+ A list of error messages to be returned to the caller
+ warnings : list ([])
+ A list of warnings to be returned to the caller.
+
+ Return: dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
- ret = {'success':success,
- 'data':data,
- 'errors':errors,
+ ret = {'success':success,
+ 'data':data,
+ 'errors':errors,
'warnings':warnings}
return ret
-def _update_return_dict(ret, success, data, errors, warnings=[]):
+def _update_return_dict(ret, success, data, errors=[], warnings=[]):
'''
PRIVATE METHOD
Updates the return dictionary and returns it.
- Param: dict ret: The original returning dictionary to update
- Param: bool success: Indicates if the call was successful.
- Param: dict data: The data to update.
- Param: list errors: Errors list to append to the return
- Return: dict {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ ret : dict
+ The original return dict to update. The ret param should have
+ been created from _get_return_dict()
+ success : boolean (True)
+ True indicates a successful result.
+ data : dict ({})
+ Data to be returned to the caller.
+ errors : list ([()])
+ A list of error messages to be returned to the caller
+ warnings : list ([])
+ A list of warnings to be returned to the caller.
+
+ Return: dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
ret['success'] = success
ret['data'].update(data)
ret['errors'] = ret['errors'] + errors
ret['warnings'] = ret['warnings'] + warnings
- return ret
+ return ret
-def _format_url(handler,core_name=None,extra=[]):
+def _format_url(handler, host=None, core_name=None, extra=[]):
'''
PRIVATE METHOD
Formats the url based on parameters, and if cores are used or not
- Param: str request_handler: The request handler to hit
- Param: str core_name (None): The name of the solr core if using cores.
- Leave this blank if you are not using cores or
- if you want to check all cores.
- Param: list extra ([]): A list of additional name value pairs ['name=value]
- Return: str formatted url
+ handler : str
+ The request handler to hit.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default
+ core_name : str (None)
+ The name of the solr core if using cores. Leave this blank if you
+ are not using cores or if you want to check all cores.
+ extra : list ([])
+ A list of name value pairs in string format. eg ['name=value']
+
+ Return: str::
+
+        A fully formatted url (http://<host>:<port><baseurl>/<handler>?wt=json&<extra>)
'''
+ if _get_none_or_value(host) is None or host == 'None':
+ host = __opts__['solr.host']
+ port = __opts__['solr.port']
baseurl = __opts__['solr.baseurl']
- if core_name is None:
- if extra is None:
- return "{0}/{1}?wt=json".format(baseurl, handler)
+ if _get_none_or_value(core_name) is None:
+ if extra is None or len(extra) == 0:
+ return "http://{0}:{1}{2}/{3}?wt=json".format(
+ host, port, baseurl, handler)
else:
- return "{0}/{1}?wt=json&{2}".format(baseurl, handler,"&".join(extra))
+ return "http://{0}:{1}{2}/{3}?wt=json&{4}".format(
+ host, port, baseurl, handler,"&".join(extra))
else:
- if extra is None:
- return "{0}/{1}/{2}?wt=json".format(baseurl, core_name, handler)
+ if extra is None or len(extra) == 0:
+ return "http://{0}:{1}{2}/{3}/{4}?wt=json".format(
+ host,port,baseurl,core_name,handler)
else:
- return "{0}/{1}/{2}?wt=json&{3}".format(baseurl, core_name,
- handler,"&".join(extra))
+ return "http://{0}:{1}{2}/{3}/{4}?wt=json&{5}".format(
+ host,port,baseurl,core_name,handler,"&".join(extra))
-def _http_request(url):
+def _http_request(url, request_timeout=None):
'''
PRIVATE METHOD
Uses json.load to fetch the json results from the solr api.
- Param: str Url (A formatted url to and to urllib)
- Return: dict {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ url : str
+ a complete url that can be passed to urllib.open
+ request_timeout : int (None)
+ The number of seconds before the timeout should fail. Leave blank/None to
+ use the default. __opts__['solr.request_timeout']
+
+ Return: dict::
- TODO://Add a timeout param.
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
try:
+
request_timeout = __opts__['solr.request_timeout']
if request_timeout is None:
data = json.load(urllib2.urlopen(url))
else:
- data = json.load(urllib2.urlopen(url,timeout=request_timeout))
- return _get_return_dict(True, data,[])
+ data = json.load(urllib2.urlopen(url, timeout=request_timeout))
+ return _get_return_dict(True, data, [])
except Exception as e:
- return _get_return_dict(False, {}, ["{0} : {1}".format(url,e)])
+ return _get_return_dict(False, {}, ["{0} : {1}".format(url, e)])
-def _replication_request(replication_command, core_name=None, params=[]):
+def _replication_request(command, host=None, core_name=None, params=[]):
'''
PRIVATE METHOD
- Performs the requested replication command and returns a dictionary with
- success, errors and data as keys. The data object will contain the json
+ Performs the requested replication command and returns a dictionary with
+ success, errors and data as keys. The data object will contain the json
response.
- Param: str replication_command: The replication command to execute
- Param: str core_name (None): The name of the solr core if using cores.
- Leave this blank if you are not using cores or
- if you want to check all cores.
- Param: list params ([]): Any additional parameters you want send.
- Should be a list of strings in name=value format.
- Return: dict {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ command : str
+ The replication command to execute.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default
+ core_name: str (None)
+ The name of the solr core if using cores. Leave this blank if you are
+ not using cores or if you want to check all cores.
+ params : list ([])
+        Any additional parameters you want to send. Should be a list of
+ strings in name=value format. eg ['name=value']
+
+ Return: dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
- extra = ["command={0}".format(replication_command)] + params
- url = _format_url('replication',core_name=core_name,extra=extra)
+ extra = ["command={0}".format(command)] + params
+ url = _format_url('replication', host=host, core_name=core_name,
+ extra=extra)
return _http_request(url)
-def _get_admin_info(command, core_name=None):
+def _get_admin_info(command, host=None, core_name=None):
'''
PRIVATE METHOD
- Calls the _http_request method and passes the admin command to execute
- and stores the data. This data is fairly static but should be refreshed
- periodically to make sure everything this OK. The data object will contain
- the json response.
-
- Param: str command: The admin command to run
- Param: str core_name (None): The name of the solr core if using cores.
- Leave this blank if you are not using cores or
- if you want to check all cores.
- Return: dict {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
- '''
- url = _format_url("admin/{0}".format(command), core_name=core_name)
+ Calls the _http_request method and passes the admin command to execute
+ and stores the data. This data is fairly static but should be refreshed
+ periodically to make sure everything this OK. The data object will contain
+ the json response.
+
+ command : str
+ The admin command to execute.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default
+ core_name: str (None)
+ The name of the solr core if using cores. Leave this blank if you are
+ not using cores or if you want to check all cores.
+
+ Return: dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
+ '''
+ url = _format_url("admin/{0}".format(command), host, core_name=core_name)
resp = _http_request(url)
return resp
@@ -231,7 +311,8 @@ def _is_master():
PRIVATE METHOD
Simple method to determine if the minion is configured as master or slave
- Return: bool
+ Return: boolean::
+ True if __opts__['solr.type'] = master
'''
if __opts__['solr.type'] == 'master':
return True
@@ -241,43 +322,58 @@ def _merge_options(options):
'''
PRIVATE METHOD
updates the default import options from __opts__['solr.dih.import_options']
- with the dictionary passed in. Also converts booleans to strings
+ with the dictionary passed in. Also converts booleans to strings
to pass to solr.
- Param: dict options {str:bool}: Dictionary that over rides default options
- Return: dict {str:str}
- '''
+ options : dict
+ Dictionary that overrides the default options defined in
+ __opts__['solr.dih.import_options']
+ Return: dict::
+ {option:boolean}
+ '''
defaults = __opts__['solr.dih.import_options']
- if type(options) == dict:
+ if isinstance(options, dict):
defaults.update(options)
- for (k,v) in defaults.items():
- if type(v) is bool:
- defaults[k] = str(bool(v)).lower()
+ for (k, v) in defaults.items():
+ if isinstance(v, bool):
+ defaults[k] = str(v).lower()
return defaults
-
-def _pre_index_check(handler, core_name=None):
+def _pre_index_check(handler, host=None, core_name=None):
'''
- PRIVATE METHOD
+ PRIVATE METHOD - MASTER CALL
Does a pre-check to make sure that all the options are set and that
- we can talk to solr before trying to send a command to solr.
+ we can talk to solr before trying to send a command to solr. This
+ command should only be issued to masters.
+
+ handler : str
+ The import handler to check the state of
+ host : str (None):
+ The solr host to query. __opts__['host'] is default
+ core_name (None):
+ The name of the solr core if using cores. Leave this blank if you are
+ not using cores or if you want to check all cores.
+ REQUIRED if you are using cores.
+ Return: dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
#make sure that it's a master minion
- if not _is_master():
+ if _get_none_or_value(host) is None and not _is_master():
err = ['solr.pre_indexing_check can only be called by "master" minions']
return _get_return_dict(False, err)
'''
- solr can run out of memory quickly if the dih is processing multiple
- handlers at the same time, so if it's a multicore setup require a
+ solr can run out of memory quickly if the dih is processing multiple
+ handlers at the same time, so if it's a multicore setup require a
core_name param.
'''
- if core_name is None and _check_for_cores():
+ if _get_none_or_value(core_name) is None and _check_for_cores():
errors = ['solr.full_import is not safe to multiple handlers at once']
return _get_return_dict(False, errors=errors)
#check to make sure that we're not already indexing
- resp = import_status(handler, core_name)
+ resp = import_status(handler, host, core_name)
if resp['success']:
status = resp['data']['status']
if status == 'busy':
@@ -285,10 +381,10 @@ def _pre_index_check(handler, core_name=None):
return _get_return_dict(True, warnings=warn)
if status != 'idle':
errors = ['Unknown status: "{0}"'.format(status)]
- return _get_return_dict(False, data=resp['data'],errors=errors)
+ return _get_return_dict(False, data=resp['data'], errors=errors)
else:
errors = ['Status check failed. Response details: {0}'.format(resp)]
- return _get_return_dict(False, data=resp['data'],errors=errors)
+ return _get_return_dict(False, data=resp['data'], errors=errors)
return resp
@@ -299,13 +395,17 @@ def _find_value(ret_dict, key, path=None):
and return the value stored.
TODO:// this method doesn't really work very well, and it's not really very
useful in it's current state. The purpose for this method is to
- simplify parsing the json ouput so you can just pass the key
+ simplify parsing the json output so you can just pass the key
you want to find and have it return the value.
+ ret : dict
+ The dictionary to search through. Typically this will be a dict
+ returned from solr.
+ key : str
+ The key (str) to find in the dictionary
- Param: dict return_dict: The return dictionary
- Param: str key: The key to find in the dictionary.
+ Return: list::
- Return: list [{path:path, value:value}]
+ [{path:path, value:value}]
'''
if path is None:
path = key
@@ -316,11 +416,11 @@ def _find_value(ret_dict, key, path=None):
for (k, v) in ret_dict.items():
if k == key:
ret.append({path:v})
- if type(v) is list:
+ if isinstance(v, list):
for x in v:
- if type(x) is dict:
+ if isinstance(x, dict):
ret = ret + _find_value(x, key, path)
- if type(v) is dict:
+ if isinstance(v, dict):
ret = ret + _find_value(v, key, path)
return ret
@@ -336,9 +436,9 @@ def lucene_version(core_name=None):
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return: dict::
+ Return: dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
@@ -346,8 +446,8 @@ def lucene_version(core_name=None):
'''
ret = _get_return_dict()
#do we want to check for all the cores?
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
resp = _get_admin_info('system', core_name=name )
if resp['success']:
@@ -355,8 +455,8 @@ def lucene_version(core_name=None):
data = {name: {'lucene_version':version}}
else:#generally this means that an exception happened.
data = {name:{'lucene_version':None}}
- success=False
- ret = _update_return_dict(ret,success, data, resp['errors'])
+ success = False
+ ret = _update_return_dict(ret, success, data, resp['errors'])
return ret
else:
resp = _get_admin_info('system', core_name=core_name)
@@ -376,9 +476,9 @@ def version(core_name=None):
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
@@ -386,46 +486,48 @@ def version(core_name=None):
'''
ret = _get_return_dict()
#do we want to check for all the cores?
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
resp = _get_admin_info('system', core_name=name )
if resp['success']:
lucene = resp['data']['lucene']
data = {name:{'version':lucene['solr-spec-version']}}
else:
- success=False
+ success = False
data = {name:{'version':None}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
resp = _get_admin_info('system', core_name=core_name)
if resp['success']:
version = resp['data']['lucene']['solr-spec-version']
- return _get_return_dict(True, {'version':version},
+ return _get_return_dict(True, {'version':version},
reps['errors'], resp['warnings'])
else:
return resp
-def optimize(core_name=None):
+def optimize(host=None, core_name=None):
'''
- Optimize the solr index. Optimizing the index is a good way to keep Search
- queries fast, but it is a very expensive operation. The ideal process is to
- run this with a master/slave configuration. Then you can optimize the
- master, and push the optimized index to the slaves. If you are running a
- single solr instance, or if you are going to run this on a slave be aware
- than search performance will be horrible while this command is being run.
- Additionally it can take a LONG time to run and your http request may
- timeout. If that happens adjust your timeout settings.
-
+ Search queries fast, but it is a very expensive operation. The ideal
+ process is to run this with a master/slave configuration. Then you
+ can optimize the master, and push the optimized index to the slaves.
+ If you are running a single solr instance, or if you are going to run
+ this on a slave be aware than search performance will be horrible
+ while this command is being run. Additionally it can take a LONG time
+ to run and your http request may timeout. If that happens adjust your
+ timeout settings.
+
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
@@ -433,69 +535,75 @@ def optimize(core_name=None):
'''
ret = _get_return_dict()
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
- url = _format_url('update',core_name=name,extra=["optimize=true"])
+ url = _format_url('update', host=host, core_name=name,
+ extra=["optimize=true"])
resp = _http_request(url)
if resp['success']:
data = {name : {'data':resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
else:
- success=False
+ success = False
data = {name : {'data':resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
- return ret
+ return ret
else:
- url = _format_url('update',core_name=core_name,extra=["optimize=true"])
+ url = _format_url('update', host=host, core_name=core_name,
+ extra=["optimize=true"])
return _http_request(url)
-def ping(core_name=None):
+def ping(host=None, core_name=None):
'''
Does a health check on solr, makes sure solr can talk to the indexes.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
salt '*' solr.ping music
'''
ret = _get_return_dict()
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
- resp = _get_admin_info('ping', core_name=name)
+ resp = _get_admin_info('ping', host=host, core_name=name)
if resp['success']:
data = {name:{'status':resp['data']['status']}}
else:
- success=False
+ success = False
data = {name:{'status':None}}
- ret = _update_return_dict(ret,success, data, resp['errors'])
+ ret = _update_return_dict(ret, success, data, resp['errors'])
return ret
else:
- resp = _get_admin_info('ping', core_name=core_name)
+ resp = _get_admin_info('ping', host=host, core_name=core_name)
return resp
-def is_replication_enabled(core_name=None):
+def is_replication_enabled(host=None, core_name=None):
'''
- USED ONLY BY SLAVES
+ SLAVE CALL
Check for errors, and determine if a slave is replicating or not.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
@@ -504,11 +612,11 @@ def is_replication_enabled(core_name=None):
ret = _get_return_dict()
success = True
# since only slaves can call this let's check the config:
- if __opts__['solr.type'] != 'slave':
+ if _is_master() and _get_none_or_value(host) is None:
errors = ['Only "slave" minions can run "is_replication_enabled"']
return ret.update({'success':False, 'errors':errors})
#define a convenience method so we don't duplicate code
- def _checks(ret, success, resp,core):
+ def _checks(ret, success, resp, core):
if response['success']:
slave = resp['data']['details']['slave']
# we need to initialize this to false in case there is an error
@@ -516,8 +624,8 @@ def _checks(ret, success, resp,core):
replication_enabled = 'false'
master_url = slave['masterUrl']
#check for errors on the slave
- if slave.has_key('ERROR'):
- success=False
+ if 'ERROR' in slave:
+ success = False
err = "{0}: {1} - {2}".format(name, slave['ERROR'], master_url)
resp['errors'].append(err)
#if there is an error return everything
@@ -525,9 +633,9 @@ def _checks(ret, success, resp,core):
else:
enabled = slave['masterDetails']['master']['replicationEnabled']
'''
- if replication is turned off on the master, or polling is
- disabled we need to return false. These may not not errors,
- but the purpose of this call is to check to see if the slaves
+ if replication is turned off on the master, or polling is
+ disabled we need to return false. These may not be errors,
+ but the purpose of this call is to check to see if the slaves
can replicate.
'''
if enabled == 'false':
@@ -537,137 +645,156 @@ def _checks(ret, success, resp,core):
success = False
resp['warning'].append("Polling is disabled")
#update the return
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return (ret, success)
- if core_name is None and _check_for_cores():
+ if _get_none_or_value(core_name) is None and _check_for_cores():
for name in __opts__['solr.cores']:
- response = _replication_request('details', core_name=name)
- ret, success = _checks(ret, success, response,name)
+ response = _replication_request('details', host=host,
+ core_name=name)
+ ret, success = _checks(ret, success, response, name)
else:
- response = _replication_request('details', core_name=core_name)
- ret, success = _checks(ret, success, response,core_name)
+ response = _replication_request('details', host=host,
+ core_name=core_name)
+ ret, success = _checks(ret, success, response, core_name)
return ret
-def match_index_versions(core_name=None):
+def match_index_versions(host=None, core_name=None):
'''
- SLAVE ONLY
- Verifies that the master and the slave versions are in sync by comparing
- the index version. If you are constantly pushing updates the index the
- master and slave versions will seldom match.
-
+ SLAVE CALL
+ Verifies that the master and the slave versions are in sync by
+ comparing the index version. If you are constantly pushing updates
+ the index the master and slave versions will seldom match. A solution
+ to this is pause indexing every so often to allow the slave to replicate
+ and then call this method before allowing indexing to resume.
+
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
salt '*' solr.match_index_versions music
'''
# since only slaves can call this let's check the config:
- if _is_master():
+ ret = _get_return_dict()
+ success = True
+ if _is_master() and _get_none_or_value(host) is None:
e = ['solr.match_index_versions can only be called by "slave" minions']
return ret.update({'success':False, 'errors':e})
#get the default return dict
- ret = _get_return_dict()
- success = True
def _match(ret, success, resp, core):
if response['success']:
slave = resp['data']['details']['slave']
master_url = resp['data']['details']['slave']['masterUrl']
- if slave.has_key('ERROR'):
+ if 'ERROR' in slave:
error = slave['ERROR']
- success=False
+ success = False
err = "{0}: {1} - {2}".format(name, error, master_url)
resp['errors'].append(err)
- #if there was an error return the entire response so the
+ #if there was an error return the entire response so the
#alterer can get what it wants
data = slave if core is None else {core : {'data': slave}}
else:
- versions = {'master':slave['masterDetails']['indexVersion'],
+ versions = {'master':slave['masterDetails']['master']['replicatableIndexVersion'],
'slave' : resp['data']['details']['indexVersion'],
'next_replication' : slave['nextExecutionAt'],
- 'failed_list' : slave['replicationFailedAtList']
+ 'failed_list': []
}
- #check the index versions
- if index_versions['master'] != index_versions['slave']:
+ if 'replicationFailedAtList' in slave:
+ versions.update({'failed_list' : slave['replicationFailedAtList']})
+ #check the index versions
+ if versions['master'] != versions['slave']:
success = False
err = "Master and Slave index versions do not match."
resp['errors'].append(err)
data = versions if core is None else {core:{'data':versions}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
+ else:
+ success = False
+ err = resp['errors']
+ data = resp['data']
+ ret = _update_return_dict(ret, success, data, errors=err)
return (ret, success)
-
- #check all cores?
- if core_name is None and _check_for_cores():
+
+ #check all cores?
+ if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
- response = _replication_request('details', core_name=name)
- ret, success = _match(ret, success, response, name)
+ response = _replication_request('details', host=host,
+ core_name=name)
+ ret, success = _match(ret, success, response, name)
else:
- response = _replication_request('details', core_name=core_name)
+ response = _replication_request('details', host=host,
+ core_name=core_name)
ret, success = _match(ret , success, response, core_name)
return ret
-def replication_details(core_name=None):
+def replication_details(host=None, core_name=None):
'''
Get the full replication details.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
salt '*' solr.replication_details music
'''
ret = _get_return_dict()
- if core_name is None:
- success=True
+ if _get_none_or_value(core_name) is None:
+ success = True
for name in __opts__['solr.cores']:
- resp = _replication_request('details', core_name=name)
+ resp = _replication_request('details', host=host, core_name=name)
data = {name : {'data':resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
else:
- resp = _replication_request('details', core_name=core_name)
+ resp = _replication_request('details', host=host, core_name=core_name)
if resp['success']:
- ret = _update_return_dict(ret, success, resp['data'],
+ ret = _update_return_dict(ret, success, resp['data'],
resp['errors'], resp['warnings'])
else:
return resp
return ret
-def backup(core_name=None, append_core_to_path=False):
+def backup(host=None, core_name=None, append_core_to_path=False):
'''
Tell solr make a backup. This method can be mis-leading since it uses the
backup api. If an error happens during the backup you are not notified.
The status: 'OK' in the response simply means that solr received the
request successfully.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- append_core_to_path : str (False)
+ append_core_to_path : boolean (False)
If True add the name of the core to the backup path. Assumes that
minion backup path is not None.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
@@ -682,19 +809,20 @@ def backup(core_name=None, append_core_to_path=False):
path += os.path.sep
ret = _get_return_dict()
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
params = []
if path is not None:
path = path + name if append_core_to_path else path
params.append("&location={0}".format(path + name))
params.append("&numberToKeep={0}".format(numBackups))
- resp = _replication_request('backup', core_name=name, params=params)
+ resp = _replication_request('backup', host=host, core_name=name,
+ params=params)
if not resp['success']:
- success=False
+ success = False
data = {name : {'data': resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
@@ -704,87 +832,97 @@ def backup(core_name=None, append_core_to_path=False):
if path is not None:
params = ["location={0}".format(path)]
params.append("&numberToKeep={0}".format(numBackups))
- resp = _replication_request('backup',core_name=core_name,params=params)
+ resp = _replication_request('backup', host=host, core_name=core_name,
+ params=params)
return resp
-def set_is_polling(polling, core_name=None):
+def set_is_polling(polling, host=None, core_name=None):
'''
- SLAVE ONLY
+ SLAVE CALL
Prevent the slaves from polling the master for updates.
- polling : bool
+ polling : boolean
True will enable polling. False will disable it.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
salt '*' solr.set_is_polling False
'''
- ret = _get_return_dict()
+ ret = _get_return_dict()
# since only slaves can call this let's check the config:
- if _is_master():
+ if _is_master() and _get_none_or_value(host) is None:
err = ['solr.set_is_polling can only be called by "slave" minions']
return ret.update({'success':False, 'errors':err})
cmd = "enablepoll" if polling else "disapblepoll"
- if core_name is None and _check_for_cores():
- success=True
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
for name in __opts__['solr.cores']:
- resp = set_is_polling(cmd, core_name=name)
+ resp = set_is_polling(cmd, host=host, core_name=name)
if not resp['success']:
success = False
data = {name : {'data' : resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
- resp = _replication_request(cmd, core_name=name)
+ resp = _replication_request(cmd, host=host, core_name=core_name)
return resp
-def set_replication_enabled(status, core_name):
+def set_replication_enabled(status, host=None, core_name=None):
'''
MASTER ONLY
Sets the master to ignore poll requests from the slaves. Useful when you
don't want the slaves replicating during indexing or when clearing the
index.
- status : bool
+ status : boolean
Sets the replication status to the specified state.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
+
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to set the status on all cores.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
+
+ CLI Example::
+
+ salt '*' solr.set_replication_enabled false, None, music
'''
- if __opts__['solr.type'] != 'master':
- return _get_return_dict(False,
+ if not _is_master() and _get_none_or_value(host) is None:
+ return _get_return_dict(False,
errors=['Only minions configured as master can run this'])
cmd = 'enablereplication' if status else 'disablereplication'
- if core_name is None and _check_for_cores():
+ if _get_none_or_value(core_name) is None and _check_for_cores():
ret = _get_return_dict()
- success=True
+ success = True
for name in __opts__['solr.cores']:
- resp = set_replication_enabled(status, name)
+ resp = set_replication_enabled(status, host, name)
if not resp['success']:
success = False
data = {name : {'data' : resp['data']}}
- ret = _update_return_dict(ret, success, data,
+ ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
if status:
- return _replication_request(cmd, core_name=core_name)
+ return _replication_request(cmd, host=host, core_name=core_name)
else:
- return _replication_request(cmd, core_name=core_name)
+ return _replication_request(cmd, host=host, core_name=core_name)
def signal(signal=None):
'''
@@ -802,14 +940,18 @@ def signal(signal=None):
'''
ret = _get_return_dict()
- valid_signals = 'start stop restart'
- if not valid_signals.count(signal):
- return
+ valid_signals = ('start', 'stop', 'restart')
+
+ # Give a friendly error message for invalid signals
+ # TODO: Fix this logic to be reusable and used by apache.signal
+ if signal not in valid_signals:
+ msg = valid_signals[:-1] + ('or {0}'.format(valid_signals[-1]),)
+ return '{0} is an invalid signal. Try one of: {1}'.format(signal, ', '.join(msg))
cmd = "{0} {1}".format(__opts__['solr.init_script'], signal)
out = __salt__['cmd.run'](cmd)
-def reload_core(core_name):
+def reload_core(host=None, core_name=None):
'''
MULTI-CORE HOSTS ONLY
Load a new core from the same configuration as an existing registered core.
@@ -817,37 +959,81 @@ def reload_core(core_name):
requests. Once it has finished, all new request will go to the "new" core,
and the "old" core will be unloaded.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str
The name of the core to reload
- Return : dict::
+ Return : dict::
+
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
+
+ CLI Example::
+
+ salt '*' solr.reload_core None music
{'success':bool, 'data':dict, 'errors':list, 'warnings':list}
'''
+ ret = _get_return_dict()
+ if not _check_for_cores():
+ err = ['solr.reload_core can only be called by "multi-core" minions']
+ ret.update({'success':False, 'errors':err})
+ return ret
+
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
+ for name in __opts__['solr.cores']:
+ resp = reload_core(host, name)
+ if not resp['success']:
+ success = False
+ data = {name : {'data' : resp['data']}}
+ ret = _update_return_dict(ret, success, data,
+ resp['errors'], resp['warnings'])
+ return ret
extra = ['action=RELOAD', 'core={0}'.format(core_name)]
- url = _format_url('admin/cores', None, extra=extra)
+ url = _format_url('admin/cores', host=host, core_name=None, extra=extra)
return _http_request(url)
-def core_status(core_name):
+def core_status(host=None, core_name=None):
'''
MULTI-CORE HOSTS ONLY
Get the status for a given core or all cores if no core is specified
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core_name : str
The name of the core to reload
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
+
+ CLI Example::
+
+ salt '*' solr.core_status None music
'''
+ ret = _get_return_dict()
+ if not _check_for_cores():
+ err = ['solr.core_status can only be called by "multi-core" minions']
+ ret.update({'success':False, 'errors':err})
+ return ret
+
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ success = True
+ for name in __opts__['solr.cores']:
+ resp = core_status(host, name)
+ if not resp['success']:
+ success = False
+ data = {name : {'data' : resp['data']}}
+ ret = _update_return_dict(ret, success, data,
+ resp['errors'], resp['warnings'])
+ return ret
extra = ['action=STATUS', 'core={0}'.format(core_name)]
- url = _format_url('admin/cores', None, extra=extra)
+ url = _format_url('admin/cores', host=host, core_name=None, extra=extra)
print url
return _http_request(url)
################### DIH (Direct Import Handler) COMMANDS #####################
-def reload_import_config(handler, core_name=None, verbose=False):
+def reload_import_config(handler, host=None, core_name=None, verbose=False):
'''
MASTER ONLY
re-loads the handler config XML file.
@@ -855,32 +1041,38 @@ def reload_import_config(handler, core_name=None, verbose=False):
handler : str
The name of the data import handler.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
- verbose : bool (False)
+ verbose : boolean (False)
Run the command with verbose output.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
- salt '*' solr.reload_import_config dataimport music {'clean':True}
+ salt '*' solr.reload_import_config dataimport None music {'clean':True}
'''
#make sure that it's a master minion
- if not _is_master():
+ if not _is_master() and _get_none_or_value(host) is None:
err = ['solr.pre_indexing_check can only be called by "master" minions']
return _get_return_dict(False, err)
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ err = ['No core specified when minion is configured as "multi-core".']
+ return _get_return_dict(False, err)
+
params = ['command=reload-config']
if verbose:
params.append("verbose=true")
- url = _format_url(handler,core_name=core_name,extra=params)
+ url = _format_url(handler, host=host, core_name=core_name, extra=params)
return _http_request(url)
-def abort_import(handler, core_name=None, verbose=False):
+def abort_import(handler, host=None, core_name=None, verbose=False):
'''
MASTER ONLY
Aborts an existing import command to the specified handler.
@@ -889,30 +1081,36 @@ def abort_import(handler, core_name=None, verbose=False):
handler : str
The name of the data import handler.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
- verbose : bool (False)
+ verbose : boolean (False)
Run the command with verbose output.
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
- salt '*' solr.abort_import dataimport music {'clean':True}
+ salt '*' solr.abort_import dataimport None music {'clean':True}
'''
- if not _is_master():
+ if not _is_master() and _get_none_or_value(host) is None:
err = ['solr.abort_import can only be called on "master" minions']
return _get_return_dict(False, errors=err)
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ err = ['No core specified when minion is configured as "multi-core".']
+ return _get_return_dict(False, err)
+
params = ['command=abort']
if verbose:
params.append("verbose=true")
- url = _format_url(handler,core_name=core_name,extra=params)
+ url = _format_url(handler, host=host, core_name=core_name, extra=params)
return _http_request(url)
-def full_import(handler, core_name=None, options={}, extra=[]):
+def full_import(handler, host=None, core_name=None, options={}, extra=[]):
'''
MASTER ONLY
Submits an import command to the specified handler using specified options.
@@ -921,6 +1119,8 @@ def full_import(handler, core_name=None, options={}, extra=[]):
handler : str
The name of the data import handler.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
options : dict (__opts__)
@@ -930,34 +1130,39 @@ def full_import(handler, core_name=None, options={}, extra=[]):
extra : dict ([])
Extra name value pairs to pass to the handler. e.g. ["name=value"]
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
- salt '*' solr.full_import dataimport music {'clean':True}
+ salt '*' solr.full_import dataimport None music {'clean':True}
'''
if not _is_master():
err = ['solr.full_import can only be called on "master" minions']
return _get_return_dict(False, errors=err)
- resp = _pre_index_check(handler, core_name)
+ if _get_none_or_value(core_name) is None and _check_for_cores():
+ err = ['No core specified when minion is configured as "multi-core".']
+ return _get_return_dict(False, err)
+
+ resp = _pre_index_check(handler, host, core_name)
if not resp['success']:
return resp
options = _merge_options(options)
if options['clean']:
- resp = set_replication_enabled(False, core_name)
+ resp = set_replication_enabled(False, host=host, core_name=core_name)
if not resp['success']:
errors = ['Failed to set the replication status on the master.']
return _get_return_dict(False, errors=errors)
params = ['command=full-import']
- for (k,v) in options.items():
- params.append("&{0}={1}".format(k,v))
- url = _format_url(handler,core_name=core_name,extra=params + extra)
+ for (k, v) in options.items():
+ params.append("&{0}={1}".format(k, v))
+ url = _format_url(handler, host=host, core_name=core_name,
+ extra=params + extra)
return _http_request(url)
-def delta_import(handler, core_name=None, options={}, extra=[]):
+def delta_import(handler, host=None, core_name=None, options={}, extra=[]):
'''
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is is configured with
@@ -965,43 +1170,48 @@ def delta_import(handler, core_name=None, options={}, extra=[]):
handler : str
The name of the data import handler.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
options : dict (__opts__)
A list of options such as clean, optimize commit, verbose, and
pause_replication. leave blank to use __opts__ defaults. options will
be merged with __opts__
+
extra : dict ([])
Extra name value pairs to pass to the handler. eg ["name=value"]
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
- salt '*' solr.delta_import dataimport music {'clean':True}
+ salt '*' solr.delta_import dataimport None music {'clean':True}
'''
- if not _is_master():
+ if not _is_master() and _get_none_or_value(host) is None:
err = ['solr.delta_import can only be called on "master" minions']
return _get_return_dict(False, errors=err)
- resp = _pre_index_check(handler, core_name)
+ resp = _pre_index_check(handler, host=host, core_name=core_name)
if not resp['success']:
return resp
options = _merge_options(options)
- if options['clean']:
- resp = set_replication_enabled(False, core_name)
+    # if we're nuking data and we're multi-core, disable replication for safety
+ if options['clean'] and _check_for_cores():
+ resp = set_replication_enabled(False, host=host, core_name=core_name)
if not resp['success']:
errors = ['Failed to set the replication status on the master.']
return _get_return_dict(False, errors=errors)
params = ['command=delta-import']
- for (k,v) in options.items():
- params.append("{0}={1}".format(k,v))
- url = _format_url(handler, core_name=core_name, extra=params + extra)
+ for (k, v) in options.items():
+ params.append("{0}={1}".format(k, v))
+ url = _format_url(handler, host=host, core_name=core_name,
+ extra=params + extra)
return _http_request(url)
-def import_status(handler, core_name=None, verbose=False):
+def import_status(handler, host=None, core_name=None, verbose=False):
'''
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is is configured with
@@ -1009,25 +1219,27 @@ def import_status(handler, core_name=None, verbose=False):
handler : str
The name of the data import handler.
+ host : str (None)
+ The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
- verbose : bool (False)
+ verbose : boolean (False)
Specifies verbose output
- Return : dict::
+ Return : dict::
- {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
+ {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example::
- salt '*' solr.import_status dataimport music False
+ salt '*' solr.import_status dataimport None music False
'''
- if not _is_master():
+ if not _is_master() and _get_none_or_value(host) is None:
errors = ['solr.import_status can only be called by "master" minions']
return _get_return_dict(False, errors=errors)
extra = ["command=status"]
if verbose:
extra.append("verbose=true")
- url = _format_url(handler,core_name=core_name,extra=extra)
+ url = _format_url(handler, host=host, core_name=core_name, extra=extra)
return _http_request(url)
diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py
index fdc6d7e28ede..534788086ae6 100644
--- a/salt/modules/ssh.py
+++ b/salt/modules/ssh.py
@@ -12,9 +12,9 @@ def _refine_enc(enc):
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
- if rsa.count(enc):
+ if enc in rsa:
return 'ssh-rsa'
- elif dss.count(enc):
+ elif enc in dss:
return 'ssh-dss'
else:
return 'ssh-rsa'
@@ -86,7 +86,7 @@ def host_keys(keydir=None):
# Set up the default keydir - needs to support sshd_config parsing in the
# future
if not keydir:
- if __grains__['Linux']:
+ if __grains__['kernel'] == 'Linux':
keydir = '/etc/ssh'
keys = {}
for fn_ in os.listdir(keydir):
diff --git a/salt/modules/state.py b/salt/modules/state.py
index 286b4e6abb88..346342ac94fb 100644
--- a/salt/modules/state.py
+++ b/salt/modules/state.py
@@ -2,11 +2,15 @@
Control the state system on the minion
'''
+import os
+
import salt.state
__outputter__ = {
'highstate': 'highstate',
+ 'sls': 'highstate',
+ 'top': 'highstate',
}
@@ -75,9 +79,36 @@ def highstate():
return st_.call_highstate()
+def sls(mods, env='base'):
+ '''
+ Execute a set list of state modules from an environment, default
+ environment is base
+
+ CLI Example:
+
+ salt '*' state.sls core,edit.vim dev
+ '''
+ st_ = salt.state.HighState(__opts__)
+ if isinstance(mods, str):
+ mods = mods.split(',')
+ high, errors = st_.render_highstate({env: mods})
+ if errors:
+ return errors
+ return st_.state.call_high(high)
+
+
+def top(topfn):
+ '''
+ Execute a specific top file instead of the default
+ '''
+ st_ = salt.state.HighState(__opts__)
+ st_.opts['state_top'] = os.path.join('salt://', topfn)
+ return st_.call_highstate()
+
+
def show_highstate():
'''
- Retrive the highstate data from the salt master and display it
+ Retrieve the highstate data from the salt master and display it
CLI Example::
diff --git a/salt/modules/status.py b/salt/modules/status.py
index 4690f89210c2..cc73debee959 100644
--- a/salt/modules/status.py
+++ b/salt/modules/status.py
@@ -6,8 +6,6 @@
import fnmatch
import os
import re
-import subprocess
-
__opts__ = {}
@@ -71,8 +69,7 @@ def uptime():
salt '*' status.uptime
'''
- return subprocess.Popen(['uptime'],
- stdout=subprocess.PIPE).communicate()[0].strip()
+ return __salt__['cmd.run']('uptime').strip()
def loadavg():
@@ -107,7 +104,7 @@ def cpustats():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
if comps[0] == 'cpu':
@@ -144,7 +141,7 @@ def meminfo():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
comps[0] = comps[0].replace(':', '')
@@ -170,7 +167,7 @@ def cpuinfo():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split(':')
comps[0] = comps[0].strip()
@@ -195,7 +192,7 @@ def diskstats():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
ret[comps[2]] = {'major': _number(comps[0]),
@@ -285,7 +282,7 @@ def vmstats():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
ret[comps[0]] = _number(comps[1])
@@ -307,7 +304,7 @@ def netstats():
ret = {}
headers = ['']
for line in stats:
- if not line.count(' '):
+ if not line:
continue
comps = line.split()
if comps[0] == headers[0]:
@@ -339,7 +336,7 @@ def netdev():
stats = open(procf, 'r').read().split('\n')
ret = {}
for line in stats:
- if not line.count(' '):
+ if not line:
continue
if line.find(':') < 0:
continue
@@ -376,12 +373,10 @@ def w():
salt '*' status.w
'''
- users = subprocess.Popen(['w -h'],
- shell=True,
- stdout=subprocess.PIPE).communicate()[0].split('\n')
user_list = []
+ users = __salt__['cmd.run']('w -h').split('\n')
for row in users:
- if not row.count(' '):
+ if not row:
continue
comps = row.split()
rec = {'idle': comps[3],
@@ -397,7 +392,7 @@ def w():
def all_status():
'''
- Return a composite of all status data and info for this minoon.
+ Return a composite of all status data and info for this minion.
Warning: There is a LOT here!
CLI Example::
diff --git a/salt/modules/systemd.py b/salt/modules/systemd.py
new file mode 100644
index 000000000000..1024ca3ceb8d
--- /dev/null
+++ b/salt/modules/systemd.py
@@ -0,0 +1,166 @@
+'''
+Provide the service module for systemd
+'''
+
+import os
+
+
+def __virtual__():
+ '''
+ Only work on systems which default to systemd
+ '''
+ if __grains__['os'] == 'Fedora' and __grains__['osrelease'] > 15:
+ return 'service'
+ return False
+
+
+def get_enabled():
+ '''
+ Return a list of all enabled services
+
+ CLI Example::
+
+ salt '*' service.get_enabled
+ '''
+ ret = []
+ for serv in get_all():
+ cmd = 'systemctl is-enabled {0}.service'.format(serv)
+ if not __salt__['cmd.retcode'](cmd):
+ ret.append(serv)
+ return sorted(ret)
+
+def get_disabled():
+ '''
+ Return a list of all disabled services
+
+ CLI Example::
+
+ salt '*' service.get_disabled
+ '''
+ ret = []
+ for serv in get_all():
+ cmd = 'systemctl is-enabled {0}.service'.format(serv)
+ if __salt__['cmd.retcode'](cmd):
+ ret.append(serv)
+ return sorted(ret)
+
+def get_all():
+ '''
+ Return a list of all available services
+
+ CLI Example::
+
+ salt '*' service.get_all
+ '''
+ ret = set()
+ sdir = '/lib/systemd/system'
+ if not os.path.isdir('/lib/systemd/system'):
+ return []
+ for fn_ in os.listdir(sdir):
+ if fn_.endswith('.service'):
+ ret.add(fn_[:fn_.rindex('.')])
+ return sorted(list(ret))
+
+def start(name):
+ '''
+ Start the specified service with systemd
+
+ CLI Example::
+
+ salt '*' service.start
+ '''
+ cmd = 'systemctl start {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def stop(name):
+ '''
+    Stop the specified service with systemd
+
+ CLI Example::
+
+ salt '*' service.stop
+ '''
+ cmd = 'systemctl stop {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def restart(name):
+ '''
+    Restart the specified service with systemd
+
+ CLI Example::
+
+        salt '*' service.restart
+ '''
+ cmd = 'systemctl restart {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+# The unused sig argument is required to maintain consistency in the state
+# system
+def status(name, sig=None):
+ '''
+ Return the status for a service via systemd, returns the PID if the service
+ is running or an empty string if the service is not running
+
+ CLI Example::
+
+ salt '*' service.status
+ '''
+ cmd = 'systemctl show {0}.service'.format(name)
+ ret = __salt__['cmd.run'](cmd)
+ index1 = ret.find('\nMainPID=')
+ index2 = ret.find('\n', index1+9)
+ mainpid = ret[index1+9:index2]
+ if mainpid == '0':
+ return ''
+ return mainpid
+
+
+def enable(name):
+ '''
+ Enable the named service to start when the system boots
+
+ CLI Example::
+
+ salt '*' service.enable
+ '''
+ cmd = 'systemctl enable {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def disable(name):
+ '''
+    Disable the named service from starting when the system boots
+
+ CLI Example::
+
+ salt '*' service.disable
+ '''
+ cmd = 'systemctl disable {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def enabled(name):
+ '''
+ Return if the named service is enabled to start on boot
+
+ CLI Example::
+
+ salt '*' service.enabled
+ '''
+ cmd = 'systemctl is-enabled {0}.service'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def disabled(name):
+ '''
+ Return if the named service is disabled to start on boot
+
+ CLI Example::
+
+ salt '*' service.disabled
+ '''
+ cmd = 'systemctl is-enabled {0}.service'.format(name)
+ return bool(__salt__['cmd.retcode'](cmd))
diff --git a/salt/modules/test.py b/salt/modules/test.py
index 2b63b8d47a27..b743d0f4b499 100644
--- a/salt/modules/test.py
+++ b/salt/modules/test.py
@@ -38,6 +38,17 @@ def ping():
'''
return True
+def version():
+ '''
+ Return the version of salt on the minion
+
+ CLI Example::
+
+ salt '*' test.version
+ '''
+ import salt
+ return salt.__version__
+
def conf_test():
'''
diff --git a/salt/modules/tomcat.py b/salt/modules/tomcat.py
index 3617039a0efe..aee85efc97df 100644
--- a/salt/modules/tomcat.py
+++ b/salt/modules/tomcat.py
@@ -27,7 +27,7 @@ def version():
out = __salt__['cmd.run'](cmd).split('\n')
ret = out[0].split(': ')
for line in out:
- if not line.count(' '):
+ if not line:
continue
if 'Server version' in line:
comps = line.split(': ')
@@ -40,13 +40,13 @@ def fullversion():
CLI Example::
- salt '*' full.fullversion
+ salt '*' tomcat.fullversion
'''
cmd = __catalina_home() + '/bin/catalina.sh version'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
- if not line.count(' '):
+ if not line:
continue
if ': ' in line:
comps = line.split(': ')
diff --git a/salt/modules/useradd.py b/salt/modules/useradd.py
index 1a9c9d8b0ea6..f617269af510 100644
--- a/salt/modules/useradd.py
+++ b/salt/modules/useradd.py
@@ -26,7 +26,7 @@ def add(name,
salt '*' user.add name
'''
- if type(groups) == type(str()):
+ if isinstance(groups, basestring):
groups = groups.split(',')
cmd = 'useradd -s {0} '.format(shell)
if uid:
@@ -170,7 +170,7 @@ def chgroups(name, groups, append=False):
salt '*' user.chgroups foo wheel,root True
'''
- if type(groups) == type(str()):
+ if isinstance(groups, basestring):
groups = groups.split(',')
ugrps = set(list_groups(name))
if ugrps == set(groups):
@@ -194,20 +194,29 @@ def info(name):
salt '*' user.info root
'''
ret = {}
- data = pwd.getpwnam(name)
- ret['gid'] = data.pw_gid
- ret['groups'] = list_groups(name)
- ret['home'] = data.pw_dir
- ret['name'] = data.pw_name
- ret['passwd'] = data.pw_passwd
- ret['shell'] = data.pw_shell
- ret['uid'] = data.pw_uid
+ try:
+ data = pwd.getpwnam(name)
+ ret['gid'] = data.pw_gid
+ ret['groups'] = list_groups(name)
+ ret['home'] = data.pw_dir
+ ret['name'] = data.pw_name
+ ret['passwd'] = data.pw_passwd
+ ret['shell'] = data.pw_shell
+ ret['uid'] = data.pw_uid
+ except KeyError:
+ ret['gid'] = ''
+ ret['groups'] = ''
+ ret['home'] = ''
+ ret['name'] = ''
+ ret['passwd'] = ''
+ ret['shell'] = ''
+ ret['uid'] = ''
return ret
def list_groups(name):
'''
- Return a list of groups the named user belings to
+ Return a list of groups the named user belongs to
CLI Example::
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 0e3423d62b85..1584bba80bda 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -40,7 +40,7 @@ def _get_dom(vm_):
Return a domain object for the named vm
'''
conn = __get_conn()
- if not list_vms().count(vm_):
+ if vm_ not in list_vms():
raise Exception('The specified vm is not present')
return conn.lookupByName(vm_)
@@ -169,12 +169,12 @@ def get_disks(vm_):
target = targets[0]
else:
continue
- if target.attributes.keys().count('dev')\
- and source.attributes.keys().count('file'):
- disks[target.getAttribute('dev')] =\
+ if 'dev' in target.attributes.keys() \
+ and 'file' in source.attributes.keys():
+ disks[target.getAttribute('dev')] = \
{'file': source.getAttribute('file')}
for dev in disks:
- disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info '\
+ disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info ' \
+ disks[dev]['file'],
shell=True,
stdout=subprocess.PIPE).communicate()[0]))
@@ -405,6 +405,33 @@ def seed_non_shared_migrate(disks, force=False):
return True
+def set_autostart(vm_, state='on'):
+ '''
+ Set the autostart flag on a VM so that the VM will start with the host
+ system on reboot.
+
+ CLI Example::
+        salt "*" virt.set_autostart
+ '''
+
+ dom = _get_dom(vm_)
+
+ if state == 'on':
+ if dom.setAutostart(1) == 0:
+ return True
+ else:
+ return False
+
+ elif state == 'off':
+ if dom.setAutostart(0) == 0:
+ return True
+ else:
+ return False
+
+ else:
+        # return False if state is set to something other than on or off
+ return False
+
def destroy(vm_):
'''
Hard power down the virtual machine, this is equivalent to pulling the
@@ -443,7 +470,7 @@ def purge(vm_, dirs=False):
'''
Recursively destroy and delete a virtual machine, pass True for dir's to
also delete the directories containing the virtual machine disk images -
- USE WITH EXTREAME CAUTION!
+ USE WITH EXTREME CAUTION!
CLI Example::
@@ -482,11 +509,9 @@ def is_kvm_hyper():
'''
if __grains__['virtual'] != 'physical':
return False
- if not open('/proc/modules').read().count('kvm_'):
+ if 'kvm_' not in open('/proc/modules').read():
return False
- libvirt_ret = subprocess.Popen('ps aux',
- shell=True,
- stdout=subprocess.PIPE).communicate()[0].count('libvirtd')
+ libvirt_ret = __salt__['cmd.run'](__grains__['ps']).count('libvirtd')
if not libvirt_ret:
return False
return True
diff --git a/salt/modules/virtualenv.py b/salt/modules/virtualenv.py
new file mode 100644
index 000000000000..e83d86b8301e
--- /dev/null
+++ b/salt/modules/virtualenv.py
@@ -0,0 +1,57 @@
+'''
+Create virtualenv environments
+'''
+__opts__ = {
+ 'venv_bin': 'virtualenv',
+}
+
+def create(path,
+ venv_bin='',
+ no_site_packages=False,
+ system_site_packages=False,
+ clear=False,
+ python='',
+ extra_search_dir='',
+ never_download=False,
+ prompt=''):
+ '''
+ Create a virtualenv
+
+ path
+ The path to create the virtualenv
+ venv_bin : 'virtualenv'
+ The name (and optionally path) of the virtualenv command. This can also
+ be set globally in the minion config file as ``virtualenv.venv_bin``.
+ no_site_packages : False
+ Passthrough argument given to virtualenv
+ system_site_packages : False
+ Passthrough argument given to virtualenv
+ clear : False
+ Passthrough argument given to virtualenv
+ python : (default)
+ Passthrough argument given to virtualenv
+ extra_search_dir : (default)
+ Passthrough argument given to virtualenv
+ never_download : (default)
+ Passthrough argument given to virtualenv
+ prompt : (default)
+ Passthrough argument given to virtualenv
+
+ CLI Example::
+
+        salt '*' virtualenv.create /path/to/new/virtualenv
+ '''
+ cmd = '{venv_bin} {args} {path}'.format(
+ venv_bin=venv_bin if venv_bin else __opts__['venv_bin'],
+ args=''.join([
+ ' --no-site-packages' if no_site_packages else '',
+ ' --system-site-packages' if system_site_packages else '',
+ ' --clear' if clear else '',
+ ' --python {0}'.format(python) if python else '',
+ ' --extra-search-dir {0}'.format(extra_search_dir
+ ) if extra_search_dir else '',
+ ' --never-download' if never_download else '',
+ ' --prompt {0}'.format(prompt) if prompt else '']),
+ path=path)
+
+ return __salt__['cmd.run'](cmd)
diff --git a/salt/modules/win_disk.py b/salt/modules/win_disk.py
new file mode 100644
index 000000000000..31af7e163d23
--- /dev/null
+++ b/salt/modules/win_disk.py
@@ -0,0 +1,60 @@
+'''
+Module for gathering disk information on Windows
+'''
+is_windows = True
+try:
+ import ctypes
+ import string
+ import win32api
+except ImportError:
+ is_windows = False
+
+def __virtual__():
+ '''
+ Only works on Windows systems
+ '''
+ if not is_windows:
+ return False
+ return 'disk'
+
+def usage():
+ '''
+ Return usage information for volumes mounted on this minion
+
+ CLI Example::
+
+ salt '*' disk.usage
+ '''
+ if __grains__['kernel'] == 'Windows':
+ drives = []
+ ret = {}
+ drive_bitmask = ctypes.windll.kernel32.GetLogicalDrives()
+ for letter in string.uppercase:
+ if drive_bitmask & 1:
+ drives.append(letter)
+ drive_bitmask >>= 1
+ for drive in drives:
+ try:
+ sectorspercluster, bytespersector, freeclusters, totalclusters =\
+ win32api.GetDiskFreeSpace('{0}:\\'.format(drive))
+ totalsize = sectorspercluster * bytespersector * totalclusters
+ available_space = sectorspercluster * bytespersector * freeclusters
+ used = totalsize - available_space
+ capacity = int(used / float(totalsize) * 100)
+ ret['{0}:\\'.format(drive)] = {
+ 'filesystem': '{0}:\\'.format(drive),
+ '1K-blocks': totalsize,
+ 'used': used,
+ 'available': available_space,
+ 'capacity': '{0}%'.format(capacity),
+ }
+ except:
+ ret['{0}:\\'.format(drive)] = {
+ 'filesystem': '{0}:\\'.format(drive),
+ '1K-blocks': None,
+ 'used': None,
+ 'available': None,
+ 'capacity': None,
+ }
+ return ret
+
diff --git a/salt/modules/win_service.py b/salt/modules/win_service.py
new file mode 100644
index 000000000000..6f196b68a17f
--- /dev/null
+++ b/salt/modules/win_service.py
@@ -0,0 +1,202 @@
+'''
+Windows Service module.
+'''
+
+import os
+import time
+
+
+def __virtual__():
+ '''
+ Only works on Windows systems
+ '''
+ if __grains__['os'] == 'Windows':
+ return 'service'
+ else:
+ return False
+
+def get_enabled():
+ '''
+ Return the enabled services
+
+ CLI Example::
+
+ salt '*' service.get_enabled
+ '''
+ ret = set()
+ services = []
+ cmd = 'sc query type= service'
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ if 'SERVICE_NAME:' in line:
+ comps = line.split(':', 1)
+ if not len(comps) > 1:
+ continue
+ services.append(comps[1].strip())
+ for service in services:
+ cmd2 = 'sc qc "{0}"'.format(service)
+ lines = __salt__['cmd.run'](cmd2).split('\n')
+ for line in lines:
+ if 'AUTO_START' in line:
+ ret.add(service)
+ return sorted(ret)
+
+def get_disabled():
+ '''
+ Return the disabled services
+
+ CLI Example::
+
+ salt '*' service.get_disabled
+ '''
+ ret = set()
+ services = []
+ cmd = 'sc query type= service'
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ if 'SERVICE_NAME:' in line:
+ comps = line.split(':', 1)
+ if not len(comps) > 1:
+ continue
+ services.append(comps[1].strip())
+ for service in services:
+ cmd2 = 'sc qc "{0}"'.format(service)
+ lines = __salt__['cmd.run'](cmd2).split('\n')
+ for line in lines:
+ if 'DEMAND_START' in line:
+ ret.add(service)
+ elif 'DISABLED' in line:
+ ret.add(service)
+ return sorted(ret)
+
+def get_all():
+ '''
+ Return all installed services
+
+ CLI Example::
+
+        salt '*' service.get_all
+ '''
+ return sorted(get_enabled() + get_disabled())
+
+def start(name):
+ '''
+ Start the specified service
+
+ CLI Example::
+
+ salt '*' service.start
+ '''
+ cmd = 'sc start "{0}"'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def stop(name):
+ '''
+ Stop the specified service
+
+ CLI Example::
+
+ salt '*' service.stop
+ '''
+ cmd = 'sc stop "{0}"'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def restart(name):
+ '''
+ Restart the named service
+
+ CLI Example::
+
+ salt '*' service.restart
+ '''
+ stopcmd = 'sc stop "{0}"'.format(name)
+ stopped = __salt__['cmd.run'](stopcmd)
+ servicestate = status(name)
+ while True:
+ servicestate = status(name)
+ if servicestate == '':
+ break
+ else:
+ time.sleep(2)
+ startcmd = 'sc start "{0}"'.format(name)
+ return not __salt__['cmd.retcode'](startcmd)
+
+
+def status(name, sig=None):
+ '''
+ Return the status for a service, returns the PID or an empty string if the
+ service is running or not, pass a signature to use to find the service via
+ ps
+
+ CLI Example::
+
+ salt '*' service.status [service signature]
+ '''
+ cmd = 'sc query "{0}"'.format(name)
+ status = __salt__['cmd.run'](cmd).split('\n')
+ for line in status:
+ if 'RUNNING' in line:
+ return getsid(name)
+ elif 'PENDING' in line:
+ return getsid(name)
+ return ''
+
+def getsid(name):
+ '''
+ Return the sid for this windows service
+ '''
+ cmd = 'sc showsid "{0}"'.format(name)
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ if 'SERVICE SID:' in line:
+ comps = line.split(':', 1)
+ if comps[1] > 1:
+ return comps[1].strip()
+ else:
+ return None
+
+def enable(name):
+ '''
+ Enable the named service to start at boot
+
+ CLI Example::
+
+ salt '*' service.enable
+ '''
+ cmd = 'sc config "{0}" start= auto'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def disable(name):
+ '''
+    Disable the named service from starting at boot
+
+ CLI Example::
+
+ salt '*' service.disable
+ '''
+ cmd = 'sc config "{0}" start= demand'.format(name)
+ return not __salt__['cmd.retcode'](cmd)
+
+
+def enabled(name):
+ '''
+ Check to see if the named service is enabled to start on boot
+
+ CLI Example::
+
+ salt '*' service.enabled
+ '''
+ return name in get_enabled()
+
+def disabled(name):
+ '''
+    Check to see if the named service is disabled from starting on boot
+
+ CLI Example::
+
+ salt '*' service.disabled
+ '''
+ return name in get_disabled()
diff --git a/salt/modules/win_useradd.py b/salt/modules/win_useradd.py
new file mode 100644
index 000000000000..392a58e77384
--- /dev/null
+++ b/salt/modules/win_useradd.py
@@ -0,0 +1,192 @@
+'''
+Manage Windows users with the net user command
+
+NOTE: This currently only works with local user accounts, not domain accounts
+'''
+
+def __virtual__():
+ '''
+ Set the user module if the kernel is Windows
+ '''
+ return 'user' if __grains__['kernel'] == 'Windows' else False
+
+
+def add(name, password):
+ '''
+ Add a user to the minion
+
+ CLI Example::
+
+ salt '*' user.add name password
+ '''
+ cmd = 'net user {0} {1} /add'.format(name, password)
+ ret = __salt__['cmd.run_all'](cmd)
+
+ return not ret['retcode']
+
+
+def delete(name):
+ '''
+ Remove a user from the minion
+
+ CLI Example::
+
+ salt '*' user.delete name
+ '''
+ cmd = 'net user {0} /delete'.format(name)
+ ret = __salt__['cmd.run_all'](cmd)
+
+ return not ret['retcode']
+
+
+def setpassword(name, password):
+ '''
+ Set a user's password
+
+ CLI Example::
+
+ salt '*' user.setpassword name password
+ '''
+ cmd = 'net user {0} {1}'.format(name, password)
+ ret = __salt__['cmd.run_all'](cmd)
+
+ return not ret['retcode']
+
+
+def addgroup(name, group):
+ '''
+ Add user to a group
+
+ CLI Example::
+
+ salt '*' user.addgroup username groupname
+ '''
+ user = info(name)
+ if not user:
+ return False
+ if group in user['groups']:
+ return True
+ cmd = 'net localgroup {0} {1} /add'.format(group, name)
+ ret = __salt__['cmd.run_all'](cmd)
+
+ return not ret['retcode']
+
+
+def removegroup(name, group):
+ '''
+ Remove user from a group
+
+ CLI Example::
+
+ salt '*' user.removegroup username groupname
+ '''
+ user = info(name)
+ if not user:
+ return False
+ if group not in user['groups']:
+ return True
+ cmd = 'net localgroup {0} {1} /delete'.format(group, name)
+ ret = __salt__['cmd.run_all'](cmd)
+
+ return not ret['retcode']
+
+
+def chhome(name, home):
+ '''
+ Change the home directory of the user
+
+ CLI Example::
+
+ salt '*' user.chhome foo \\\\fileserver\\home\\foo
+ '''
+ pre_info = info(name)
+ if not pre_info:
+ return False
+ if home == pre_info['home']:
+ return True
+ cmd = 'net user {0} /homedir:{1}'.format(name, home)
+ __salt__['cmd.run'](cmd)
+ post_info = info(name)
+ if post_info['home'] != pre_info['home']:
+ if post_info['home'] == home:
+ return True
+ return False
+
+
+def chprofile(name, profile):
+ '''
+ Change the profile directory of the user
+
+ CLI Example::
+
+ salt '*' user.chprofile foo \\\\fileserver\\profiles\\foo
+ '''
+ pre_info = info(name)
+ if not pre_info:
+ return False
+ if profile == pre_info['profile']:
+ return True
+ cmd = 'net user {0} /profilepath:{1}'.format(name, profile)
+ __salt__['cmd.run'](cmd)
+ post_info = info(name)
+ if post_info['profile'] != pre_info['profile']:
+ if post_info['profile'] == profile:
+ return True
+ return False
+
+def info(name):
+ '''
+ Return user information
+
+ CLI Example::
+
+ salt '*' user.info root
+ '''
+ ret = {}
+ items = {}
+ cmd = 'net user {0}'.format(name)
+ lines = __salt__['cmd.run'](cmd).split('\n')
+ for line in lines:
+ if 'name could not be found' in line:
+ return False
+ if 'successfully' not in line:
+ comps = line.split(' ', 1)
+ if not len(comps) > 1:
+ continue
+ items[comps[0].strip()] = comps[1].strip()
+ grouplist = []
+ groups = items['Local Group Memberships'].split(' ')
+ for group in groups:
+ if not group:
+ continue
+ grouplist.append(group.strip('*'))
+
+ ret['fullname'] = items['Full Name']
+ ret['name'] = items['User name']
+ ret['comment'] = items['Comment']
+ ret['active'] = items['Account active']
+ ret['logonscript'] = items['Logon script']
+ ret['profile'] = items['User profile']
+ ret['home'] = items['Home directory']
+ ret['groups'] = grouplist
+
+ return ret
+
+
+def list_groups(name):
+ '''
+ Return a list of groups the named user belongs to
+
+ CLI Example::
+
+ salt '*' user.list_groups foo
+ '''
+ ugrp = set()
+ try:
+ user = info(name)['groups']
+ except:
+ return False
+ for group in user:
+ ugrp.add(group)
+
+ return sorted(list(ugrp))
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index fbe9fe3c40c9..287d99a62319 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -3,16 +3,29 @@
'''
import yum
import rpm
+import logging
from rpmUtils.arch import getBaseArch
+log = logging.getLogger(__name__)
+
def __virtual__():
'''
Confine this module to yum based systems
'''
- # We don't need to support pre-yum OSes because they don't support
- # python <= 2.6
- dists = 'CentOS Scientific RedHat Fedora'
- return 'pkg' if dists.count(__grains__['os']) else False
+ # Return this for pkg on RHEL/Fedora based distros that ship with python
+ # 2.6 or greater.
+ dists = ('CentOS', 'Scientific', 'RedHat')
+ if __grains__['os'] == 'Fedora':
+ if int(__grains__['osrelease'].split('.')[0]) >= 11:
+ return 'pkg'
+ else:
+ return False
+ else:
+ if __grains__['os'] in dists:
+ if int(__grains__['osrelease'].split('.')[0]) >= 6:
+ return 'pkg'
+ else:
+ return False
def _list_removed(old, new):
@@ -23,14 +36,14 @@ def _list_removed(old, new):
for pkg in old:
if pkg not in new:
pkgs.append(pkg)
-
+
return pkgs
def _compare_versions(old, new):
'''
Returns a dict that that displays old and new versions for a package after
- install/upgrade of package.
+ install/upgrade of package.
'''
pkgs = {}
for npkg in new:
@@ -56,14 +69,14 @@ def available_version(name):
CLI Example::
salt '*' pkg.available_version
- '''
- yb = yum.YumBase()
- # look for available packages only, if package is already installed with
+ '''
+ yb = yum.YumBase()
+ # look for available packages only, if package is already installed with
# latest version it will not show up here. If we want to use wildcards
- # here we can, but for now its exactmatch only.
+ # here we can, but for now its exact match only.
versions_list = []
for pkgtype in ['available', 'updates']:
-
+
pl = yb.doPackageLists(pkgtype)
exactmatch, matched, unmatched = yum.packages.parsePackages(pl, [name])
# build a list of available packages from either available or updates
@@ -72,18 +85,19 @@ def available_version(name):
# on available, and only iterate though updates if we don't..
for pkg in exactmatch:
if pkg.arch == getBaseArch():
- versions_list.append('-'.join([pkg.version, pkg.release]))
-
+ rel = pkg.release.rpartition('.')[0]
+ versions_list.append('-'.join([pkg.version, rel]))
+
if len(versions_list) == 0:
# if versions_list is empty return empty string. It may make sense
# to also check if a package is installed and on latest version
# already and return a message saying 'up to date' or something along
# those lines.
return ''
-
+
# remove the duplicate items from the list and return the first one
return list(set(versions_list))[0]
-
+
def version(name):
'''
@@ -115,13 +129,15 @@ def list_pkgs(*args):
# if no args are passed in get all packages
if len(args) == 0:
for h in ts.dbMatch():
- pkgs[h['name']] = '-'.join([h['version'],h['release']])
+ rel = h['release'].rpartition('.')[0]
+ pkgs[h['name']] = '-'.join([h['version'], rel])
else:
# get package version for each package in *args
for arg in args:
for h in ts.dbMatch('name', arg):
- pkgs[h['name']] = '-'.join([h['version'],h['release']])
-
+ rel = h['release'].rpartition('.')[0]
+ pkgs[h['name']] = '-'.join([h['version'], rel])
+
return pkgs
@@ -137,7 +153,7 @@ def refresh_db():
yb = yum.YumBase()
yb.cleanMetadata()
return True
-
+
def clean_metadata():
'''
@@ -150,10 +166,19 @@ def clean_metadata():
return refresh_db()
-def install(pkgs, refresh=False):
+def install(pkgs, refresh=False, repo='', skip_verify=False):
'''
- Install the passed package(s), add refresh=True to clean out the yum
- database before executing
+ Install the passed package(s)
+
+ pkg
+ The name of the package to be installed
+ refresh : False
+ Clean out the yum database before executing
+ repo : (default)
+ Specify a package repository to install from
+ (e.g., ``yum --enablerepo=somerepo``)
+ skip_verify : False
+ Skip the GPG verification check (e.g., ``--nogpgcheck``)
Return a dict containing the new package names and versions::
@@ -162,19 +187,30 @@ def install(pkgs, refresh=False):
CLI Example::
- salt '*' pkg.install
+ salt '*' pkg.install 'package package package'
'''
if refresh:
refresh_db()
-
- pkgs = pkgs.split(',')
+
+ if ',' in pkgs:
+ pkgs = pkgs.split(',')
+ else:
+ pkgs = pkgs.split(' ')
+
old = list_pkgs(*pkgs)
-
+
yb = yum.YumBase()
setattr(yb.conf, 'assumeyes', True)
-
+ setattr(yb.conf, 'gpgcheck', not skip_verify)
+
+ if repo:
+ yb.repos.enableRepo(repo)
+
for pkg in pkgs:
- yb.install(name=pkg)
+ try:
+ yb.install(name=pkg)
+ except yum.Errors.InstallError:
+ log.error('Package {0} failed to install'.format(pkg))
# Resolve Deps before attempting install. This needs to be improved
# by also tracking any deps that may get upgraded/installed during this
# process. For now only the version of the package(s) you request be
@@ -184,7 +220,7 @@ def install(pkgs, refresh=False):
yb.closeRpmDB()
new = list_pkgs(*pkgs)
-
+
return _compare_versions(old, new)
@@ -204,17 +240,17 @@ def upgrade():
yb = yum.YumBase()
setattr(yb.conf, 'assumeyes', True)
-
+
old = list_pkgs()
-
+
# ideally we would look in the yum transaction and get info on all the
# packages that are going to be upgraded and only look up old/new version
- # info on those packages.
+ # info on those packages.
yb.update()
yb.resolveDeps()
yb.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())
yb.closeRpmDB()
-
+
new = list_pkgs()
return _compare_versions(old, new)
@@ -229,22 +265,22 @@ def remove(pkgs):
salt '*' pkg.remove
'''
-
+
yb = yum.YumBase()
setattr(yb.conf, 'assumeyes', True)
pkgs = pkgs.split(',')
old = list_pkgs(*pkgs)
-
+
# same comments as in upgrade for remove.
for pkg in pkgs:
yb.remove(name=pkg)
-
+
yb.resolveDeps()
yb.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())
yb.closeRpmDB()
-
+
new = list_pkgs(*pkgs)
-
+
return _list_removed(old, new)
diff --git a/salt/modules/yumpkg5.py b/salt/modules/yumpkg5.py
new file mode 100644
index 000000000000..eafb1f6ba935
--- /dev/null
+++ b/salt/modules/yumpkg5.py
@@ -0,0 +1,231 @@
+'''
+Support for YUM
+'''
+import logging
+from collections import namedtuple
+
+from salt.exceptions import PkgParseError
+
+logger = logging.getLogger(__name__)
+
+def __virtual__():
+    '''
+    Confine this module to yum based systems
+
+    Loads as the ``pkg`` virtual module only on older RHEL-family releases
+    (Fedora < 11, CentOS/Scientific/RedHat <= 5) that ship a Python too old
+    for the yum Python API, so package operations shell out to the yum CLI
+    instead.
+    '''
+    # Return this for pkg on RHEL/Fedora based distros that do not ship with
+    # python 2.6 or greater.
+    dists = ('CentOS', 'Scientific', 'RedHat')
+    if __grains__['os'] == 'Fedora':
+        # osrelease looks like '10' or '10.1'; compare on the major number
+        if int(__grains__['osrelease'].split('.')[0]) < 11:
+            return 'pkg'
+        else:
+            return False
+    else:
+        if __grains__['os'] in dists:
+            if int(__grains__['osrelease'].split('.')[0]) <= 5:
+                return 'pkg'
+            else:
+                return False
+        # NOTE(review): any other OS falls through and returns None
+        # implicitly, which the loader treats the same as False.
+
+
+def _parse_yum(arg):
+    '''
+    A small helper to parse yum output; returns a list of namedtuples
+
+    Runs ``yum -q <arg>`` and parses each three-column output line into a
+    ``YumOut(name, version, status)`` namedtuple, skipping any line that
+    does not have exactly three whitespace-separated fields (headers,
+    wrapped lines, blanks).
+    '''
+    cmd = 'yum -q {0}'.format(arg)
+    out = __salt__['cmd.run_stdout'](cmd)
+    YumOut = namedtuple('YumOut', ('name', 'version', 'status'))
+
+    results = []
+
+    for line in out.split('\n'):
+        if len(line.split()) == 3:
+            namearchstr, versionstr, pkgstatus = line.split()
+            # strip the trailing '.<arch>' from 'name.arch'
+            pkgname = namearchstr.rpartition('.')[0]
+            # strip the last '.'-separated component of the version string
+            # (presumably the dist tag, e.g. '.el5') -- TODO confirm
+            pkgver = versionstr.rpartition('.')[0]
+
+            results.append(YumOut(pkgname, pkgver, pkgstatus))
+
+    return results
+
+
+def _list_removed(old, new):
+    '''
+    List the packages which have been removed between the two package objects
+
+    Returns the package names present in ``old`` but absent from ``new``.
+    '''
+    pkgs = []
+    for pkg in old:
+        if pkg not in new:
+            pkgs.append(pkg)
+    return pkgs
+
+
+def available_version(name):
+    '''
+    The available version of the package in the repository
+
+    Returns the version string of the first match reported by
+    ``yum list <name>``, or an empty string when yum reports nothing.
+
+    CLI Example::
+
+        salt '*' pkg.available_version <package name>
+    '''
+    out = _parse_yum('list {0}'.format(name))
+    return out[0].version if out else ''
+
+
+def version(name):
+    '''
+    Returns a version if the package is installed, else returns an empty string
+
+    CLI Example::
+
+        salt '*' pkg.version <package name>
+    '''
+    pkgs = list_pkgs()
+    if name in pkgs:
+        return pkgs[name]
+    else:
+        return ''
+
+
+def list_pkgs():
+    '''
+    List the packages currently installed in a dict::
+
+        {'<package_name>': '<version>'}
+
+    CLI Example::
+
+        salt '*' pkg.list_pkgs
+    '''
+    out = _parse_yum('list installed')
+    return dict([(i.name, i.version) for i in out])
+
+def refresh_db():
+ '''
+ Since yum refreshes the database automatically, this runs a yum clean,
+ so that the next yum operation will have a clean database
+
+ CLI Example::
+
+ salt '*' pkg.refresh_db
+ '''
+ cmd = 'yum -q clean dbcache'
+ retcode = __salt__['cmd.retcode'](cmd)
+ return True
+
+
+def install(pkg, refresh=False, repo='', skip_verify=False):
+    '''
+    Install the passed package
+
+    pkg
+        The name of the package to be installed
+    refresh : False
+        Clean out the yum database before executing
+    repo : (default)
+        Specify a package repository to install from
+        (e.g., ``yum --enablerepo=somerepo``)
+    skip_verify : False
+        Skip the GPG verification check (e.g., ``--nogpgcheck``)
+
+    Return a dict containing the new package names and versions::
+
+        {'<package>': {'old': '<old-version>',
+                       'new': '<new-version>'}}
+
+    CLI Example::
+
+        salt '*' pkg.install <package name>
+    '''
+    # snapshot installed versions before the transaction
+    old = list_pkgs()
+
+    cmd = 'yum -y {repo} {gpgcheck} install {pkg}'.format(
+        repo='--enablerepo={0}'.format(repo) if repo else '',
+        gpgcheck='--nogpgcheck' if skip_verify else '',
+        pkg=pkg,
+    )
+
+    if refresh:
+        refresh_db()
+    # NOTE(review): the yum return code is captured but never checked, so
+    # a failed install is only detectable by comparing old/new versions.
+    retcode = __salt__['cmd.retcode'](cmd)
+    new = list_pkgs()
+    pkgs = {}
+    for npkg in new:
+        if npkg in old:
+            if old[npkg] == new[npkg]:
+                # no change in the package
+                continue
+            else:
+                # the package was here before and the version has changed
+                pkgs[npkg] = {'old': old[npkg],
+                              'new': new[npkg]}
+        else:
+            # the package is freshly installed
+            pkgs[npkg] = {'old': '',
+                          'new': new[npkg]}
+    return pkgs
+
+
+def upgrade():
+    '''
+    Run a full system upgrade, a yum upgrade
+
+    Return a dict containing the new package names and versions::
+
+        {'<package>': {'old': '<old-version>',
+                       'new': '<new-version>'}}
+
+    CLI Example::
+
+        salt '*' pkg.upgrade
+    '''
+    # snapshot installed versions before the transaction
+    old = list_pkgs()
+    cmd = 'yum -q -y upgrade'
+    # NOTE(review): the yum return code is captured but never checked.
+    retcode = __salt__['cmd.retcode'](cmd)
+    new = list_pkgs()
+    pkgs = {}
+    for npkg in new:
+        if npkg in old:
+            if old[npkg] == new[npkg]:
+                # no change in the package
+                continue
+            else:
+                # the package was here before and the version has changed
+                pkgs[npkg] = {'old': old[npkg],
+                              'new': new[npkg]}
+        else:
+            # the package is freshly installed
+            pkgs[npkg] = {'old': '',
+                          'new': new[npkg]}
+    return pkgs
+
+
+def remove(pkg):
+    '''
+    Remove a single package with yum remove
+
+    Return a list containing the removed packages:
+
+    CLI Example::
+
+        salt '*' pkg.remove <package name>
+    '''
+    old = list_pkgs()
+    cmd = 'yum -q -y remove ' + pkg
+    # NOTE(review): the yum return code is captured but never checked; the
+    # removal result is derived from the before/after package snapshots.
+    retcode = __salt__['cmd.retcode'](cmd)
+    new = list_pkgs()
+    return _list_removed(old, new)
+
+
+def purge(pkg):
+    '''
+    Yum does not have a purge, this function calls remove
+
+    Return a list containing the removed packages:
+
+    CLI Example::
+
+        salt '*' pkg.purge <package name>
+    '''
+    return remove(pkg)
+
diff --git a/salt/msgpack/__init__.py b/salt/msgpack/__init__.py
new file mode 100644
index 000000000000..6362b611e158
--- /dev/null
+++ b/salt/msgpack/__init__.py
@@ -0,0 +1,20 @@
+# coding: utf-8
+from salt.msgpack.__version__ import *
+from salt.msgpack._msgpack import *
+
+# alias for compatibility to simplejson/marshal/pickle.
+load = unpack
+loads = unpackb
+
+dump = pack
+dumps = packb
+
+def packs(*args, **kw):
+    '''Deprecated alias for :func:`packb`; emits a DeprecationWarning.'''
+    from warnings import warn
+    warn("msgpack.packs() is deprecated. Use packb() instead.", DeprecationWarning)
+    return packb(*args, **kw)
+
+def unpacks(*args, **kw):
+    '''Deprecated alias for :func:`unpackb`; emits a DeprecationWarning.'''
+    from warnings import warn
+    warn("msgpack.unpacks() is deprecated. Use unpackb() instead.", DeprecationWarning)
+    return unpackb(*args, **kw)
diff --git a/salt/msgpack/__version__.py b/salt/msgpack/__version__.py
new file mode 100644
index 000000000000..84e88e8c4972
--- /dev/null
+++ b/salt/msgpack/__version__.py
@@ -0,0 +1 @@
+version = (0, 1, 12)
diff --git a/salt/msgpack/_msgpack.c b/salt/msgpack/_msgpack.c
new file mode 100644
index 000000000000..b6fe3201540f
--- /dev/null
+++ b/salt/msgpack/_msgpack.c
@@ -0,0 +1,6762 @@
+/* Generated by Cython 0.15.1 on Tue Dec 27 21:35:07 2011 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#else
+
+#include /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+ #define METH_COEXIST 0
+ #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+ #define PyDict_Contains(d,o) PySequence_Contains(d,o)
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PY_FORMAT_SIZE_T ""
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
+ #define PyNumber_Index(o) PyNumber_Int(o)
+ #define PyIndex_Check(o) PyNumber_Check(o)
+ #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+ #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+ #define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+ #define PyType_Modified(t)
+
+ typedef struct {
+ void *buf;
+ PyObject *obj;
+ Py_ssize_t len;
+ Py_ssize_t itemsize;
+ int readonly;
+ int ndim;
+ char *format;
+ Py_ssize_t *shape;
+ Py_ssize_t *strides;
+ Py_ssize_t *suboffsets;
+ void *internal;
+ } Py_buffer;
+
+ #define PyBUF_SIMPLE 0
+ #define PyBUF_WRITABLE 0x0001
+ #define PyBUF_FORMAT 0x0004
+ #define PyBUF_ND 0x0008
+ #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+ #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+ #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+ #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+
+#endif
+
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PyBytesObject PyStringObject
+ #define PyBytes_Type PyString_Type
+ #define PyBytes_Check PyString_Check
+ #define PyBytes_CheckExact PyString_CheckExact
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromFormat PyString_FromFormat
+ #define PyBytes_DecodeEscape PyString_DecodeEscape
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
+ #define PyBytes_Size PyString_Size
+ #define PyBytes_AS_STRING PyString_AS_STRING
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
+ #define PyBytes_Repr PyString_Repr
+ #define PyBytes_Concat PyString_Concat
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
+ #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+
+#if PY_VERSION_HEX < 0x03020000
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+
+#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
+ #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
+ #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
+ #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
+#else
+ #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
+ #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
+ #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
+#else
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_NAMESTR(n) ((char *)(n))
+ #define __Pyx_DOCSTR(n) ((char *)(n))
+#else
+ #define __Pyx_NAMESTR(n) (n)
+ #define __Pyx_DOCSTR(n) (n)
+#endif
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include
+#define __PYX_HAVE__msgpack___msgpack
+#define __PYX_HAVE_API__msgpack___msgpack
+#include "stdio.h"
+#include "pythread.h"
+#include "stdlib.h"
+#include "string.h"
+#include "pack.h"
+#include "unpack.h"
+#ifdef _OPENMP
+#include
+#endif /* _OPENMP */
+
+#ifdef PYREX_WITHOUT_ASSERTIONS
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+
+/* inline attribute */
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* unused attribute */
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || defined(__INTEL_COMPILER)
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+
+/* Type Conversion Predeclarations */
+
+#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
+#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
+
+#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
+
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+
+
+#ifdef __GNUC__
+ /* Test for GCC > 2.95 */
+ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+ #else /* __GNUC__ > 2 ... */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+ #endif /* __GNUC__ > 2 ... */
+#else /* __GNUC__ */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "_msgpack.pyx",
+ "bool.pxd",
+ "complex.pxd",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_7msgpack_8_msgpack_Unpacker;
+struct __pyx_obj_7msgpack_8_msgpack_Packer;
+struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack;
+
+/* "msgpack/_msgpack.pyx":85
+ * free(self.pk.buf);
+ *
+ * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<<
+ * cdef long long llval
+ * cdef unsigned long long ullval
+ */
+struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack {
+ int __pyx_n;
+ int nest_limit;
+};
+
+/* "msgpack/_msgpack.pyx":253
+ * object_hook=object_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors)
+ *
+ * cdef class Unpacker(object): # <<<<<<<<<<<<<<
+ * """
+ * Streaming unpacker.
+ */
+struct __pyx_obj_7msgpack_8_msgpack_Unpacker {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *__pyx_vtab;
+ template_context ctx;
+ char *buf;
+ size_t buf_size;
+ size_t buf_head;
+ size_t buf_tail;
+ PyObject *file_like;
+ PyObject *file_like_read;
+ Py_ssize_t read_size;
+ int use_list;
+ PyObject *object_hook;
+ PyObject *_bencoding;
+ PyObject *_berrors;
+ char *encoding;
+ char *unicode_errors;
+};
+
+
+/* "msgpack/_msgpack.pyx":38
+ * cdef int DEFAULT_RECURSE_LIMIT=511
+ *
+ * cdef class Packer(object): # <<<<<<<<<<<<<<
+ * """MessagePack Packer
+ *
+ */
+struct __pyx_obj_7msgpack_8_msgpack_Packer {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *__pyx_vtab;
+ struct msgpack_packer pk;
+ PyObject *_default;
+ PyObject *_bencoding;
+ PyObject *_berrors;
+ char *encoding;
+ char *unicode_errors;
+};
+
+
+
+/* "msgpack/_msgpack.pyx":253
+ * object_hook=object_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors)
+ *
+ * cdef class Unpacker(object): # <<<<<<<<<<<<<<
+ * """
+ * Streaming unpacker.
+ */
+
+struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker {
+ PyObject *(*append_buffer)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *, void *, Py_ssize_t);
+ PyObject *(*fill_buffer)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *);
+ PyObject *(*unpack)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *, int __pyx_skip_dispatch);
+};
+static struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *__pyx_vtabptr_7msgpack_8_msgpack_Unpacker;
+
+
+/* "msgpack/_msgpack.pyx":38
+ * cdef int DEFAULT_RECURSE_LIMIT=511
+ *
+ * cdef class Packer(object): # <<<<<<<<<<<<<<
+ * """MessagePack Packer
+ *
+ */
+
+struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer {
+ int (*_pack)(struct __pyx_obj_7msgpack_8_msgpack_Packer *, PyObject *, struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack *__pyx_optional_args);
+};
+static struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *__pyx_vtabptr_7msgpack_8_msgpack_Packer;
+
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+ #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+ #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static CYTHON_INLINE int __Pyx_CheckKeywordStrings(PyObject *kwdict,
+ const char* function_name, int kw_allowed); /*proto*/
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name, PyObject* kw_name); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
+
+static int __Pyx_check_binary_version(void);
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
+
+static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
+
+static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
+ int __pyx_lineno, const char *__pyx_filename); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+/* Module declarations from 'cpython.version' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.module' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'cpython.tuple' */
+
+/* Module declarations from 'cpython.list' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.sequence' */
+
+/* Module declarations from 'cpython.mapping' */
+
+/* Module declarations from 'cpython.iterator' */
+
+/* Module declarations from 'cpython.type' */
+
+/* Module declarations from 'cpython.number' */
+
+/* Module declarations from 'cpython.int' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.bool' */
+static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
+
+/* Module declarations from 'cpython.long' */
+
+/* Module declarations from 'cpython.float' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.complex' */
+static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
+
+/* Module declarations from 'cpython.string' */
+
+/* Module declarations from 'cpython.unicode' */
+
+/* Module declarations from 'cpython.dict' */
+
+/* Module declarations from 'cpython.instance' */
+
+/* Module declarations from 'cpython.function' */
+
+/* Module declarations from 'cpython.method' */
+
+/* Module declarations from 'cpython.weakref' */
+
+/* Module declarations from 'cpython.getargs' */
+
+/* Module declarations from 'cpython.pythread' */
+
+/* Module declarations from 'cpython.pystate' */
+
+/* Module declarations from 'cpython.cobject' */
+
+/* Module declarations from 'cpython.oldbuffer' */
+
+/* Module declarations from 'cpython.set' */
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.bytes' */
+
+/* Module declarations from 'cpython.pycapsule' */
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'libc.stdlib' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'msgpack._msgpack' */
+static PyTypeObject *__pyx_ptype_7msgpack_8_msgpack_Packer = 0;
+static PyTypeObject *__pyx_ptype_7msgpack_8_msgpack_Unpacker = 0;
+static int __pyx_v_7msgpack_8_msgpack_DEFAULT_RECURSE_LIMIT;
+#define __Pyx_MODULE_NAME "msgpack._msgpack"
+int __pyx_module_is_main_msgpack___msgpack = 0;
+
+/* Implementation of 'msgpack._msgpack' */
+static PyObject *__pyx_builtin_MemoryError;
+static PyObject *__pyx_builtin_TypeError;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_AssertionError;
+static PyObject *__pyx_builtin_StopIteration;
+static char __pyx_k_1[] = "Unable to allocate internal buffer.";
+static char __pyx_k_3[] = "utf-8";
+static char __pyx_k_4[] = "default must be a callable.";
+static char __pyx_k_9[] = "Too deep.";
+static char __pyx_k_11[] = "Can't encode utf-8 no encoding is specified";
+static char __pyx_k_13[] = "can't serialize %r";
+static char __pyx_k_16[] = "object_hook must be a callable.";
+static char __pyx_k_18[] = "list_hook must be a callable.";
+static char __pyx_k_20[] = "`file_like.read` must be a callable.";
+static char __pyx_k_27[] = "unpacker.feed() is not be able to use with`file_like`.";
+static char __pyx_k_29[] = "Unable to enlarge internal buffer.";
+static char __pyx_k_31[] = "No more unpack data.";
+static char __pyx_k_33[] = "Unpack failed: error = %d";
+static char __pyx_k_34[] = "msgpack._msgpack";
+static char __pyx_k__o[] = "o";
+static char __pyx_k__gc[] = "gc";
+static char __pyx_k__pack[] = "pack";
+static char __pyx_k__read[] = "read";
+static char __pyx_k__ascii[] = "ascii";
+static char __pyx_k__packb[] = "packb";
+static char __pyx_k__write[] = "write";
+static char __pyx_k__enable[] = "enable";
+static char __pyx_k__encode[] = "encode";
+static char __pyx_k__packed[] = "packed";
+static char __pyx_k__stream[] = "stream";
+static char __pyx_k__strict[] = "strict";
+static char __pyx_k__unpack[] = "unpack";
+static char __pyx_k__default[] = "default";
+static char __pyx_k__disable[] = "disable";
+static char __pyx_k__unpackb[] = "unpackb";
+static char __pyx_k____main__[] = "__main__";
+static char __pyx_k____test__[] = "__test__";
+static char __pyx_k__encoding[] = "encoding";
+static char __pyx_k__use_list[] = "use_list";
+static char __pyx_k__TypeError[] = "TypeError";
+static char __pyx_k__file_like[] = "file_like";
+static char __pyx_k__list_hook[] = "list_hook";
+static char __pyx_k__read_size[] = "read_size";
+static char __pyx_k__ValueError[] = "ValueError";
+static char __pyx_k___gc_enable[] = "_gc_enable";
+static char __pyx_k__MemoryError[] = "MemoryError";
+static char __pyx_k___gc_disable[] = "_gc_disable";
+static char __pyx_k__object_hook[] = "object_hook";
+static char __pyx_k__StopIteration[] = "StopIteration";
+static char __pyx_k__AssertionError[] = "AssertionError";
+static char __pyx_k__unicode_errors[] = "unicode_errors";
+static PyObject *__pyx_kp_s_1;
+static PyObject *__pyx_kp_s_11;
+static PyObject *__pyx_kp_s_13;
+static PyObject *__pyx_kp_s_16;
+static PyObject *__pyx_kp_s_18;
+static PyObject *__pyx_kp_s_20;
+static PyObject *__pyx_kp_s_27;
+static PyObject *__pyx_kp_s_29;
+static PyObject *__pyx_kp_s_3;
+static PyObject *__pyx_kp_s_31;
+static PyObject *__pyx_kp_s_33;
+static PyObject *__pyx_n_s_34;
+static PyObject *__pyx_kp_s_4;
+static PyObject *__pyx_kp_s_9;
+static PyObject *__pyx_n_s__AssertionError;
+static PyObject *__pyx_n_s__MemoryError;
+static PyObject *__pyx_n_s__StopIteration;
+static PyObject *__pyx_n_s__TypeError;
+static PyObject *__pyx_n_s__ValueError;
+static PyObject *__pyx_n_s____main__;
+static PyObject *__pyx_n_s____test__;
+static PyObject *__pyx_n_s___gc_disable;
+static PyObject *__pyx_n_s___gc_enable;
+static PyObject *__pyx_n_s__ascii;
+static PyObject *__pyx_n_s__default;
+static PyObject *__pyx_n_s__disable;
+static PyObject *__pyx_n_s__enable;
+static PyObject *__pyx_n_s__encode;
+static PyObject *__pyx_n_s__encoding;
+static PyObject *__pyx_n_s__file_like;
+static PyObject *__pyx_n_s__gc;
+static PyObject *__pyx_n_s__list_hook;
+static PyObject *__pyx_n_s__o;
+static PyObject *__pyx_n_s__object_hook;
+static PyObject *__pyx_n_s__pack;
+static PyObject *__pyx_n_s__packb;
+static PyObject *__pyx_n_s__packed;
+static PyObject *__pyx_n_s__read;
+static PyObject *__pyx_n_s__read_size;
+static PyObject *__pyx_n_s__stream;
+static PyObject *__pyx_n_s__strict;
+static PyObject *__pyx_n_s__unicode_errors;
+static PyObject *__pyx_n_s__unpack;
+static PyObject *__pyx_n_s__unpackb;
+static PyObject *__pyx_n_s__use_list;
+static PyObject *__pyx_n_s__write;
+static PyObject *__pyx_int_0;
+static int __pyx_k_8;
+static PyObject *__pyx_k_tuple_2;
+static PyObject *__pyx_k_tuple_5;
+static PyObject *__pyx_k_tuple_6;
+static PyObject *__pyx_k_tuple_7;
+static PyObject *__pyx_k_tuple_10;
+static PyObject *__pyx_k_tuple_12;
+static PyObject *__pyx_k_tuple_14;
+static PyObject *__pyx_k_tuple_15;
+static PyObject *__pyx_k_tuple_17;
+static PyObject *__pyx_k_tuple_19;
+static PyObject *__pyx_k_tuple_21;
+static PyObject *__pyx_k_tuple_22;
+static PyObject *__pyx_k_tuple_23;
+static PyObject *__pyx_k_tuple_24;
+static PyObject *__pyx_k_tuple_25;
+static PyObject *__pyx_k_tuple_26;
+static PyObject *__pyx_k_tuple_28;
+static PyObject *__pyx_k_tuple_30;
+static PyObject *__pyx_k_tuple_32;
+
+/* "msgpack/_msgpack.pyx":54
+ * cdef char *unicode_errors
+ *
+ * def __cinit__(self): # <<<<<<<<<<<<<<
+ * cdef int buf_size = 1024*1024
+ * self.pk.buf = malloc(buf_size);
+ */
+
+/* Cython-generated wrapper for `Packer.__cinit__` (msgpack/_msgpack.pyx:54).
+ * Allocates the packer's internal 1 MiB (1024*1024-byte) output buffer with
+ * malloc(), records its size in self.pk.buf_size and resets self.pk.length
+ * to 0.  On allocation failure raises MemoryError and returns -1.
+ * NOTE(review): machine-generated by Cython -- fix bugs in the .pyx source,
+ * not in this file. */
+static int __pyx_pf_7msgpack_8_msgpack_6Packer___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_7msgpack_8_msgpack_6Packer___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  int __pyx_v_buf_size;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  PyObject *__pyx_t_2 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__cinit__");
+  /* __cinit__ accepts no positional or keyword arguments; reject both. */
+  if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) {
+    __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;}
+  if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1;
+
+  /* "msgpack/_msgpack.pyx":55
+ * 
+ *     def __cinit__(self):
+ *         cdef int buf_size = 1024*1024             # <<<<<<<<<<<<<<
+ *         self.pk.buf = <char*> malloc(buf_size);
+ *         if self.pk.buf == NULL:
+ */
+  __pyx_v_buf_size = 1048576;
+
+  /* "msgpack/_msgpack.pyx":56
+ *     def __cinit__(self):
+ *         cdef int buf_size = 1024*1024
+ *         self.pk.buf = <char*> malloc(buf_size);             # <<<<<<<<<<<<<<
+ *         if self.pk.buf == NULL:
+ *             raise MemoryError("Unable to allocate internal buffer.")
+ */
+  ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.buf = ((char *)malloc(__pyx_v_buf_size));
+
+  /* "msgpack/_msgpack.pyx":57
+ *         cdef int buf_size = 1024*1024
+ *         self.pk.buf = <char*> malloc(buf_size);
+ *         if self.pk.buf == NULL:             # <<<<<<<<<<<<<<
+ *             raise MemoryError("Unable to allocate internal buffer.")
+ *         self.pk.buf_size = buf_size
+ */
+  __pyx_t_1 = (((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.buf == NULL);
+  if (__pyx_t_1) {
+
+    /* "msgpack/_msgpack.pyx":58
+ *         self.pk.buf = <char*> malloc(buf_size);
+ *         if self.pk.buf == NULL:
+ *             raise MemoryError("Unable to allocate internal buffer.")             # <<<<<<<<<<<<<<
+ *         self.pk.buf_size = buf_size
+ *         self.pk.length = 0
+ */
+    __pyx_t_2 = PyObject_Call(__pyx_builtin_MemoryError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    goto __pyx_L5;
+  }
+  __pyx_L5:;
+
+  /* "msgpack/_msgpack.pyx":59
+ *         if self.pk.buf == NULL:
+ *             raise MemoryError("Unable to allocate internal buffer.")
+ *         self.pk.buf_size = buf_size             # <<<<<<<<<<<<<<
+ *         self.pk.length = 0
+ * 
+ */
+  ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.buf_size = __pyx_v_buf_size;
+
+  /* "msgpack/_msgpack.pyx":60
+ *             raise MemoryError("Unable to allocate internal buffer.")
+ *         self.pk.buf_size = buf_size
+ *         self.pk.length = 0             # <<<<<<<<<<<<<<
+ * 
+ *     def __init__(self, default=None, encoding='utf-8', unicode_errors='strict'):
+ */
+  ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.length = 0;
+
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_AddTraceback("msgpack._msgpack.Packer.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":62
+ * self.pk.length = 0
+ *
+ * def __init__(self, default=None, encoding='utf-8', unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * if default is not None:
+ * if not PyCallable_Check(default):
+ */
+
+/* Cython-generated wrapper for
+ * `Packer.__init__(default=None, encoding='utf-8', unicode_errors='strict')`
+ * (msgpack/_msgpack.pyx:62).
+ * Validates that `default`, when not None, is callable (TypeError otherwise)
+ * and stores it on self._default.  When `encoding` is None, both
+ * self.encoding and self.unicode_errors C pointers are set to NULL;
+ * otherwise the encoding and error-handler names are coerced to bytes
+ * (unicode values are first .encode('ascii')-ed) and cached as C strings via
+ * PyBytes_AsString, with the owning bytes objects kept alive on
+ * self._bencoding / self._berrors so the char* pointers stay valid.
+ * Returns 0 on success, -1 with an exception set on failure.
+ * NOTE(review): machine-generated by Cython -- fix bugs in the .pyx source,
+ * not in this file. */
+static int __pyx_pf_7msgpack_8_msgpack_6Packer_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_7msgpack_8_msgpack_6Packer_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_default = 0;
+  PyObject *__pyx_v_encoding = 0;
+  PyObject *__pyx_v_unicode_errors = 0;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  PyObject *__pyx_t_2 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  char *__pyx_t_4;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__default,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+  __Pyx_RefNannySetupContext("__init__");
+  /* Standard Cython argument unpacking: fill `values` with the defaults
+   * (None, "utf-8", "strict"), then overlay positionals and keywords. */
+  {
+    PyObject* values[3] = {0,0,0};
+    values[0] = ((PyObject *)Py_None);
+    values[1] = ((PyObject *)__pyx_kp_s_3);
+    values[2] = ((PyObject *)__pyx_n_s__strict);
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      switch (PyTuple_GET_SIZE(__pyx_args)) {
+        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case 0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      /* Fallthrough switch: only look up keywords for parameters not
+       * already bound positionally. */
+      switch (PyTuple_GET_SIZE(__pyx_args)) {
+        case 0:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__default);
+          if (value) { values[0] = value; kw_args--; }
+        }
+        case 1:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+          if (value) { values[1] = value; kw_args--; }
+        }
+        case 2:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+          if (value) { values[2] = value; kw_args--; }
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else {
+      switch (PyTuple_GET_SIZE(__pyx_args)) {
+        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case 0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+    }
+    __pyx_v_default = values[0];
+    __pyx_v_encoding = values[1];
+    __pyx_v_unicode_errors = values[2];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("msgpack._msgpack.Packer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return -1;
+  __pyx_L4_argument_unpacking_done:;
+
+  /* "msgpack/_msgpack.pyx":63
+ * 
+ *     def __init__(self, default=None, encoding='utf-8', unicode_errors='strict'):
+ *         if default is not None:             # <<<<<<<<<<<<<<
+ *             if not PyCallable_Check(default):
+ *                 raise TypeError("default must be a callable.")
+ */
+  __pyx_t_1 = (__pyx_v_default != Py_None);
+  if (__pyx_t_1) {
+
+    /* "msgpack/_msgpack.pyx":64
+ *     def __init__(self, default=None, encoding='utf-8', unicode_errors='strict'):
+ *         if default is not None:
+ *             if not PyCallable_Check(default):             # <<<<<<<<<<<<<<
+ *                 raise TypeError("default must be a callable.")
+ *         self._default = default
+ */
+    __pyx_t_1 = (!PyCallable_Check(__pyx_v_default));
+    if (__pyx_t_1) {
+
+      /* "msgpack/_msgpack.pyx":65
+ *         if default is not None:
+ *             if not PyCallable_Check(default):
+ *                 raise TypeError("default must be a callable.")             # <<<<<<<<<<<<<<
+ *         self._default = default
+ *         if encoding is None:
+ */
+      __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      goto __pyx_L7;
+    }
+    __pyx_L7:;
+    goto __pyx_L6;
+  }
+  __pyx_L6:;
+
+  /* "msgpack/_msgpack.pyx":66
+ *             if not PyCallable_Check(default):
+ *                 raise TypeError("default must be a callable.")
+ *         self._default = default             # <<<<<<<<<<<<<<
+ *         if encoding is None:
+ *             self.encoding = NULL
+ */
+  __Pyx_INCREF(__pyx_v_default);
+  __Pyx_GIVEREF(__pyx_v_default);
+  __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_default);
+  __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_default);
+  ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_default = __pyx_v_default;
+
+  /* "msgpack/_msgpack.pyx":67
+ *                 raise TypeError("default must be a callable.")
+ *         self._default = default
+ *         if encoding is None:             # <<<<<<<<<<<<<<
+ *             self.encoding = NULL
+ *             self.unicode_errors = NULL
+ */
+  __pyx_t_1 = (__pyx_v_encoding == Py_None);
+  if (__pyx_t_1) {
+
+    /* "msgpack/_msgpack.pyx":68
+ *         self._default = default
+ *         if encoding is None:
+ *             self.encoding = NULL             # <<<<<<<<<<<<<<
+ *             self.unicode_errors = NULL
+ *         else:
+ */
+    ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->encoding = NULL;
+
+    /* "msgpack/_msgpack.pyx":69
+ *         if encoding is None:
+ *             self.encoding = NULL
+ *             self.unicode_errors = NULL             # <<<<<<<<<<<<<<
+ *         else:
+ *             if isinstance(encoding, unicode):
+ */
+    ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->unicode_errors = NULL;
+    goto __pyx_L8;
+  }
+  /*else*/ {
+
+    /* "msgpack/_msgpack.pyx":71
+ *             self.unicode_errors = NULL
+ *         else:
+ *             if isinstance(encoding, unicode):             # <<<<<<<<<<<<<<
+ *                 self._bencoding = encoding.encode('ascii')
+ *             else:
+ */
+    __pyx_t_2 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+    __Pyx_INCREF(__pyx_t_2);
+    __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_encoding, __pyx_t_2);
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    if (__pyx_t_1) {
+
+      /* "msgpack/_msgpack.pyx":72
+ *         else:
+ *             if isinstance(encoding, unicode):
+ *                 self._bencoding = encoding.encode('ascii')             # <<<<<<<<<<<<<<
+ *             else:
+ *                 self._bencoding = encoding
+ */
+      __pyx_t_2 = PyObject_GetAttr(__pyx_v_encoding, __pyx_n_s__encode); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      __Pyx_GIVEREF(__pyx_t_3);
+      __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding);
+      __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding);
+      ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding = __pyx_t_3;
+      __pyx_t_3 = 0;
+      goto __pyx_L9;
+    }
+    /*else*/ {
+
+      /* "msgpack/_msgpack.pyx":74
+ *                 self._bencoding = encoding.encode('ascii')
+ *             else:
+ *                 self._bencoding = encoding             # <<<<<<<<<<<<<<
+ *             self.encoding = PyBytes_AsString(self._bencoding)
+ *             if isinstance(unicode_errors, unicode):
+ */
+      __Pyx_INCREF(__pyx_v_encoding);
+      __Pyx_GIVEREF(__pyx_v_encoding);
+      __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding);
+      __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding);
+      ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding = __pyx_v_encoding;
+    }
+    __pyx_L9:;
+
+    /* "msgpack/_msgpack.pyx":75
+ *             else:
+ *                 self._bencoding = encoding
+ *             self.encoding = PyBytes_AsString(self._bencoding)             # <<<<<<<<<<<<<<
+ *             if isinstance(unicode_errors, unicode):
+ *                 self._berrors = unicode_errors.encode('ascii')
+ */
+    __pyx_t_3 = ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_bencoding;
+    __Pyx_INCREF(__pyx_t_3);
+    __pyx_t_4 = PyBytes_AsString(__pyx_t_3); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->encoding = __pyx_t_4;
+
+    /* "msgpack/_msgpack.pyx":76
+ *                 self._bencoding = encoding
+ *             self.encoding = PyBytes_AsString(self._bencoding)
+ *             if isinstance(unicode_errors, unicode):             # <<<<<<<<<<<<<<
+ *                 self._berrors = unicode_errors.encode('ascii')
+ *             else:
+ */
+    __pyx_t_3 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+    __Pyx_INCREF(__pyx_t_3);
+    __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_unicode_errors, __pyx_t_3);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    if (__pyx_t_1) {
+
+      /* "msgpack/_msgpack.pyx":77
+ *             self.encoding = PyBytes_AsString(self._bencoding)
+ *             if isinstance(unicode_errors, unicode):
+ *                 self._berrors = unicode_errors.encode('ascii')             # <<<<<<<<<<<<<<
+ *             else:
+ *                 self._berrors = unicode_errors
+ */
+      __pyx_t_3 = PyObject_GetAttr(__pyx_v_unicode_errors, __pyx_n_s__encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __Pyx_GIVEREF(__pyx_t_2);
+      __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors);
+      __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors);
+      ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors = __pyx_t_2;
+      __pyx_t_2 = 0;
+      goto __pyx_L10;
+    }
+    /*else*/ {
+
+      /* "msgpack/_msgpack.pyx":79
+ *                 self._berrors = unicode_errors.encode('ascii')
+ *             else:
+ *                 self._berrors = unicode_errors             # <<<<<<<<<<<<<<
+ *             self.unicode_errors = PyBytes_AsString(self._berrors)
+ * 
+ */
+      __Pyx_INCREF(__pyx_v_unicode_errors);
+      __Pyx_GIVEREF(__pyx_v_unicode_errors);
+      __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors);
+      __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors);
+      ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors = __pyx_v_unicode_errors;
+    }
+    __pyx_L10:;
+
+    /* "msgpack/_msgpack.pyx":80
+ *             else:
+ *                 self._berrors = unicode_errors
+ *             self.unicode_errors = PyBytes_AsString(self._berrors)             # <<<<<<<<<<<<<<
+ * 
+ *     def __dealloc__(self):
+ */
+    __pyx_t_2 = ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->_berrors;
+    __Pyx_INCREF(__pyx_t_2);
+    __pyx_t_4 = PyBytes_AsString(__pyx_t_2); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->unicode_errors = __pyx_t_4;
+  }
+  __pyx_L8:;
+
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_AddTraceback("msgpack._msgpack.Packer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":82
+ * self.unicode_errors = PyBytes_AsString(self._berrors)
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * free(self.pk.buf);
+ *
+ */
+
+/* Cython-generated `Packer.__dealloc__` (msgpack/_msgpack.pyx:82): releases
+ * the internal pack buffer allocated in __cinit__ with free().
+ * NOTE(review): machine-generated by Cython -- fix bugs in the .pyx source,
+ * not in this file. */
+static void __pyx_pf_7msgpack_8_msgpack_6Packer_2__dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pf_7msgpack_8_msgpack_6Packer_2__dealloc__(PyObject *__pyx_v_self) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__dealloc__");
+
+  /* "msgpack/_msgpack.pyx":83
+ * 
+ *     def __dealloc__(self):
+ *         free(self.pk.buf);             # <<<<<<<<<<<<<<
+ * 
+ *     cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+ */
+  free(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.buf);
+
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "msgpack/_msgpack.pyx":85
+ * free(self.pk.buf);
+ *
+ * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<<
+ * cdef long long llval
+ * cdef unsigned long long ullval
+ */
+
+static int __pyx_f_7msgpack_8_msgpack_6Packer__pack(struct __pyx_obj_7msgpack_8_msgpack_Packer *__pyx_v_self, PyObject *__pyx_v_o, struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack *__pyx_optional_args) {
+ int __pyx_v_nest_limit = __pyx_k_8;
+ PY_LONG_LONG __pyx_v_llval;
+ unsigned PY_LONG_LONG __pyx_v_ullval;
+ long __pyx_v_longval;
+ double __pyx_v_fval;
+ char *__pyx_v_rawval;
+ int __pyx_v_ret;
+ PyObject *__pyx_v_d = 0;
+ PyObject *__pyx_v_k = NULL;
+ PyObject *__pyx_v_v = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ unsigned PY_LONG_LONG __pyx_t_3;
+ PY_LONG_LONG __pyx_t_4;
+ long __pyx_t_5;
+ double __pyx_t_6;
+ char *__pyx_t_7;
+ Py_ssize_t __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *(*__pyx_t_10)(PyObject *);
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *(*__pyx_t_14)(PyObject *);
+ int __pyx_t_15;
+ struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack __pyx_t_16;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_pack");
+ if (__pyx_optional_args) {
+ if (__pyx_optional_args->__pyx_n > 0) {
+ __pyx_v_nest_limit = __pyx_optional_args->nest_limit;
+ }
+ }
+ __Pyx_INCREF(__pyx_v_o);
+
+ /* "msgpack/_msgpack.pyx":94
+ * cdef dict d
+ *
+ * if nest_limit < 0: # <<<<<<<<<<<<<<
+ * raise ValueError("Too deep.")
+ *
+ */
+ __pyx_t_1 = (__pyx_v_nest_limit < 0);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":95
+ *
+ * if nest_limit < 0:
+ * raise ValueError("Too deep.") # <<<<<<<<<<<<<<
+ *
+ * if o is None:
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "msgpack/_msgpack.pyx":97
+ * raise ValueError("Too deep.")
+ *
+ * if o is None: # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_nil(&self.pk)
+ * elif isinstance(o, bool):
+ */
+ __pyx_t_1 = (__pyx_v_o == Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":98
+ *
+ * if o is None:
+ * ret = msgpack_pack_nil(&self.pk) # <<<<<<<<<<<<<<
+ * elif isinstance(o, bool):
+ * if o:
+ */
+ __pyx_v_ret = msgpack_pack_nil((&__pyx_v_self->pk));
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":99
+ * if o is None:
+ * ret = msgpack_pack_nil(&self.pk)
+ * elif isinstance(o, bool): # <<<<<<<<<<<<<<
+ * if o:
+ * ret = msgpack_pack_true(&self.pk)
+ */
+ __pyx_t_2 = ((PyObject *)((PyObject*)__pyx_ptype_7cpython_4bool_bool));
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":100
+ * ret = msgpack_pack_nil(&self.pk)
+ * elif isinstance(o, bool):
+ * if o: # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_true(&self.pk)
+ * else:
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_o); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":101
+ * elif isinstance(o, bool):
+ * if o:
+ * ret = msgpack_pack_true(&self.pk) # <<<<<<<<<<<<<<
+ * else:
+ * ret = msgpack_pack_false(&self.pk)
+ */
+ __pyx_v_ret = msgpack_pack_true((&__pyx_v_self->pk));
+ goto __pyx_L5;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":103
+ * ret = msgpack_pack_true(&self.pk)
+ * else:
+ * ret = msgpack_pack_false(&self.pk) # <<<<<<<<<<<<<<
+ * elif PyLong_Check(o):
+ * if o > 0:
+ */
+ __pyx_v_ret = msgpack_pack_false((&__pyx_v_self->pk));
+ }
+ __pyx_L5:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":104
+ * else:
+ * ret = msgpack_pack_false(&self.pk)
+ * elif PyLong_Check(o): # <<<<<<<<<<<<<<
+ * if o > 0:
+ * ullval = o
+ */
+ __pyx_t_1 = PyLong_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":105
+ * ret = msgpack_pack_false(&self.pk)
+ * elif PyLong_Check(o):
+ * if o > 0: # <<<<<<<<<<<<<<
+ * ullval = o
+ * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ */
+ __pyx_t_2 = PyObject_RichCompare(__pyx_v_o, __pyx_int_0, Py_GT); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":106
+ * elif PyLong_Check(o):
+ * if o > 0:
+ * ullval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ * else:
+ */
+ __pyx_t_3 = __Pyx_PyInt_AsUnsignedLongLong(__pyx_v_o); if (unlikely((__pyx_t_3 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ullval = __pyx_t_3;
+
+ /* "msgpack/_msgpack.pyx":107
+ * if o > 0:
+ * ullval = o
+ * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) # <<<<<<<<<<<<<<
+ * else:
+ * llval = o
+ */
+ __pyx_v_ret = msgpack_pack_unsigned_long_long((&__pyx_v_self->pk), __pyx_v_ullval);
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":109
+ * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ * else:
+ * llval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_long_long(&self.pk, llval)
+ * elif PyInt_Check(o):
+ */
+ __pyx_t_4 = __Pyx_PyInt_AsLongLong(__pyx_v_o); if (unlikely((__pyx_t_4 == (PY_LONG_LONG)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_llval = __pyx_t_4;
+
+ /* "msgpack/_msgpack.pyx":110
+ * else:
+ * llval = o
+ * ret = msgpack_pack_long_long(&self.pk, llval) # <<<<<<<<<<<<<<
+ * elif PyInt_Check(o):
+ * longval = o
+ */
+ __pyx_v_ret = msgpack_pack_long_long((&__pyx_v_self->pk), __pyx_v_llval);
+ }
+ __pyx_L6:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":111
+ * llval = o
+ * ret = msgpack_pack_long_long(&self.pk, llval)
+ * elif PyInt_Check(o): # <<<<<<<<<<<<<<
+ * longval = o
+ * ret = msgpack_pack_long(&self.pk, longval)
+ */
+ __pyx_t_1 = PyInt_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":112
+ * ret = msgpack_pack_long_long(&self.pk, llval)
+ * elif PyInt_Check(o):
+ * longval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_long(&self.pk, longval)
+ * elif PyFloat_Check(o):
+ */
+ __pyx_t_5 = __Pyx_PyInt_AsLong(__pyx_v_o); if (unlikely((__pyx_t_5 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_longval = __pyx_t_5;
+
+ /* "msgpack/_msgpack.pyx":113
+ * elif PyInt_Check(o):
+ * longval = o
+ * ret = msgpack_pack_long(&self.pk, longval) # <<<<<<<<<<<<<<
+ * elif PyFloat_Check(o):
+ * fval = o
+ */
+ __pyx_v_ret = msgpack_pack_long((&__pyx_v_self->pk), __pyx_v_longval);
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":114
+ * longval = o
+ * ret = msgpack_pack_long(&self.pk, longval)
+ * elif PyFloat_Check(o): # <<<<<<<<<<<<<<
+ * fval = o
+ * ret = msgpack_pack_double(&self.pk, fval)
+ */
+ __pyx_t_1 = PyFloat_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":115
+ * ret = msgpack_pack_long(&self.pk, longval)
+ * elif PyFloat_Check(o):
+ * fval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_double(&self.pk, fval)
+ * elif PyBytes_Check(o):
+ */
+ __pyx_t_6 = __pyx_PyFloat_AsDouble(__pyx_v_o); if (unlikely((__pyx_t_6 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_fval = __pyx_t_6;
+
+ /* "msgpack/_msgpack.pyx":116
+ * elif PyFloat_Check(o):
+ * fval = o
+ * ret = msgpack_pack_double(&self.pk, fval) # <<<<<<<<<<<<<<
+ * elif PyBytes_Check(o):
+ * rawval = o
+ */
+ __pyx_v_ret = msgpack_pack_double((&__pyx_v_self->pk), __pyx_v_fval);
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":117
+ * fval = o
+ * ret = msgpack_pack_double(&self.pk, fval)
+ * elif PyBytes_Check(o): # <<<<<<<<<<<<<<
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ */
+ __pyx_t_1 = PyBytes_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":118
+ * ret = msgpack_pack_double(&self.pk, fval)
+ * elif PyBytes_Check(o):
+ * rawval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0:
+ */
+ __pyx_t_7 = PyBytes_AsString(__pyx_v_o); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_rawval = __pyx_t_7;
+
+ /* "msgpack/_msgpack.pyx":119
+ * elif PyBytes_Check(o):
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o)) # <<<<<<<<<<<<<<
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = msgpack_pack_raw((&__pyx_v_self->pk), __pyx_t_8);
+
+ /* "msgpack/_msgpack.pyx":120
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0: # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyUnicode_Check(o):
+ */
+ __pyx_t_1 = (__pyx_v_ret == 0);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":121
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o)) # <<<<<<<<<<<<<<
+ * elif PyUnicode_Check(o):
+ * if not self.encoding:
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_v_rawval, __pyx_t_8);
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":122
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyUnicode_Check(o): # <<<<<<<<<<<<<<
+ * if not self.encoding:
+ * raise TypeError("Can't encode utf-8 no encoding is specified")
+ */
+ __pyx_t_1 = PyUnicode_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":123
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyUnicode_Check(o):
+ * if not self.encoding: # <<<<<<<<<<<<<<
+ * raise TypeError("Can't encode utf-8 no encoding is specified")
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ */
+ __pyx_t_1 = (!(__pyx_v_self->encoding != 0));
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":124
+ * elif PyUnicode_Check(o):
+ * if not self.encoding:
+ * raise TypeError("Can't encode utf-8 no encoding is specified") # <<<<<<<<<<<<<<
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ * rawval = o
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "msgpack/_msgpack.pyx":125
+ * if not self.encoding:
+ * raise TypeError("Can't encode utf-8 no encoding is specified")
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) # <<<<<<<<<<<<<<
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ */
+ __pyx_t_2 = PyUnicode_AsEncodedString(__pyx_v_o, __pyx_v_self->encoding, __pyx_v_self->unicode_errors); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_o);
+ __pyx_v_o = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":126
+ * raise TypeError("Can't encode utf-8 no encoding is specified")
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ * rawval = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0:
+ */
+ __pyx_t_7 = PyBytes_AsString(__pyx_v_o); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_rawval = __pyx_t_7;
+
+ /* "msgpack/_msgpack.pyx":127
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o)) # <<<<<<<<<<<<<<
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = msgpack_pack_raw((&__pyx_v_self->pk), __pyx_t_8);
+
+ /* "msgpack/_msgpack.pyx":128
+ * rawval = o
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0: # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyDict_Check(o):
+ */
+ __pyx_t_1 = (__pyx_v_ret == 0);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":129
+ * ret = msgpack_pack_raw(&self.pk, len(o))
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o)) # <<<<<<<<<<<<<<
+ * elif PyDict_Check(o):
+ * d = o
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_v_rawval, __pyx_t_8);
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":130
+ * if ret == 0:
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyDict_Check(o): # <<<<<<<<<<<<<<
+ * d = o
+ * ret = msgpack_pack_map(&self.pk, len(d))
+ */
+ __pyx_t_1 = PyDict_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":131
+ * ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ * elif PyDict_Check(o):
+ * d = o # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_map(&self.pk, len(d))
+ * if ret == 0:
+ */
+ if (!(likely(PyDict_CheckExact(__pyx_v_o))||((__pyx_v_o) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected dict, got %.200s", Py_TYPE(__pyx_v_o)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_INCREF(__pyx_v_o);
+ __pyx_v_d = ((PyObject*)__pyx_v_o);
+
+ /* "msgpack/_msgpack.pyx":132
+ * elif PyDict_Check(o):
+ * d = o
+ * ret = msgpack_pack_map(&self.pk, len(d)) # <<<<<<<<<<<<<<
+ * if ret == 0:
+ * for k,v in d.items():
+ */
+ if (unlikely(((PyObject *)__pyx_v_d) == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_8 = PyDict_Size(((PyObject *)__pyx_v_d));
+ __pyx_v_ret = msgpack_pack_map((&__pyx_v_self->pk), __pyx_t_8);
+
+ /* "msgpack/_msgpack.pyx":133
+ * d = o
+ * ret = msgpack_pack_map(&self.pk, len(d))
+ * if ret == 0: # <<<<<<<<<<<<<<
+ * for k,v in d.items():
+ * ret = self._pack(k, nest_limit-1)
+ */
+ __pyx_t_1 = (__pyx_v_ret == 0);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":134
+ * ret = msgpack_pack_map(&self.pk, len(d))
+ * if ret == 0:
+ * for k,v in d.items(): # <<<<<<<<<<<<<<
+ * ret = self._pack(k, nest_limit-1)
+ * if ret != 0: break
+ */
+ if (unlikely(((PyObject *)__pyx_v_d) == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", "items"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_2 = PyDict_Items(__pyx_v_d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyList_CheckExact(__pyx_t_2) || PyTuple_CheckExact(__pyx_t_2)) {
+ __pyx_t_9 = __pyx_t_2; __Pyx_INCREF(__pyx_t_9); __pyx_t_8 = 0;
+ __pyx_t_10 = NULL;
+ } else {
+ __pyx_t_8 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext;
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ for (;;) {
+ if (PyList_CheckExact(__pyx_t_9)) {
+ if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_9)) break;
+ __pyx_t_2 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
+ } else if (PyTuple_CheckExact(__pyx_t_9)) {
+ if (__pyx_t_8 >= PyTuple_GET_SIZE(__pyx_t_9)) break;
+ __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
+ } else {
+ __pyx_t_2 = __pyx_t_10(__pyx_t_9);
+ if (unlikely(!__pyx_t_2)) {
+ if (PyErr_Occurred()) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();
+ else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_2);
+ }
+ if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) {
+ PyObject* sequence = __pyx_t_2;
+ if (likely(PyTuple_CheckExact(sequence))) {
+ if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) {
+ if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2);
+ else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence));
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_11 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_12 = PyTuple_GET_ITEM(sequence, 1);
+ } else {
+ if (unlikely(PyList_GET_SIZE(sequence) != 2)) {
+ if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2);
+ else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence));
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_11 = PyList_GET_ITEM(sequence, 0);
+ __pyx_t_12 = PyList_GET_ITEM(sequence, 1);
+ }
+ __Pyx_INCREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ } else {
+ Py_ssize_t index = -1;
+ __pyx_t_13 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_14 = Py_TYPE(__pyx_t_13)->tp_iternext;
+ index = 0; __pyx_t_11 = __pyx_t_14(__pyx_t_13); if (unlikely(!__pyx_t_11)) goto __pyx_L13_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_11);
+ index = 1; __pyx_t_12 = __pyx_t_14(__pyx_t_13); if (unlikely(!__pyx_t_12)) goto __pyx_L13_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_12);
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_14(__pyx_t_13), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ goto __pyx_L14_unpacking_done;
+ __pyx_L13_unpacking_failed:;
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear();
+ if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_L14_unpacking_done:;
+ }
+ __Pyx_XDECREF(__pyx_v_k);
+ __pyx_v_k = __pyx_t_11;
+ __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_v_v);
+ __pyx_v_v = __pyx_t_12;
+ __pyx_t_12 = 0;
+
+ /* "msgpack/_msgpack.pyx":135
+ * if ret == 0:
+ * for k,v in d.items():
+ * ret = self._pack(k, nest_limit-1) # <<<<<<<<<<<<<<
+ * if ret != 0: break
+ * ret = self._pack(v, nest_limit-1)
+ */
+ __pyx_t_16.__pyx_n = 1;
+ __pyx_t_16.nest_limit = (__pyx_v_nest_limit - 1);
+ __pyx_t_15 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_k, &__pyx_t_16); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_15;
+
+ /* "msgpack/_msgpack.pyx":136
+ * for k,v in d.items():
+ * ret = self._pack(k, nest_limit-1)
+ * if ret != 0: break # <<<<<<<<<<<<<<
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break
+ */
+ __pyx_t_1 = (__pyx_v_ret != 0);
+ if (__pyx_t_1) {
+ goto __pyx_L12_break;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "msgpack/_msgpack.pyx":137
+ * ret = self._pack(k, nest_limit-1)
+ * if ret != 0: break
+ * ret = self._pack(v, nest_limit-1) # <<<<<<<<<<<<<<
+ * if ret != 0: break
+ * elif PySequence_Check(o):
+ */
+ __pyx_t_16.__pyx_n = 1;
+ __pyx_t_16.nest_limit = (__pyx_v_nest_limit - 1);
+ __pyx_t_15 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, &__pyx_t_16); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_15;
+
+ /* "msgpack/_msgpack.pyx":138
+ * if ret != 0: break
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break # <<<<<<<<<<<<<<
+ * elif PySequence_Check(o):
+ * ret = msgpack_pack_array(&self.pk, len(o))
+ */
+ __pyx_t_1 = (__pyx_v_ret != 0);
+ if (__pyx_t_1) {
+ goto __pyx_L12_break;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+ }
+ __pyx_L12_break:;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":139
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break
+ * elif PySequence_Check(o): # <<<<<<<<<<<<<<
+ * ret = msgpack_pack_array(&self.pk, len(o))
+ * if ret == 0:
+ */
+ __pyx_t_1 = PySequence_Check(__pyx_v_o);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":140
+ * if ret != 0: break
+ * elif PySequence_Check(o):
+ * ret = msgpack_pack_array(&self.pk, len(o)) # <<<<<<<<<<<<<<
+ * if ret == 0:
+ * for v in o:
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = msgpack_pack_array((&__pyx_v_self->pk), __pyx_t_8);
+
+ /* "msgpack/_msgpack.pyx":141
+ * elif PySequence_Check(o):
+ * ret = msgpack_pack_array(&self.pk, len(o))
+ * if ret == 0: # <<<<<<<<<<<<<<
+ * for v in o:
+ * ret = self._pack(v, nest_limit-1)
+ */
+ __pyx_t_1 = (__pyx_v_ret == 0);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":142
+ * ret = msgpack_pack_array(&self.pk, len(o))
+ * if ret == 0:
+ * for v in o: # <<<<<<<<<<<<<<
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break
+ */
+ if (PyList_CheckExact(__pyx_v_o) || PyTuple_CheckExact(__pyx_v_o)) {
+ __pyx_t_9 = __pyx_v_o; __Pyx_INCREF(__pyx_t_9); __pyx_t_8 = 0;
+ __pyx_t_10 = NULL;
+ } else {
+ __pyx_t_8 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext;
+ }
+ for (;;) {
+ if (PyList_CheckExact(__pyx_t_9)) {
+ if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_9)) break;
+ __pyx_t_2 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
+ } else if (PyTuple_CheckExact(__pyx_t_9)) {
+ if (__pyx_t_8 >= PyTuple_GET_SIZE(__pyx_t_9)) break;
+ __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
+ } else {
+ __pyx_t_2 = __pyx_t_10(__pyx_t_9);
+ if (unlikely(!__pyx_t_2)) {
+ if (PyErr_Occurred()) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();
+ else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_2);
+ }
+ __Pyx_XDECREF(__pyx_v_v);
+ __pyx_v_v = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":143
+ * if ret == 0:
+ * for v in o:
+ * ret = self._pack(v, nest_limit-1) # <<<<<<<<<<<<<<
+ * if ret != 0: break
+ * elif self._default:
+ */
+ __pyx_t_16.__pyx_n = 1;
+ __pyx_t_16.nest_limit = (__pyx_v_nest_limit - 1);
+ __pyx_t_15 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, &__pyx_t_16); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_15;
+
+ /* "msgpack/_msgpack.pyx":144
+ * for v in o:
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break # <<<<<<<<<<<<<<
+ * elif self._default:
+ * o = self._default(o)
+ */
+ __pyx_t_1 = (__pyx_v_ret != 0);
+ if (__pyx_t_1) {
+ goto __pyx_L19_break;
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+ }
+ __pyx_L19_break:;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ goto __pyx_L4;
+ }
+
+ /* "msgpack/_msgpack.pyx":145
+ * ret = self._pack(v, nest_limit-1)
+ * if ret != 0: break
+ * elif self._default: # <<<<<<<<<<<<<<
+ * o = self._default(o)
+ * ret = self._pack(o, nest_limit-1)
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_self->_default); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":146
+ * if ret != 0: break
+ * elif self._default:
+ * o = self._default(o) # <<<<<<<<<<<<<<
+ * ret = self._pack(o, nest_limit-1)
+ * else:
+ */
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ __Pyx_INCREF(__pyx_v_o);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_o);
+ __Pyx_GIVEREF(__pyx_v_o);
+ __pyx_t_2 = PyObject_Call(__pyx_v_self->_default, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_o);
+ __pyx_v_o = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":147
+ * elif self._default:
+ * o = self._default(o)
+ * ret = self._pack(o, nest_limit-1) # <<<<<<<<<<<<<<
+ * else:
+ * raise TypeError("can't serialize %r" % (o,))
+ */
+ __pyx_t_16.__pyx_n = 1;
+ __pyx_t_16.nest_limit = (__pyx_v_nest_limit - 1);
+ __pyx_t_15 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_o, &__pyx_t_16); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_15;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":149
+ * ret = self._pack(o, nest_limit-1)
+ * else:
+ * raise TypeError("can't serialize %r" % (o,)) # <<<<<<<<<<<<<<
+ * return ret
+ *
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(__pyx_v_o);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_o);
+ __Pyx_GIVEREF(__pyx_v_o);
+ __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_13), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_9));
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_9, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L4:;
+
+ /* "msgpack/_msgpack.pyx":150
+ * else:
+ * raise TypeError("can't serialize %r" % (o,))
+ * return ret # <<<<<<<<<<<<<<
+ *
+ * def pack(self, object obj):
+ */
+ __pyx_r = __pyx_v_ret;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_AddTraceback("msgpack._msgpack.Packer._pack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_d);
+ __Pyx_XDECREF(__pyx_v_k);
+ __Pyx_XDECREF(__pyx_v_v);
+ __Pyx_XDECREF(__pyx_v_o);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":152
+ * return ret
+ *
+ * def pack(self, object obj): # <<<<<<<<<<<<<<
+ * cdef int ret
+ * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_6Packer_3pack(PyObject *__pyx_v_self, PyObject *__pyx_v_obj); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_6Packer_3pack[] = "Packer.pack(self, obj)";
+static PyObject *__pyx_pf_7msgpack_8_msgpack_6Packer_3pack(PyObject *__pyx_v_self, PyObject *__pyx_v_obj) {
+ int __pyx_v_ret;
+ PyObject *__pyx_v_buf = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("pack");
+
+ /* "msgpack/_msgpack.pyx":154
+ * def pack(self, object obj):
+ * cdef int ret
+ * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) # <<<<<<<<<<<<<<
+ * if ret:
+ * raise TypeError
+ */
+ __pyx_t_2.__pyx_n = 1;
+ __pyx_t_2.nest_limit = __pyx_v_7msgpack_8_msgpack_DEFAULT_RECURSE_LIMIT;
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer *)((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->__pyx_vtab)->_pack(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self), __pyx_v_obj, &__pyx_t_2); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_1;
+
+ /* "msgpack/_msgpack.pyx":155
+ * cdef int ret
+ * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ * if ret: # <<<<<<<<<<<<<<
+ * raise TypeError
+ * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ */
+ if (__pyx_v_ret) {
+
+ /* "msgpack/_msgpack.pyx":156
+ * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ * if ret:
+ * raise TypeError # <<<<<<<<<<<<<<
+ * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ * self.pk.length = 0
+ */
+ __Pyx_Raise(__pyx_builtin_TypeError, 0, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "msgpack/_msgpack.pyx":157
+ * if ret:
+ * raise TypeError
+ * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<<
+ * self.pk.length = 0
+ * return buf
+ */
+ __pyx_t_3 = ((PyObject *)PyBytes_FromStringAndSize(((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.buf, ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.length)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_v_buf = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "msgpack/_msgpack.pyx":158
+ * raise TypeError
+ * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ * self.pk.length = 0 # <<<<<<<<<<<<<<
+ * return buf
+ *
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)__pyx_v_self)->pk.length = 0;
+
+ /* "msgpack/_msgpack.pyx":159
+ * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ * self.pk.length = 0
+ * return buf # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_buf));
+ __pyx_r = ((PyObject *)__pyx_v_buf);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("msgpack._msgpack.Packer.pack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_buf);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":162
+ *
+ *
+ * def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * """
+ * pack an object `o` and write it to stream)."""
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_pack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_pack[] = "pack(o, stream, default=None, encoding='utf-8', unicode_errors='strict')\n\n pack an object `o` and write it to stream).";
+static PyMethodDef __pyx_mdef_7msgpack_8_msgpack_pack = {__Pyx_NAMESTR("pack"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_pack, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_pack)};
+static PyObject *__pyx_pf_7msgpack_8_msgpack_pack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_o = 0;
+ PyObject *__pyx_v_stream = 0;
+ PyObject *__pyx_v_default = 0;
+ PyObject *__pyx_v_encoding = 0;
+ PyObject *__pyx_v_unicode_errors = 0;
+ PyObject *__pyx_v_packer = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__o,&__pyx_n_s__stream,&__pyx_n_s__default,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+ __Pyx_RefNannySetupContext("pack");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[5] = {0,0,0,0,0};
+ values[2] = ((PyObject *)Py_None);
+ values[3] = ((PyObject *)__pyx_kp_s_3);
+ values[4] = ((PyObject *)__pyx_n_s__strict);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__o);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stream);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("pack", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__default);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "pack") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_o = values[0];
+ __pyx_v_stream = values[1];
+ __pyx_v_default = values[2];
+ __pyx_v_encoding = values[3];
+ __pyx_v_unicode_errors = values[4];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("pack", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("msgpack._msgpack.pack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "msgpack/_msgpack.pyx":165
+ * """
+ * pack an object `o` and write it to stream)."""
+ * packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors) # <<<<<<<<<<<<<<
+ * stream.write(packer.pack(o))
+ *
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__default), __pyx_v_default) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__encoding), __pyx_v_encoding) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__unicode_errors), __pyx_v_unicode_errors) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyEval_CallObjectWithKeywords(((PyObject *)((PyObject*)__pyx_ptype_7msgpack_8_msgpack_Packer)), ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_v_packer = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":166
+ * pack an object `o` and write it to stream)."""
+ * packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+ * stream.write(packer.pack(o)) # <<<<<<<<<<<<<<
+ *
+ * def packb(object o, default=None, encoding='utf-8', unicode_errors='strict'):
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_stream, __pyx_n_s__write); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_packer, __pyx_n_s__pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_INCREF(__pyx_v_o);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
+ __Pyx_GIVEREF(__pyx_v_o);
+ __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("msgpack._msgpack.pack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_packer);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":168
+ * stream.write(packer.pack(o))
+ *
+ * def packb(object o, default=None, encoding='utf-8', unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * """
+ * pack o and return packed bytes."""
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_1packb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_1packb[] = "packb(o, default=None, encoding='utf-8', unicode_errors='strict')\n\n pack o and return packed bytes.";
+static PyMethodDef __pyx_mdef_7msgpack_8_msgpack_1packb = {__Pyx_NAMESTR("packb"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_1packb, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_1packb)};
+static PyObject *__pyx_pf_7msgpack_8_msgpack_1packb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_o = 0;
+ PyObject *__pyx_v_default = 0;
+ PyObject *__pyx_v_encoding = 0;
+ PyObject *__pyx_v_unicode_errors = 0;
+ PyObject *__pyx_v_packer = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__o,&__pyx_n_s__default,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+ __Pyx_RefNannySetupContext("packb");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[4] = {0,0,0,0};
+ values[1] = ((PyObject *)Py_None);
+ values[2] = ((PyObject *)__pyx_kp_s_3);
+ values[3] = ((PyObject *)__pyx_n_s__strict);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__o);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__default);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "packb") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_o = values[0];
+ __pyx_v_default = values[1];
+ __pyx_v_encoding = values[2];
+ __pyx_v_unicode_errors = values[3];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("packb", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("msgpack._msgpack.packb", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "msgpack/_msgpack.pyx":171
+ * """
+ * pack o and return packed bytes."""
+ * packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors) # <<<<<<<<<<<<<<
+ * return packer.pack(o)
+ *
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__default), __pyx_v_default) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__encoding), __pyx_v_encoding) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__unicode_errors), __pyx_v_unicode_errors) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyEval_CallObjectWithKeywords(((PyObject *)((PyObject*)__pyx_ptype_7msgpack_8_msgpack_Packer)), ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_v_packer = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":172
+ * pack o and return packed bytes."""
+ * packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+ * return packer.pack(o) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_packer, __pyx_n_s__pack); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(__pyx_v_o);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_o);
+ __Pyx_GIVEREF(__pyx_v_o);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("msgpack._msgpack.packb", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_packer);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":196
+ *
+ *
+ * def unpackb(object packed, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"): # <<<<<<<<<<<<<<
+ * """
+ * Unpack packed_bytes to object. Returns an unpacked object."""
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_2unpackb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_2unpackb[] = "unpackb(packed, object_hook=None, list_hook=None, int use_list=0, encoding=None, unicode_errors='strict')\n\n Unpack packed_bytes to object. Returns an unpacked object.";
+static PyMethodDef __pyx_mdef_7msgpack_8_msgpack_2unpackb = {__Pyx_NAMESTR("unpackb"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_2unpackb, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_2unpackb)};
+static PyObject *__pyx_pf_7msgpack_8_msgpack_2unpackb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_packed = 0;
+ PyObject *__pyx_v_object_hook = 0;
+ PyObject *__pyx_v_list_hook = 0;
+ int __pyx_v_use_list;
+ PyObject *__pyx_v_encoding = 0;
+ PyObject *__pyx_v_unicode_errors = 0;
+ template_context __pyx_v_ctx;
+ size_t __pyx_v_off;
+ int __pyx_v_ret;
+ char *__pyx_v_buf;
+ Py_ssize_t __pyx_v_buf_len;
+ void *__pyx_v_enc;
+ void *__pyx_v_err;
+ PyObject *__pyx_v_bencoding = NULL;
+ PyObject *__pyx_v_berrors = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ char *__pyx_t_5;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__packed,&__pyx_n_s__object_hook,&__pyx_n_s__list_hook,&__pyx_n_s__use_list,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+ __Pyx_RefNannySetupContext("unpackb");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[6] = {0,0,0,0,0,0};
+ values[1] = ((PyObject *)Py_None);
+ values[2] = ((PyObject *)Py_None);
+ values[4] = ((PyObject *)Py_None);
+ values[5] = ((PyObject *)__pyx_n_s__strict);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__packed);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__object_hook);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__list_hook);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_list);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ case 5:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+ if (value) { values[5] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "unpackb") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_packed = values[0];
+ __pyx_v_object_hook = values[1];
+ __pyx_v_list_hook = values[2];
+ if (values[3]) {
+ __pyx_v_use_list = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_use_list == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ } else {
+ __pyx_v_use_list = ((int)0);
+ }
+ __pyx_v_encoding = values[4];
+ __pyx_v_unicode_errors = values[5];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("unpackb", 0, 1, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("msgpack._msgpack.unpackb", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "msgpack/_msgpack.pyx":200
+ * Unpack packed_bytes to object. Returns an unpacked object."""
+ * cdef template_context ctx
+ * cdef size_t off = 0 # <<<<<<<<<<<<<<
+ * cdef int ret
+ *
+ */
+ __pyx_v_off = 0;
+
+ /* "msgpack/_msgpack.pyx":205
+ * cdef char* buf
+ * cdef Py_ssize_t buf_len
+ * PyObject_AsReadBuffer(packed, &buf, &buf_len) # <<<<<<<<<<<<<<
+ *
+ * if encoding is None:
+ */
+ __pyx_t_1 = PyObject_AsReadBuffer(__pyx_v_packed, ((const void* *)(&__pyx_v_buf)), (&__pyx_v_buf_len)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "msgpack/_msgpack.pyx":207
+ * PyObject_AsReadBuffer(packed, &buf, &buf_len)
+ *
+ * if encoding is None: # <<<<<<<<<<<<<<
+ * enc = NULL
+ * err = NULL
+ */
+ __pyx_t_2 = (__pyx_v_encoding == Py_None);
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":208
+ *
+ * if encoding is None:
+ * enc = NULL # <<<<<<<<<<<<<<
+ * err = NULL
+ * else:
+ */
+ __pyx_v_enc = NULL;
+
+ /* "msgpack/_msgpack.pyx":209
+ * if encoding is None:
+ * enc = NULL
+ * err = NULL # <<<<<<<<<<<<<<
+ * else:
+ * if isinstance(encoding, unicode):
+ */
+ __pyx_v_err = NULL;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":211
+ * err = NULL
+ * else:
+ * if isinstance(encoding, unicode): # <<<<<<<<<<<<<<
+ * bencoding = encoding.encode('ascii')
+ * else:
+ */
+ __pyx_t_3 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_encoding, __pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":212
+ * else:
+ * if isinstance(encoding, unicode):
+ * bencoding = encoding.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * bencoding = encoding
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_encoding, __pyx_n_s__encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_bencoding = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":214
+ * bencoding = encoding.encode('ascii')
+ * else:
+ * bencoding = encoding # <<<<<<<<<<<<<<
+ * if isinstance(unicode_errors, unicode):
+ * berrors = unicode_errors.encode('ascii')
+ */
+ __Pyx_INCREF(__pyx_v_encoding);
+ __pyx_v_bencoding = __pyx_v_encoding;
+ }
+ __pyx_L7:;
+
+ /* "msgpack/_msgpack.pyx":215
+ * else:
+ * bencoding = encoding
+ * if isinstance(unicode_errors, unicode): # <<<<<<<<<<<<<<
+ * berrors = unicode_errors.encode('ascii')
+ * else:
+ */
+ __pyx_t_4 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+ __Pyx_INCREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_unicode_errors, __pyx_t_4);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":216
+ * bencoding = encoding
+ * if isinstance(unicode_errors, unicode):
+ * berrors = unicode_errors.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * berrors = unicode_errors
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_unicode_errors, __pyx_n_s__encode); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_15), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_berrors = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":218
+ * berrors = unicode_errors.encode('ascii')
+ * else:
+ * berrors = unicode_errors # <<<<<<<<<<<<<<
+ * enc = PyBytes_AsString(bencoding)
+ * err = PyBytes_AsString(berrors)
+ */
+ __Pyx_INCREF(__pyx_v_unicode_errors);
+ __pyx_v_berrors = __pyx_v_unicode_errors;
+ }
+ __pyx_L8:;
+
+ /* "msgpack/_msgpack.pyx":219
+ * else:
+ * berrors = unicode_errors
+ * enc = PyBytes_AsString(bencoding) # <<<<<<<<<<<<<<
+ * err = PyBytes_AsString(berrors)
+ *
+ */
+ __pyx_t_5 = PyBytes_AsString(__pyx_v_bencoding); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_enc = __pyx_t_5;
+
+ /* "msgpack/_msgpack.pyx":220
+ * berrors = unicode_errors
+ * enc = PyBytes_AsString(bencoding)
+ * err = PyBytes_AsString(berrors) # <<<<<<<<<<<<<<
+ *
+ * template_init(&ctx)
+ */
+ __pyx_t_5 = PyBytes_AsString(__pyx_v_berrors); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_err = __pyx_t_5;
+ }
+ __pyx_L6:;
+
+ /* "msgpack/_msgpack.pyx":222
+ * err = PyBytes_AsString(berrors)
+ *
+ * template_init(&ctx) # <<<<<<<<<<<<<<
+ * ctx.user.use_list = use_list
+ * ctx.user.object_hook = ctx.user.list_hook = NULL
+ */
+ template_init((&__pyx_v_ctx));
+
+ /* "msgpack/_msgpack.pyx":223
+ *
+ * template_init(&ctx)
+ * ctx.user.use_list = use_list # <<<<<<<<<<<<<<
+ * ctx.user.object_hook = ctx.user.list_hook = NULL
+ * ctx.user.encoding = enc
+ */
+ __pyx_v_ctx.user.use_list = __pyx_v_use_list;
+
+ /* "msgpack/_msgpack.pyx":224
+ * template_init(&ctx)
+ * ctx.user.use_list = use_list
+ * ctx.user.object_hook = ctx.user.list_hook = NULL # <<<<<<<<<<<<<<
+ * ctx.user.encoding = enc
+ * ctx.user.unicode_errors = err
+ */
+ __pyx_v_ctx.user.object_hook = NULL;
+ __pyx_v_ctx.user.list_hook = NULL;
+
+ /* "msgpack/_msgpack.pyx":225
+ * ctx.user.use_list = use_list
+ * ctx.user.object_hook = ctx.user.list_hook = NULL
+ * ctx.user.encoding = enc # <<<<<<<<<<<<<<
+ * ctx.user.unicode_errors = err
+ * if object_hook is not None:
+ */
+ __pyx_v_ctx.user.encoding = __pyx_v_enc;
+
+ /* "msgpack/_msgpack.pyx":226
+ * ctx.user.object_hook = ctx.user.list_hook = NULL
+ * ctx.user.encoding = enc
+ * ctx.user.unicode_errors = err # <<<<<<<<<<<<<<
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ */
+ __pyx_v_ctx.user.unicode_errors = __pyx_v_err;
+
+ /* "msgpack/_msgpack.pyx":227
+ * ctx.user.encoding = enc
+ * ctx.user.unicode_errors = err
+ * if object_hook is not None: # <<<<<<<<<<<<<<
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.")
+ */
+ __pyx_t_2 = (__pyx_v_object_hook != Py_None);
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":228
+ * ctx.user.unicode_errors = err
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook): # <<<<<<<<<<<<<<
+ * raise TypeError("object_hook must be a callable.")
+ * ctx.user.object_hook = object_hook
+ */
+ __pyx_t_2 = (!PyCallable_Check(__pyx_v_object_hook));
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":229
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<<
+ * ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ */
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "msgpack/_msgpack.pyx":230
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.")
+ * ctx.user.object_hook = object_hook # <<<<<<<<<<<<<<
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ */
+ __pyx_v_ctx.user.object_hook = ((PyObject *)__pyx_v_object_hook);
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "msgpack/_msgpack.pyx":231
+ * raise TypeError("object_hook must be a callable.")
+ * ctx.user.object_hook = object_hook
+ * if list_hook is not None: # <<<<<<<<<<<<<<
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.")
+ */
+ __pyx_t_2 = (__pyx_v_list_hook != Py_None);
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":232
+ * ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook): # <<<<<<<<<<<<<<
+ * raise TypeError("list_hook must be a callable.")
+ * ctx.user.list_hook = list_hook
+ */
+ __pyx_t_2 = (!PyCallable_Check(__pyx_v_list_hook));
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":233
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<<
+ * ctx.user.list_hook = list_hook
+ * _gc_disable()
+ */
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "msgpack/_msgpack.pyx":234
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.")
+ * ctx.user.list_hook = list_hook # <<<<<<<<<<<<<<
+ * _gc_disable()
+ * try:
+ */
+ __pyx_v_ctx.user.list_hook = ((PyObject *)__pyx_v_list_hook);
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "msgpack/_msgpack.pyx":235
+ * raise TypeError("list_hook must be a callable.")
+ * ctx.user.list_hook = list_hook
+ * _gc_disable() # <<<<<<<<<<<<<<
+ * try:
+ * ret = template_execute(&ctx, buf, buf_len, &off)
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s___gc_disable); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "msgpack/_msgpack.pyx":236
+ * ctx.user.list_hook = list_hook
+ * _gc_disable()
+ * try: # <<<<<<<<<<<<<<
+ * ret = template_execute(&ctx, buf, buf_len, &off)
+ * finally:
+ */
+ /*try:*/ {
+
+ /* "msgpack/_msgpack.pyx":237
+ * _gc_disable()
+ * try:
+ * ret = template_execute(&ctx, buf, buf_len, &off) # <<<<<<<<<<<<<<
+ * finally:
+ * _gc_enable()
+ */
+ __pyx_t_1 = template_execute((&__pyx_v_ctx), __pyx_v_buf, __pyx_v_buf_len, (&__pyx_v_off)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L14;}
+ __pyx_v_ret = __pyx_t_1;
+ }
+
+ /* "msgpack/_msgpack.pyx":239
+ * ret = template_execute(&ctx, buf, buf_len, &off)
+ * finally:
+ * _gc_enable() # <<<<<<<<<<<<<<
+ * if ret == 1:
+ * return template_data(&ctx)
+ */
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_exc_type = 0; __pyx_exc_value = 0; __pyx_exc_tb = 0; __pyx_exc_lineno = 0;
+ __pyx_why = 0; goto __pyx_L15;
+ __pyx_L14: {
+ __pyx_why = 4;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_ErrFetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___gc_enable); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L16_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L16_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L17;
+ __pyx_L16_error:;
+ if (__pyx_why == 4) {
+ Py_XDECREF(__pyx_exc_type);
+ Py_XDECREF(__pyx_exc_value);
+ Py_XDECREF(__pyx_exc_tb);
+ }
+ goto __pyx_L1_error;
+ __pyx_L17:;
+ switch (__pyx_why) {
+ case 4: {
+ __Pyx_ErrRestore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1_error;
+ }
+ }
+ }
+
+ /* "msgpack/_msgpack.pyx":240
+ * finally:
+ * _gc_enable()
+ * if ret == 1: # <<<<<<<<<<<<<<
+ * return template_data(&ctx)
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_ret == 1);
+ if (__pyx_t_2) {
+
+ /* "msgpack/_msgpack.pyx":241
+ * _gc_enable()
+ * if ret == 1:
+ * return template_data(&ctx) # <<<<<<<<<<<<<<
+ * else:
+ * return None
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = template_data((&__pyx_v_ctx)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L18;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":243
+ * return template_data(&ctx)
+ * else:
+ * return None # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ }
+ __pyx_L18:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("msgpack._msgpack.unpackb", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_bencoding);
+ __Pyx_XDECREF(__pyx_v_berrors);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":246
+ *
+ *
+ * def unpack(object stream, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"): # <<<<<<<<<<<<<<
+ * """
+ * unpack an object from stream.
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_3unpack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_3unpack[] = "unpack(stream, object_hook=None, list_hook=None, int use_list=0, encoding=None, unicode_errors='strict')\n\n unpack an object from stream.\n ";
+static PyMethodDef __pyx_mdef_7msgpack_8_msgpack_3unpack = {__Pyx_NAMESTR("unpack"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_3unpack, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_3unpack)};
+static PyObject *__pyx_pf_7msgpack_8_msgpack_3unpack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_stream = 0;
+ PyObject *__pyx_v_object_hook = 0;
+ PyObject *__pyx_v_list_hook = 0;
+ int __pyx_v_use_list;
+ PyObject *__pyx_v_encoding = 0;
+ PyObject *__pyx_v_unicode_errors = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__stream,&__pyx_n_s__object_hook,&__pyx_n_s__list_hook,&__pyx_n_s__use_list,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+ __Pyx_RefNannySetupContext("unpack");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[6] = {0,0,0,0,0,0};
+ values[1] = ((PyObject *)Py_None);
+ values[2] = ((PyObject *)Py_None);
+ values[4] = ((PyObject *)Py_None);
+ values[5] = ((PyObject *)__pyx_n_s__strict);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stream);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__object_hook);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__list_hook);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_list);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ case 5:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+ if (value) { values[5] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "unpack") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_stream = values[0];
+ __pyx_v_object_hook = values[1];
+ __pyx_v_list_hook = values[2];
+ if (values[3]) {
+ __pyx_v_use_list = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_use_list == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ } else {
+ __pyx_v_use_list = ((int)0);
+ }
+ __pyx_v_encoding = values[4];
+ __pyx_v_unicode_errors = values[5];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("unpack", 0, 1, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("msgpack._msgpack.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "msgpack/_msgpack.pyx":250
+ * unpack an object from stream.
+ * """
+ * return unpackb(stream.read(), use_list=use_list, # <<<<<<<<<<<<<<
+ * object_hook=object_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors)
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__unpackb); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_stream, __pyx_n_s__read); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_use_list); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__use_list), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "msgpack/_msgpack.pyx":251
+ * """
+ * return unpackb(stream.read(), use_list=use_list,
+ * object_hook=object_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors) # <<<<<<<<<<<<<<
+ *
+ * cdef class Unpacker(object):
+ */
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__object_hook), __pyx_v_object_hook) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__list_hook), __pyx_v_list_hook) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__encoding), __pyx_v_encoding) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__unicode_errors), __pyx_v_unicode_errors) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("msgpack._msgpack.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":297
+ * cdef char *unicode_errors
+ *
+ * def __cinit__(self): # <<<<<<<<<<<<<<
+ * self.buf = NULL
+ *
+ */
+
+static int __pyx_pf_7msgpack_8_msgpack_8Unpacker___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_7msgpack_8_msgpack_8Unpacker___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__cinit__");
+ if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) {
+ __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;}
+ if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1;
+
+ /* "msgpack/_msgpack.pyx":298
+ *
+ * def __cinit__(self):
+ * self.buf = NULL # <<<<<<<<<<<<<<
+ *
+ * def __dealloc__(self):
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf = NULL;
+
+ __pyx_r = 0;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":300
+ * self.buf = NULL
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * free(self.buf)
+ * self.buf = NULL
+ */
+
+static void __pyx_pf_7msgpack_8_msgpack_8Unpacker_1__dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pf_7msgpack_8_msgpack_8Unpacker_1__dealloc__(PyObject *__pyx_v_self) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__dealloc__");
+
+ /* "msgpack/_msgpack.pyx":301
+ *
+ * def __dealloc__(self):
+ * free(self.buf) # <<<<<<<<<<<<<<
+ * self.buf = NULL
+ *
+ */
+ free(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf);
+
+ /* "msgpack/_msgpack.pyx":302
+ * def __dealloc__(self):
+ * free(self.buf)
+ * self.buf = NULL # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, file_like=None, Py_ssize_t read_size=1024*1024, bint use_list=0,
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf = NULL;
+
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "msgpack/_msgpack.pyx":304
+ * self.buf = NULL
+ *
+ * def __init__(self, file_like=None, Py_ssize_t read_size=1024*1024, bint use_list=0, # <<<<<<<<<<<<<<
+ * object object_hook=None, object list_hook=None,
+ * encoding=None, unicode_errors='strict'):
+ */
+
+static int __pyx_pf_7msgpack_8_msgpack_8Unpacker_2__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_7msgpack_8_msgpack_8Unpacker_2__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_file_like = 0;
+ Py_ssize_t __pyx_v_read_size;
+ int __pyx_v_use_list;
+ PyObject *__pyx_v_object_hook = 0;
+ PyObject *__pyx_v_list_hook = 0;
+ PyObject *__pyx_v_encoding = 0;
+ PyObject *__pyx_v_unicode_errors = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ char *__pyx_t_4;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__file_like,&__pyx_n_s__read_size,&__pyx_n_s__use_list,&__pyx_n_s__object_hook,&__pyx_n_s__list_hook,&__pyx_n_s__encoding,&__pyx_n_s__unicode_errors,0};
+ __Pyx_RefNannySetupContext("__init__");
+ {
+ PyObject* values[7] = {0,0,0,0,0,0,0};
+ values[0] = ((PyObject *)Py_None);
+
+ /* "msgpack/_msgpack.pyx":305
+ *
+ * def __init__(self, file_like=None, Py_ssize_t read_size=1024*1024, bint use_list=0,
+ * object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<<
+ * encoding=None, unicode_errors='strict'):
+ * self.use_list = use_list
+ */
+ values[3] = ((PyObject *)Py_None);
+ values[4] = ((PyObject *)Py_None);
+
+ /* "msgpack/_msgpack.pyx":306
+ * def __init__(self, file_like=None, Py_ssize_t read_size=1024*1024, bint use_list=0,
+ * object object_hook=None, object list_hook=None,
+ * encoding=None, unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * self.use_list = use_list
+ * self.file_like = file_like
+ */
+ values[5] = ((PyObject *)Py_None);
+ values[6] = ((PyObject *)__pyx_n_s__strict);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__file_like);
+ if (value) { values[0] = value; kw_args--; }
+ }
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__read_size);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_list);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__object_hook);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__list_hook);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ case 5:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__encoding);
+ if (value) { values[5] = value; kw_args--; }
+ }
+ case 6:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unicode_errors);
+ if (value) { values[6] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_file_like = values[0];
+ if (values[1]) {
+ __pyx_v_read_size = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_read_size == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ } else {
+ __pyx_v_read_size = ((Py_ssize_t)1048576);
+ }
+ if (values[2]) {
+ __pyx_v_use_list = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_use_list == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ } else {
+ __pyx_v_use_list = ((int)0);
+ }
+ __pyx_v_object_hook = values[3];
+ __pyx_v_list_hook = values[4];
+ __pyx_v_encoding = values[5];
+ __pyx_v_unicode_errors = values[6];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "msgpack/_msgpack.pyx":307
+ * object object_hook=None, object list_hook=None,
+ * encoding=None, unicode_errors='strict'):
+ * self.use_list = use_list # <<<<<<<<<<<<<<
+ * self.file_like = file_like
+ * if file_like:
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->use_list = __pyx_v_use_list;
+
+ /* "msgpack/_msgpack.pyx":308
+ * encoding=None, unicode_errors='strict'):
+ * self.use_list = use_list
+ * self.file_like = file_like # <<<<<<<<<<<<<<
+ * if file_like:
+ * self.file_like_read = file_like.read
+ */
+ __Pyx_INCREF(__pyx_v_file_like);
+ __Pyx_GIVEREF(__pyx_v_file_like);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like = __pyx_v_file_like;
+
+ /* "msgpack/_msgpack.pyx":309
+ * self.use_list = use_list
+ * self.file_like = file_like
+ * if file_like: # <<<<<<<<<<<<<<
+ * self.file_like_read = file_like.read
+ * if not PyCallable_Check(self.file_like_read):
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_file_like); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":310
+ * self.file_like = file_like
+ * if file_like:
+ * self.file_like_read = file_like.read # <<<<<<<<<<<<<<
+ * if not PyCallable_Check(self.file_like_read):
+ * raise ValueError("`file_like.read` must be a callable.")
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_file_like, __pyx_n_s__read); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like_read);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like_read);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like_read = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":311
+ * if file_like:
+ * self.file_like_read = file_like.read
+ * if not PyCallable_Check(self.file_like_read): # <<<<<<<<<<<<<<
+ * raise ValueError("`file_like.read` must be a callable.")
+ * self.read_size = read_size
+ */
+ __pyx_t_2 = ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like_read;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_1 = (!PyCallable_Check(__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":312
+ * self.file_like_read = file_like.read
+ * if not PyCallable_Check(self.file_like_read):
+ * raise ValueError("`file_like.read` must be a callable.") # <<<<<<<<<<<<<<
+ * self.read_size = read_size
+ * self.buf = malloc(read_size)
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_21), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "msgpack/_msgpack.pyx":313
+ * if not PyCallable_Check(self.file_like_read):
+ * raise ValueError("`file_like.read` must be a callable.")
+ * self.read_size = read_size # <<<<<<<<<<<<<<
+ * self.buf = malloc(read_size)
+ * if self.buf == NULL:
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->read_size = __pyx_v_read_size;
+
+ /* "msgpack/_msgpack.pyx":314
+ * raise ValueError("`file_like.read` must be a callable.")
+ * self.read_size = read_size
+ * self.buf = malloc(read_size) # <<<<<<<<<<<<<<
+ * if self.buf == NULL:
+ * raise MemoryError("Unable to allocate internal buffer.")
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf = ((char *)malloc(__pyx_v_read_size));
+
+ /* "msgpack/_msgpack.pyx":315
+ * self.read_size = read_size
+ * self.buf = malloc(read_size)
+ * if self.buf == NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError("Unable to allocate internal buffer.")
+ * self.buf_size = read_size
+ */
+ __pyx_t_1 = (((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf == NULL);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":316
+ * self.buf = malloc(read_size)
+ * if self.buf == NULL:
+ * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<<
+ * self.buf_size = read_size
+ * self.buf_head = 0
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_MemoryError, ((PyObject *)__pyx_k_tuple_22), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "msgpack/_msgpack.pyx":317
+ * if self.buf == NULL:
+ * raise MemoryError("Unable to allocate internal buffer.")
+ * self.buf_size = read_size # <<<<<<<<<<<<<<
+ * self.buf_head = 0
+ * self.buf_tail = 0
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf_size = __pyx_v_read_size;
+
+ /* "msgpack/_msgpack.pyx":318
+ * raise MemoryError("Unable to allocate internal buffer.")
+ * self.buf_size = read_size
+ * self.buf_head = 0 # <<<<<<<<<<<<<<
+ * self.buf_tail = 0
+ * template_init(&self.ctx)
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf_head = 0;
+
+ /* "msgpack/_msgpack.pyx":319
+ * self.buf_size = read_size
+ * self.buf_head = 0
+ * self.buf_tail = 0 # <<<<<<<<<<<<<<
+ * template_init(&self.ctx)
+ * self.ctx.user.use_list = use_list
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->buf_tail = 0;
+
+ /* "msgpack/_msgpack.pyx":320
+ * self.buf_head = 0
+ * self.buf_tail = 0
+ * template_init(&self.ctx) # <<<<<<<<<<<<<<
+ * self.ctx.user.use_list = use_list
+ * self.ctx.user.object_hook = self.ctx.user.list_hook = NULL
+ */
+ template_init((&((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx));
+
+ /* "msgpack/_msgpack.pyx":321
+ * self.buf_tail = 0
+ * template_init(&self.ctx)
+ * self.ctx.user.use_list = use_list # <<<<<<<<<<<<<<
+ * self.ctx.user.object_hook = self.ctx.user.list_hook = NULL
+ * if object_hook is not None:
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.use_list = __pyx_v_use_list;
+
+ /* "msgpack/_msgpack.pyx":322
+ * template_init(&self.ctx)
+ * self.ctx.user.use_list = use_list
+ * self.ctx.user.object_hook = self.ctx.user.list_hook = NULL # <<<<<<<<<<<<<<
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.object_hook = ((PyObject *)NULL);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.list_hook = ((PyObject *)NULL);
+
+ /* "msgpack/_msgpack.pyx":323
+ * self.ctx.user.use_list = use_list
+ * self.ctx.user.object_hook = self.ctx.user.list_hook = NULL
+ * if object_hook is not None: # <<<<<<<<<<<<<<
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.")
+ */
+ __pyx_t_1 = (__pyx_v_object_hook != Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":324
+ * self.ctx.user.object_hook = self.ctx.user.list_hook = NULL
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook): # <<<<<<<<<<<<<<
+ * raise TypeError("object_hook must be a callable.")
+ * self.ctx.user.object_hook = object_hook
+ */
+ __pyx_t_1 = (!PyCallable_Check(__pyx_v_object_hook));
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":325
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<<
+ * self.ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_23), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "msgpack/_msgpack.pyx":326
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.")
+ * self.ctx.user.object_hook = object_hook # <<<<<<<<<<<<<<
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.object_hook = ((PyObject *)__pyx_v_object_hook);
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "msgpack/_msgpack.pyx":327
+ * raise TypeError("object_hook must be a callable.")
+ * self.ctx.user.object_hook = object_hook
+ * if list_hook is not None: # <<<<<<<<<<<<<<
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.")
+ */
+ __pyx_t_1 = (__pyx_v_list_hook != Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":328
+ * self.ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook): # <<<<<<<<<<<<<<
+ * raise TypeError("list_hook must be a callable.")
+ * self.ctx.user.list_hook = list_hook
+ */
+ __pyx_t_1 = (!PyCallable_Check(__pyx_v_list_hook));
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":329
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<<
+ * self.ctx.user.list_hook = list_hook
+ * if encoding is None:
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_24), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "msgpack/_msgpack.pyx":330
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.")
+ * self.ctx.user.list_hook = list_hook # <<<<<<<<<<<<<<
+ * if encoding is None:
+ * self.ctx.user.encoding = NULL
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.list_hook = ((PyObject *)__pyx_v_list_hook);
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "msgpack/_msgpack.pyx":331
+ * raise TypeError("list_hook must be a callable.")
+ * self.ctx.user.list_hook = list_hook
+ * if encoding is None: # <<<<<<<<<<<<<<
+ * self.ctx.user.encoding = NULL
+ * self.ctx.user.unicode_errors = NULL
+ */
+ __pyx_t_1 = (__pyx_v_encoding == Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":332
+ * self.ctx.user.list_hook = list_hook
+ * if encoding is None:
+ * self.ctx.user.encoding = NULL # <<<<<<<<<<<<<<
+ * self.ctx.user.unicode_errors = NULL
+ * else:
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.encoding = NULL;
+
+ /* "msgpack/_msgpack.pyx":333
+ * if encoding is None:
+ * self.ctx.user.encoding = NULL
+ * self.ctx.user.unicode_errors = NULL # <<<<<<<<<<<<<<
+ * else:
+ * if isinstance(encoding, unicode):
+ */
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.unicode_errors = NULL;
+ goto __pyx_L13;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":335
+ * self.ctx.user.unicode_errors = NULL
+ * else:
+ * if isinstance(encoding, unicode): # <<<<<<<<<<<<<<
+ * self._bencoding = encoding.encode('ascii')
+ * else:
+ */
+ __pyx_t_2 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_encoding, __pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":336
+ * else:
+ * if isinstance(encoding, unicode):
+ * self._bencoding = encoding.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._bencoding = encoding
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_encoding, __pyx_n_s__encode); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":338
+ * self._bencoding = encoding.encode('ascii')
+ * else:
+ * self._bencoding = encoding # <<<<<<<<<<<<<<
+ * self.ctx.user.encoding = PyBytes_AsString(self._bencoding)
+ * if isinstance(unicode_errors, unicode):
+ */
+ __Pyx_INCREF(__pyx_v_encoding);
+ __Pyx_GIVEREF(__pyx_v_encoding);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding = __pyx_v_encoding;
+ }
+ __pyx_L14:;
+
+ /* "msgpack/_msgpack.pyx":339
+ * else:
+ * self._bencoding = encoding
+ * self.ctx.user.encoding = PyBytes_AsString(self._bencoding) # <<<<<<<<<<<<<<
+ * if isinstance(unicode_errors, unicode):
+ * self._berrors = unicode_errors.encode('ascii')
+ */
+ __pyx_t_3 = ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_bencoding;
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_4 = PyBytes_AsString(__pyx_t_3); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.encoding = __pyx_t_4;
+
+ /* "msgpack/_msgpack.pyx":340
+ * self._bencoding = encoding
+ * self.ctx.user.encoding = PyBytes_AsString(self._bencoding)
+ * if isinstance(unicode_errors, unicode): # <<<<<<<<<<<<<<
+ * self._berrors = unicode_errors.encode('ascii')
+ * else:
+ */
+ __pyx_t_3 = ((PyObject *)((PyObject*)(&PyUnicode_Type)));
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_unicode_errors, __pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":341
+ * self.ctx.user.encoding = PyBytes_AsString(self._bencoding)
+ * if isinstance(unicode_errors, unicode):
+ * self._berrors = unicode_errors.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._berrors = unicode_errors
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_unicode_errors, __pyx_n_s__encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_26), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L15;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":343
+ * self._berrors = unicode_errors.encode('ascii')
+ * else:
+ * self._berrors = unicode_errors # <<<<<<<<<<<<<<
+ * self.ctx.user.unicode_errors = PyBytes_AsString(self._berrors)
+ *
+ */
+ __Pyx_INCREF(__pyx_v_unicode_errors);
+ __Pyx_GIVEREF(__pyx_v_unicode_errors);
+ __Pyx_GOTREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors);
+ __Pyx_DECREF(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors);
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors = __pyx_v_unicode_errors;
+ }
+ __pyx_L15:;
+
+ /* "msgpack/_msgpack.pyx":344
+ * else:
+ * self._berrors = unicode_errors
+ * self.ctx.user.unicode_errors = PyBytes_AsString(self._berrors) # <<<<<<<<<<<<<<
+ *
+ * def feed(self, object next_bytes):
+ */
+ __pyx_t_2 = ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->_berrors;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_4 = PyBytes_AsString(__pyx_t_2); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->ctx.user.unicode_errors = __pyx_t_4;
+ }
+ __pyx_L13:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":346
+ * self.ctx.user.unicode_errors = PyBytes_AsString(self._berrors)
+ *
+ * def feed(self, object next_bytes): # <<<<<<<<<<<<<<
+ * cdef char* buf
+ * cdef Py_ssize_t buf_len
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_3feed(PyObject *__pyx_v_self, PyObject *__pyx_v_next_bytes); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_8Unpacker_3feed[] = "Unpacker.feed(self, next_bytes)";
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_3feed(PyObject *__pyx_v_self, PyObject *__pyx_v_next_bytes) {
+ char *__pyx_v_buf;
+ Py_ssize_t __pyx_v_buf_len;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("feed");
+
+ /* "msgpack/_msgpack.pyx":349
+ * cdef char* buf
+ * cdef Py_ssize_t buf_len
+ * if self.file_like is not None: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * "unpacker.feed() is not be able to use with`file_like`.")
+ */
+ __pyx_t_1 = (((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->file_like != Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":350
+ * cdef Py_ssize_t buf_len
+ * if self.file_like is not None:
+ * raise AssertionError( # <<<<<<<<<<<<<<
+ * "unpacker.feed() is not be able to use with`file_like`.")
+ * PyObject_AsReadBuffer(next_bytes, &buf, &buf_len)
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, ((PyObject *)__pyx_k_tuple_28), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "msgpack/_msgpack.pyx":352
+ * raise AssertionError(
+ * "unpacker.feed() is not be able to use with`file_like`.")
+ * PyObject_AsReadBuffer(next_bytes, &buf, &buf_len) # <<<<<<<<<<<<<<
+ * self.append_buffer(buf, buf_len)
+ *
+ */
+ __pyx_t_3 = PyObject_AsReadBuffer(__pyx_v_next_bytes, ((const void* *)(&__pyx_v_buf)), (&__pyx_v_buf_len)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "msgpack/_msgpack.pyx":353
+ * "unpacker.feed() is not be able to use with`file_like`.")
+ * PyObject_AsReadBuffer(next_bytes, &buf, &buf_len)
+ * self.append_buffer(buf, buf_len) # <<<<<<<<<<<<<<
+ *
+ * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *)((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->__pyx_vtab)->append_buffer(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self), __pyx_v_buf, __pyx_v_buf_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.feed", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":355
+ * self.append_buffer(buf, buf_len)
+ *
+ * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): # <<<<<<<<<<<<<<
+ * cdef:
+ * char* buf = self.buf
+ */
+
+static PyObject *__pyx_f_7msgpack_8_msgpack_8Unpacker_append_buffer(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *__pyx_v_self, void *__pyx_v__buf, Py_ssize_t __pyx_v__buf_len) {
+ char *__pyx_v_buf;
+ size_t __pyx_v_head;
+ size_t __pyx_v_tail;
+ size_t __pyx_v_buf_size;
+ size_t __pyx_v_new_size;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("append_buffer");
+
+ /* "msgpack/_msgpack.pyx":357
+ * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+ * cdef:
+ * char* buf = self.buf # <<<<<<<<<<<<<<
+ * size_t head = self.buf_head
+ * size_t tail = self.buf_tail
+ */
+ __pyx_v_buf = __pyx_v_self->buf;
+
+ /* "msgpack/_msgpack.pyx":358
+ * cdef:
+ * char* buf = self.buf
+ * size_t head = self.buf_head # <<<<<<<<<<<<<<
+ * size_t tail = self.buf_tail
+ * size_t buf_size = self.buf_size
+ */
+ __pyx_v_head = __pyx_v_self->buf_head;
+
+ /* "msgpack/_msgpack.pyx":359
+ * char* buf = self.buf
+ * size_t head = self.buf_head
+ * size_t tail = self.buf_tail # <<<<<<<<<<<<<<
+ * size_t buf_size = self.buf_size
+ * size_t new_size
+ */
+ __pyx_v_tail = __pyx_v_self->buf_tail;
+
+ /* "msgpack/_msgpack.pyx":360
+ * size_t head = self.buf_head
+ * size_t tail = self.buf_tail
+ * size_t buf_size = self.buf_size # <<<<<<<<<<<<<<
+ * size_t new_size
+ *
+ */
+ __pyx_v_buf_size = __pyx_v_self->buf_size;
+
+ /* "msgpack/_msgpack.pyx":363
+ * size_t new_size
+ *
+ * if tail + _buf_len > buf_size: # <<<<<<<<<<<<<<
+ * if ((tail - head) + _buf_len)*2 < buf_size:
+ * # move to front.
+ */
+ __pyx_t_1 = ((__pyx_v_tail + __pyx_v__buf_len) > __pyx_v_buf_size);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":364
+ *
+ * if tail + _buf_len > buf_size:
+ * if ((tail - head) + _buf_len)*2 < buf_size: # <<<<<<<<<<<<<<
+ * # move to front.
+ * memmove(buf, buf + head, tail - head)
+ */
+ __pyx_t_1 = ((((__pyx_v_tail - __pyx_v_head) + __pyx_v__buf_len) * 2) < __pyx_v_buf_size);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":366
+ * if ((tail - head) + _buf_len)*2 < buf_size:
+ * # move to front.
+ * memmove(buf, buf + head, tail - head) # <<<<<<<<<<<<<<
+ * tail -= head
+ * head = 0
+ */
+ memmove(__pyx_v_buf, (__pyx_v_buf + __pyx_v_head), (__pyx_v_tail - __pyx_v_head));
+
+ /* "msgpack/_msgpack.pyx":367
+ * # move to front.
+ * memmove(buf, buf + head, tail - head)
+ * tail -= head # <<<<<<<<<<<<<<
+ * head = 0
+ * else:
+ */
+ __pyx_v_tail = (__pyx_v_tail - __pyx_v_head);
+
+ /* "msgpack/_msgpack.pyx":368
+ * memmove(buf, buf + head, tail - head)
+ * tail -= head
+ * head = 0 # <<<<<<<<<<<<<<
+ * else:
+ * # expand buffer.
+ */
+ __pyx_v_head = 0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":371
+ * else:
+ * # expand buffer.
+ * new_size = tail + _buf_len # <<<<<<<<<<<<<<
+ * if new_size < buf_size*2:
+ * new_size = buf_size*2
+ */
+ __pyx_v_new_size = (__pyx_v_tail + __pyx_v__buf_len);
+
+ /* "msgpack/_msgpack.pyx":372
+ * # expand buffer.
+ * new_size = tail + _buf_len
+ * if new_size < buf_size*2: # <<<<<<<<<<<<<<
+ * new_size = buf_size*2
+ * buf = realloc(buf, new_size)
+ */
+ __pyx_t_1 = (__pyx_v_new_size < (__pyx_v_buf_size * 2));
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":373
+ * new_size = tail + _buf_len
+ * if new_size < buf_size*2:
+ * new_size = buf_size*2 # <<<<<<<<<<<<<<
+ * buf = realloc(buf, new_size)
+ * if buf == NULL:
+ */
+ __pyx_v_new_size = (__pyx_v_buf_size * 2);
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "msgpack/_msgpack.pyx":374
+ * if new_size < buf_size*2:
+ * new_size = buf_size*2
+ * buf = realloc(buf, new_size) # <<<<<<<<<<<<<<
+ * if buf == NULL:
+ * # self.buf still holds old buffer and will be freed during
+ */
+ __pyx_v_buf = ((char *)realloc(__pyx_v_buf, __pyx_v_new_size));
+
+ /* "msgpack/_msgpack.pyx":375
+ * new_size = buf_size*2
+ * buf = realloc(buf, new_size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * # self.buf still holds old buffer and will be freed during
+ * # obj destruction
+ */
+ __pyx_t_1 = (__pyx_v_buf == NULL);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":378
+ * # self.buf still holds old buffer and will be freed during
+ * # obj destruction
+ * raise MemoryError("Unable to enlarge internal buffer.") # <<<<<<<<<<<<<<
+ * buf_size = new_size
+ *
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_MemoryError, ((PyObject *)__pyx_k_tuple_30), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "msgpack/_msgpack.pyx":379
+ * # obj destruction
+ * raise MemoryError("Unable to enlarge internal buffer.")
+ * buf_size = new_size # <<<<<<<<<<<<<<
+ *
+ * memcpy(buf + tail, (_buf), _buf_len)
+ */
+ __pyx_v_buf_size = __pyx_v_new_size;
+ }
+ __pyx_L4:;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "msgpack/_msgpack.pyx":381
+ * buf_size = new_size
+ *
+ * memcpy(buf + tail, (_buf), _buf_len) # <<<<<<<<<<<<<<
+ * self.buf = buf
+ * self.buf_head = head
+ */
+ memcpy((__pyx_v_buf + __pyx_v_tail), ((char *)__pyx_v__buf), __pyx_v__buf_len);
+
+ /* "msgpack/_msgpack.pyx":382
+ *
+ * memcpy(buf + tail, (_buf), _buf_len)
+ * self.buf = buf # <<<<<<<<<<<<<<
+ * self.buf_head = head
+ * self.buf_size = buf_size
+ */
+ __pyx_v_self->buf = __pyx_v_buf;
+
+ /* "msgpack/_msgpack.pyx":383
+ * memcpy(buf + tail, (_buf), _buf_len)
+ * self.buf = buf
+ * self.buf_head = head # <<<<<<<<<<<<<<
+ * self.buf_size = buf_size
+ * self.buf_tail = tail + _buf_len
+ */
+ __pyx_v_self->buf_head = __pyx_v_head;
+
+ /* "msgpack/_msgpack.pyx":384
+ * self.buf = buf
+ * self.buf_head = head
+ * self.buf_size = buf_size # <<<<<<<<<<<<<<
+ * self.buf_tail = tail + _buf_len
+ *
+ */
+ __pyx_v_self->buf_size = __pyx_v_buf_size;
+
+ /* "msgpack/_msgpack.pyx":385
+ * self.buf_head = head
+ * self.buf_size = buf_size
+ * self.buf_tail = tail + _buf_len # <<<<<<<<<<<<<<
+ *
+ * # prepare self.buf from file_like
+ */
+ __pyx_v_self->buf_tail = (__pyx_v_tail + __pyx_v__buf_len);
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.append_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":388
+ *
+ * # prepare self.buf from file_like
+ * cdef fill_buffer(self): # <<<<<<<<<<<<<<
+ * if self.file_like is not None:
+ * next_bytes = self.file_like_read(self.read_size)
+ */
+
+static PyObject *__pyx_f_7msgpack_8_msgpack_8Unpacker_fill_buffer(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *__pyx_v_self) {
+ PyObject *__pyx_v_next_bytes = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ char *__pyx_t_4;
+ Py_ssize_t __pyx_t_5;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("fill_buffer");
+
+ /* "msgpack/_msgpack.pyx":389
+ * # prepare self.buf from file_like
+ * cdef fill_buffer(self):
+ * if self.file_like is not None: # <<<<<<<<<<<<<<
+ * next_bytes = self.file_like_read(self.read_size)
+ * if next_bytes:
+ */
+ __pyx_t_1 = (__pyx_v_self->file_like != Py_None);
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":390
+ * cdef fill_buffer(self):
+ * if self.file_like is not None:
+ * next_bytes = self.file_like_read(self.read_size) # <<<<<<<<<<<<<<
+ * if next_bytes:
+ * self.append_buffer(PyBytes_AsString(next_bytes),
+ */
+ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->read_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_v_self->file_like_read, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_v_next_bytes = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":391
+ * if self.file_like is not None:
+ * next_bytes = self.file_like_read(self.read_size)
+ * if next_bytes: # <<<<<<<<<<<<<<
+ * self.append_buffer(PyBytes_AsString(next_bytes),
+ * PyBytes_Size(next_bytes))
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_next_bytes); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "msgpack/_msgpack.pyx":392
+ * next_bytes = self.file_like_read(self.read_size)
+ * if next_bytes:
+ * self.append_buffer(PyBytes_AsString(next_bytes), # <<<<<<<<<<<<<<
+ * PyBytes_Size(next_bytes))
+ * else:
+ */
+ __pyx_t_4 = PyBytes_AsString(__pyx_v_next_bytes); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "msgpack/_msgpack.pyx":393
+ * if next_bytes:
+ * self.append_buffer(PyBytes_AsString(next_bytes),
+ * PyBytes_Size(next_bytes)) # <<<<<<<<<<<<<<
+ * else:
+ * self.file_like = None
+ */
+ __pyx_t_5 = PyBytes_Size(__pyx_v_next_bytes); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->append_buffer(__pyx_v_self, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "msgpack/_msgpack.pyx":395
+ * PyBytes_Size(next_bytes))
+ * else:
+ * self.file_like = None # <<<<<<<<<<<<<<
+ *
+ * cpdef unpack(self):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->file_like);
+ __Pyx_DECREF(__pyx_v_self->file_like);
+ __pyx_v_self->file_like = Py_None;
+ }
+ __pyx_L4:;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.fill_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_next_bytes);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":397
+ * self.file_like = None
+ *
+ * cpdef unpack(self): # <<<<<<<<<<<<<<
+ * """unpack one object"""
+ * cdef int ret
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_4unpack(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_f_7msgpack_8_msgpack_8Unpacker_unpack(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *__pyx_v_self, int __pyx_skip_dispatch) {
+ int __pyx_v_ret;
+ PyObject *__pyx_v_o = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("unpack");
+ /* Check if called by wrapper */
+ if (unlikely(__pyx_skip_dispatch)) ;
+ /* Check if overriden in Python */
+ else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) {
+ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__unpack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_7msgpack_8_msgpack_8Unpacker_4unpack)) {
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ goto __pyx_L0;
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+
+ /* "msgpack/_msgpack.pyx":400
+ * """unpack one object"""
+ * cdef int ret
+ * while 1: # <<<<<<<<<<<<<<
+ * _gc_disable()
+ * ret = template_execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ */
+ while (1) {
+ if (!1) break;
+
+ /* "msgpack/_msgpack.pyx":401
+ * cdef int ret
+ * while 1:
+ * _gc_disable() # <<<<<<<<<<<<<<
+ * ret = template_execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ * _gc_enable()
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___gc_disable); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":402
+ * while 1:
+ * _gc_disable()
+ * ret = template_execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head) # <<<<<<<<<<<<<<
+ * _gc_enable()
+ * if ret == 1:
+ */
+ __pyx_t_3 = template_execute((&__pyx_v_self->ctx), __pyx_v_self->buf, __pyx_v_self->buf_tail, (&__pyx_v_self->buf_head)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_ret = __pyx_t_3;
+
+ /* "msgpack/_msgpack.pyx":403
+ * _gc_disable()
+ * ret = template_execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ * _gc_enable() # <<<<<<<<<<<<<<
+ * if ret == 1:
+ * o = template_data(&self.ctx)
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s___gc_enable); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":408
+ * template_init(&self.ctx)
+ * return o
+ * elif ret == 0: # <<<<<<<<<<<<<<
+ * if self.file_like is not None:
+ * self.fill_buffer()
+ */
+ switch (__pyx_v_ret) {
+
+ /* "msgpack/_msgpack.pyx":404
+ * ret = template_execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ * _gc_enable()
+ * if ret == 1: # <<<<<<<<<<<<<<
+ * o = template_data(&self.ctx)
+ * template_init(&self.ctx)
+ */
+ case 1:
+
+ /* "msgpack/_msgpack.pyx":405
+ * _gc_enable()
+ * if ret == 1:
+ * o = template_data(&self.ctx) # <<<<<<<<<<<<<<
+ * template_init(&self.ctx)
+ * return o
+ */
+ __pyx_t_1 = template_data((&__pyx_v_self->ctx)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_o = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":406
+ * if ret == 1:
+ * o = template_data(&self.ctx)
+ * template_init(&self.ctx) # <<<<<<<<<<<<<<
+ * return o
+ * elif ret == 0:
+ */
+ template_init((&__pyx_v_self->ctx));
+
+ /* "msgpack/_msgpack.pyx":407
+ * o = template_data(&self.ctx)
+ * template_init(&self.ctx)
+ * return o # <<<<<<<<<<<<<<
+ * elif ret == 0:
+ * if self.file_like is not None:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_o);
+ __pyx_r = __pyx_v_o;
+ goto __pyx_L0;
+ break;
+
+ /* "msgpack/_msgpack.pyx":408
+ * template_init(&self.ctx)
+ * return o
+ * elif ret == 0: # <<<<<<<<<<<<<<
+ * if self.file_like is not None:
+ * self.fill_buffer()
+ */
+ case 0:
+
+ /* "msgpack/_msgpack.pyx":409
+ * return o
+ * elif ret == 0:
+ * if self.file_like is not None: # <<<<<<<<<<<<<<
+ * self.fill_buffer()
+ * continue
+ */
+ __pyx_t_4 = (__pyx_v_self->file_like != Py_None);
+ if (__pyx_t_4) {
+
+ /* "msgpack/_msgpack.pyx":410
+ * elif ret == 0:
+ * if self.file_like is not None:
+ * self.fill_buffer() # <<<<<<<<<<<<<<
+ * continue
+ * raise StopIteration("No more unpack data.")
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->fill_buffer(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":411
+ * if self.file_like is not None:
+ * self.fill_buffer()
+ * continue # <<<<<<<<<<<<<<
+ * raise StopIteration("No more unpack data.")
+ * else:
+ */
+ goto __pyx_L3_continue;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "msgpack/_msgpack.pyx":412
+ * self.fill_buffer()
+ * continue
+ * raise StopIteration("No more unpack data.") # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError("Unpack failed: error = %d" % (ret,))
+ */
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_StopIteration, ((PyObject *)__pyx_k_tuple_32), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ default:
+
+ /* "msgpack/_msgpack.pyx":414
+ * raise StopIteration("No more unpack data.")
+ * else:
+ * raise ValueError("Unpack failed: error = %d" % (ret,)) # <<<<<<<<<<<<<<
+ *
+ * def __iter__(self):
+ */
+ __pyx_t_1 = PyInt_FromLong(__pyx_v_ret); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_33), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __pyx_L3_continue:;
+ }
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_o);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":397
+ * self.file_like = None
+ *
+ * cpdef unpack(self): # <<<<<<<<<<<<<<
+ * """unpack one object"""
+ * cdef int ret
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_4unpack(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_7msgpack_8_msgpack_8Unpacker_4unpack[] = "Unpacker.unpack(self)\nunpack one object";
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_4unpack(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("unpack");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *)((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->__pyx_vtab)->unpack(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self), 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":416
+ * raise ValueError("Unpack failed: error = %d" % (ret,))
+ *
+ * def __iter__(self): # <<<<<<<<<<<<<<
+ * return self
+ *
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_5__iter__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_5__iter__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__iter__");
+
+ /* "msgpack/_msgpack.pyx":417
+ *
+ * def __iter__(self):
+ * return self # <<<<<<<<<<<<<<
+ *
+ * def __next__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self);
+ __pyx_r = __pyx_v_self;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "msgpack/_msgpack.pyx":419
+ * return self
+ *
+ * def __next__(self): # <<<<<<<<<<<<<<
+ * return self.unpack()
+ *
+ */
+
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_6__next__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_7msgpack_8_msgpack_8Unpacker_6__next__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__next__");
+
+ /* "msgpack/_msgpack.pyx":420
+ *
+ * def __next__(self):
+ * return self.unpack() # <<<<<<<<<<<<<<
+ *
+ * # for debug.
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker *)((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self)->__pyx_vtab)->unpack(((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)__pyx_v_self), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("msgpack._msgpack.Unpacker.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_7msgpack_8_msgpack_Packer __pyx_vtable_7msgpack_8_msgpack_Packer;
+
+static PyObject *__pyx_tp_new_7msgpack_8_msgpack_Packer(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_7msgpack_8_msgpack_Packer *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_7msgpack_8_msgpack_Packer *)o);
+ p->__pyx_vtab = __pyx_vtabptr_7msgpack_8_msgpack_Packer;
+ p->_default = Py_None; Py_INCREF(Py_None);
+ p->_bencoding = Py_None; Py_INCREF(Py_None);
+ p->_berrors = Py_None; Py_INCREF(Py_None);
+ if (__pyx_pf_7msgpack_8_msgpack_6Packer___cinit__(o, __pyx_empty_tuple, NULL) < 0) {
+ Py_DECREF(o); o = 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7msgpack_8_msgpack_Packer(PyObject *o) {
+ struct __pyx_obj_7msgpack_8_msgpack_Packer *p = (struct __pyx_obj_7msgpack_8_msgpack_Packer *)o;
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++Py_REFCNT(o);
+ __pyx_pf_7msgpack_8_msgpack_6Packer_2__dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --Py_REFCNT(o);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_XDECREF(p->_default);
+ Py_XDECREF(p->_bencoding);
+ Py_XDECREF(p->_berrors);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_7msgpack_8_msgpack_Packer(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7msgpack_8_msgpack_Packer *p = (struct __pyx_obj_7msgpack_8_msgpack_Packer *)o;
+ if (p->_default) {
+ e = (*v)(p->_default, a); if (e) return e;
+ }
+ if (p->_bencoding) {
+ e = (*v)(p->_bencoding, a); if (e) return e;
+ }
+ if (p->_berrors) {
+ e = (*v)(p->_berrors, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7msgpack_8_msgpack_Packer(PyObject *o) {
+ struct __pyx_obj_7msgpack_8_msgpack_Packer *p = (struct __pyx_obj_7msgpack_8_msgpack_Packer *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->_default);
+ p->_default = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_bencoding);
+ p->_bencoding = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_berrors);
+ p->_berrors = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyMethodDef __pyx_methods_7msgpack_8_msgpack_Packer[] = {
+ {__Pyx_NAMESTR("pack"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_6Packer_3pack, METH_O, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_6Packer_3pack)},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_Packer = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_Packer = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_Packer = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_Packer = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+static PyTypeObject __pyx_type_7msgpack_8_msgpack_Packer = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("msgpack._msgpack.Packer"), /*tp_name*/
+ sizeof(struct __pyx_obj_7msgpack_8_msgpack_Packer), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7msgpack_8_msgpack_Packer, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_Packer, /*tp_as_number*/
+ &__pyx_tp_as_sequence_Packer, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_Packer, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_Packer, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ __Pyx_DOCSTR("Packer(default=None, encoding='utf-8', unicode_errors='strict')\nMessagePack Packer\n\n usage:\n\n packer = Packer()\n astream.write(packer.pack(a))\n astream.write(packer.pack(b))\n "), /*tp_doc*/
+ __pyx_tp_traverse_7msgpack_8_msgpack_Packer, /*tp_traverse*/
+ __pyx_tp_clear_7msgpack_8_msgpack_Packer, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7msgpack_8_msgpack_Packer, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_7msgpack_8_msgpack_6Packer_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7msgpack_8_msgpack_Packer, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+static struct __pyx_vtabstruct_7msgpack_8_msgpack_Unpacker __pyx_vtable_7msgpack_8_msgpack_Unpacker;
+
+static PyObject *__pyx_tp_new_7msgpack_8_msgpack_Unpacker(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_7msgpack_8_msgpack_Unpacker *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)o);
+ p->__pyx_vtab = __pyx_vtabptr_7msgpack_8_msgpack_Unpacker;
+ p->file_like = Py_None; Py_INCREF(Py_None);
+ p->file_like_read = Py_None; Py_INCREF(Py_None);
+ p->object_hook = Py_None; Py_INCREF(Py_None);
+ p->_bencoding = Py_None; Py_INCREF(Py_None);
+ p->_berrors = Py_None; Py_INCREF(Py_None);
+ if (__pyx_pf_7msgpack_8_msgpack_8Unpacker___cinit__(o, __pyx_empty_tuple, NULL) < 0) {
+ Py_DECREF(o); o = 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7msgpack_8_msgpack_Unpacker(PyObject *o) {
+ struct __pyx_obj_7msgpack_8_msgpack_Unpacker *p = (struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)o;
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++Py_REFCNT(o);
+ __pyx_pf_7msgpack_8_msgpack_8Unpacker_1__dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --Py_REFCNT(o);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_XDECREF(p->file_like);
+ Py_XDECREF(p->file_like_read);
+ Py_XDECREF(p->object_hook);
+ Py_XDECREF(p->_bencoding);
+ Py_XDECREF(p->_berrors);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_7msgpack_8_msgpack_Unpacker(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7msgpack_8_msgpack_Unpacker *p = (struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)o;
+ if (p->file_like) {
+ e = (*v)(p->file_like, a); if (e) return e;
+ }
+ if (p->file_like_read) {
+ e = (*v)(p->file_like_read, a); if (e) return e;
+ }
+ if (p->object_hook) {
+ e = (*v)(p->object_hook, a); if (e) return e;
+ }
+ if (p->_bencoding) {
+ e = (*v)(p->_bencoding, a); if (e) return e;
+ }
+ if (p->_berrors) {
+ e = (*v)(p->_berrors, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7msgpack_8_msgpack_Unpacker(PyObject *o) {
+ struct __pyx_obj_7msgpack_8_msgpack_Unpacker *p = (struct __pyx_obj_7msgpack_8_msgpack_Unpacker *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->file_like);
+ p->file_like = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->file_like_read);
+ p->file_like_read = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->object_hook);
+ p->object_hook = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_bencoding);
+ p->_bencoding = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_berrors);
+ p->_berrors = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyMethodDef __pyx_methods_7msgpack_8_msgpack_Unpacker[] = {
+ {__Pyx_NAMESTR("feed"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_8Unpacker_3feed, METH_O, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_8Unpacker_3feed)},
+ {__Pyx_NAMESTR("unpack"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_8Unpacker_4unpack, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_7msgpack_8_msgpack_8Unpacker_4unpack)},
+ {__Pyx_NAMESTR("__next__"), (PyCFunction)__pyx_pf_7msgpack_8_msgpack_8Unpacker_6__next__, METH_NOARGS|METH_COEXIST, __Pyx_DOCSTR(0)},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_Unpacker = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_Unpacker = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_Unpacker = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_Unpacker = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+/* Type object for msgpack._msgpack.Unpacker (the streaming deserializer).
+ * Wires up the GC hooks (tp_dealloc/tp_traverse/tp_clear), the iterator
+ * protocol (__iter__/__next__), the method table, __init__ and tp_new.
+ * Slot ORDER is fixed by the PyTypeObject layout — do not reorder. */
+static PyTypeObject __pyx_type_7msgpack_8_msgpack_Unpacker = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("msgpack._msgpack.Unpacker"), /*tp_name*/
+ sizeof(struct __pyx_obj_7msgpack_8_msgpack_Unpacker), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7msgpack_8_msgpack_Unpacker, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_Unpacker, /*tp_as_number*/
+ &__pyx_tp_as_sequence_Unpacker, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_Unpacker, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_Unpacker, /*tp_as_buffer*/
+ /* HAVE_GC: instances participate in cyclic GC (they hold object_hook /
+  * list_hook / file_like references); BASETYPE: subclassing allowed. */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ __Pyx_DOCSTR("Unpacker(file_like=None, Py_ssize_t read_size=1048576, int use_list=0, object_hook=None, list_hook=None, encoding=None, unicode_errors='strict')\n\n Streaming unpacker.\n read_size is used like file_like.read(read_size)\n\n `file_like` is a file-like object having `.read(n)` method.\n When `Unpacker` initialized with `file_like`, unpacker reads serialized data\n from it and `.feed()` method is not usable.\n\n `read_size` is used as `file_like.read(read_size)`. (default: 1M)\n\n If `use_list` is true, msgpack list is deserialized to Python list.\n Otherwise, it is deserialized to Python tuple. (default: False)\n\n `object_hook` is same to simplejson. If it is not None, it should be callable\n and Unpacker calls it when deserializing key-value.\n\n `encoding` is encoding used for decoding msgpack bytes. If it is None (default),\n msgpack bytes is deserialized to Python bytes.\n\n `unicode_errors` is used for decoding bytes.\n\n example::\n\n unpacker = Unpacker()\n while 1:\n buf = astream.read()\n unpacker.feed(buf)\n for o in unpacker:\n do_something(o)\n "), /*tp_doc*/
+ __pyx_tp_traverse_7msgpack_8_msgpack_Unpacker, /*tp_traverse*/
+ __pyx_tp_clear_7msgpack_8_msgpack_Unpacker, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ __pyx_pf_7msgpack_8_msgpack_8Unpacker_5__iter__, /*tp_iter*/
+ __pyx_pf_7msgpack_8_msgpack_8Unpacker_6__next__, /*tp_iternext*/
+ __pyx_methods_7msgpack_8_msgpack_Unpacker, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_7msgpack_8_msgpack_8Unpacker_2__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7msgpack_8_msgpack_Unpacker, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+
+/* Module-level method table: empty (sentinel only). The module functions
+ * pack/packb/unpack/unpackb are created as PyCFunction objects during
+ * module init instead of being listed here. */
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+/* Python 3 module definition for "_msgpack"; m_size = -1 means the module
+ * keeps state in globals and does not support multiple sub-interpreters. */
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ __Pyx_NAMESTR("_msgpack"),
+ 0, /* m_doc */
+ -1, /* m_size */
+ __pyx_methods /* m_methods */,
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+
+/* Table of every string constant the module needs, initialized in one pass
+ * by __Pyx_InitStrings() from __Pyx_InitGlobals(). Each entry pairs the
+ * global PyObject* to fill in with the C literal and its size.
+ * NOTE(review): the trailing flag columns appear to distinguish plain
+ * string constants (__pyx_kp_s_*, final flag 0) from interned identifier
+ * names (__pyx_n_s_*, final flag 1) — confirm against the
+ * __Pyx_StringTabEntry definition earlier in this file. */
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
+ {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0},
+ {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0},
+ {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0},
+ {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0},
+ {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0},
+ {&__pyx_kp_s_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 0, 1, 0},
+ {&__pyx_kp_s_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 0, 1, 0},
+ {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0},
+ {&__pyx_kp_s_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 0, 1, 0},
+ {&__pyx_kp_s_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 0, 1, 0},
+ {&__pyx_n_s_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 0, 1, 1},
+ {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
+ {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
+ {&__pyx_n_s__AssertionError, __pyx_k__AssertionError, sizeof(__pyx_k__AssertionError), 0, 0, 1, 1},
+ {&__pyx_n_s__MemoryError, __pyx_k__MemoryError, sizeof(__pyx_k__MemoryError), 0, 0, 1, 1},
+ {&__pyx_n_s__StopIteration, __pyx_k__StopIteration, sizeof(__pyx_k__StopIteration), 0, 0, 1, 1},
+ {&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
+ {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
+ {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
+ {&__pyx_n_s___gc_disable, __pyx_k___gc_disable, sizeof(__pyx_k___gc_disable), 0, 0, 1, 1},
+ {&__pyx_n_s___gc_enable, __pyx_k___gc_enable, sizeof(__pyx_k___gc_enable), 0, 0, 1, 1},
+ {&__pyx_n_s__ascii, __pyx_k__ascii, sizeof(__pyx_k__ascii), 0, 0, 1, 1},
+ {&__pyx_n_s__default, __pyx_k__default, sizeof(__pyx_k__default), 0, 0, 1, 1},
+ {&__pyx_n_s__disable, __pyx_k__disable, sizeof(__pyx_k__disable), 0, 0, 1, 1},
+ {&__pyx_n_s__enable, __pyx_k__enable, sizeof(__pyx_k__enable), 0, 0, 1, 1},
+ {&__pyx_n_s__encode, __pyx_k__encode, sizeof(__pyx_k__encode), 0, 0, 1, 1},
+ {&__pyx_n_s__encoding, __pyx_k__encoding, sizeof(__pyx_k__encoding), 0, 0, 1, 1},
+ {&__pyx_n_s__file_like, __pyx_k__file_like, sizeof(__pyx_k__file_like), 0, 0, 1, 1},
+ {&__pyx_n_s__gc, __pyx_k__gc, sizeof(__pyx_k__gc), 0, 0, 1, 1},
+ {&__pyx_n_s__list_hook, __pyx_k__list_hook, sizeof(__pyx_k__list_hook), 0, 0, 1, 1},
+ {&__pyx_n_s__o, __pyx_k__o, sizeof(__pyx_k__o), 0, 0, 1, 1},
+ {&__pyx_n_s__object_hook, __pyx_k__object_hook, sizeof(__pyx_k__object_hook), 0, 0, 1, 1},
+ {&__pyx_n_s__pack, __pyx_k__pack, sizeof(__pyx_k__pack), 0, 0, 1, 1},
+ {&__pyx_n_s__packb, __pyx_k__packb, sizeof(__pyx_k__packb), 0, 0, 1, 1},
+ {&__pyx_n_s__packed, __pyx_k__packed, sizeof(__pyx_k__packed), 0, 0, 1, 1},
+ {&__pyx_n_s__read, __pyx_k__read, sizeof(__pyx_k__read), 0, 0, 1, 1},
+ {&__pyx_n_s__read_size, __pyx_k__read_size, sizeof(__pyx_k__read_size), 0, 0, 1, 1},
+ {&__pyx_n_s__stream, __pyx_k__stream, sizeof(__pyx_k__stream), 0, 0, 1, 1},
+ {&__pyx_n_s__strict, __pyx_k__strict, sizeof(__pyx_k__strict), 0, 0, 1, 1},
+ {&__pyx_n_s__unicode_errors, __pyx_k__unicode_errors, sizeof(__pyx_k__unicode_errors), 0, 0, 1, 1},
+ {&__pyx_n_s__unpack, __pyx_k__unpack, sizeof(__pyx_k__unpack), 0, 0, 1, 1},
+ {&__pyx_n_s__unpackb, __pyx_k__unpackb, sizeof(__pyx_k__unpackb), 0, 0, 1, 1},
+ {&__pyx_n_s__use_list, __pyx_k__use_list, sizeof(__pyx_k__use_list), 0, 0, 1, 1},
+ {&__pyx_n_s__write, __pyx_k__write, sizeof(__pyx_k__write), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+/* Look up each builtin exception the module raises (MemoryError, TypeError,
+ * ValueError, AssertionError, StopIteration) once at import time and cache
+ * it in a global, so raise sites need no per-raise lookup. Returns 0 on
+ * success, -1 on failure with the failing .pyx line recorded in
+ * __pyx_lineno for the ImportError traceback. */
+static int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_MemoryError = __Pyx_GetName(__pyx_b, __pyx_n_s__MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_AssertionError = __Pyx_GetName(__pyx_b, __pyx_n_s__AssertionError); if (!__pyx_builtin_AssertionError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_StopIteration = __Pyx_GetName(__pyx_b, __pyx_n_s__StopIteration); if (!__pyx_builtin_StopIteration) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+/* Pre-build the constant 1-tuples used as raise/call arguments throughout
+ * _msgpack.pyx (one tuple per distinct error message, plus the 'ascii'
+ * argument tuples for str.encode), so no allocation happens at raise time.
+ * Each tuple borrows an already-initialized string from __pyx_string_tab.
+ * Returns 0 on success, -1 on allocation failure with the originating .pyx
+ * line recorded for the traceback. */
+static int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
+
+ /* "msgpack/_msgpack.pyx":58
+ * self.pk.buf = malloc(buf_size);
+ * if self.pk.buf == NULL:
+ * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<<
+ * self.pk.buf_size = buf_size
+ * self.pk.length = 0
+ */
+ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
+ PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2));
+
+ /* "msgpack/_msgpack.pyx":65
+ * if default is not None:
+ * if not PyCallable_Check(default):
+ * raise TypeError("default must be a callable.") # <<<<<<<<<<<<<<
+ * self._default = default
+ * if encoding is None:
+ */
+ __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_5));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_4));
+ PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_s_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5));
+
+ /* "msgpack/_msgpack.pyx":72
+ * else:
+ * if isinstance(encoding, unicode):
+ * self._bencoding = encoding.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._bencoding = encoding
+ */
+ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
+
+ /* "msgpack/_msgpack.pyx":77
+ * self.encoding = PyBytes_AsString(self._bencoding)
+ * if isinstance(unicode_errors, unicode):
+ * self._berrors = unicode_errors.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._berrors = unicode_errors
+ */
+ __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_7));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7));
+
+ /* "msgpack/_msgpack.pyx":95
+ *
+ * if nest_limit < 0:
+ * raise ValueError("Too deep.") # <<<<<<<<<<<<<<
+ *
+ * if o is None:
+ */
+ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_9));
+ PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_s_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10));
+
+ /* "msgpack/_msgpack.pyx":124
+ * elif PyUnicode_Check(o):
+ * if not self.encoding:
+ * raise TypeError("Can't encode utf-8 no encoding is specified") # <<<<<<<<<<<<<<
+ * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ * rawval = o
+ */
+ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_11));
+ PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_s_11));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12));
+
+ /* "msgpack/_msgpack.pyx":212
+ * else:
+ * if isinstance(encoding, unicode):
+ * bencoding = encoding.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * bencoding = encoding
+ */
+ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14));
+
+ /* "msgpack/_msgpack.pyx":216
+ * bencoding = encoding
+ * if isinstance(unicode_errors, unicode):
+ * berrors = unicode_errors.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * berrors = unicode_errors
+ */
+ __pyx_k_tuple_15 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_15));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15));
+
+ /* "msgpack/_msgpack.pyx":229
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<<
+ * ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ */
+ __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_17));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_16));
+ PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17));
+
+ /* "msgpack/_msgpack.pyx":233
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<<
+ * ctx.user.list_hook = list_hook
+ * _gc_disable()
+ */
+ __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_19));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_18));
+ PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_kp_s_18));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19));
+
+ /* "msgpack/_msgpack.pyx":312
+ * self.file_like_read = file_like.read
+ * if not PyCallable_Check(self.file_like_read):
+ * raise ValueError("`file_like.read` must be a callable.") # <<<<<<<<<<<<<<
+ * self.read_size = read_size
+ * self.buf = malloc(read_size)
+ */
+ __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_21));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_20));
+ PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_kp_s_20));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21));
+
+ /* "msgpack/_msgpack.pyx":316
+ * self.buf = malloc(read_size)
+ * if self.buf == NULL:
+ * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<<
+ * self.buf_size = read_size
+ * self.buf_head = 0
+ */
+ __pyx_k_tuple_22 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_22));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
+ PyTuple_SET_ITEM(__pyx_k_tuple_22, 0, ((PyObject *)__pyx_kp_s_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_22));
+
+ /* "msgpack/_msgpack.pyx":325
+ * if object_hook is not None:
+ * if not PyCallable_Check(object_hook):
+ * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<<
+ * self.ctx.user.object_hook = object_hook
+ * if list_hook is not None:
+ */
+ __pyx_k_tuple_23 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_23));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_16));
+ PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23));
+
+ /* "msgpack/_msgpack.pyx":329
+ * if list_hook is not None:
+ * if not PyCallable_Check(list_hook):
+ * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<<
+ * self.ctx.user.list_hook = list_hook
+ * if encoding is None:
+ */
+ __pyx_k_tuple_24 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_24));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_18));
+ PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, ((PyObject *)__pyx_kp_s_18));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24));
+
+ /* "msgpack/_msgpack.pyx":336
+ * else:
+ * if isinstance(encoding, unicode):
+ * self._bencoding = encoding.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._bencoding = encoding
+ */
+ __pyx_k_tuple_25 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_25));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25));
+
+ /* "msgpack/_msgpack.pyx":341
+ * self.ctx.user.encoding = PyBytes_AsString(self._bencoding)
+ * if isinstance(unicode_errors, unicode):
+ * self._berrors = unicode_errors.encode('ascii') # <<<<<<<<<<<<<<
+ * else:
+ * self._berrors = unicode_errors
+ */
+ __pyx_k_tuple_26 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_26));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii));
+ PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, ((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ascii));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26));
+
+ /* "msgpack/_msgpack.pyx":350
+ * cdef Py_ssize_t buf_len
+ * if self.file_like is not None:
+ * raise AssertionError( # <<<<<<<<<<<<<<
+ * "unpacker.feed() is not be able to use with`file_like`.")
+ * PyObject_AsReadBuffer(next_bytes, &buf, &buf_len)
+ */
+ __pyx_k_tuple_28 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_28));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_27));
+ PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_kp_s_27));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_27));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28));
+
+ /* "msgpack/_msgpack.pyx":378
+ * # self.buf still holds old buffer and will be freed during
+ * # obj destruction
+ * raise MemoryError("Unable to enlarge internal buffer.") # <<<<<<<<<<<<<<
+ * buf_size = new_size
+ *
+ */
+ __pyx_k_tuple_30 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_30));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_29));
+ PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, ((PyObject *)__pyx_kp_s_29));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_29));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30));
+
+ /* "msgpack/_msgpack.pyx":412
+ * self.fill_buffer()
+ * continue
+ * raise StopIteration("No more unpack data.") # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError("Unpack failed: error = %d" % (ret,))
+ */
+ __pyx_k_tuple_32 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_32));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_31));
+ PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, ((PyObject *)__pyx_kp_s_31));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_31));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32));
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+/* Initialize module-wide constant objects: interns/creates every string in
+ * __pyx_string_tab and builds the cached int 0. Returns 0 on success,
+ * -1 on failure (source position recorded for the traceback). */
+static int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+/* Module initialization entry point: init_msgpack (Python 2, returns void)
+ * or PyInit__msgpack (Python 3, returns the module). Creates the module
+ * object, initializes cached strings/builtins/constant tuples, readies the
+ * Packer and Unpacker extension types (wiring their C-level vtables),
+ * imports the bool/complex type objects, then executes the module-level
+ * statements from _msgpack.pyx (the gc import and the pack/packb/
+ * unpack/unpackb function definitions). On any failure, decrefs the
+ * half-built module and raises ImportError. */
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_msgpack(void); /*proto*/
+PyMODINIT_FUNC init_msgpack(void)
+#else
+PyMODINIT_FUNC PyInit__msgpack(void); /*proto*/
+PyMODINIT_FUNC PyInit__msgpack(void)
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_REFNANNY
+ /* Refnanny (reference-count debugging) is optional: prefer a standalone
+  * "refnanny" module, fall back to the one shipped inside Cython. */
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ #endif
+ __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__msgpack(void)");
+ if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef __pyx_binding_PyCFunctionType_USED
+ if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_msgpack"), __pyx_methods, 0, 0, PYTHON_API_VERSION);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ #if PY_MAJOR_VERSION < 3
+ /* Py_InitModule4 returns a borrowed reference on Py2; own it so the
+  * error path's Py_DECREF is balanced on both major versions. */
+ Py_INCREF(__pyx_m);
+ #endif
+ __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ /*--- Initialize various global constants etc. ---*/
+ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_module_is_main_msgpack___msgpack) {
+ if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ }
+ /*--- Builtin init code ---*/
+ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Constants init code ---*/
+ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Global init code ---*/
+ /*--- Variable export code ---*/
+ /*--- Function export code ---*/
+ /*--- Type init code ---*/
+ __pyx_vtabptr_7msgpack_8_msgpack_Packer = &__pyx_vtable_7msgpack_8_msgpack_Packer;
+ __pyx_vtable_7msgpack_8_msgpack_Packer._pack = (int (*)(struct __pyx_obj_7msgpack_8_msgpack_Packer *, PyObject *, struct __pyx_opt_args_7msgpack_8_msgpack_6Packer__pack *__pyx_optional_args))__pyx_f_7msgpack_8_msgpack_6Packer__pack;
+ if (PyType_Ready(&__pyx_type_7msgpack_8_msgpack_Packer) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_7msgpack_8_msgpack_Packer.tp_dict, __pyx_vtabptr_7msgpack_8_msgpack_Packer) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "Packer", (PyObject *)&__pyx_type_7msgpack_8_msgpack_Packer) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_7msgpack_8_msgpack_Packer = &__pyx_type_7msgpack_8_msgpack_Packer;
+ __pyx_vtabptr_7msgpack_8_msgpack_Unpacker = &__pyx_vtable_7msgpack_8_msgpack_Unpacker;
+ __pyx_vtable_7msgpack_8_msgpack_Unpacker.append_buffer = (PyObject *(*)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *, void *, Py_ssize_t))__pyx_f_7msgpack_8_msgpack_8Unpacker_append_buffer;
+ __pyx_vtable_7msgpack_8_msgpack_Unpacker.fill_buffer = (PyObject *(*)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *))__pyx_f_7msgpack_8_msgpack_8Unpacker_fill_buffer;
+ __pyx_vtable_7msgpack_8_msgpack_Unpacker.unpack = (PyObject *(*)(struct __pyx_obj_7msgpack_8_msgpack_Unpacker *, int __pyx_skip_dispatch))__pyx_f_7msgpack_8_msgpack_8Unpacker_unpack;
+ if (PyType_Ready(&__pyx_type_7msgpack_8_msgpack_Unpacker) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_7msgpack_8_msgpack_Unpacker.tp_dict, __pyx_vtabptr_7msgpack_8_msgpack_Unpacker) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "Unpacker", (PyObject *)&__pyx_type_7msgpack_8_msgpack_Unpacker) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_7msgpack_8_msgpack_Unpacker = &__pyx_type_7msgpack_8_msgpack_Unpacker;
+ /*--- Type import code ---*/
+ __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Variable import code ---*/
+ /*--- Function import code ---*/
+ /*--- Execution code ---*/
+
+ /* "msgpack/_msgpack.pyx":13
+ * from libc.stdlib cimport *
+ * from libc.string cimport *
+ * import gc # <<<<<<<<<<<<<<
+ * _gc_disable = gc.disable
+ * _gc_enable = gc.enable
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__gc), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gc, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":14
+ * from libc.string cimport *
+ * import gc
+ * _gc_disable = gc.disable # <<<<<<<<<<<<<<
+ * _gc_enable = gc.enable
+ *
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gc); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__disable); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s___gc_disable, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "msgpack/_msgpack.pyx":15
+ * import gc
+ * _gc_disable = gc.disable
+ * _gc_enable = gc.enable # <<<<<<<<<<<<<<
+ *
+ * cdef extern from "pack.h":
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__gc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__enable); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s___gc_enable, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":36
+ * int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+ *
+ * cdef int DEFAULT_RECURSE_LIMIT=511 # <<<<<<<<<<<<<<
+ *
+ * cdef class Packer(object):
+ */
+ __pyx_v_7msgpack_8_msgpack_DEFAULT_RECURSE_LIMIT = 511;
+
+ /* "msgpack/_msgpack.pyx":85
+ * free(self.pk.buf);
+ *
+ * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<<
+ * cdef long long llval
+ * cdef unsigned long long ullval
+ */
+ __pyx_k_8 = __pyx_v_7msgpack_8_msgpack_DEFAULT_RECURSE_LIMIT;
+
+ /* "msgpack/_msgpack.pyx":162
+ *
+ *
+ * def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * """
+ * pack an object `o` and write it to stream)."""
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_8_msgpack_pack, NULL, __pyx_n_s_34); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pack, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":168
+ * stream.write(packer.pack(o))
+ *
+ * def packb(object o, default=None, encoding='utf-8', unicode_errors='strict'): # <<<<<<<<<<<<<<
+ * """
+ * pack o and return packed bytes."""
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_8_msgpack_1packb, NULL, __pyx_n_s_34); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__packb, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":196
+ *
+ *
+ * def unpackb(object packed, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"): # <<<<<<<<<<<<<<
+ * """
+ * Unpack packed_bytes to object. Returns an unpacked object."""
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_8_msgpack_2unpackb, NULL, __pyx_n_s_34); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__unpackb, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":246
+ *
+ *
+ * def unpack(object stream, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"): # <<<<<<<<<<<<<<
+ * """
+ * unpack an object from stream.
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_8_msgpack_3unpack, NULL, __pyx_n_s_34); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__unpack, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "msgpack/_msgpack.pyx":1
+ * # coding: utf-8 # <<<<<<<<<<<<<<
+ * #cython: embedsignature=True
+ *
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ if (__pyx_m) {
+ __Pyx_AddTraceback("init msgpack._msgpack", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ Py_DECREF(__pyx_m); __pyx_m = 0;
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init msgpack._msgpack");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ /* Py2 module init functions return void; Py3 returns the module (NULL
+  * on error, since __pyx_m was cleared on the error path). */
+ #if PY_MAJOR_VERSION < 3
+ return;
+ #else
+ return __pyx_m;
+ #endif
+}
+
+/* Runtime support code */
+
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule((char *)modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result) {
+ if (dict != __pyx_b) {
+ PyErr_Clear();
+ result = PyObject_GetAttr(__pyx_b, name);
+ }
+ if (!result) {
+ PyErr_SetObject(PyExc_NameError, name);
+ }
+ }
+ return result;
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+static CYTHON_INLINE int __Pyx_CheckKeywordStrings(
+ PyObject *kwdict,
+ const char* function_name,
+ int kw_allowed)
+{
+ PyObject* key = 0;
+ Py_ssize_t pos = 0;
+ while (PyDict_Next(kwdict, &pos, &key, 0)) {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key)))
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key)))
+ #endif
+ goto invalid_keyword_type;
+ }
+ if ((!kw_allowed) && unlikely(key))
+ goto invalid_keyword;
+ return 1;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ return 0;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+ return 0;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ /* cause is unused */
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise , */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+#else /* Python 3+ */
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (!PyExceptionClass_Check(type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+
+ if (cause) {
+ PyObject *fixed_cause;
+ if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ }
+ else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ if (!value) {
+ value = PyObject_CallObject(type, NULL);
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+
+ PyErr_SetObject(type, value);
+
+ if (tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+ }
+
+bad:
+ return;
+}
+#endif
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AS_STRING(kw_name));
+ #endif
+}
+
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
+ #endif
+ goto invalid_keyword_type;
+ } else {
+ for (name = first_kw_arg; *name; name++) {
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) break;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) break;
+ #endif
+ }
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ /* unexpected keyword found */
+ for (name=argnames; name != first_kw_arg; name++) {
+ if (**name == key) goto arg_passed_twice;
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) goto arg_passed_twice;
+ #endif
+ }
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, **name);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+ PyErr_Format(PyExc_ValueError,
+ "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack",
+ index, (index == 1) ? "" : "s");
+}
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+ PyErr_Format(PyExc_ValueError,
+ "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected);
+}
+
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
+ if (unlikely(retval)) {
+ Py_DECREF(retval);
+ __Pyx_RaiseTooManyValuesError(expected);
+ return -1;
+ } else if (PyErr_Occurred()) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
+ PyObject *py_import = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
+ if (!py_import)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ #if PY_VERSION_HEX >= 0x02050000
+ {
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, NULL);
+ Py_DECREF(py_level);
+ }
+ #else
+ if (level>0) {
+ PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
+ goto bad;
+ }
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, NULL);
+ #endif
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(py_import);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
+ const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned char" :
+ "value too large to convert to unsigned char");
+ }
+ return (unsigned char)-1;
+ }
+ return (unsigned char)val;
+ }
+ return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
+ const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned short" :
+ "value too large to convert to unsigned short");
+ }
+ return (unsigned short)-1;
+ }
+ return (unsigned short)val;
+ }
+ return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
+ const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned int" :
+ "value too large to convert to unsigned int");
+ }
+ return (unsigned int)-1;
+ }
+ return (unsigned int)val;
+ }
+ return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
+ const char neg_one = (char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to char" :
+ "value too large to convert to char");
+ }
+ return (char)-1;
+ }
+ return (char)val;
+ }
+ return (char)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
+ const short neg_one = (short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to short" :
+ "value too large to convert to short");
+ }
+ return (short)-1;
+ }
+ return (short)val;
+ }
+ return (short)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
+ const signed char neg_one = (signed char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed char" :
+ "value too large to convert to signed char");
+ }
+ return (signed char)-1;
+ }
+ return (signed char)val;
+ }
+ return (signed char)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
+ const signed short neg_one = (signed short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed short" :
+ "value too large to convert to signed short");
+ }
+ return (signed short)-1;
+ }
+ return (signed short)val;
+ }
+ return (signed short)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
+ const signed int neg_one = (signed int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed int" :
+ "value too large to convert to signed int");
+ }
+ return (signed int)-1;
+ }
+ return (signed int)val;
+ }
+ return (signed int)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
+ const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (unsigned long)PyLong_AsLong(x);
+ }
+ } else {
+ unsigned long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned long)-1;
+ val = __Pyx_PyInt_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
+ const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ unsigned PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
+ const long neg_one = (long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (long)PyLong_AsLong(x);
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (long)-1;
+ val = __Pyx_PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
+ const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
+ const signed long neg_one = (signed long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (signed long)PyLong_AsLong(x);
+ }
+ } else {
+ signed long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed long)-1;
+ val = __Pyx_PyInt_AsSignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
+ const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ signed PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsSignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ #if PY_VERSION_HEX < 0x02050000
+ return PyErr_Warn(NULL, message);
+ #else
+ return PyErr_WarnEx(NULL, message, 1);
+ #endif
+ }
+ return 0;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
+ size_t size, int strict)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+ PyObject *py_name = 0;
+ char warning[200];
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(class_name);
+ #else
+ py_name = PyUnicode_FromString(class_name);
+ #endif
+ if (!py_name)
+ goto bad;
+ result = PyObject_GetAttr(py_module, py_name);
+ Py_DECREF(py_name);
+ py_name = 0;
+ Py_DECREF(py_module);
+ py_module = 0;
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility",
+ module_name, class_name);
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyErr_Warn(NULL, warning) < 0) goto bad;
+ #else
+ if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+ #endif
+ }
+ else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s has the wrong size, try recompiling",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(py_module);
+ Py_XDECREF(result);
+ return NULL;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(const char *name) {
+ PyObject *py_name = 0;
+ PyObject *py_module = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(name);
+ #else
+ py_name = PyUnicode_FromString(name);
+ #endif
+ if (!py_name)
+ goto bad;
+ py_module = PyImport_Import(py_name);
+ Py_DECREF(py_name);
+ return py_module;
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
+ int __pyx_lineno, const char *__pyx_filename) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(__pyx_filename);
+ #else
+ py_srcfile = PyUnicode_FromString(__pyx_filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (__pyx_clineno) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*int kwonlyargcount,*/
+ #endif
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_GET(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else /* Python 3+ has unicode identifiers */
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+/* Type Conversion Functions */
+
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+ PyNumberMethods *m;
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(x) || PyLong_Check(x))
+#else
+ if (PyLong_Check(x))
+#endif
+ return Py_INCREF(x), x;
+ m = Py_TYPE(x)->tp_as_number;
+#if PY_VERSION_HEX < 0x03000000
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = PyNumber_Long(x);
+ }
+#else
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Long(x);
+ }
+#endif
+ if (res) {
+#if PY_VERSION_HEX < 0x03000000
+ if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+ if (!PyLong_Check(res)) {
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%s__ returned non-%s (type %.200s)",
+ name, name, Py_TYPE(res)->tp_name);
+ Py_DECREF(res);
+ return NULL;
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject* x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+ if (ival <= LONG_MAX)
+ return PyInt_FromLong((long)ival);
+ else {
+ unsigned char *bytes = (unsigned char *) &ival;
+ int one = 1; int little = (int)*(unsigned char*)&one;
+ return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+ }
+#else
+ return PyInt_FromSize_t(ival);
+#endif
+}
+
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
+ unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
+ if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
+ return (size_t)-1;
+ } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t)-1;
+ }
+ return (size_t)val;
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/salt/msgpack/_msgpack.pyx b/salt/msgpack/_msgpack.pyx
new file mode 100644
index 000000000000..7a8174664567
--- /dev/null
+++ b/salt/msgpack/_msgpack.pyx
@@ -0,0 +1,427 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+cdef extern from "Python.h":
+ ctypedef char* const_char_ptr "const char*"
+ ctypedef char* const_void_ptr "const void*"
+ ctypedef struct PyObject
+ cdef int PyObject_AsReadBuffer(object o, const_void_ptr* buff, Py_ssize_t* buf_len) except -1
+
+from libc.stdlib cimport *
+from libc.string cimport *
+import gc
+_gc_disable = gc.disable
+_gc_enable = gc.enable
+
+cdef extern from "pack.h":
+ struct msgpack_packer:
+ char* buf
+ size_t length
+ size_t buf_size
+
+ int msgpack_pack_int(msgpack_packer* pk, int d)
+ int msgpack_pack_nil(msgpack_packer* pk)
+ int msgpack_pack_true(msgpack_packer* pk)
+ int msgpack_pack_false(msgpack_packer* pk)
+ int msgpack_pack_long(msgpack_packer* pk, long d)
+ int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+ int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+ int msgpack_pack_double(msgpack_packer* pk, double d)
+ int msgpack_pack_array(msgpack_packer* pk, size_t l)
+ int msgpack_pack_map(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+
+# Maximum nesting depth Packer._pack will descend into containers before
+# raising ValueError("Too deep.").
+cdef int DEFAULT_RECURSE_LIMIT=511
+
+cdef class Packer(object):
+    """MessagePack Packer
+
+    usage:
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+    """
+    # C-level packer state (output buffer, bytes written, capacity); see pack.h.
+    cdef msgpack_packer pk
+    # Optional callable used as a last-resort serializer in _pack().
+    cdef object _default
+    # Bytes objects kept referenced so the borrowed char* views below stay
+    # valid for the lifetime of this Packer.
+    cdef object _bencoding
+    cdef object _berrors
+    # Borrowed pointers into _bencoding/_berrors; NULL when encoding is None.
+    cdef char *encoding
+    cdef char *unicode_errors
+
+    def __cinit__(self):
+        # Allocate the 1 MiB output buffer in __cinit__ so __dealloc__ can
+        # always free() it, even if __init__ raises.
+        # NOTE(review): no growth logic is visible here -- presumably the
+        # msgpack_pack_* routines in pack.h reallocate on overflow; confirm.
+        cdef int buf_size = 1024*1024
+        self.pk.buf = malloc(buf_size);
+        if self.pk.buf == NULL:
+            raise MemoryError("Unable to allocate internal buffer.")
+        self.pk.buf_size = buf_size
+        self.pk.length = 0
+
+    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict'):
+        """default: callable applied to otherwise unserializable objects.
+        encoding: codec for unicode values; None disables encoding.
+        unicode_errors: error handler passed to the codec."""
+        if default is not None:
+            if not PyCallable_Check(default):
+                raise TypeError("default must be a callable.")
+        self._default = default
+        if encoding is None:
+            # No encoding: _pack() will reject unicode objects.
+            self.encoding = NULL
+            self.unicode_errors = NULL
+        else:
+            # Normalize both settings to bytes and keep the bytes objects
+            # referenced so the char* pointers remain valid.
+            if isinstance(encoding, unicode):
+                self._bencoding = encoding.encode('ascii')
+            else:
+                self._bencoding = encoding
+            self.encoding = PyBytes_AsString(self._bencoding)
+            if isinstance(unicode_errors, unicode):
+                self._berrors = unicode_errors.encode('ascii')
+            else:
+                self._berrors = unicode_errors
+            self.unicode_errors = PyBytes_AsString(self._berrors)
+
+    def __dealloc__(self):
+        # Counterpart of the malloc() in __cinit__.
+        free(self.pk.buf);
+
+    # Recursive serializer: returns the msgpack_pack_* status (0 == success)
+    # and raises (via `except -1`) on Python-level errors.
+    cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+        cdef long long llval
+        cdef unsigned long long ullval
+        cdef long longval
+        cdef double fval
+        cdef char* rawval
+        cdef int ret
+        cdef dict d
+
+        if nest_limit < 0:
+            raise ValueError("Too deep.")
+
+        if o is None:
+            ret = msgpack_pack_nil(&self.pk)
+        # bool is tested before the integer checks because bool is an int
+        # subclass and would otherwise be packed as a number.
+        elif isinstance(o, bool):
+            if o:
+                ret = msgpack_pack_true(&self.pk)
+            else:
+                ret = msgpack_pack_false(&self.pk)
+        elif PyLong_Check(o):
+            # Positive longs use the unsigned path so the full unsigned
+            # 64-bit range stays representable; 0 and negatives go signed.
+            if o > 0:
+                ullval = o
+                ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+            else:
+                llval = o
+                ret = msgpack_pack_long_long(&self.pk, llval)
+        elif PyInt_Check(o):
+            longval = o
+            ret = msgpack_pack_long(&self.pk, longval)
+        elif PyFloat_Check(o):
+            fval = o
+            ret = msgpack_pack_double(&self.pk, fval)
+        elif PyBytes_Check(o):
+            rawval = o
+            ret = msgpack_pack_raw(&self.pk, len(o))
+            if ret == 0:
+                ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+        elif PyUnicode_Check(o):
+            if not self.encoding:
+                raise TypeError("Can't encode utf-8 no encoding is specified")
+            # Encode first, then pack the resulting bytes as a raw value.
+            o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+            rawval = o
+            ret = msgpack_pack_raw(&self.pk, len(o))
+            if ret == 0:
+                ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+        elif PyDict_Check(o):
+            d = o
+            ret = msgpack_pack_map(&self.pk, len(d))
+            if ret == 0:
+                for k,v in d.items():
+                    ret = self._pack(k, nest_limit-1)
+                    if ret != 0: break
+                    ret = self._pack(v, nest_limit-1)
+                    if ret != 0: break
+        # Any other sequence (list, tuple, ...) becomes a msgpack array.
+        elif PySequence_Check(o):
+            ret = msgpack_pack_array(&self.pk, len(o))
+            if ret == 0:
+                for v in o:
+                    ret = self._pack(v, nest_limit-1)
+                    if ret != 0: break
+        elif self._default:
+            # Last resort: let the user-supplied callable convert `o` into
+            # something packable, then recurse on the result.
+            o = self._default(o)
+            ret = self._pack(o, nest_limit-1)
+        else:
+            raise TypeError("can't serialize %r" % (o,))
+        return ret
+
+    def pack(self, object obj):
+        """Pack `obj` and return the packed bytes."""
+        cdef int ret
+        ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+        if ret:
+            raise TypeError
+        # Snapshot the buffer contents, then reset the write position so the
+        # same buffer is reused by the next pack() call.
+        buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+        self.pk.length = 0
+        return buf
+
+
+def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'):
+    """
+    Pack object `o` and write the packed bytes to `stream` (any object
+    with a write() method). See Packer for the meaning of `default`,
+    `encoding` and `unicode_errors`."""
+    packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+    stream.write(packer.pack(o))
+
+def packb(object o, default=None, encoding='utf-8', unicode_errors='strict'):
+    """
+    Pack object `o` and return the packed bytes. See Packer for the
+    meaning of `default`, `encoding` and `unicode_errors`."""
+    packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+    return packer.pack(o)
+
+
+cdef extern from "unpack.h":
+ ctypedef struct msgpack_user:
+ int use_list
+ PyObject* object_hook
+ PyObject* list_hook
+ char *encoding
+ char *unicode_errors
+
+ ctypedef struct template_context:
+ msgpack_user user
+ PyObject* obj
+ size_t count
+ unsigned int ct
+ PyObject* key
+
+ int template_execute(template_context* ctx, const_char_ptr data,
+ size_t len, size_t* off) except -1
+ void template_init(template_context* ctx)
+ object template_data(template_context* ctx)
+
+
+def unpackb(object packed, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"):
+    """
+    Unpack msgpack-packed bytes into a single Python object and return it.
+    Returns None when the buffer does not yield one complete object."""
+    cdef template_context ctx
+    cdef size_t off = 0
+    cdef int ret
+
+    # Zero-copy read-only view of the caller's buffer (bytes, bytearray, ...).
+    cdef char* buf
+    cdef Py_ssize_t buf_len
+    PyObject_AsReadBuffer(packed, &buf, &buf_len)
+
+    if encoding is None:
+        # No decoding: raw msgpack values come back as bytes.
+        enc = NULL
+        err = NULL
+    else:
+        # Normalize codec name and error handler to bytes so their char*
+        # representations can be handed to the C-level unpacker.
+        if isinstance(encoding, unicode):
+            bencoding = encoding.encode('ascii')
+        else:
+            bencoding = encoding
+        if isinstance(unicode_errors, unicode):
+            berrors = unicode_errors.encode('ascii')
+        else:
+            berrors = unicode_errors
+        enc = PyBytes_AsString(bencoding)
+        err = PyBytes_AsString(berrors)
+
+    template_init(&ctx)
+    ctx.user.use_list = use_list
+    ctx.user.object_hook = ctx.user.list_hook = NULL
+    ctx.user.encoding = enc
+    ctx.user.unicode_errors = err
+    if object_hook is not None:
+        if not PyCallable_Check(object_hook):
+            raise TypeError("object_hook must be a callable.")
+        # NOTE(review): the struct field is a borrowed PyObject*; the hook is
+        # kept alive only by the function argument for this call's duration.
+        ctx.user.object_hook = object_hook
+    if list_hook is not None:
+        if not PyCallable_Check(list_hook):
+            raise TypeError("list_hook must be a callable.")
+        ctx.user.list_hook = list_hook
+    # GC is disabled around the C-level parse -- presumably to avoid a
+    # collection while containers are half-built; re-enabled in `finally`.
+    _gc_disable()
+    try:
+        ret = template_execute(&ctx, buf, buf_len, &off)
+    finally:
+        _gc_enable()
+    if ret == 1:
+        # Exactly one complete object was parsed.
+        # NOTE(review): any trailing bytes past `off` are silently ignored.
+        return template_data(&ctx)
+    else:
+        return None
+
+
+def unpack(object stream, object object_hook=None, object list_hook=None, bint use_list=0, encoding=None, unicode_errors="strict"):
+    """
+    Unpack one object from a file-like `stream`.
+
+    Reads the whole stream eagerly and delegates to unpackb().
+    """
+    return unpackb(stream.read(), use_list=use_list,
+                   object_hook=object_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors)
+
+cdef class Unpacker(object):
+ """
+ Streaming unpacker.
+ read_size is used like file_like.read(read_size)
+
+ `file_like` is a file-like object having `.read(n)` method.
+ When `Unpacker` initialized with `file_like`, unpacker reads serialized data
+ from it and `.feed()` method is not usable.
+
+ `read_size` is used as `file_like.read(read_size)`. (default: 1M)
+
+ If `use_list` is true, msgpack list is deserialized to Python list.
+ Otherwise, it is deserialized to Python tuple. (default: False)
+
+ `object_hook` is same to simplejson. If it is not None, it should be callable
+ and Unpacker calls it when deserializing key-value.
+
+ `encoding` is encoding used for decoding msgpack bytes. If it is None (default),
+ msgpack bytes is deserialized to Python bytes.
+
+ `unicode_errors` is used for decoding bytes.
+
+ example::
+
+ unpacker = Unpacker()
+ while 1:
+ buf = astream.read()
+ unpacker.feed(buf)
+ for o in unpacker:
+ do_something(o)
+ """
+ cdef template_context ctx
+ cdef char* buf
+ cdef size_t buf_size, buf_head, buf_tail
+ cdef object file_like
+ cdef object file_like_read
+ cdef Py_ssize_t read_size
+ cdef bint use_list
+ cdef object object_hook
+ cdef object _bencoding
+ cdef object _berrors
+ cdef char *encoding
+ cdef char *unicode_errors
+
+ def __cinit__(self):
+ self.buf = NULL
+
+ def __dealloc__(self):
+ free(self.buf)
+ self.buf = NULL
+
+ def __init__(self, file_like=None, Py_ssize_t read_size=1024*1024, bint use_list=0,
+ object object_hook=None, object list_hook=None,
+ encoding=None, unicode_errors='strict'):
+ self.use_list = use_list
+ self.file_like = file_like
+ if file_like:
+ self.file_like_read = file_like.read
+ if not PyCallable_Check(self.file_like_read):
+ raise ValueError("`file_like.read` must be a callable.")
+ self.read_size = read_size
+ self.buf =