File rtslib-git-update.patch of Package python-rtslib (project SUSE:SLE-12:Update)
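In summary, the patch below brings python-rtslib up to the 2014 Datera codebase: it replaces the plain README with a Markdown README.md, converts the Debian packaging to modern debhelper, drops the obsolete RDDR backstore imports, ships LIO policy files under policy/*.lio, and adds a new staged configuration API (rtslib.config, rtslib.config_filters, rtslib.config_live) with undo, diff and apply support. Usage sketches for the new API follow the patch.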
diff --git a/.gitignore b/.gitignore
index 1d3e968..a44a8ac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,5 +19,6 @@ doc/*
 debian/python-rtslib.substvars
 debian/rtslib-doc.debhelper.log
 debian/tmp/
-*.spec
 rtslib-*
+\#*#
+*~
diff --git a/Makefile b/Makefile
index 895bb4f..e46a981 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 # This file is part of RTSLib.
-# Copyright (c) 2011-2013 by Datera, Inc
+# Copyright (c) 2011-2014 by Datera, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -16,20 +16,39 @@
 NAME = rtslib
 GIT_BRANCH = $$(git branch | grep \* | tr -d \*)
-VERSION = $$(basename $$(git describe --tags | tr - .))
+VERSION = $$(basename $$(git describe --tags | tr - . | grep -o '[0-9].*$$'))
 
 all:
 	@echo "Usage:"
 	@echo
 	@echo " make deb - Builds debian packages."
+	@echo " make debinstall - Builds and installs debian packages."
+	@echo "                   (requires sudo access)"
 	@echo " make rpm - Builds rpm packages."
 	@echo " make release - Generates the release tarball."
 	@echo
+	@echo " make test - Runs the safe test suite."
+	@echo " make test-all - Runs all tests, including dangerous system tests."
+	@echo "                 This WILL mess up your system target configuration!"
+	@echo "                 Requires sudo access to root privileges."
+	@echo
 	@echo " make clean - Cleans up the local repository build files."
 	@echo " make cleanall - Also remove dist/*"
 
+test:
+	@echo "Running the safe test suite..."
+	@(PYTHONPATH=$$(pwd); cd tests/safe ; python -u -m unittest discover)
+
+test-all: test
+	@echo "Will run the DESTRUCTIVE system tests suite now."
+	@echo "This requires sudo access to root privileges."
+	@echo "These tests WILL mess up your system target configuration!"
+	@echo "Type CTRL-C to abort now or enter to continue..."
+	@read X
+	@(PYTHONPATH=$$(pwd); cd tests/system ; sudo python -m unittest discover)
+
 clean:
-	@rm -fv ${NAME}/*.pyc ${NAME}/*.html
+	@rm -fv ${NAME}/*.pyc test/*.pyc ${NAME}/*.html .swp
 	@rm -frv doc
 	@rm -frv ${NAME}.egg-info MANIFEST build
 	@rm -frv debian/tmp
@@ -42,6 +61,8 @@ clean:
 	@rm -frv results
 	@rm -fv rpm/*.spec *.spec rpm/sed* sed*
 	@rm -frv ${NAME}-*
+	@find . -name *~ -exec rm -v {} \;
+	@find . -name \#*\# -exec rm -v {} \;
 	@echo "Finished cleanup."
 
 cleanall: clean
@@ -68,7 +89,7 @@ build/release-stamp:
 	rm -r rpm
 	@echo "Generating rpm changelog..."
 	@( \
-	version=$$(basename $$(git describe HEAD --tags | tr - .)); \
+	version=$$(basename $$(git describe HEAD --tags | tr - . | grep -o '[0-9].*$$')); \
 	author=$$(git show HEAD --format="format:%an <%ae>" -s); \
 	date=$$(git show HEAD --format="format:%ad" -s \
 	    | awk '{print $$1,$$2,$$3,$$5}'); \
@@ -78,7 +99,7 @@ build/release-stamp:
 	) >> $$(ls build/${NAME}-${VERSION}/*.spec)
 	@echo "Generating debian changelog..."
 	@( \
-	version=$$(basename $$(git describe HEAD --tags | tr - .)); \
+	version=$$(basename $$(git describe HEAD --tags | tr - . | grep -o '[0-9].*$$')); \
 	author=$$(git show HEAD --format="format:%an <%ae>" -s); \
 	date=$$(git show HEAD --format="format:%aD" -s); \
 	day=$$(git show HEAD --format='format:%ai' -s \
@@ -117,6 +138,10 @@ build/deb-stamp:
 	@for pkg in $$(ls dist/*_${VERSION}_*.deb); do echo "  $${pkg}"; done
 	@touch build/deb-stamp
 
+debinstall: deb
+	@echo "Installing $$(ls dist/*_${VERSION}_*.deb)"
+	@sudo dpkg -i $$(ls dist/*_${VERSION}_*.deb)
+
 rpm: release build/rpm-stamp
 build/rpm-stamp:
 	@echo "Building rpm packages..."
diff --git a/README b/README
deleted file mode 100644
index fbc92e2..0000000
--- a/README
+++ /dev/null
@@ -1,16 +0,0 @@
-RTSLib Community Edition is a python library that provides an object API to
-RisingTide Systems generic SCSI Target as well as third-party target fabric
-modules written for it and backend storage objects.
-
-The latest version of this program might be obtained at:
-http://www.risingtidesystems.com/git/
-
-The git reposirory can be directly accessed from:
-git://risingtidesystems.com/rtslib.git
-git://linux-iscsi.org/rtslib.git
-
-It is useful for developing 3rd-party applications, as well as serving as a
-foundation for RisingTide Systems userspace tools.
-
-For more information, see the rtslib API reference, available in html
-format as a separate package.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..faa9b13
--- /dev/null
+++ b/README.md
@@ -0,0 +1,83 @@
+# RTSLib
+
+RTSLib is a Python library that provides the API to the Linux Kernel SCSI
+Target subsystem (LIO), its backend storage objects subsystem (TCM), as well
+as third-party Target Fabric Modules.
+
+RTSLib allows direct manipulation of all SCSI Target objects like storage
+objects, SCSI targets, TPGs, LUNs and ACLs. It is part of the Linux Kernel's
+SCSI Target's userspace management tools.
+
+## Usage scenarios
+
+RTSLib is used as the foundation for targetcli, the Linux Kernel's SCSI Target
+configuration CLI and shell, in embedded storage systems and appliances,
+commercial NAS and SAN systems, as well as a tool for sysadmins writing their
+own scripts to configure the SCSI Target subsystem.
+
+## Installation
+
+RTSLib is currently part of several Linux distributions, either under the
+`rtslib` name or `python-rtslib`. In most cases, simply installing the version
+packaged by your favorite Linux distribution is the best way to get it running.
+
+
+## Building from source
+
+The packages are very easy to build and install from source as long as
+you're familiar with your Linux distribution's package manager:
+
+1. Clone the github repository for RTSLib using `git clone
+   https://github.com/Datera/rtslib.git`.
+
+2. Make sure build dependencies are installed. To build RTSLib, you will need:
+
+   * GNU Make.
+   * python 2.6 or 2.7
+   * A few python libraries: ipaddr, netifaces, configobj, python-epydoc
+   * A working LaTeX installation and ghostscript for building the
+     documentation, for example texlive-latex.
+   * Your favorite distribution's package development tools, like rpm for
+     Redhat-based systems or dpkg-dev and debhelper for Debian systems.
+
+3. From the cloned git repository, run `make deb` to generate a Debian
+   package, or `make rpm` for a Redhat package.
+
+4. The newly built packages will be generated in the `dist/` directory.
+
+5. To clean up the repository, use `make clean` or `make cleanall`, which also
+   removes `dist/*` files.
+
+## Documentation
+
+The RTSLib packages ship with full API documentation in both HTML and PDF
+formats, typically in `/usr/share/doc/python-rtslib/doc/`.
+
+Depending on your Linux distribution, the documentation might be shipped in a
+separate package.
+
+Another good source of information is the http://linux-iscsi.org wiki,
+offering many resources such as (not necessarily up-to-date) copies of the
+RTSLib API Reference Guide (HTML at http://linux-iscsi.org/Doc/rtslib/html or
+PDF at http://linux-iscsi.org/Doc/rtslib/rtslib-API-reference.pdf), and the
+Targetcli User's Guide at http://linux-iscsi.org/wiki/targetcli.
+
+## Mailing-list
+
+All contributions, suggestions and bugfixes are welcome!
+
+To report a bug, submit a patch or simply stay up-to-date on the Linux SCSI
+Target developments, you can subscribe to the Linux Kernel SCSI Target
+development mailing-list by sending an email message containing only
+`subscribe target-devel` to <mailto:majordomo@vger.kernel.org>
+
+The archives of this mailing-list can be found online at
+http://dir.gmane.org/gmane.linux.scsi.target.devel
+
+## Author
+
+RTSLib was developed by Datera, Inc.
+http://www.datera.io
+
+The original author and current maintainer is
+Jerome Martin <jxm@netiant.com>
diff --git a/debian/README.Debian b/debian/README.Debian
deleted file mode 100644
index 78a1f14..0000000
--- a/debian/README.Debian
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright (c) 2011-2013 by Datera, Inc
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may
-not use this file except in compliance with the License. You may obtain
-a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations
-under the License.
diff --git a/debian/control b/debian/control
index eeb95c8..abf4aab 100644
--- a/debian/control
+++ b/debian/control
@@ -1,13 +1,18 @@
 Source: rtslib
 Section: python
 Priority: optional
-Maintainer: Jerome Martin <jxm@risingtidesystems.com>
-Build-Depends: debhelper(>= 7.0.1), python, python-epydoc
-Standards-Version: 3.8.1
+Standards-Version: 3.9.2
+Homepage: https://github.com/Datera/rtslib
+Maintainer: Jerome Martin <jxm@netiant.com>
+Build-Depends: debhelper(>= 7.0.50~), python(>= 2.6.6-3~), python-ipaddr, python-netifaces, python-configobj, python-pyparsing, python-epydoc, texlive-latex-base, texlive-latex-extra, texlive-latex-recommended, lmodern, ghostscript, texlive-fonts-recommended
 
 Package: python-rtslib
 Architecture: all
-Depends: python (>= 2.5)|python2.5|python2.6, python-configobj, python-netifaces, python-ipaddr
-Suggests: rtslib-doc
+Depends: ${python:Depends}, ${misc:Depends}, python-ipaddr, python-netifaces, python-configobj, python-pyparsing
+Provides: ${python:Provides}
 Conflicts: rtsadmin-frozen
-Description: RisingTide Systems generic SCSI target API in python.
+Description: Python API to the Linux Kernel's SCSI Target subsystem (LIO)
+ Provides Python object mappings to LIO and TCM SCSI Target subsystems and
+ fabric modules, like storage objects, SCSI targets and LUNs.
+ .
+ Part of the Linux Kernel SCSI Target's userspace management tools
diff --git a/debian/copyright b/debian/copyright
index 4fac94e..8ac5d63 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -1,13 +1,4 @@
-This package was originally debianized by Jerome Martin <jxm@risingtidesystems.com>
-on Fri Nov 18 12:00:01 UTC 2009. It is currently maintained by Jerome Martin
-<jxm@risingtidesystems.com>.
-
-Upstream Author: Jerome Martin <jxm@risingtidesystems.com>
-
-Copyright:
-
-This file is part of RTSLib.
-Copyright (c) 2011-2013 by Datera, Inc
+Copyright (c) 2011-2014 by Datera, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License"); you may
 not use this file except in compliance with the License. You may obtain
@@ -20,5 +11,3 @@
 distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 License for the specific language governing permissions and limitations
 under the License.
-
-
diff --git a/debian/python-rtslib.dirs b/debian/python-rtslib.dirs
deleted file mode 100644
index 7a9f9da..0000000
--- a/debian/python-rtslib.dirs
+++ /dev/null
@@ -1,2 +0,0 @@
-usr/share/python-support
-var/target/fabric
diff --git a/debian/python-rtslib.doc-base b/debian/python-rtslib.doc-base
new file mode 100644
index 0000000..900df2d
--- /dev/null
+++ b/debian/python-rtslib.doc-base
@@ -0,0 +1,12 @@
+Document: python-rtslib
+Title: python-rtslib online documentation
+Author: Jerome Martin <jxm@netiant.com>
+Abstract: API reference documentation for python-rtslib
+Section: System/Administration
+
+Format: HTML
+Index: /usr/share/doc/python-rtslib/doc/html/index.html
+Files: /usr/share/doc/python-rtslib/doc/html/*.html
+
+Format: PDF
+Files: /usr/share/doc/python-rtslib/doc/pdf/rtslib_API_Documentation.pdf.gz
diff --git a/debian/python-rtslib.docs b/debian/python-rtslib.docs
index 10fe481..e45a304 100644
--- a/debian/python-rtslib.docs
+++ b/debian/python-rtslib.docs
@@ -1,3 +1,4 @@
-README
+README.md
 COPYING
-doc/html
+specs/*.txt
+doc/
diff --git a/debian/python-rtslib.install b/debian/python-rtslib.install
index 0a59498..940f6c7 100644
--- a/debian/python-rtslib.install
+++ b/debian/python-rtslib.install
@@ -1,2 +1,3 @@
-lib/rtslib usr/share/python-support
-specs/* var/target/fabric
+lib/rtslib usr/share/pyshared
+specs/*.spec var/target/fabric
+policy/*.lio var/target/policy
diff --git a/debian/python-rtslib.postinst b/debian/python-rtslib.postinst
deleted file mode 100755
index 5de7eb9..0000000
--- a/debian/python-rtslib.postinst
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-for lib in lib lib64; do
-    for python in python2.5 python2.6; do
-        if [ -e /usr/${lib}/${python} ]; then
-            if [ ! -e /usr/${lib}/${python}/rtslib ]; then
-                mkdir /usr/${lib}/${python}/rtslib
-                for source in /usr/share/python-support/rtslib/rtslib/*.py; do
-                    ln -sf ${source} /usr/${lib}/${python}/rtslib/
-                done
-                python_path=$(which ${python} 2>/dev/null)
-                if [ ! -z $python_path ]; then
-                    ${python} -c "import compileall; compileall.compile_dir('/usr/${lib}/${python}/rtslib', force=1)"
-                fi
-            fi
-        fi
-    done
-done
diff --git a/debian/python-rtslib.preinst b/debian/python-rtslib.preinst
deleted file mode 100755
index f4815e6..0000000
--- a/debian/python-rtslib.preinst
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-rm -f /usr/share/python-support/rtslib/rtslib/*.pyc
-rm -f /usr/share/python-support/rtslib/rtslib/*.pyo
diff --git a/debian/python-rtslib.prerm b/debian/python-rtslib.prerm
deleted file mode 100755
index 8a089c4..0000000
--- a/debian/python-rtslib.prerm
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-for lib in lib lib64; do
-    for python in python2.5 python2.6; do
-        if [ -e /usr/${lib}/${python}/rtslib ]; then
-            rm -rf /usr/${lib}/${python}/rtslib
-        fi
-    done
-done
diff --git a/debian/pyversions b/debian/pyversions
deleted file mode 100644
index b3dc41e..0000000
--- a/debian/pyversions
+++ /dev/null
@@ -1 +0,0 @@
-2.5-
diff --git a/debian/rules b/debian/rules
index 5321084..83873e9 100755
--- a/debian/rules
+++ b/debian/rules
@@ -2,55 +2,28 @@
 build_dir = build
 install_dir = debian/tmp
-setup = /usr/bin/python ./setup.py --quiet
-name = rtslib
 
-binary: binary-indep
-binary-arch:
+%:
+	dh $@ --with python2
 
-binary-indep: build install
-	dh_testdir
-	dh_testroot
-	dh_installchangelogs
-	dh_installdocs
-	dh_installman
-	dh_install --list-missing --sourcedir $(install_dir)
-	dh_fixperms
-	dh_compress -X.py
-	dh_installdeb
-	dh_gencontrol
-	dh_md5sums
-	dh_builddeb
+override_dh_auto_clean:
+	# manually clean any *.pyc files
+	rm -rf rtslib/*.pyc
+	[ ! -d doc ] || rm -rf doc
 
-install: build
-	dh_testdir
-	dh_testroot
-	dh_installdirs
-	cp -R specs $(install_dir)
+override_dh_auto_build:
+	python setup.py build --build-base $(build_dir)
+	test -d doc || mkdir doc
 
-build: build-stamp
-build-stamp:
-	dh_testdir
-	$(setup) build --build-base $(build_dir) install \
-	    --no-compile --install-purelib $(install_dir)/lib/rtslib \
-	    --install-scripts $(install_dir)/bin
-	echo "2.5, 2.6" > $(install_dir)/lib/rtslib/.version
-	mkdir -p $(build_dir) doc
-	cd $(build_dir); epydoc --no-sourcecode --html -n $(name) \
-	    --exclude configobj ../$(name)/*.py
-	cp -r $(build_dir)/html doc/
-	touch build-stamp
-
-clean:
-	dh_testdir
-	dh_testroot
-	rm -f build-stamp
-	$(setup) clean
-	find . -name "*.pyc" | xargs rm -f
-	find . -name "*.pyo" | xargs rm -f
-name "*.pyo" | xargs rm -f - rm -rf $(build_dir) $(install_dir) - dh_clean - -.PHONY: binary binary-indep install build clean + mkdir -p doc/pdf + epydoc --no-sourcecode --pdf -n rtslib --exclude configobj rtslib/*.py + mv pdf/api.pdf doc/pdf/rtslib_API_Documentation.pdf + + mkdir -p doc/html + epydoc --no-sourcecode --html -n rtslib --exclude configobj rtslib/*.py + mv html doc/ +override_dh_auto_install: + python setup.py install --no-compile --install-purelib \ + $(install_dir)/lib --install-scripts $(install_dir)/bin diff --git a/policy/backstore_fileio.lio b/policy/backstore_fileio.lio new file mode 100644 index 0000000..355886a --- /dev/null +++ b/policy/backstore_fileio.lio @@ -0,0 +1,30 @@ +storage fileio disk %str { + path %str + size %bytes + buffered %bool(yes) + attribute { + block_size %int(512) + emulate_3pc %bool(yes) + emulate_caw %bool(yes) + emulate_dpo %bool(no) + emulate_fua_read %bool(no) + emulate_fua_write %bool(yes) + emulate_model_alias %bool(no) + emulate_rest_reord %bool(no) + emulate_tas %bool(yes) + emulate_tpu %bool(no) + emulate_tpws %bool(no) + emulate_ua_intlck_ctrl %bool(no) + emulate_write_cache %bool(no) + enforce_pr_isids %bool(yes) + fabric_max_sectors %int(8192) + is_nonrot %bool(no) + max_unmap_block_desc_count %int(1) + max_unmap_lba_count %int(8192) + max_write_same_len %int(4096) + optimal_sectors %int(8192) + queue_depth %int(128) + unmap_granularity %int(1) + unmap_granularity_alignment %int(0) + } +} diff --git a/policy/backstore_ramdisk.lio b/policy/backstore_ramdisk.lio new file mode 100644 index 0000000..413c853 --- /dev/null +++ b/policy/backstore_ramdisk.lio @@ -0,0 +1,29 @@ +storage rd_mcp disk %str { + size %bytes + nullio %bool(no) + attribute { + block_size %int(512) + emulate_3pc %bool(yes) + emulate_caw %bool(yes) + emulate_dpo %bool(no) + emulate_fua_read %bool(no) + emulate_fua_write %bool(no) + emulate_model_alias %bool(no) + emulate_rest_reord %bool(no) + emulate_tas %bool(yes) + emulate_tpu %bool(no) + emulate_tpws %bool(no) + emulate_ua_intlck_ctrl %bool(no) + emulate_write_cache %bool(no) + enforce_pr_isids %bool(yes) + fabric_max_sectors %int(8192) + is_nonrot %bool(no) + max_unmap_block_desc_count %bool(no) + max_unmap_lba_count %bool(no) + max_write_same_len %int(0) + optimal_sectors %int(1024) + queue_depth %int(32) + unmap_granularity %bool(no) + unmap_granularity_alignment %bool(no) + } +} diff --git a/policy/fabric_ib_srpt.lio b/policy/fabric_ib_srpt.lio new file mode 100644 index 0000000..da404c6 --- /dev/null +++ b/policy/fabric_ib_srpt.lio @@ -0,0 +1,68 @@ +fabric ib_srpt { + target %srpt_wwn { + acl %str { + attribute { + dataout_timeout %int(3) + dataout_timeout_retries %int(5) + default_erl %int(0) # TODO 0 1 2 only + nopin_response_timeout %int(30) + nopin_timeout %int(15) + random_datain_pdu_offsets %bool(no) + random_datain_seq_offsets %bool(no) + random_r2t_offsets %bool(no) + } + auth { + password %str("") + password_mutual %str("") + userid %str("") + userid_mutual %str("") + } + mapped_lun %int { + target_lun @(-3 lun) + write_protect %bool(no) + } + } + auth { + password %str("") + password_mutual %str("") + userid %str("") + userid_mutual %str("") + } + attribute { + authentication %bool(no) + default_erl %int(0) # TODO 0 1 or 2 + demo_mode_discovery %bool(yes) + cache_dynamic_acls %bool(no) + default_cmdsn_depth %int(16) + demo_mode_write_protect %bool(no) + generate_node_acls %bool(no) + login_timeout %int(15) + netif_timeout %int(2) + prod_mode_write_protect %bool(no) + } + lun %int backend %backend + 
+        parameter {
+            AuthMethod %str(CHAP)
+            DataDigest %str("CRC32C,None")
+            DataPDUInOrder %bool(yes)
+            DataSequenceInOrder %bool(yes)
+            DefaultTime2Retain %int(20)
+            DefaultTime2Wait %int(2)
+            ErrorRecoveryLevel %bool(no)
+            FirstBurstLength %int(65536)
+            HeaderDigest %str("CRC32C,None")
+            IFMarkInt %str("2048~65535")
+            IFMarker %bool(no)
+            ImmediateData %bool(yes)
+            InitialR2T %bool(yes)
+            MaxBurstLength %int(262144)
+            MaxConnections %int(1)
+            MaxOutstandingR2T %int(1)
+            MaxRecvDataSegmentLength %int(8192)
+            MaxXmitDataSegmentLength %int(262144)
+            OFMarkInt %str("2048~65535")
+            OFMarker %bool(no)
+            TargetAlias %str("LIO Target")
+        }
+    }
+}
diff --git a/policy/fabric_iscsi.lio b/policy/fabric_iscsi.lio
new file mode 100644
index 0000000..3e71d13
--- /dev/null
+++ b/policy/fabric_iscsi.lio
@@ -0,0 +1,77 @@
+fabric iscsi {
+    discovery_auth {
+        enable %bool(yes)
+        mutual_password %str("")
+        mutual_userid %str("")
+        password %str("")
+        userid %str("")
+    }
+    target %iqn tpgt %int {
+        enable %bool(yes)
+        portal %ipport
+        acl %str {
+            attribute {
+                dataout_timeout %int(3)
+                dataout_timeout_retries %int(5)
+                default_erl %int(0) # TODO 0 1 2 only
+                nopin_response_timeout %int(30)
+                nopin_timeout %int(15)
+                random_datain_pdu_offsets %bool(no)
+                random_datain_seq_offsets %bool(no)
+                random_r2t_offsets %bool(no)
+            }
+            auth {
+                password %str("")
+                password_mutual %str("")
+                userid %str("")
+                userid_mutual %str("")
+            }
+            mapped_lun %int {
+                target_lun @(-3 lun)
+                write_protect %bool(no)
+            }
+        }
+        auth {
+            password %str("")
+            password_mutual %str("")
+            userid %str("")
+            userid_mutual %str("")
+        }
+        attribute {
+            authentication %bool(no)
+            default_erl %int(0) # TODO 0 1 or 2
+            demo_mode_discovery %bool(yes)
+            cache_dynamic_acls %bool(no)
+            default_cmdsn_depth %int(16)
+            demo_mode_write_protect %bool(no)
+            generate_node_acls %bool(no)
+            login_timeout %int(15)
+            netif_timeout %int(2)
+            prod_mode_write_protect %bool(no)
+        }
+        lun %int backend %backend
+        parameter {
+            AuthMethod %str(CHAP)
+            DataDigest %str("CRC32C,None")
+            DataPDUInOrder %bool(yes)
+            DataSequenceInOrder %bool(yes)
+            DefaultTime2Retain %int(20)
+            DefaultTime2Wait %int(2)
+            ErrorRecoveryLevel %bool(no)
+            FirstBurstLength %int(65536)
+            HeaderDigest %str("CRC32C,None")
+            IFMarkInt %str("2048~65535")
+            IFMarker %bool(no)
+            ImmediateData %bool(yes)
+            InitialR2T %bool(yes)
+            MaxBurstLength %int(262144)
+            MaxConnections %int(1)
+            MaxOutstandingR2T %int(1)
+            MaxRecvDataSegmentLength %int(8192)
+            MaxXmitDataSegmentLength %int(262144)
+            OFMarkInt %str("2048~65535")
+            OFMarker %bool(no)
+            TargetAlias %str("LIO Target")
+        }
+    }
+}
diff --git a/policy/fabric_loopback.lio b/policy/fabric_loopback.lio
new file mode 100644
index 0000000..ea3210d
--- /dev/null
+++ b/policy/fabric_loopback.lio
@@ -0,0 +1 @@
+fabric loopback target %naa lun %int backend %backend
diff --git a/policy/fabric_qla2xxx.lio b/policy/fabric_qla2xxx.lio
new file mode 100644
index 0000000..33c099f
--- /dev/null
+++ b/policy/fabric_qla2xxx.lio
@@ -0,0 +1,68 @@
+fabric qla2xxx {
+    target %qla2xxx_wwn {
+        acl %str {
+            attribute {
+                dataout_timeout %int(3)
+                dataout_timeout_retries %int(5)
+                default_erl %int(0) # TODO 0 1 2 only
+                nopin_response_timeout %int(30)
+                nopin_timeout %int(15)
+                random_datain_pdu_offsets %bool(no)
+                random_datain_seq_offsets %bool(no)
+                random_r2t_offsets %bool(no)
+            }
+            auth {
+                password %str("")
+                password_mutual %str("")
+                userid %str("")
+                userid_mutual %str("")
+            }
+            mapped_lun %int {
+                target_lun @(-3 lun)
+                write_protect %bool(no)
+            }
+        }
+        auth {
+            password %str("")
%str("") + password_mutual %str("") + userid %str("") + userid_mutual %str("") + } + attribute { + authentication %bool(no) + default_erl %int(0) # TODO 0 1 or 2 + demo_mode_discovery %bool(yes) + cache_dynamic_acls %bool(no) + default_cmdsn_depth %int(16) + demo_mode_write_protect %bool(no) + generate_node_acls %bool(no) + login_timeout %int(15) + netif_timeout %int(2) + prod_mode_write_protect %bool(no) + } + lun %int backend %backend + parameter { + AuthMethod %str(CHAP) + DataDigest %str("CRC32C,None") + DataPDUInOrder %bool(yes) + DataSequenceInOrder %bool(yes) + DefaultTime2Retain %int(20) + DefaultTime2Wait %int(2) + ErrorRecoveryLevel %bool(no) + FirstBurstLength %int(65536) + HeaderDigest %str("CRC32C,None") + IFMarkInt %str("2048~65535") + IFMarker %bool(no) + ImmediateData %bool(yes) + InitialR2T %bool(yes) + MaxBurstLength %int(262144) + MaxConnections %int(1) + MaxOutstandingR2T %int(1) + MaxRecvDataSegmentLength %int(8192) + MaxXmitDataSegmentLength %int(262144) + OFMarkInt %str("2048~65535") + OFMarker %bool(no) + TargetAlias %str("LIO Target") + } + } +} diff --git a/policy/fabric_tcm_fc.lio b/policy/fabric_tcm_fc.lio new file mode 100644 index 0000000..71c2c82 --- /dev/null +++ b/policy/fabric_tcm_fc.lio @@ -0,0 +1,68 @@ +fabric tcm_fc { + target %fc_wwn { + acl %str { + attribute { + dataout_timeout %int(3) + dataout_timeout_retries %int(5) + default_erl %int(0) # TODO 0 1 2 only + nopin_response_timeout %int(30) + nopin_timeout %int(15) + random_datain_pdu_offsets %bool(no) + random_datain_seq_offsets %bool(no) + random_r2t_offsets %bool(no) + } + auth { + password %str("") + password_mutual %str("") + userid %str("") + userid_mutual %str("") + } + mapped_lun %int { + target_lun @(-3 lun) + write_protect %bool(no) + } + } + auth { + password %str("") + password_mutual %str("") + userid %str("") + userid_mutual %str("") + } + attribute { + authentication %bool(no) + default_erl %int(0) # TODO 0 1 or 2 + demo_mode_discovery %bool(yes) + cache_dynamic_acls %bool(no) + default_cmdsn_depth %int(16) + demo_mode_write_protect %bool(no) + generate_node_acls %bool(no) + login_timeout %int(15) + netif_timeout %int(2) + prod_mode_write_protect %bool(no) + } + lun %int backend %backend + parameter { + AuthMethod %str(CHAP) + DataDigest %str("CRC32C,None") + DataPDUInOrder %bool(yes) + DataSequenceInOrder %bool(yes) + DefaultTime2Retain %int(20) + DefaultTime2Wait %int(2) + ErrorRecoveryLevel %bool(no) + FirstBurstLength %int(65536) + HeaderDigest %str("CRC32C,None") + IFMarkInt %str("2048~65535") + IFMarker %bool(no) + ImmediateData %bool(yes) + InitialR2T %bool(yes) + MaxBurstLength %int(262144) + MaxConnections %int(1) + MaxOutstandingR2T %int(1) + MaxRecvDataSegmentLength %int(8192) + MaxXmitDataSegmentLength %int(262144) + OFMarkInt %str("2048~65535") + OFMarker %bool(no) + TargetAlias %str("LIO Target") + } + } +} diff --git a/policy/fabric_vhost.lio b/policy/fabric_vhost.lio new file mode 100644 index 0000000..a663660 --- /dev/null +++ b/policy/fabric_vhost.lio @@ -0,0 +1,5 @@ +fabric vhost { + target %naa tpgt %int { + lun %int backend %backend + } +} \ No newline at end of file diff --git a/rpm/python-rtslib.spec.tmpl b/rpm/python-rtslib.spec.tmpl index e30d57e..24e8f17 100644 --- a/rpm/python-rtslib.spec.tmpl +++ b/rpm/python-rtslib.spec.tmpl @@ -11,7 +11,7 @@ Source: %{oname}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-rpmroot BuildArch: noarch BuildRequires: python-devel, epydoc -Requires: python-configobj, python-netifaces, python-ipaddr 
+Requires: python-configobj, python-netifaces, python-ipaddr, python-pyparsing
 Vendor: Datera, Inc.
 
 %description
@@ -30,9 +30,11 @@ mv html doc/
 rm -rf %{buildroot}
 %{__python} setup.py install --skip-build --root %{buildroot} --prefix usr
 mkdir -p %{buildroot}/var/target/fabric
-cp specs/* %{buildroot}/var/target/fabric
+cp specs/*.spec %{buildroot}/var/target/fabric
+mkdir -p %{buildroot}/var/target/policy
+cp policy/*.lio %{buildroot}/var/target/policy
 mkdir -p %{buildroot}/usr/share/doc/python-rtslib-doc-%{version}
-cp -r doc/* %{buildroot}/usr/share/doc/python-rtslib-doc-%{version}/
+cp -r doc/* specs/*.txt %{buildroot}/usr/share/doc/python-rtslib-doc-%{version}/
 
 %clean
 rm -rf %{buildroot}
@@ -42,6 +44,6 @@ rm -rf %{buildroot}
 %{python_sitelib}
 /var/target
 /usr/share/doc/python-rtslib-doc-%{version}
-%doc COPYING README
+%doc COPYING README.md
 
 %changelog
diff --git a/rtslib/__init__.py b/rtslib/__init__.py
index 46ed110..ae673d1 100644
--- a/rtslib/__init__.py
+++ b/rtslib/__init__.py
@@ -1,6 +1,6 @@
 '''
 This file is part of RTSLib.
-Copyright (c) 2011-2013 by Datera, Inc
+Copyright (c) 2011-2014 by Datera, Inc
 
 Licensed under the Apache License, Version 2.0 (the "License"); you may
 not use this file except in compliance with the License. You may obtain
@@ -25,8 +25,13 @@ from target import NodeACL, NetworkPortal, TPG, Target, FabricModule
 from tcm import FileIOBackstore, IBlockBackstore
 from tcm import FileIOStorageObject, IBlockStorageObject
-from tcm import PSCSIBackstore, RDDRBackstore, RDMCPBackstore
-from tcm import PSCSIStorageObject, RDDRStorageObject, RDMCPStorageObject
+from tcm import PSCSIBackstore, RDMCPBackstore
+from tcm import PSCSIStorageObject, RDMCPStorageObject
+
+from config_filters import *
+from config import Config, ConfigError
+from config_tree import ConfigTree, NO_VALUE
+from config_parser import ConfigParser, PolicyParser, PatternParser
 
 __version__ = 'GIT_VERSION'
 __author__ = "Jerome Martin <jxm@risingtidesystems.com>"
diff --git a/rtslib/config.py b/rtslib/config.py
new file mode 100644
index 0000000..ae6d359
--- /dev/null
+++ b/rtslib/config.py
@@ -0,0 +1,793 @@
+'''
+This file is part of the LIO SCSI Target.
+
+Copyright (c) 2012-2014 by Datera, Inc.
+More information on www.datera.io.
+
+Original author: Jerome Martin <jxm@netiant.com>
+
+Datera and LIO are trademarks of Datera, Inc., which may be registered in some
+jurisdictions.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+'''
+import os, re, time, copy, logging
+
+from rtslib.utils import is_ipv4_address, is_ipv6_address, is_valid_wwn
+
+from config_filters import *
+from config_tree import ConfigTree, NO_VALUE
+from config_parser import ConfigParser, PolicyParser, PatternParser
+
+DEBUG = False
+if DEBUG:
+    logging.basicConfig()
+    log = logging.getLogger('Config')
+    log.setLevel(logging.DEBUG)
+else:
+    log = logging.getLogger('Config')
+    log.setLevel(logging.INFO)
+
+# FIXME validate_* and _load_parse_tree are a mess !!!
+# TODO Implement resync() to reload both policy and configfs state
+# TODO Add class_match_ids (objs) and name_match_value (attrs) to search etc.
+#      Use it to simplify all "%s .*" tricks in cli
+# TODO Implement commit_live()
+# TODO Custom defaults load
+# TODO Add copy() operation
+
+def dump_value(string):
+    if string == NO_VALUE:
+        return NO_VALUE
+    for char in " ~\t{}#',;":
+        if char in string:
+            return '"%s"' % string
+    if '"' in string:
+        return "'%s'" % string
+    elif not string:
+        return '""'
+    else:
+        return string
+
+def key_to_string(key):
+    strings = []
+    for item in key:
+        strings.append(dump_value(item))
+    return " ".join(strings)
+
+def is_valid_backend(value, parent):
+    cur = parent
+    while cur.parent is not None:
+        cur = cur.parent
+    (backend, _, disk) = value.partition(':')
+    if cur.search([("storage", backend), ("disk", disk)]):
+        return True
+    else:
+        return False
+
+def sort_key(node):
+    '''
+    A sort key for configuration nodes, that ensures nodes potentially
+    referenced in the config come first: storage before fabric, and lun
+    objects before acl objects. Also, attributes will be sorted before
+    objects, so that configuration dumps are easier to read, with simple
+    attributes coming before attribute groups.
+    '''
+    node_type = node.data['type']
+    obj_classes = ConfigParser.obj_classes
+    ordered_obj = {}
+    for k, v in enumerate(obj_classes.split()):
+        ordered_obj[v] = "%s%s" % (k, v)
+
+    if node_type == 'attr':
+        key = ('0', node.key[0], node.key[1])
+    elif node_type == 'group':
+        key = ('1', node.key[0])
+    elif node_type == 'obj':
+        key = ('2', ordered_obj.get(node.key[0], node.key[0]), node.key[1])
+    else:
+        raise ConfigError("Unknown configuration node type %s for %s"
+                          % (node_type, node))
+
+    return key
+
+class ConfigError(Exception):
+    pass
+
+class Config(object):
+    '''
+    The LIO configuration API.
+
+    The Config object provides methods to edit, search, validate and update
+    the current configuration, and commit that configuration to the live
+    system on request.
+
+    It features pattern-matching search for all configuration objects and
+    attributes as well as multi-level undo capabilities. In addition, all
+    configuration changes are staged before being applied, isolating the
+    current configuration from load-time and validation errors.
+    '''
+    policy_dir = "/var/target/policy"
+
+    def __init__(self):
+        data = {'source': {'operation': 'init', 'timestamp': time.time()},
+                'type': 'root',
+                'policy_path': []}
+        self.policy = ConfigTree(data, sort_key, key_to_string)
+        self.reference = ConfigTree(data, sort_key, key_to_string)
+
+        self._parser = ConfigParser()
+        self._policy_parser = PolicyParser()
+        self._pattern_parser = PatternParser()
+        self._configs = [ConfigTree(data, sort_key, key_to_string)]
+        self._load_policy()
+
+    def _load_policy(self):
+        '''
+        Loads all LIO system policy files.
+        '''
+        filepaths = ["%s/%s" % (self.policy_dir, path)
+                     for path in os.listdir(self.policy_dir)
+                     if path.endswith(".lio")]
+        for filepath in filepaths:
+            log.debug('Loading policy file %s' % filepath)
+            parse_tree = self._policy_parser.parse_file(filepath)
+            source = {'operation': 'load',
+                      'filepath': filepath,
+                      'timestamp': time.time(),
+                      'mtime': os.path.getmtime(filepath)}
+            self._load_parse_tree(parse_tree, replace=False,
+                                  source=source, target='policy')
+
+    def _load_parse_tree(self, parse_tree, cur_stage=None,
+                         replace=False, source=None,
+                         target='config', allow_new_attrs=False):
+        '''
+        target can be 'config', 'policy' or 'reference'
+        '''
+        # TODO accept 'defaults' target too
+        if source is None:
+            source = {}
+        if cur_stage is None:
+            update_target = True
+            if replace:
+                data = {'source': source, 'policy_path': [], 'type': 'root'}
+                stage = ConfigTree(data, sort_key, key_to_string)
+            elif target == 'config':
+                stage = self.current.get_clone()
+                stage.data['source'] = source
+            elif target == 'policy':
+                stage = self.policy.get_clone()
+                stage.data['source'] = source
+            elif target == 'reference':
+                stage = self.reference.get_clone()
+                stage.data['source'] = source
+        else:
+            update_target = False
+            stage = cur_stage
+
+        loaded = []
+        log.debug("Loading parse tree %s" % parse_tree)
+        for statement in parse_tree:
+            cur = stage
+            log.debug("Visiting statement %s" % statement)
+            for token in statement:
+                token['source'] = source
+                log.debug("Visiting token %s" % token)
+                if token['type'] == 'obj':
+                    log.debug("Loading obj token: %s" % token)
+                    if target != 'policy':
+                        token = self.validate_obj(token, cur)
+                    old = cur.get(token['key'])
+                    cur = cur.cine(token['key'], token)
+                    if not old:
+                        loaded.append(cur)
+                    if target != 'policy':
+                        self._add_missing_attributes(cur)
+                    log.debug("Added object %s" % cur.path)
+                elif token['type'] == 'attr':
+                    log.debug("Loading attr token: %s" % token)
+                    if target != 'policy':
+                        token = self.validate_attr(token, cur, allow_new_attrs)
+                    old_nodes = cur.search([(token['key'][0], ".*")])
+                    for old_node in old_nodes:
+                        log.debug("Deleting old value: %s\nnew is: %s"
+                                  % (old_node.path, str(token['key'])))
+                        deleted = cur.delete([old_node.key])
+                        log.debug("Deleted: %s" % str(deleted))
+                    cur = cur.cine(token['key'], token)
+                    if old_nodes and old_nodes[0].key != cur.key:
+                        loaded.append(cur)
+                    log.debug("Added attribute %s" % cur.path)
+                elif token['type'] == 'group':
+                    log.debug("Loading group token: %s" % token)
+                    if target != 'policy':
+                        log.debug("cur '%s' token '%s'" % (cur, token))
+                        token['policy_path'] = (cur.data['policy_path']
+                                                + [(token['key'][0],)])
+                    old = cur.get(token['key'])
+                    cur = cur.cine(token['key'], token)
+                    if not old:
+                        loaded.append(cur)
+                elif token['type'] == 'block':
+                    log.debug("Loading block token: %s" % token)
+                    for statement in token['statements']:
+                        log.debug("_load_parse_tree recursion on block "
+                                  "statement: %s" % [statement])
+                        loaded.extend(self._load_parse_tree(
+                            [statement], cur, source=source,
+                            target=target, allow_new_attrs=allow_new_attrs))
+
+        if update_target:
+            if target == 'config':
+                self.current = stage
+            elif target == 'policy':
+                self.policy = stage
+            elif target == 'reference':
+                self.reference = stage
+
+        return loaded
+
+    def _add_missing_attributes(self, obj):
+        '''
+        Given an obj node, add all missing attributes and attribute groups in
+        the configuration.
+        '''
+        source = {'operation': 'auto', 'timestamp': time.time()}
+        policy_root = self.policy.get_path(obj.data['policy_path'])
+        for policy_node in [node for node in policy_root.nodes
+                            if node.data['type'] == 'attr']:
+            attr = obj.search([(policy_node.key[0], ".*")])
+            if not attr:
+                key = (policy_node.key[0], policy_node.data.get('val_dfl'))
+                data = {'key': key, 'type': 'attr', 'source': source,
+                        'val_dfl': policy_node.data.get('val_dfl'),
+                        'val_type': policy_node.data['val_type'],
+                        'required': key[1] is None,
+                        'policy_path': policy_node.path}
+                log.debug("obj.set(%s, %s)" % (str(key), data))
+                obj.set(key, data)
+
+        groups = []
+        for policy_node in [node for node in policy_root.nodes
+                            if node.data['type'] == 'group']:
+            group = obj.get((policy_node.key[0],))
+            if not group:
+                key = (policy_node.key[0],)
+                data = {'key': key, 'type': 'group', 'source': source,
+                        'policy_path': policy_node.path}
+                groups.append(obj.set(key, data))
+            else:
+                groups.append(group)
+
+        for group in groups:
+            policy_root = self.policy.get_path(group.data['policy_path'])
+            for policy_node in [node for node in policy_root.nodes
+                                if node.data['type'] == 'attr']:
+                attr = group.search([(policy_node.key[0], ".*")])
+                if not attr:
+                    key = (policy_node.key[0], policy_node.data.get('val_dfl'))
+                    data = {'key': key, 'type': 'attr', 'source': source,
+                            'val_dfl': policy_node.data.get('val_dfl'),
+                            'val_type': policy_node.data['val_type'],
+                            'required': key[1] is None,
+                            'policy_path': policy_node.path}
+                    group.set(key, data)
+
+    def validate_val(self, value, val_type, parent=None):
+        valid_value = None
+        log.debug("validate_val(%s, %s)" % (value, val_type))
+        if value == NO_VALUE:
+            return None
+
+        if val_type == 'bool':
+            if value.lower() in ['yes', 'true', '1', 'enable']:
+                valid_value = 'yes'
+            elif value.lower() in ['no', 'false', '0', 'disable']:
+                valid_value = 'no'
+        elif val_type == 'bytes':
+            match = re.match(r'(\d+(\.\d*)?)([kKMGT]?B?$)', value)
+            if match:
+                qty = str(float(match.group(1)))
+                unit = match.group(3).upper()
+                if not unit.endswith('B'):
+                    unit += 'B'
+                valid_value = "%s%s" % (qty, unit)
+        elif val_type == 'int':
+            try:
+                valid_value = str(int(value))
+            except:
+                pass
+        elif val_type == 'ipport':
+            (addr, _, port) = value.rpartition(":")
+            try:
+                str(int(port))
+            except:
+                pass
+            else:
+                if is_ipv4_address(addr) or is_ipv6_address(addr):
+                    valid_value = value
+        elif val_type == 'posint':
+            try:
+                val = int(value)
+            except:
+                pass
+            else:
+                if val > 0:
+                    valid_value = value
+        elif val_type == 'str':
+            valid_value = str(value)
+            forbidden = "*?[]"
+            for char in forbidden:
+                if char in valid_value:
+                    valid_value = None
+                    break
+        elif val_type == 'iqn':
+            if is_valid_wwn('iqn', value):
+                valid_value = value
+        elif val_type == 'naa':
+            if is_valid_wwn('naa', value):
+                valid_value = value
+        elif val_type == 'backend':
+            if is_valid_backend(value, parent):
+                valid_value = value
+        else:
+            raise ConfigError("Unknown value type '%s' when validating %s"
+                              % (val_type, value))
+        log.debug("validate_val(%s) is a valid %s: %s"
+                  % (value, val_type, valid_value))
+        return valid_value
+
+    def validate_obj(self, token, parent):
+        log.debug("validate_obj(%s, %s)" % (token, parent.data))
+        policy_search = parent.data['policy_path'] + [(token['key'][0], ".*")]
+        policy_nodes = self.policy.search(policy_search)
+        valid_token = copy.deepcopy(token)
+        expected_val_types = set()
+
+        for policy_node in policy_nodes:
+            id_fixed = policy_node.data['id_fixed']
+            id_type = policy_node.data['id_type']
+            if id_fixed is not None:
+                expected_val_types.add("'%s'" % id_fixed)
+                if id_fixed == token['key'][1]:
+                    valid_token['policy_path'] = policy_node.path
+                    return valid_token
+            else:
+                expected_val_types.add(id_type)
+                valid_value = self.validate_val(valid_token['key'][1], id_type)
+                if valid_value is not None:
+                    valid_token['key'] = (valid_token['key'][0], valid_value)
+                    valid_token['policy_path'] = policy_node.path
+                    return valid_token
+
+        if not policy_nodes:
+            obj_type = ("%s %s" % (parent.path_str, token['key'][0])).strip()
+            raise ConfigError("Unknown object type: %s" % obj_type)
+        else:
+            raise ConfigError("Invalid %s identifier '%s': expected type %s"
+                              % (token['key'][0],
+                                 token['key'][1],
+                                 ", ".join(expected_val_types)))
+
+    def validate_attr(self, token, parent, allow_new_attr=False):
+        log.debug("validate_attr(%s, %s)" % (token, parent.data))
+        if token['key'][1] is None:
+            return token
+
+        policy_search = parent.data['policy_path'] + [(token['key'][0], ".*")]
+        policy_nodes = self.policy.search(policy_search)
+        valid_token = copy.deepcopy(token)
+        expected_val_types = set()
+        for policy_node in policy_nodes:
+            ref_path = policy_node.data['ref_path']
+            valid_token['required'] = policy_node.data['required']
+            valid_token['comment'] = policy_node.data['comment']
+            valid_token['val_dfl'] = policy_node.data.get('val_dfl')
+            valid_token['val_type'] = policy_node.data['val_type']
+            if ref_path is not None:
+                root = parent
+                if ref_path.startswith('-'):
+                    (upno, _, down) = ref_path[1:].partition(' ')
+                    for i in range(int(upno) - 1):
+                        root = root.parent
+                else:
+                    while not root.is_root:
+                        root = root.parent
+
+                search_path = [(down, token['key'][1])]
+                nodes = root.search(search_path)
+
+                if len(nodes) == 1:
+                    valid_token['ref_path'] = nodes[0].path_str
+                    return valid_token
+                elif len(nodes) == 0:
+                    raise ConfigError("Invalid reference for attribute %s: %s"
+                                      % (token['key'][0], search_path))
+                else:
+                    raise ConfigError("Unexpected reference error, got: %s"
+                                      % nodes)
+
+                return valid_token
+            else:
+                expected_val_types.add(policy_node.data['val_type'])
+                if valid_token['key'][1] == NO_VALUE:
+                    valid_value = NO_VALUE
+                else:
+                    valid_value = \
+                        self.validate_val(valid_token['key'][1],
+                                          policy_node.data['val_type'],
+                                          parent=parent)
+                if valid_value is not None:
+                    valid_token['key'] = (valid_token['key'][0], valid_value)
+                    return valid_token
+
+        if not policy_nodes:
+            if allow_new_attr:
+                valid_token['required'] = False
+                valid_token['comment'] = "Unknown"
+                valid_token['val_dfl'] = valid_token['key'][1]
+                valid_token['val_type'] = "raw"
+                valid_token['ref_path'] = None
+                return valid_token
+            else:
+                attr_name = ("%s %s"
+                             % (parent.path_str, token['key'][0])).strip()
+                raise ConfigError("Unknown attribute: %s" % attr_name)
+        else:
+            raise ConfigError("Invalid %s value '%s': expected type %s"
+                              % (token['key'][0],
+                                 token['key'][1],
+                                 ", ".join(expected_val_types)))
+
+    @property
+    def current(self):
+        return self._configs[-1]
+
+    @current.setter
+    def current(self, config_tree):
+        self._configs.append(config_tree)
+
+    def undo(self):
+        '''
+        Restores the previous state of the configuration, before the last set,
+        load, delete, update or clear operation. If there is nothing to undo, a
+        ConfigError exception will be raised.
+        '''
+        if len(self._configs) < 2:
+            raise ConfigError("Nothing to undo")
+        else:
+            self._configs.pop()
+
+    def set(self, configuration):
+        '''
+        Evaluates the configuration (a string in LIO configuration format) and
+        sets the relevant objects, attributes and attribute groups.
+
+        Existing attributes and objects will be updated if needed and new ones
+        will be added.
+
+        The list of created configuration nodes will be returned.
+
+        If an error occurs, the operation will be aborted, leaving the current
+        configuration intact.
+        '''
+        parse_tree = self._parser.parse_string(configuration)
+        source = {'operation': 'set',
+                  'data': configuration,
+                  'timestamp': time.time()}
+        return self._load_parse_tree(parse_tree, source=source)
+
+    def delete(self, pattern, node_filter=lambda x:x):
+        '''
+        Deletes all configuration objects and attributes whose paths match the
+        pattern, along with their children.
+
+        The pattern is a single LIO configuration statement without any block,
+        where object identifiers, attribute names, attribute values and
+        attribute groups are regular expression patterns. Object types have to
+        use their exact string representation to match.
+
+        node_filter is a function applied to each node before returning it:
+            node_filter(node_in) -> node_out | None (aka filtered out)
+
+        Returns a list of all deleted nodes.
+
+        If an error occurs, the operation will be aborted, leaving the current
+        configuration intact.
+        '''
+        path = [token for token in
+                self._pattern_parser.parse_string(pattern)]
+        log.debug("delete(%s)" % pattern)
+        source = {'operation': 'delete',
+                  'pattern': pattern,
+                  'timestamp': time.time()}
+        stage = self.current.get_clone()
+        stage.data['source'] = source
+        deleted = []
+        for node in stage.search(path, node_filter):
+            log.debug("delete() found node %s" % node)
+            deleted.append(stage.delete(node.path))
+        self.current = stage
+        return deleted
+
+    def load(self, filepath, allow_new_attrs=False):
+        '''
+        Loads an LIO configuration file and replaces the current configuration
+        with it.
+
+        All existing objects and attributes will be deleted, and new ones will
+        be added.
+
+        If an error occurs, the operation will be aborted, leaving the current
+        configuration intact.
+        '''
+        parse_tree = self._parser.parse_file(filepath)
+        source = {'operation': 'load',
+                  'filepath': filepath,
+                  'timestamp': time.time(),
+                  'mtime': os.path.getmtime(filepath)}
+        self._load_parse_tree(parse_tree, replace=True,
+                              source=source, allow_new_attrs=allow_new_attrs)
+
+    def load_live(self):
+        '''
+        Loads the live-running configuration.
+        '''
+        from config_live import dump_live
+        live = dump_live()
+        parse_tree = self._parser.parse_string(live)
+        source = {'operation': 'resync',
+                  'timestamp': time.time()}
+        self._load_parse_tree(parse_tree, replace=True,
+                              source=source, allow_new_attrs=True)
+
+    def update(self, filepath):
+        '''
+        Updates the current configuration with the contents of an LIO
+        configuration file.
+
+        Existing attributes and objects will be updated if needed and new ones
+        will be added.
+
+        If an error occurs, the operation will be aborted, leaving the current
+        configuration intact.
+        '''
+        parse_tree = self._parser.parse_file(filepath)
+        source = {'operation': 'update',
+                  'filepath': filepath,
+                  'timestamp': time.time(),
+                  'mtime': os.path.getmtime(filepath)}
+        self._load_parse_tree(parse_tree, source=source)
+
+    def clear(self):
+        '''
+        Clears the current configuration.
+
+        This removes all current objects and attributes from the configuration.
+        '''
+        source = {'operation': 'clear',
+                  'timestamp': time.time()}
+        self.current = ConfigTree({'source': source}, sort_key, key_to_string)
+
+    def search(self, search_statement, node_filter=lambda x:x):
+        '''
+        Returns a list of nodes matching the search_statement, relative to the
+        current node, or an empty list if no match was found.
+
+        The search_statement is a single LIO configuration statement without
+        any block, where object identifiers, attribute names, attribute values
+        and attribute groups are regular expression patterns. Object types
+        have to use their exact string representation to match.
+
+        node_filter is a function applied to each node before returning it:
+            node_filter(node_in) -> node_out | None (aka filtered out)
+        '''
+        path = [token for token in
+                self._pattern_parser.parse_string(search_statement)]
+        return self.current.search(path, node_filter)
+
+    def dump(self, search_statement=None, node_filter=lambda x:x):
+        '''
+        Returns a LIO configuration file format dump of the nodes matching
+        the search_statement, or of all nodes if search_statement is None.
+
+        The search_statement is a single LIO configuration statement without
+        any block, where object identifiers, attribute names, attribute values
+        and attribute groups are regular expression patterns. Object types
+        have to use their exact string representation to match.
+
+        node_filter is a function applied to each node before dumping it:
+            node_filter(node_in) -> node_out | None (aka filtered out)
+        '''
+        # FIXME: Breaks with filter_only_missing
+        if not search_statement:
+            root_nodes = [self.current]
+        else:
+            root_nodes = self.search(search_statement, node_filter)
+
+        if root_nodes:
+            parts = []
+            for root_node_in in root_nodes:
+                root_node = node_filter(root_node_in)
+                if root_node is None:
+                    break
+                dump = ''
+                if root_node.key_str:
+                    dump = "%s " % root_node.key_str
+                nodes = root_node.nodes
+                if root_node.is_root or len(nodes) == 1:
+                    for node in nodes:
+                        section = self.dump(node.path_str, node_filter)
+                        if section:
+                            dump += section
+                elif len(nodes) > 1:
+                    dump += "{\n"
+                    for node in nodes:
+                        section = self.dump(node.path_str, node_filter)
+                        if section is not None:
+                            lines = section.splitlines()
+                        else:
+                            lines = []
+                        dump += "\n".join("    %s" % line
+                                          for line in lines if line)
+                        dump += "\n"
+                    dump += "}\n"
+                parts.append(dump)
+            dump = "\n".join(parts)
+            if dump.strip():
+                return dump
+
+    def save(self, filepath, pattern=None):
+        '''
+        Saves the current configuration to filepath, using LIO configuration
+        file format. If path is not None, only objects and attributes starting
+        at path and hanging under it will be saved.
+
+        For convenience, the saved configuration will also be returned as a
+        string.
+
+        The pattern is a whitespace-separated string of regular expressions,
+        each of which will be matched against configuration objects and
+        attributes. In case of dump, the pattern must be unambiguous and
+        match only a single configuration node.
+
+        If the pattern matches either zero or more than one configuration
+        nodes, a ConfigError exception will be raised.
+        '''
+        dump = self.dump(pattern, filter_no_missing)
+        if dump is None:
+            dump = ''
+        with open(filepath, 'w') as f:
+            f.write(dump)
+        return dump
+
+    def verify(self):
+        '''
+        Validates the configuration for the following points:
+            - Portal IP Addresses exist
+            - Devices and file paths exist
+            - Files for fileio exist
+            - No required attributes are missing
+            - References are correct
+        Returns a dictionary of validation_test: [errors]
+        '''
+        return {}
+
+    def apply(self, brute_force=True):
+        '''
+        Applies the configuration to the live system:
+            - Remove objects absent from the configuration and objects in the
+              configuration with different required attributes
+            - Create new storage objects
+            - Create new fabric objects
+            - Update relevant storage objects
+            - Update relevant fabric objects
+        '''
+        from config_live import apply_create_obj, apply_delete_obj
+
+        if brute_force:
+            from config_live import apply_create_obj, clear_configfs
+            yield "[clear] delete all live objects"
+            clear_configfs()
+            for obj in self.current.walk(get_filter_on_type(['obj'])):
+                yield("[create] %s" % obj.path_str)
+                apply_create_obj(obj)
+        else:
+            # TODO for minor_obj, update instead of create/delete
+            diff = self.diff_live()
+            delete_list = diff['removed'] + diff['major_obj'] + diff['minor_obj']
+            delete_list.reverse()
+            for obj in delete_list:
+                yield "[delete] %s" % obj.path_str
+                apply_delete_obj(obj)
+
+            for obj in diff['created'] + diff['major_obj'] + diff['minor_obj']:
+                yield "[create] %s" % obj.path_str
+                apply_create_obj(obj)
+
+    def diff_live(self):
+        '''
+        Returns a diff between the current configuration and the live
+        configuration as a reference.
+        '''
+        from config_live import dump_live
+        parse_tree = self._parser.parse_string(dump_live())
+        source = {'operation': 'load',
+                  'timestamp': time.time()}
+        self._load_parse_tree(parse_tree, replace=True,
+                              source=source, target='reference',
+                              allow_new_attrs=True)
+        return self.diff()
+
+    def diff(self):
+        '''
+        Computes differences between a valid current configuration and a
+        previously loaded valid reference configuration.
+
+        Returns a dict of:
+            - 'removed': list of removed objects
+            - 'major': list of changed required attributes
+            - 'major_obj': list of obj with major changes
+            - 'minor': list of changed non-required attributes
+            - 'minor_obj': list of obj with minor changes
+            - 'created': list of new objects in the current configuration
+        '''
+        # FIXME data['required'] check should be enough without NO_VALUE check
+        # FIXME Can't we just pass the reference config instead of having to preload it?
+        diffs = {}
+        keys = ('removed', 'major', 'major_obj',
+                'minor', 'minor_obj', 'created')
+        for key in keys:
+            diffs[key] = []
+
+        for obj in self.current.walk(get_filter_on_type(['obj'])):
+            if not self.reference.get_path(obj.path):
+                diffs['created'].append(obj)
+
+        for obj in self.reference.walk(get_filter_on_type(['obj'])):
+            if not self.current.get_path(obj.path):
+                diffs['removed'].append(obj)
+
+        for obj in self.current.walk(get_filter_on_type(['obj'])):
+            if self.reference.get_path(obj.path):
+                for node in obj.nodes:
+                    if node.data['type'] == 'attr' \
+                       and (node.data['required'] \
+                            or node.key[1] == NO_VALUE):
+                        if not self.reference.get_path(node.path):
+                            diffs['major'].append(node)
+                            diffs['major_obj'].append(node.parent)
+
+        for obj in self.current.walk(get_filter_on_type(['obj'])):
+            if self.reference.get_path(obj.path):
+                for node in obj.nodes:
+                    if node.data['type'] == 'attr' \
+                       and not node.data['required'] \
+                       and node.key[1] != NO_VALUE:
+                        if not self.reference.get_path(node.path):
+                            diffs['minor'].append(node)
+                            if node.parent not in diffs['minor_obj'] \
+                               and node.parent not in diffs['major_obj']:
+                                diffs['minor_obj'].append(node.parent)
+                    elif node.data['type'] == 'group':
+                        for attr in node.nodes:
+                            if attr.data['type'] == 'attr' \
+                               and not attr.data['required'] \
+                               and attr.key[1] != NO_VALUE:
+                                if not self.reference.get_path(attr.path):
+                                    diffs['minor'].append(attr)
+                                    if node.parent not in diffs['minor_obj'] \
+                                       and node.parent not in diffs['major_obj']:
+                                        diffs['minor_obj'].append(node.parent)
+        return diffs
diff --git a/rtslib/config_filters.py b/rtslib/config_filters.py
new file mode 100644
index 0000000..668b8bd
--- /dev/null
+++ b/rtslib/config_filters.py
@@ -0,0 +1,114 @@
+'''
+This file is part of the LIO SCSI Target.
+
+Copyright (c) 2012-2014 by Datera, Inc.
+More information on www.datera.io.
+
+Original author: Jerome Martin <jxm@netiant.com>
+
+Datera and LIO are trademarks of Datera, Inc., which may be registered in some
+jurisdictions.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+'''
+from config_tree import NO_VALUE
+
+def get_composed_filter(*filters):
+    '''
+    Returns a node filter that is the composition of all filter functions
+    passed as arguments. Filters will be applied in the order they appear.
+    '''
+    def composed_filter(node_in):
+        for node_filter in filters:
+            node_out = node_filter(node_in)
+            if node_out is None:
+                break
+            else:
+                node_in = node_out
+        return node_out
+    return composed_filter
+
+def get_filter_on_type(allowed_types):
+    '''
+    Returns a node filter that only lets nodes whose type is in the
+    allowed_types list pass through.
+    '''
+    def filter_on_type(node_in):
+        if node_in.data['type'] in allowed_types:
+            return node_in
+    return filter_on_type
+
+def get_reverse_filter(node_filter):
+    '''
+    Returns a new filter that lets through all nodes normally filtered out by
+    node_filter, and filters out the ones normally passed.
+
+    This should be useful only with filters that pass nodes through without
+    modifying them.
+    '''
+    def reverse_filter(node_in):
+        if node_filter(node_in) is None:
+            return node_in
+    return reverse_filter
+
+def filter_no_default(node_in):
+    '''
+    A filter that lets all nodes through, except attributes with a default
+    value and attribute groups containing only such attributes.
+    '''
+    node_out = node_in
+    if node_in.data['type'] == 'attr' \
+       and node_in.data['key'][1] != NO_VALUE \
+       and node_in.data['key'][1] == node_in.data['val_dfl']:
+        node_out = None
+    elif node_in.data['type'] == 'group':
+        node_out = None
+        for attr in node_in.nodes:
+            if filter_no_default(attr) is not None:
+                node_out = node_in
+                break
+    return node_out
+
+filter_only_default = get_reverse_filter(filter_no_default)
+
+def filter_no_missing(node_in):
+    '''
+    A filter that lets all nodes through, except required attributes missing a
+    value.
+    '''
+    node_out = node_in
+    if node_in.data['type'] == 'attr' \
+       and node_in.data['key'][1] is NO_VALUE:
+        node_out = None
+    return node_out
+
+def filter_only_missing(node_in):
+    '''
+    A filter that only lets through obj and groups containing attributes with
+    missing values, as well as those attributes.
+    '''
+    # FIXME Breaks dump
+    node_out = None
+    if node_in.data['type'] == 'attr' \
+       and node_in.data['key'][1] is NO_VALUE:
+        node_out = node_in
+    return node_out
+
+def filter_only_required(node_in):
+    '''
+    A filter that only lets through required attribute nodes, aka those
+    attributes without a default value in LIO configuration policy.
+    '''
+    if node_in.data['type'] == 'attr' \
+       and node_in.data.get('val_dfl') is None:
+        return node_in
diff --git a/rtslib/config_live.py b/rtslib/config_live.py
new file mode 100644
index 0000000..e16c543
--- /dev/null
+++ b/rtslib/config_live.py
@@ -0,0 +1,664 @@
+'''
+This file is part of the LIO SCSI Target.
+
+Copyright (c) 2012-2014 by Datera, Inc.
+More information on www.datera.io.
+
+Original author: Jerome Martin <jxm@netiant.com>
+
+Datera and LIO are trademarks of Datera, Inc., which may be registered in some
+jurisdictions.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+'''
+import logging
+
+from rtslib.config_tree import NO_VALUE
+from rtslib.config import dump_value, ConfigError
+from rtslib.utils import convert_bytes_to_human, convert_human_to_bytes
+
+from rtslib import (RTSRoot, Target, FabricModule, LUN, MappedLUN,
+                    NetworkPortal, TPG, NodeACL, FileIOBackstore,
+                    FileIOStorageObject, IBlockBackstore,
+                    IBlockStorageObject, PSCSIBackstore,
+                    PSCSIStorageObject, RDMCPBackstore,
+                    RDMCPStorageObject, RTSLibError)
+
+# TODO There seems to be a bug in LIO, affecting both this API and rtslib:
+# when a tpg does not contain any objects, it cannot be removed.
+
+_rtsroot = None
+_indent = ' '*4
+
+DEBUG = False
+if DEBUG:
+    logging.basicConfig()
+    log = logging.getLogger('Config')
+    log.setLevel(logging.DEBUG)
+else:
+    log = logging.getLogger('Config')
+    log.setLevel(logging.INFO)
+
+def _b2h(b):
+    # FIXME: Fix convert_bytes_to_human() instead of juggling here
+    return convert_bytes_to_human(b)[1:-2]
+
+def get_root():
+    global _rtsroot
+    if _rtsroot is None:
+        _rtsroot = RTSRoot()
+    return _rtsroot
+
+def _list_live_group_attrs(rts_obj):
+    '''
+    Returns a list of all group attributes for the rts_obj rtslib object
+    currently running on the live system, in LIO configuration file format.
+    '''
+    attrs = []
+    for attribute in rts_obj.list_attributes(writable=True):
+        value = rts_obj.get_attribute(attribute)
+        attrs.append("attribute %s %s" % (attribute, dump_value(value)))
+    for parameter in rts_obj.list_parameters(writable=True):
+        value = rts_obj.get_parameter(parameter)
+        attrs.append("parameter %s %s" % (parameter, dump_value(value)))
+    for auth_attr in rts_obj.list_auth_attrs(writable=True):
+        value = rts_obj.get_auth_attr(auth_attr)
+        attrs.append("auth %s %s" % (auth_attr, dump_value(value)))
+    return attrs
+
+def dump_live():
+    '''
+    Returns a text dump of the objects and attributes currently running on
+    the live system, in LIO configuration file format.
+    '''
+    dump = []
+    dump.append(dump_live_storage())
+    dump.append(dump_live_fabric())
+    return "\n".join(dump)
+
+def dump_live_storage():
+    '''
+    Returns a text dump of the storage objects and attributes currently
+    running on the live system, in LIO configuration file format.
+    '''
+    dump = []
+    for so in sorted(get_root().storage_objects, key=lambda so: so.name):
+        dump.append("storage %s disk %s {"
+                    % (so.backstore.plugin, so.name))
+        attrs = []
+        if so.backstore.plugin in ['fileio', 'pscsi', 'iblock']:
+            attrs.append("%spath %s" % (_indent, so.udev_path))
+        if so.backstore.plugin in ['fileio', 'rd_mcp']:
+            attrs.append("%ssize %s" % (_indent, _b2h(so.size)))
+        if so.backstore.plugin in ['rd_mcp']:
+            if so.nullio:
+                nullio = 'yes'
+            else:
+                nullio = 'no'
+            attrs.append("%snullio %s" % (_indent, nullio))
+        if so.backstore.plugin in ['fileio']:
+            is_buffered = "buffered" in so.mode
+            if is_buffered:
+                is_buffered = 'yes'
+            else:
+                is_buffered = 'no'
+            attrs.append("%sbuffered %s" % (_indent, is_buffered))
+
+        group_attrs = _list_live_group_attrs(so)
+        attrs.extend(["%s%s" % (_indent, attr) for attr in group_attrs])
+
+        dump.append("\n".join(attrs))
+        dump.append("}")
+
+    return "\n".join(dump)
+
+def dump_live_fabric():
+    '''
+    Returns a text dump of the fabric objects and attributes currently
+    running on the live system, in LIO configuration file format.
+    '''
+    dump = []
+    for fm in sorted(get_root().loaded_fabric_modules, key=lambda fm: fm.name):
+        if fm.has_feature('discovery_auth'):
+            dump.append("fabric %s {" % fm.name)
+            dump.append("%sdiscovery_auth enable %s"
+                        % (_indent, dump_value(fm.discovery_enable_auth)))
+            dump.append("%sdiscovery_auth userid %s"
+                        % (_indent, dump_value(fm.discovery_userid)))
+            dump.append("%sdiscovery_auth password %s"
+                        % (_indent, dump_value(fm.discovery_password)))
+            dump.append("%sdiscovery_auth mutual_userid %s"
+                        % (_indent, dump_value(fm.discovery_mutual_userid)))
+            dump.append("%sdiscovery_auth mutual_password %s"
+                        % (_indent, dump_value(fm.discovery_mutual_password)))
+            dump.append("}")
+
+        for tg in fm.targets:
+            tpgs = []
+            if not list(tg.tpgs):
+                dump.append("fabric %s target %s" % (fm.name, tg.wwn))
+            for tpg in tg.tpgs:
+                if tpg.has_feature("tpgts"):
+                    head = ("fabric %s target %s tpgt %s"
+                            % (fm.name, tg.wwn, tpg.tag))
+                else:
+                    head = ("fabric %s target %s"
+                            % (fm.name, tg.wwn))
+
+                if tpg.has_enable():
+                    enable = int(tpg.enable)
+                else:
+                    enable = None
+
+                section = []
+                attrs = ["%s%s" % (_indent, attr)
+                         for attr in _list_live_group_attrs(tpg)]
+                if attrs:
+                    section.append("\n".join(attrs))
+
+                for lun in sorted(tpg.luns, key=lambda l: l.lun):
+                    attrs = ["%s%s" % (_indent, attr)
+                             for attr in _list_live_group_attrs(lun)]
+                    if attrs:
+                        fmt = "%slun %s %s %s {"
+                    else:
+                        fmt = "%slun %s backend %s:%s"
+                    section.append(fmt % (_indent, lun.lun,
+                                          lun.storage_object.backstore.plugin,
+                                          lun.storage_object.name))
+                    if attrs:
+                        section.append("\n".join(attrs))
+                        section.append("%s}" % _indent)
+
+                if tpg.has_feature("acls"):
+                    for acl in tpg.node_acls:
+                        section.append("%sacl %s {" % (_indent, acl.node_wwn))
+                        attrs = ["%s%s" % (2*_indent, attr)
+                                 for attr in _list_live_group_attrs(acl)]
+                        if attrs:
+                            section.append("\n".join(attrs))
+                        for mlun in acl.mapped_luns:
+                            section.append("%smapped_lun %s {"
+                                           % (2*_indent, mlun.mapped_lun))
+                            section.append("%s target_lun %s"
+                                           % (3*_indent, mlun.tpg_lun.lun))
+                            section.append("%s write_protect %s"
+                                           % (3*_indent,
+                                              int(mlun.write_protect)))
+                            section.append("%s}" % (2*_indent))
+                        section.append("%s}" % (_indent))
+
+                if tpg.has_feature("nps"):
+                    for np in tpg.network_portals:
+                        section.append("%sportal %s:%s"
+                                       % (_indent, np.ip_address, np.port))
+                if section:
+                    if enable is not None:
+                        section.append("%senable %s"
+                                       % (_indent, enable))
+                    dump.append("%s {" % head)
+                    dump.append("\n".join(section))
+                    dump.append("}")
+                else:
+                    if enable is not None:
+                        dump.append("%s enable %s" % (head, enable))
+                    else:
+                        dump.append(head)
+
+    return "\n".join(dump)
+
+def obj_attr(obj, attr):
+    '''
+    Returns the value of attribute attr of the ConfigTree obj.
+    If we cannot find the attribute, a ConfigError exception will be raised.
+    Else, the attribute's value will be converted from its internal string
+    representation to whatever rtslib expects.
+    '''
+    # TODO Factorize a bit the val_type switch.
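+    # Editor's note: a summary of the conversions the val_type switch below
+    # performs, with illustrative inputs (assumptions, not tested values):
+    #   'bool'    'yes'            -> 1 (or 'Yes' for parameter nodes)
+    #   'bytes'   '2.0GB'          -> 2147483648 (float scaled by K/M/G/T)
+    #   'ipport'  '10.0.0.1:3260'  -> ('10.0.0.1', 3260)
+    #   'backend' 'fileio:disk1'   -> ('fileio', 'disk1')
+    #   'int'/'posint' -> int(string); 'str'/'iqn'/'naa'/'raw' pass through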
+    # TODO Maybe consolidate with validate_val in config.py
+    log.debug("obj_attr(%s, %s)" % (obj, attr))
+    matches = obj.search([(attr, ".*")])
+    if len(matches) != 1:
+        raise ConfigError("Could not determine value of %s attribute for %s"
+                          % (attr, obj.path_str))
+
+    if matches[0].data['type'] not in ['attr', 'group']:
+        raise ConfigError("Configuration error, expected attribute for %s"
+                          % obj.path_str)
+
+    string = matches[0].key[1]
+    if string == NO_VALUE:
+        raise ConfigError("Value of %s attribute is not set for %s"
+                          % (attr, obj.path_str))
+
+    val_type = matches[0].data.get('val_type')
+    ref_path = matches[0].data.get('ref_path')
+
+    valid_value = None
+    if val_type == 'bool':
+        # FIXME There are inconsistencies in bools at the configfs level
+        # The parameters take Yes/No values, the attributes 1/0
+        # Maybe something can be done about it?
+        if string in ['yes', 'true', '1', 'enable']:
+            valid_value = 1
+        elif string in ['no', 'false', '0', 'disable']:
+            valid_value = 0
+        if obj.key[0] == 'parameter':
+            if valid_value == 1:
+                valid_value = 'Yes'
+            else:
+                valid_value = 'No'
+    elif val_type == 'bytes':
+        mults = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}
+        val = float(string[:-2])
+        unit = string[-2:-1]
+        valid_value = int(val * mults[unit])
+    elif val_type == 'int':
+        valid_value = int(string)
+    elif val_type == 'ipport':
+        (addr, _, port) = string.rpartition(":")
+        valid_value = (addr, int(port))
+    elif val_type == 'posint':
+        valid_value = int(string)
+    elif val_type == 'str':
+        valid_value = string
+    elif val_type == 'iqn':
+        valid_value = string
+    elif val_type == 'naa':
+        valid_value = string
+    elif val_type == 'backend':
+        (plugin, _, name) = string.partition(':')
+        valid_value = (plugin, name)
+    elif val_type == 'raw':
+        valid_value = string
+    elif ref_path:
+        valid_value = ref_path
+    else:
+        raise ConfigError("Unknown value type '%s' when validating %s"
+                          % (val_type, matches[0]))
+    return valid_value
+
+def apply_group_attrs(obj, lio_obj):
+    '''
+    Applies group attributes obj to the live lio_obj.
+    '''
+    # TODO Split that one up, too much indentation there!
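+    # Editor's note: dispatch summary for the loop below, derived from the
+    # code; each config group maps onto one rtslib setter family:
+    #   'auth'           -> lio_obj.set_auth_attr(name, value)
+    #   'attribute'      -> lio_obj.set_attribute(name, value)
+    #   'parameter'      -> lio_obj.set_parameter(name, value)
+    #   'discovery_auth' -> lio_obj.discovery_* properties
+    # Attributes the running kernel does not support are logged and skipped.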
+    unsupported_fmt = "Unsupported %s %s: consider upgrading your kernel"
+    for group in obj.nodes:
+        if group.data['type'] == 'group':
+            group_name = group.key[0]
+            for attr in group.nodes:
+                if attr.data['type'] == 'attr' \
+                   and not attr.data['required']:
+                    name = attr.key[0]
+                    value = obj_attr(group, name)
+                    if group_name == 'auth':
+                        try:
+                            lio_obj.get_auth_attr(name)
+                        except RTSLibError:
+                            log.info(unsupported_fmt % ("auth attribute", name))
+                        else:
+                            log.debug("Setting auth %s to %s" % (name, value))
+                            lio_obj.set_auth_attr(name, value)
+                    elif group_name == 'attribute':
+                        try:
+                            lio_obj.get_attribute(name)
+                        except RTSLibError:
+                            log.info(unsupported_fmt % ("attribute", name))
+                        else:
+                            log.debug("Setting attribute %s to %s" % (name, value))
+                            lio_obj.set_attribute(name, value)
+                    elif group_name == 'parameter':
+                        try:
+                            lio_obj.get_parameter(name)
+                        except RTSLibError:
+                            log.info(unsupported_fmt % ("parameter", name))
+                        else:
+                            log.debug("Setting parameter %s to %s" % (name, value))
+                            lio_obj.set_parameter(name, value)
+                    elif group_name == 'discovery_auth':
+                        log.debug("Setting discovery_auth %s to %s" % (name, value))
+                        if name == 'enable':
+                            lio_obj.discovery_enable_auth = value
+                        elif name == 'mutual_password':
+                            lio_obj.discovery_mutual_password = value
+                        elif name == 'mutual_userid':
+                            lio_obj.discovery_mutual_userid = value
+                        elif name == 'password':
+                            lio_obj.discovery_password = value
+                        elif name == 'userid':
+                            lio_obj.discovery_userid = value
+                        else:
+                            raise ConfigError("Unexpected discovery_auth "
+                                              "attribute: %s" % name)
+
+def apply_create_obj(obj):
+    '''
+    Creates an object on the live system.
+    '''
+    # TODO Factorize this when stable, merging it with update and delete,
+    # leveraging rtslib 'any' mode (create if not exist)
+    # TODO storage
+    root = get_root()
+    log.debug("apply_create(%s)" % obj.data)
+    if obj.key[0] == 'mapped_lun':
+        acl = obj.parent
+        if acl.parent.key[0] == 'tpgt':
+            tpg = acl.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = acl.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        node_wwn = acl.key[1]
+        lio_acl = NodeACL(lio_tpg, node_wwn, mode='lookup')
+        mlun = int(obj.key[1])
+
+        write_protect = obj_attr(obj, "write_protect")
+        tpg_lun = int(obj_attr(obj, "target_lun").rpartition(' ')[2])
+        lio_mlun = MappedLUN(lio_acl, mlun, tpg_lun, write_protect)
+        apply_group_attrs(obj, lio_mlun)
+
+    elif obj.key[0] == 'acl':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        node_wwn = obj.key[1]
+        lio_acl = NodeACL(lio_tpg, node_wwn)
+        apply_group_attrs(obj, lio_acl)
+
+    elif obj.key[0] == 'portal':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        (address, _, port) = obj.key[1].partition(':')
+        port = int(port)
+
+        lio_portal = NetworkPortal(lio_tpg, address, port)
+        apply_group_attrs(obj, lio_portal)
+
+    elif obj.key[0] == 'lun':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        lun = int(obj.key[1])
+        (plugin, name) = obj_attr(obj, "backend")
+
+        # TODO move that to a separate function, use for disk too
+        matching_lio_so = [so for so in root.storage_objects if
+                           so.backstore.plugin == plugin and so.name == name]
+
+        if len(matching_lio_so) > 1:
+            raise ConfigError("Detected unsupported configfs storage objects "
+                              "allocation schema for storage object '%s'"
+                              % obj.path_str)
+        elif len(matching_lio_so) == 0:
+            raise ConfigError("Could not find storage object '%s %s' for '%s'"
+                              % (plugin, name, obj.path_str))
+        else:
+            lio_so = matching_lio_so[0]
+
+        lio_lun = LUN(lio_tpg, lun, lio_so)
+        apply_group_attrs(obj, lio_lun)
+
+    elif obj.key[0] == 'tpgt':
+        target = obj.parent
+        fabric = target.parent
+        has_enable = len(obj.search([("enable", ".*")])) != 0
+        if has_enable:
+            enable = obj_attr(obj, "enable")
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        tpgt = int(obj.key[1])
+        lio_tpg = TPG(lio_target, tpgt)
+        if has_enable:
+            lio_tpg.enable = enable
+        apply_group_attrs(obj, lio_tpg)
+
+    elif obj.key[0] == 'target':
+        fabric = obj.parent
+        wwn = obj.key[1]
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=wwn)
+        apply_group_attrs(obj, lio_target)
+        if not lio_target.has_feature("tpgts"):
+            lio_tpg = TPG(lio_target, 1)
+            if len(obj.search([("enable", ".*")])) != 0:
+                lio_tpg.enable = True
+
+    elif obj.key[0] == 'fabric':
+        lio_fabric = FabricModule(obj.key[1])
+        apply_group_attrs(obj, lio_fabric)
+
+    elif obj.key[0] == 'disk':
+        plugin = obj.parent.key[1]
+        name = obj.key[1]
+        idx = max([0] + [b.index for b in root.backstores if b.plugin == plugin]) + 1
+        if plugin == 'fileio':
+            dev = obj_attr(obj, "path")
+            size = obj_attr(obj, "size")
+            buffered = obj_attr(obj, "buffered")
+            lio_bs = FileIOBackstore(idx)
+            lio_so = lio_bs.storage_object(name, dev, size, True, buffered)
+            apply_group_attrs(obj, lio_so)
+        elif plugin == 'iblock':
+            # TODO Add policy for iblock
+            lio_bs = IBlockBackstore(idx)
+            dev = obj_attr(obj, "path")
+            lio_so = lio_bs.storage_object(name, dev, True)
+            apply_group_attrs(obj, lio_so)
+        elif plugin == 'pscsi':
+            # TODO Add policy for pscsi
+            lio_bs = PSCSIBackstore(idx)
+            dev = obj_attr(obj, "path")
+            lio_so = lio_bs.storage_object(name, dev)
+            apply_group_attrs(obj, lio_so)
+        elif plugin == 'rd_mcp':
+            # TODO Add policy for rd_mcp
+            lio_bs = RDMCPBackstore(idx)
+            size = obj_attr(obj, "size")
+            nullio = obj_attr(obj, "nullio")
+            lio_so = lio_bs.storage_object(name, size, True, nullio)
+            apply_group_attrs(obj, lio_so)
+        else:
+            raise ConfigError("Unknown backend '%s' for backstore '%s'"
+                              % (plugin, obj))
+
+        matching_lio_so = [so for so in root.storage_objects if
+                           so.backstore.plugin == plugin and so.name == name]
+        if len(matching_lio_so) > 1:
+            raise ConfigError("Detected unsupported configfs storage objects "
+                              "allocation schema for '%s'" % obj.path_str)
+        elif len(matching_lio_so) == 0:
+            raise ConfigError("Could not find backstore '%s'" % obj.path_str)
+        else:
+            lio_so = matching_lio_so[0]
+
+def apply_delete_obj(obj):
+    '''
+    Deletes an object from the live system.
+    '''
+    # TODO Factorize this when stable
+    # TODO storage fabric cannot be deleted from the system, find a way to
+    # handle this when e.g. path 'storage fileio' is in current config, but
+    # no objects are hanging under it.
+
+    root = get_root()
+    log.debug("apply_delete(%s)" % obj.data)
+    if obj.key[0] == 'mapped_lun':
+        acl = obj.parent
+        if acl.parent.key[0] == 'tpgt':
+            tpg = acl.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = acl.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        node_wwn = acl.key[1]
+        lio_acl = NodeACL(lio_tpg, node_wwn, mode='lookup')
+        mlun = int(obj.key[1])
+        lio_mlun = MappedLUN(lio_acl, mlun)
+        lio_mlun.delete()
+
+    elif obj.key[0] == 'acl':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        node_wwn = obj.key[1]
+        lio_acl = NodeACL(lio_tpg, node_wwn, mode='lookup')
+        lio_acl.delete()
+
+    elif obj.key[0] == 'portal':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        (address, _, port) = obj.key[1].partition(':')
+        port = int(port)
+        lio_portal = NetworkPortal(lio_tpg, address, port, mode='lookup')
+        lio_portal.delete()
+
+    elif obj.key[0] == 'lun':
+        if obj.parent.key[0] == 'tpgt':
+            tpg = obj.parent
+            target = tpg.parent
+        else:
+            tpg = None
+            target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        if tpg is None:
+            tpgt = 1
+        else:
+            tpgt = int(tpg.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        lun = int(obj.key[1])
+        lio_lun = LUN(lio_tpg, lun)
+        lio_lun.delete()
+
+    elif obj.key[0] == 'tpgt':
+        target = obj.parent
+        fabric = target.parent
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
+        tpgt = int(obj.key[1])
+        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
+        # FIXME Is this really needed?
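+        # Editor's note: enabling the TPG just before deleting it reads as a
+        # workaround for kernels that refuse to tear down a disabled TPG; that
+        # rationale is an assumption (see the FIXME above), not verified here.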
+        lio_tpg.enable = True
+        lio_tpg.delete()
+
+    elif obj.key[0] == 'target':
+        fabric = obj.parent
+        wwn = obj.key[1]
+        lio_fabric = FabricModule(fabric.key[1])
+        lio_target = Target(lio_fabric, wwn=wwn, mode='lookup')
+        lio_target.delete()
+
+    elif obj.key[0] == 'disk':
+        plugin = obj.parent.key[1]
+        name = obj.key[1]
+        matching_lio_so = [so for so in root.storage_objects if
+                           so.backstore.plugin == plugin and so.name == name]
+        log.debug("Looking for storage object %s in %s"
+                  % (obj.path_str,
+                     str(["%s/%s" % (so.backstore.plugin, so.name)
+                          for so in root.storage_objects])))
+        if len(matching_lio_so) > 1:
+            raise ConfigError("Detected unsupported configfs storage objects "
+                              "allocation schema for storage object '%s'"
+                              % obj.path_str)
+        elif len(matching_lio_so) == 0:
+            raise ConfigError("Could not find storage object '%s'"
+                              % obj.path_str)
+        else:
+            lio_so = matching_lio_so[0]
+        lio_so.delete()
+
+def clear_configfs():
+    '''
+    Clears the live configfs by deleting all nodes.
+    '''
+    root = get_root()
+    for target in root.targets:
+        target.delete()
+    for backstore in root.backstores:
+        backstore.delete()
+
diff --git a/rtslib/config_parser.py b/rtslib/config_parser.py
new file mode 100644
index 0000000..f85aa55
--- /dev/null
+++ b/rtslib/config_parser.py
@@ -0,0 +1,291 @@
+'''
+This file is part of the LIO SCSI Target.
+
+Copyright (c) 2012-2014 by Datera, Inc.
+More information on www.datera.io.
+
+Original author: Jerome Martin <jxm@netiant.com>
+
+Datera and LIO are trademarks of Datera, Inc., which may be registered in some
+jurisdictions.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+'''
+import logging
+import pyparsing as pp
+
+from config_tree import NO_VALUE
+
+# TODO Add strategic debug (and logging too, it is absent)
+# TODO Using group names as we do with obj_classes would be more robust
+
+DEBUG = False
+if DEBUG:
+    logging.basicConfig()
+    log = logging.getLogger('ConfigParser')
+    log.setLevel(logging.DEBUG)
+else:
+    log = logging.getLogger('ConfigParser')
+    log.setLevel(logging.INFO)
+
+class ConfigParser(object):
+    '''
+    Our configuration format parser.
+    '''
+    # Order is important, used for sorting in Config
+    obj_classes = "storage disk fabric target tpgt lun acl portal mapped_lun"
+
+    def __init__(self):
+        self._init_parser()
+
+    def _init_parser(self):
+        pp.ParserElement.setDefaultWhitespaceChars(' \t')
+
+        tok_comment = pp.Regex(r'#.*')
+        tok_ws = pp.Suppress(pp.OneOrMore(pp.White(' \t')))
+        tok_delim = (pp.Optional(pp.Suppress(tok_comment)) +
+                     pp.Suppress(pp.lineEnd | pp.Literal(';')))
+
+        tok_string = (pp.QuotedString('"')
+                      | pp.QuotedString("'")
+                      | pp.Word(pp.printables, excludeChars="{}#'\";"))
+
+        tok_obj_class = pp.oneOf(self.obj_classes)
+        tok_obj_ident = tok_string
+        tok_obj = pp.Group(tok_obj_class +
+                           tok_ws +
+                           tok_obj_ident)
+        tok_obj = tok_obj.setParseAction(self._parse_action_obj)
+
+        tok_attr_name = pp.Word(pp.alphas, pp.alphas + pp.nums + "_")
+        tok_attr_value = tok_string
+        tok_attr = pp.Group(tok_attr_name +
+                            tok_ws +
+                            tok_attr_value +
+                            pp.Optional(tok_comment))
+        tok_attr = tok_attr.setParseAction(self._parse_action_attr)
+
+        tok_group = pp.Word(pp.alphas, pp.alphas + "_")
+        tok_group = tok_group.setParseAction(self._parse_action_group)
+
+        # FIXME This does not work as intended when used
+        # tok_empty_block = pp.Suppress('{' + pp.ZeroOrMore(tok_delim) + '}')
+
+        tok_statement = pp.Forward()
+        tok_block = (pp.Group(pp.Suppress('{') +
+                              pp.OneOrMore(tok_statement) +
+                              pp.Suppress('}')))
+        tok_block = tok_block.setParseAction(self._parse_action_block)
+
+        tok_statement_no_path = ((tok_group + tok_ws + tok_attr)
+                                 #| (tok_group + tok_empty_block)
+                                 | (tok_group + tok_block)
+                                 | tok_attr)
+
+        tok_optional_if_path = ((tok_ws + tok_group + tok_ws + tok_attr)
+                                #| (tok_ws + tok_group + tok_empty_block)
+                                | (tok_ws + tok_group + tok_block)
+                                #| tok_empty_block
+                                | tok_block
+                                | (tok_ws + tok_attr))
+
+        tok_statement_if_path = (pp.OneOrMore(tok_obj) +
+                                 pp.Optional(tok_optional_if_path))
+
+        tok_statement << pp.Group(pp.ZeroOrMore(tok_delim) +
+                                  (tok_statement_if_path
+                                   | tok_statement_no_path) +
+                                  pp.OneOrMore(tok_delim))
+
+        self._parser = pp.ZeroOrMore(tok_statement)
+
+    def _parse_action_obj(self, source, idx, tokin):
+        value = tokin[0]
+        return [{'type': 'obj',
+                 'line': pp.lineno(idx, source),
+                 'col': pp.col(idx, source),
+                 'key': (value[0], value[1])}]
+
+    def _parse_action_attr(self, source, idx, tokin):
+        value = tokin[0]
+        tokout = {'type': 'attr',
+                  'line': pp.lineno(idx, source),
+                  'col': pp.col(idx, source),
+                  'key': (value[0], value[1])}
+        if len(value) > 2:
+            tokout['comment'] = value[2][1:].strip()
+        return [tokout]
+
+    def _parse_action_group(self, source, idx, tokin):
+        value = tokin
+        return [{'type': 'group',
+                 'line': pp.lineno(idx, source),
+                 'col': pp.col(idx, source),
+                 'key': (value[0],)}]
+
+    def _parse_action_block(self, source, idx, tokin):
+        value = tokin[0].asList()
+        return [{'type': 'block',
+                 'line': pp.lineno(idx, source),
+                 'col': pp.col(idx, source),
+                 'statements': value}]
+
+    def parse_file(self, filepath):
+        return self._parser.parseFile(filepath, parseAll=True).asList()
+
+    def parse_string(self, string):
+        return self._parser.parseString(string, parseAll=True).asList()
+
+class PolicyParser(ConfigParser):
+    '''
+    Our policy format parser.
+    '''
+    def _init_parser(self):
+        # TODO Once stable, factorize with ConfigParser
+        pp.ParserElement.setDefaultWhitespaceChars(' \t')
+
+        tok_comment = pp.Regex(r'#.*')
+        tok_ws = pp.Suppress(pp.OneOrMore(pp.White(' \t')))
+        tok_delim = (pp.Optional(pp.Suppress(tok_comment)) +
+                     pp.Suppress(pp.lineEnd | pp.Literal(';')))
+
+        tok_string = (pp.QuotedString('"')
+                      | pp.QuotedString("'")
+                      | pp.Word(pp.printables, excludeChars="{}#'\";%@()"))
+
+        tok_ref_path = (pp.Suppress('@') + pp.Suppress('(') +
+                        pp.OneOrMore(tok_string) +
+                        pp.Suppress(')'))
+
+        tok_id_rule = pp.Suppress('%') + tok_string("id_type")
+
+        tok_val_rule = (pp.Suppress('%') +
+                        tok_string("val_type") +
+                        pp.Optional(pp.Suppress('(') +
+                                    tok_string("val_dfl") +
+                                    pp.Suppress(')')))
+
+        tok_obj_class = pp.oneOf(self.obj_classes)
+        tok_obj_ident = tok_id_rule | tok_string("id_fixed")
+        tok_obj = pp.Group(tok_obj_class("class") +
+                           tok_ws +
+                           tok_obj_ident)
+        tok_obj = tok_obj.setParseAction(self._parse_action_obj)
+
+        tok_attr_name = pp.Word(pp.alphas, pp.alphas + pp.nums + "_")
+        tok_attr_value = tok_ref_path("ref_path") | tok_val_rule
+        tok_attr = pp.Group(tok_attr_name("name") +
+                            tok_ws +
+                            tok_attr_value +
+                            pp.Optional(tok_comment)("comment"))
+        tok_attr = tok_attr.setParseAction(self._parse_action_attr)
+
+        tok_group = pp.Word(pp.alphas, pp.alphas + "_")
+        tok_group = tok_group.setParseAction(self._parse_action_group)
+
+        tok_statement = pp.Forward()
+        tok_block = (pp.Group(pp.Suppress('{') +
+                              pp.OneOrMore(tok_statement) +
+                              pp.Suppress('}')))
+        tok_block = tok_block.setParseAction(self._parse_action_block)
+
+        tok_statement_no_path = ((tok_group + tok_ws + tok_attr)
+                                 | (tok_group + tok_block)
+                                 | tok_attr)
+
+        tok_optional_if_path = ((tok_ws + tok_group + tok_ws + tok_attr)
+                                | (tok_ws + tok_group + tok_block)
+                                | tok_block
+                                | (tok_ws + tok_attr))
+
+        tok_statement_if_path = (pp.OneOrMore(tok_obj) +
+                                 pp.Optional(tok_optional_if_path))
+
+        tok_statement << pp.Group(pp.ZeroOrMore(tok_delim) +
+                                  (tok_statement_if_path
+                                   | tok_statement_no_path) +
+                                  pp.OneOrMore(tok_delim))
+
+        self._parser = pp.ZeroOrMore(tok_statement)
+
+    def _parse_action_attr(self, source, idx, tokin):
+        value = tokin[0].asDict()
+        ref_path = value.get('ref_path')
+        if ref_path is not None:
+            ref_path = " ".join(ref_path.asList())
+        tokout = {'type': 'attr',
+                  'line': pp.lineno(idx, source),
+                  'col': pp.col(idx, source),
+                  'ref_path': ref_path,
+                  'val_type': value.get('val_type'),
+                  'val_dfl': value.get('val_dfl', NO_VALUE),
+                  'required': value.get('val_dfl', NO_VALUE) == NO_VALUE,
+                  'comment': value.get('comment'),
+                  'key': (value.get('name'), 'xxx')}
+
+        return [tokout]
+
+    def _parse_action_obj(self, source, idx, tokin):
+        value = tokin[0].asDict()
+        return [{'type': 'obj',
+                 'line': pp.lineno(idx, source),
+                 'col': pp.col(idx, source),
+                 'id_type': value.get('id_type'),
+                 'id_fixed': value.get('id_fixed'),
+                 'key': (value.get('class'), value.get('id_fixed', 'xxx'))}]
+
+class PatternParser(ConfigParser):
+    '''
+    Our pattern format parser.
+    '''
+    def _init_parser(self):
+        # TODO Once stable, factorize with ConfigParser
+        pp.ParserElement.setDefaultWhitespaceChars(' \t')
+
+        tok_ws = pp.Suppress(pp.OneOrMore(pp.White(' \t')))
+
+        tok_string = (pp.QuotedString('"')
+                      | pp.QuotedString("'")
+                      | pp.Word(pp.printables, excludeChars="{}#'\";"))
+
+        tok_obj_class = pp.oneOf(self.obj_classes)
+        tok_obj_ident = tok_string
+        tok_obj = pp.Group(tok_obj_class + tok_ws + tok_obj_ident)
+        tok_obj = tok_obj.setParseAction(self._parse_action_obj_attr)
+
+        tok_attr_name = pp.Word(pp.alphas + pp.nums + "_.*[]-")
+        tok_attr_value = tok_string
+        tok_attr = pp.Group(tok_attr_name + tok_ws + tok_attr_value)
+        tok_attr = tok_attr.setParseAction(self._parse_action_obj_attr)
+
+        tok_group = pp.Word(pp.alphas + "_.*[]-")
+        tok_group = tok_group.setParseAction(self._parse_action_group)
+
+        tok_statement_no_path = ((tok_group + tok_ws + tok_attr)
+                                 | tok_attr
+                                 | tok_group)
+
+        tok_optional_if_path = ((tok_ws + tok_group + tok_ws + tok_attr)
+                                | (tok_ws + tok_attr)
+                                | (tok_ws + tok_group))
+
+        tok_statement_if_path = (pp.OneOrMore(tok_obj) +
+                                 pp.Optional(tok_optional_if_path))
+
+        self._parser = tok_statement_if_path | tok_statement_no_path
+
+    def _parse_action_obj_attr(self, source, idx, tokin):
+        return (tokin[0][0], tokin[0][1])
+
+    def _parse_action_group(self, source, idx, tokin):
+        return (tokin[0],)
diff --git a/rtslib/config_tree.py b/rtslib/config_tree.py
new file mode 100644
index 0000000..5960a86
--- /dev/null
+++ b/rtslib/config_tree.py
@@ -0,0 +1,329 @@
+'''
+This file is part of the LIO SCSI Target.
+
+Copyright (c) 2012-2014 by Datera, Inc.
+More information on www.datera.io.
+
+Original author: Jerome Martin <jxm@netiant.com>
+
+Datera and LIO are trademarks of Datera, Inc., which may be registered in some
+jurisdictions.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+'''
+import re, copy, logging
+
+DEBUG = False
+if DEBUG:
+    logging.basicConfig()
+    log = logging.getLogger('ConfigTree')
+    log.setLevel(logging.DEBUG)
+else:
+    log = logging.getLogger('ConfigTree')
+    log.setLevel(logging.INFO)
+
+NO_VALUE = '~~~'
+
+def match_key(search_key, key):
+    '''
+    Matches search_key and key tuples item-for-item, with search_key
+    containing regular expression patterns or None values, and key containing
+    string or None values.
+    '''
+    log.debug("match_key(%s, %s)" % (search_key, key))
+    if len(search_key) == len(key):
+        for idx, pattern in enumerate(search_key):
+            item = key[idx]
+            if not pattern.endswith('$'):
+                pattern = "%s$" % pattern
+            if item is None and pattern is None:
+                continue
+            elif item is None:
+                break
+            else:
+                match = re.match(pattern, item)
+                if match is None:
+                    break
+        else:
+            return True
+
+class ConfigTreeError(Exception):
+    pass
+
+class ConfigTree(object):
+    '''
+    An ordered tree structure to hold configuration data.
+
+    A node can be referred to by its path, relative to the current node.
+    A path is a list of keys, each key a tuple of either string or None items.
+    '''
+    def __init__(self, data=None,
+                 sort_key=lambda x:x,
+                 key_to_string=lambda x:str(x)):
+        '''
+        Initializes a new ConfigTree.
+
+        The optional sort_key is a function used when ordering children of a
+        configuration node.
+
+        The optional key_to_string is a function used when converting a node
+        key to string.
+
+        Direct instantiation should only happen for the root node of the tree.
+        Adding a new node to the tree is achieved by using the set()
+        method of the desired parent for that new node.
+        '''
+        self.data = data
+
+        self._key = ()
+        self._nodes = {}
+        self._parent = None
+        self._sort_key = sort_key
+        self._key_to_string = key_to_string
+
+    def __repr__(self):
+        return "(%s)" % self.path_str
+
+    def __str__(self):
+        return self.path_str
+
+    def get_clone(self, parent=None):
+        '''
+        Returns a clone of the ConfigTree, not sharing any mutable data.
+        '''
+        clone = ConfigTree(copy.deepcopy(self.data),
+                           self._sort_key,
+                           self._key_to_string)
+        clone._parent = parent
+        clone._key = self._key
+        clone.data = copy.deepcopy(self.data)
+        for node in self.nodes:
+            clone._nodes[node.key] = node.get_clone(parent=clone)
+        return clone
+
+    @property
+    def root(self):
+        '''
+        Returns the root node of the tree.
+        '''
+        cur = self
+        while cur.parent:
+            cur = cur.parent
+        return cur
+
+    @property
+    def key(self):
+        '''
+        Returns the current node's key tuple.
+        '''
+        return self._key
+
+    @property
+    def key_str(self):
+        '''
+        Returns the current node's key as a string.
+        '''
+        return self._key_to_string(self.key)
+
+    @property
+    def path(self):
+        '''
+        Returns the node's full path from the tree root as a list of keys.
+        '''
+        if self.is_root:
+            path = []
+        else:
+            path = self.parent.path + [self._key]
+        return path
+
+    @property
+    def path_str(self):
+        '''
+        Returns the node's full path from the tree root as a string.
+        '''
+        strings = []
+        for key in self.path:
+            strings.append(self._key_to_string(key))
+        return " ".join(strings)
+
+    @property
+    def nodes(self):
+        '''
+        Returns the list of all children nodes, sorted with potential
+        dependencies first.
+        '''
+        nodes = sorted(self._nodes.values(), key=self._sort_key)
+        return nodes
+
+    @property
+    def keys(self):
+        '''
+        Generates all children nodes keys, sorted with potential
+        dependencies first.
+        '''
+        keys = (node.key for node in self.nodes)
+        return keys
+
+    @property
+    def parent(self):
+        '''
+        Returns the parent node of the current node, or None.
+        '''
+        return self._parent
+
+    @property
+    def is_root(self):
+        '''
+        Returns True if this is a root node, else False.
+        '''
+        return self._parent == None
+
+    def get(self, node_key):
+        '''
+        Returns the current node's child having node_key, or None.
+        '''
+        return self._nodes.get(node_key)
+
+    def set(self, node_key, node_data=None):
+        '''
+        Creates and adds a child node to the current node, and returns that new
+        node. If the node already exists, then a ConfigTreeError exception will
+        be raised. Else, the new node will be returned.
+
+        node_key is any tuple of strings
+        node_data is an optional arbitrary value
+        '''
+        if node_key not in self.keys:
+            new_node = ConfigTree(self.data,
+                                  self._sort_key,
+                                  self._key_to_string)
+            new_node._parent = self
+            new_node.data = node_data
+            new_node._key = node_key
+            self._nodes[node_key] = new_node
+            return new_node
+        else:
+            raise ConfigTreeError("Node already exists, cannot set: %s"
+                                  % self.get(node_key))
+
+    def cine(self, node_key, node_data=None):
+        '''
+        cine stands for create if not exist: it makes sure a node exists.
+        If it does not, it will create it using node_data.
+        Else node_data will not be updated.
+
+        Returns the matching node in all cases.
+
+        node_key is any tuple of strings
+        node_data is an optional arbitrary value
+        '''
+        if node_key in self.keys:
+            log.debug("cine(%s %s) -> Already exists"
+                      % (self.path_str, node_key))
+            return self.get(node_key)
+        else:
+            log.debug("cine(%s %s) -> Creating"
+                      % (self.path_str, node_key))
+            return self.set(node_key, node_data)
+
+    def update(self, node_key, node_data=None):
+        '''
+        If a node already has node_key as key, its data will be replaced with
+        node_data. Else, it will be created using node_data.
+
+        The matching node will be returned in both cases.
+
+        node_key is any tuple of strings.
+        node_data is an optional arbitrary value.
+        '''
+        try:
+            node = self.set(node_key, node_data)
+        except ConfigTreeError:
+            node = self.get(node_key)
+            node.data = node_data
+        return node
+
+    def delete(self, path):
+        '''
+        Given a path, deletes an entire subtree from the configuration,
+        relative to the current node.
+
+        The deleted subtree will be returned, or None if the path does not
+        exist or is empty. The path must be a list of node keys.
+        '''
+        log.debug("delete(%s) getting subtree" % str(path))
+        subtree = self.get_path(path)
+        log.debug("delete(%s) got subtree: %s"
+                  % (str(path), subtree))
+        if subtree is not None:
+            del subtree.parent._nodes[subtree.key]
+        return subtree
+
+    def get_path(self, path):
+        '''
+        Returns either the node matching path, relative to the current node,
+        or None if the path does not exist.
+        '''
+        log.debug("get_path(%s)" % str(path))
+        cur = self
+        log.debug("get_path(%s) - cur: %s" % (str(path), cur))
+        if path:
+            for key in path:
+                cur = cur.get(key)
+                if cur is None:
+                    break
+            else:
+                return cur
+
+    def search(self, search_path, node_filter=lambda x:x):
+        '''
+        Returns a list of nodes matching the search_path, relative to the
+        current node, or an empty list if no match was found.
+
+        The search_path is a list of node search_key. Each will be matched
+        against node key tuples item-for-item, with search_key containing
+        regular expression patterns or None values, and key containing string
+        or None values.
+
+        node_filter is a function applied to each node before returning it:
+        node_filter(node_in) -> node_out | None (aka filtered out)
+        '''
+        results = []
+        if search_path:
+            search_key = search_path[0]
+            for node in self.nodes:
+                if match_key(search_key, node.key):
+                    if search_path[1:]:
+                        results.extend(node.search(search_path[1:]))
+                    else:
+                        node_out = node_filter(node)
+                        if node_out is not None:
+                            results.append(node_out)
+        return results
+
+    def walk(self, node_filter=lambda x:x):
+        '''
+        Returns a generator yielding our children's tree in depth-first order.
+
+        node_filter is a function applied to each node before dumping it:
+        node_filter(node_in) -> node_out | None (aka filtered out)
+
+        When a node is filtered out, its children will still be walked and
+        filtered/yielded as usual.
+        '''
+        for node_in in self.nodes:
+            node_out = node_filter(node_in)
+            if node_out is not None:
+                yield node_out
+            for next in node_in.walk(node_filter):
+                yield next
diff --git a/rtslib/loop.py b/rtslib/loop.py
deleted file mode 100644
index 11a008b..0000000
--- a/rtslib/loop.py
+++ /dev/null
@@ -1,465 +0,0 @@
-'''
-Implements the RTS SAS loopback classes.
-
-This file is part of RTSLib.
-Copyright (c) 2011-2013 by Datera, Inc - -Licensed under the Apache License, Version 2.0 (the "License"); you may -not use this file except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. -''' - -import re -import os -import glob -import uuid -import shutil - -# rtslib modules -from root import RTSRoot -from node import CFSNode -from utils import RTSLibError, RTSLibBrokenLink -from utils import generate_wwn, fwrite, fread - -class LUN(CFSNode): - ''' - This is an interface to RTS Target LUNs in configFS. - A LUN is identified by its parent Nexus and LUN index. - ''' - - # LUN private stuff - - def __init__(self, parent_nexus, lun, storage_object=None, alias=None): - ''' - A LUN object can be instantiated in two ways: - - B{Creation mode}: If I{storage_object} is specified, the - underlying configFS object will be created with that parameter. - No LUN with the same I{lun} index can pre-exist in the parent - Nexus in that mode, or instantiation will fail. - - B{Lookup mode}: If I{storage_object} is not set, then the LUN - will be bound to the existing configFS LUN object of the parent - Nexus having the specified I{lun} index. The underlying configFS - object must already exist in that mode. - - @param parent_nexus: The parent Nexus object. - @type parent_nexus: Nexus - @param lun: The LUN index. - @type lun: 0-255 - @param storage_object: The storage object to be exported as a LUN. - @type storage_object: StorageObject subclass - @param alias: An optional parameter to manually specify the LUN alias. - You probably do not need this. - @type alias: string - @return: A LUN object. 
- ''' - super(LUN, self).__init__() - - if isinstance(parent_nexus, Nexus): - self._parent_nexus = parent_nexus - else: - raise RTSLibError("Invalid parent Nexus.") - - try: - lun = int(lun) - except ValueError: - raise RTSLibError("Invalid LUN index: %s" % str(lun)) - else: - if lun > 255 or lun < 0: - raise RTSLibError("Invalid LUN index, it must be " - + "between 0 and 255: %d" % lun) - self._lun = lun - - self._path = "%s/lun/lun_%d" % (self.parent_nexus.path, self.lun) - - if storage_object is None and alias is not None: - raise RTSLibError("The alias parameter has no meaning " - + "without the storage_object parameter.") - - if storage_object is not None: - self._create_in_cfs_ine('create') - try: - self._configure(storage_object, alias) - except: - self.delete() - raise - else: - self._create_in_cfs_ine('lookup') - - def __str__(self): - try: - storage_object = self.storage_object - except RTSLibBrokenLink: - desc = "[BROKEN STORAGE LINK]" - else: - backstore = storage_object.backstore - soname = storage_object.name - if backstore.plugin.startswith("rd"): - path = "ramdisk" - else: - path = storage_object.udev_path - desc = "-> %s%d '%s' (%s)" \ - % (backstore.plugin, backstore.index, soname, path) - return "LUN %d %s" % (self.lun, desc) - - def _create_in_cfs_ine(self, mode): - super(LUN, self)._create_in_cfs_ine(mode) - - def _configure(self, storage_object, alias): - self._check_self() - if alias is None: - alias = str(uuid.uuid4())[-10:] - else: - alias = str(alias).strip() - if '/' in alias: - raise RTSLibError("Invalid alias: %s", alias) - destination = "%s/%s" % (self.path, alias) - from tcm import StorageObject - if isinstance(storage_object, StorageObject): - if storage_object.exists: - source = storage_object.path - else: - raise RTSLibError("The storage_object does not exist " - + "in configFS.") - else: - raise RTSLibError("Invalid storage object.") - - os.symlink(source, destination) - - def _get_alias(self): - self._check_self() - alias = None - for path in os.listdir(self.path): - if os.path.islink("%s/%s" % (self.path, path)): - alias = os.path.basename(path) - break - if alias is None: - raise RTSLibBrokenLink("Broken LUN in configFS, no " \ - + "storage object attached.") - else: - return alias - - def _get_storage_object(self): - self._check_self() - alias_path = None - for path in os.listdir(self.path): - if os.path.islink("%s/%s" % (self.path, path)): - alias_path = os.path.realpath("%s/%s" % (self.path, path)) - break - if alias_path is None: - raise RTSLibBrokenLink("Broken LUN in configFS, no " \ - + "storage object attached.") - rtsroot = RTSRoot() - for storage_object in rtsroot.storage_objects: - if storage_object.path == alias_path: - return storage_object - raise RTSLibBrokenLink("Broken storage object link in LUN.") - - def _get_parent_nexus(self): - return self._parent_nexus - - def _get_lun(self): - return self._lun - - def _get_alua_metadata_path(self): - return "%s/lun_%d" % (self.parent_nexus.alua_metadata_path, self.lun) - - # LUN public stuff - - def delete(self): - ''' - If the underlying configFS object does not exists, this method does - nothing. If the underlying configFS object exists, this method attempts - to delete it. 
- ''' - self._check_self() - try: - link = self.alias - except RTSLibBrokenLink: - pass - else: - if os.path.islink("%s/%s" % (self.path, link)): - os.unlink("%s/%s" % (self.path, link)) - - super(LUN, self).delete() - if os.path.isdir(self.alua_metadata_path): - shutil.rmtree(self.alua_metadata_path) - - alua_metadata_path = property(_get_alua_metadata_path, - doc="Get the ALUA metadata directory path for the LUN.") - parent_nexus = property(_get_parent_nexus, - doc="Get the parent Nexus object.") - lun = property(_get_lun, - doc="Get the LUN index as an int.") - storage_object = property(_get_storage_object, - doc="Get the storage object attached to the LUN.") - alias = property(_get_alias, - doc="Get the LUN alias.") - -class Nexus(CFSNode): - ''' - This is a an interface to Target Portal Groups in configFS. - A Nexus is identified by its parent Target object and its nexus Tag. - To a Nexus object is attached a list of NetworkPortals. - ''' - - # Nexus private stuff - - def __init__(self, parent_target, tag, mode='any'): - ''' - @param parent_target: The parent Target object of the Nexus. - @type parent_target: Target - @param tag: The Nexus Tag (TPGT). - @type tag: int > 0 - @param mode:An optionnal string containing the object creation mode: - - I{'any'} means the configFS object will be either looked up or - created. - - I{'lookup'} means the object MUST already exist configFS. - - I{'create'} means the object must NOT already exist in configFS. - @type mode:string - @return: A Nexus object. - ''' - - super(Nexus, self).__init__() - - try: - self._tag = int(tag) - except ValueError: - raise RTSLibError("Invalid Tag.") - - if tag < 1: - raise RTSLibError("Invalig Tag, it must be >0.") - - if isinstance(parent_target, Target): - self._parent_target = parent_target - else: - raise RTSLibError("Invalid parent Target.") - - self._path = "%s/tpgt_%d" % (self.parent_target.path, self.tag) - self._create_in_cfs_ine(mode) - - def __str__(self): - try: - initiator = self.initiator - except RTSLibError: - initiator = "[BROKEN]" - return "Nexus %d / initiator %s" % (self.tag, initiator) - - def _get_initiator(self): - nexus_path = self._path + "/nexus" - if os.path.isfile(nexus_path): - try: - initiator = fread(nexus_path) - except IOError, msg: - raise RTSLibError("Cannot read Nexus initiator address " - + "(>=4.0 style, %s): %s." - % (nexus_path, msg)) - else: - try: - initiator = os.listdir(nexus_path)[0] - except IOError, msg: - raise RTSLibError("Cannot read Nexus initiator address " - + "(<4.0 style, %s): %s." - % (nexus_path, msg)) - return initiator.strip() - - def _get_tag(self): - return self._tag - - def _get_parent_target(self): - return self._parent_target - - def _create_in_cfs_ine(self, mode): - super(Nexus, self)._create_in_cfs_ine(mode) - - if not os.path.isdir(self.alua_metadata_path): - os.makedirs(self.alua_metadata_path) - - if self._fresh: - initiator = generate_wwn('naa') - nexus_path = self._path + "/nexus" - if os.path.isfile(nexus_path): - try: - fwrite(nexus_path, initiator) - except IOError, msg: - raise RTSLibError("Cannot create Nexus initiator " - + "(>=4.0 style, %s): %s." - % (nexus_path, msg)) - else: - try: - os.makedirs(nexus_path + "/" + initiator) - except IOError, msg: - raise RTSLibError("Cannot create Nexus initiator." - + "(<4.0 style, %s): %s." 
- % (nexus_path, msg)) - - def _list_luns(self): - self._check_self() - luns = [] - lun_dirs = [os.path.basename(path) - for path in os.listdir("%s/lun" % self.path)] - for lun_dir in lun_dirs: - lun = lun_dir.split('_')[1] - lun = int(lun) - luns.append(LUN(self, lun)) - return luns - - def _control(self, command): - self._check_self() - path = "%s/control" % self.path - fwrite(path, "%s\n" % str(command)) - - def _get_alua_metadata_path(self): - return "%s/%s+%d" \ - % (self.alua_metadata_dir, - self.parent_target.naa, self.tag) - - # Nexus public stuff - - def delete(self): - ''' - Recursively deletes a Nexus object. - This will delete all attached LUN, and then the Nexus itself. - ''' - self._check_self() - for lun in self.luns: - lun.delete() - - # TODO: check that ALUA MD removal works while removing Nexus - if os.path.isdir(self.alua_metadata_path): - shutil.rmtree(self.alua_metadata_path) - - nexus_path = self._path + "/nexus" - if os.path.isfile(nexus_path): - try: - fwrite(nexus_path, "NULL") - except IOError, msg: - raise RTSLibError("Cannot delete Nexus initiator " - + "(>=4.0 style, %s): %s." - % (nexus_path, msg)) - else: - try: - os.rmdir(nexus_path + "/" + self.initiator) - except IOError, msg: - raise RTSLibError("Cannot delete Nexus initiator." - + "(<4.0 style, %s): %s." - % (nexus_path, msg)) - - super(Nexus, self).delete() - - def lun(self, lun, storage_object=None, alias=None): - ''' - Same as LUN() but without specifying the parent_nexus. - ''' - self._check_self() - return LUN(self, lun=lun, storage_object=storage_object, alias=alias) - - alua_metadata_path = property(_get_alua_metadata_path, - doc="Get the ALUA metadata directory path " \ - + "for the Nexus.") - tag = property(_get_tag, - doc="Get the Nexus Tag as an int.") - initiator = property(_get_initiator, - doc="Get the Nexus initiator address as a string.") - parent_target = property(_get_parent_target, - doc="Get the parent Target object to which the " \ - + "Nexus is attached.") - luns = property(_list_luns, - doc="Get the list of LUN objects currently attached " \ - + "to the Nexus.") - -class Target(CFSNode): - ''' - This is an interface to loopback SAS Targets in configFS. - A Target is identified by its naa SAS address. - To a Target is attached a list of Nexus objects. - ''' - - # Target private stuff - - def __init__(self, naa=None, mode='any'): - ''' - @param naa: The optionnal Target's address. - If no address or an empty address is specified, one will be - generated for you. - @type naa: string - @param mode:An optionnal string containing the object creation mode: - - I{'any'} means the configFS object will be either looked up - or created. - - I{'lookup'} means the object MUST already exist configFS. - - I{'create'} means the object must NOT already exist in configFS. - @type mode:string - @return: A Target object. 
- ''' - - super(Target, self).__init__() - - if naa is None: - naa = generate_wwn('naa') - else: - naa = str(naa).lower().strip() - self._naa = naa - self._path = "%s/loopback/%s" % (self.configfs_dir, self._naa) - if not self: - if not re.match( - "naa\.[0-9]+", naa) \ - or re.search(' ', naa) \ - or re.search('_', naa): - raise RTSLibError("Invalid naa: %s" - % naa) - self._create_in_cfs_ine(mode) - - def __str__(self): - return "SAS loopback %s" % self.naa - - def _list_nexuses(self): - self._check_self() - nexuses = [] - nexus_dirs = glob.glob("%s/tpgt*" % self.path) - for nexus_dir in nexus_dirs: - tag = os.path.basename(nexus_dir).split('_')[1] - tag = int(tag) - nexuses.append(Nexus(self, tag, 'lookup')) - return nexuses - - def _get_naa(self): - return self._naa - - # Target public stuff - - def delete(self): - ''' - Recursively deletes a Target object. - This will delete all attached Nexus objects and then the Target itself. - ''' - self._check_self() - for nexus in self.nexuses: - nexus.delete() - super(Target, self).delete() - - def nexus(self, tag, mode='any'): - ''' - Same as Nexus() but without the parent_target parameter. - ''' - self._check_self() - return Nexus(self, tag=tag, mode=mode) - - naa = property(_get_naa, - doc="Get the naa of the Target object as a string.") - nexuses = property(_list_nexuses, - doc="Get the list of Nexus objects currently " - + "attached to the Target.") - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/rtslib/node.py b/rtslib/node.py index 77a1cbc..4107065 100644 --- a/rtslib/node.py +++ b/rtslib/node.py @@ -2,7 +2,7 @@ Implements the base CFSNode class and a few inherited variants. This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain @@ -24,8 +24,6 @@ from utils import fread, fwrite, RTSLibError, RTSLibNotInCFS class CFSNode(object): - # Where do we store the fabric modules spec files ? - spec_dir = "/var/target/fabric" # Where is the configfs base LIO directory ? configfs_dir = '/sys/kernel/config/target' # TODO: Make the ALUA path generic, not iscsi-centric @@ -51,11 +49,11 @@ class CFSNode(object): def _create_in_cfs_ine(self, mode): ''' - Creates the configFS node if it does not already exists depending on + Creates the configFS node if it does not already exist depending on the mode. - any -> makes sure it exists, also works if the node already does exists - lookup -> make sure it does NOT exists - create -> create the node which must not exists beforehand + any -> makes sure it exists, also works if the node already does exist + lookup -> make sure it does NOT exist + create -> create the node which must not exist beforehand Upon success (no exception raised), self._fresh is True if a node was created, else self._fresh is False. ''' @@ -67,11 +65,16 @@ class CFSNode(object): elif not self and mode == 'lookup': raise RTSLibNotInCFS("No such %s in configfs: %s." % (self.__class__.__name__, self.path)) - if not self: + if self: + self._fresh = False + return + + try: os.mkdir(self.path) self._fresh = True - else: - self._fresh = False + except: + raise RTSLibError("Could not create %s in configFS." 
+ % self.__class__.__name__) def _exists(self): return bool(self) @@ -167,10 +170,10 @@ class CFSNode(object): else: try: fwrite(path, "%s\n" % str(value)) - except IOError, msg: + except Exception, msg: msg = msg[1] - raise RTSLibError("Cannot set attribute %s: %s" - % (str(attribute), str(msg))) + raise RTSLibError("Cannot set attribute %s to '%s': %s" + % (str(attribute), str(value), str(msg))) def get_attribute(self, attribute): ''' @@ -202,7 +205,7 @@ class CFSNode(object): else: try: fwrite(path, "%s\n" % str(value)) - except IOError, msg: + except Exception, msg: msg = msg[1] raise RTSLibError("Cannot set parameter %s: %s" % (str(parameter), str(msg))) diff --git a/rtslib/root.py b/rtslib/root.py index 970057f..9f32403 100644 --- a/rtslib/root.py +++ b/rtslib/root.py @@ -2,7 +2,7 @@ Implements the RTSRoot class. This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain @@ -24,8 +24,8 @@ import glob from node import CFSNode from target import Target, FabricModule from tcm import FileIOBackstore, IBlockBackstore -from tcm import PSCSIBackstore, RDDRBackstore, RDMCPBackstore -from utils import RTSLibError, RTSLibBrokenLink, flatten_nested_list, modprobe +from tcm import PSCSIBackstore, RDMCPBackstore +from utils import RTSLibError, RTSLibBrokenLink, modprobe class RTSRoot(CFSNode): ''' @@ -70,12 +70,11 @@ class RTSRoot(CFSNode): self._check_self() targets = set([]) for fabric_module in self.fabric_modules: - targets.update(fabric_module.targets) - return targets + for target in fabric_module.targets: + yield target def _list_backstores(self): self._check_self() - backstores = set([]) if os.path.isdir("%s/core" % self.path): backstore_dirs = glob.glob("%s/core/*_*" % self.path) for backstore_dir in [os.path.basename(path) @@ -84,58 +83,58 @@ class RTSRoot(CFSNode): backstore_dir) if regex: if regex.group(1) == "fileio": - backstores.add( - FileIOBackstore(int(regex.group(3)), 'lookup')) + yield FileIOBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "pscsi": - backstores.add( - PSCSIBackstore(int(regex.group(3)), 'lookup')) + yield PSCSIBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "iblock": - backstores.add( - IBlockBackstore(int(regex.group(3)), 'lookup')) - elif regex.group(1) == "rd_dr": - backstores.add( - RDDRBackstore(int(regex.group(3)), 'lookup')) + yield IBlockBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "rd_mcp": - backstores.add( - RDMCPBackstore(int(regex.group(3)), 'lookup')) - return backstores + yield RDMCPBackstore(int(regex.group(3)), 'lookup') def _list_storage_objects(self): self._check_self() - return set(flatten_nested_list([backstore.storage_objects - for backstore in self.backstores])) + for bs in self.backstores: + for so in bs.storage_objects: + yield so def _list_tpgs(self): self._check_self() - return set(flatten_nested_list([t.tpgs for t in self.targets])) + for t in self.targets: + for tpg in t.tpgs: + yield tpg def _list_node_acls(self): self._check_self() - return set(flatten_nested_list([t.node_acls for t in self.tpgs])) + for t in self.tpgs: + for node_acl in t.node_acls: + yield node_acl def _list_network_portals(self): self._check_self() - return set(flatten_nested_list([t.network_portals for t in self.tpgs])) + for t in self.tpgs: + for p in t.network_portals: + yield p def _list_luns(self): 
self._check_self() - return set(flatten_nested_list([t.luns for t in self.tpgs])) + for t in self.tpgs: + for lun in t.luns: + yield lun def _list_fabric_modules(self): self._check_self() - mod_names = [mod_name[:-5] for mod_name in os.listdir(self.spec_dir) - if mod_name.endswith('.spec')] - modules = [FabricModule(mod_name) for mod_name in mod_names] - return modules + for mod in FabricModule.all(): + yield mod def _list_loaded_fabric_modules(self): - return [fm for fm in self._list_fabric_modules() if fm.exists] + for module in self._list_fabric_modules(): + if module.exists: + yield module def __str__(self): - return "rtsadmin" + return "rtslib" # RTSRoot public stuff - backstores = property(_list_backstores, doc="Get the list of Backstore objects.") targets = property(_list_targets, diff --git a/rtslib/target.py b/rtslib/target.py index ac5e11c..19541f7 100644 --- a/rtslib/target.py +++ b/rtslib/target.py @@ -2,7 +2,7 @@ Implements the RTS generic Target fabric classes. This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain @@ -25,12 +25,14 @@ import shutil from node import CFSNode from os.path import isdir -from doctest import testmod from configobj import ConfigObj from utils import RTSLibError, RTSLibBrokenLink, modprobe from utils import is_ipv6_address, is_ipv4_address from utils import fread, fwrite, generate_wwn, is_valid_wwn, exec_argv +# Where do we store the fabric modules spec files ? +spec_dir = "/var/target/fabric" + class FabricModule(CFSNode): ''' This is an interface to RTS Target Fabric Modules. @@ -44,6 +46,14 @@ class FabricModule(CFSNode): discovery_auth_attributes = set(["discovery_auth"]) target_names_excludes = version_attributes | discovery_auth_attributes + @classmethod + def all(cls): + mod_names = [mod_name[:-5] for mod_name in os.listdir(spec_dir) + if mod_name.endswith('.spec')] + for name in mod_names: + yield FabricModule(name) + + # FabricModule private stuff def __init__(self, name): ''' @@ -53,8 +63,8 @@ class FabricModule(CFSNode): @type name: str ''' super(FabricModule, self).__init__() - self.name = name - self.spec = self._parse_spec() + self.name = str(name) + self.spec = self._parse_spec("%s/%s.spec" % (spec_dir, name)) self._path = "%s/%s" % (self.configfs_dir, self.spec['configfs_group']) # FabricModule public stuff @@ -97,7 +107,7 @@ class FabricModule(CFSNode): yield ('create_cfs_group', self._fresh, "Created '%s'." % self.path) - def _parse_spec(self): + def _parse_spec(self, spec_file): ''' Parses the fabric module spec file. 
''' @@ -112,7 +122,6 @@ class FabricModule(CFSNode): wwn_from_cmds_filter='', wwn_type='free') - spec_file = "%s/%s.spec" % (self.spec_dir, self.name) spec = ConfigObj(spec_file).dict() if spec: self.spec_file = spec_file @@ -196,13 +205,10 @@ class FabricModule(CFSNode): def _list_targets(self): if self.exists: - return set( - [Target(self, wwn, 'lookup') - for wwn in os.listdir(self.path) - if os.path.isdir("%s/%s" % (self.path, wwn)) - if wwn not in self.target_names_excludes]) - else: - return set([]) + for wwn in os.listdir(self.path): + if os.path.isdir("%s/%s" % (self.path, wwn)) and \ + wwn not in self.target_names_excludes: + yield Target(self, wwn, 'lookup') def _get_version(self): if self.exists: @@ -313,7 +319,7 @@ class FabricModule(CFSNode): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/enforce_discovery_auth" % self.path - if enable: + if int(enable): enable = 1 else: enable = 0 @@ -500,12 +506,15 @@ class LUN(CFSNode): def delete(self): ''' - If the underlying configFS object does not exists, this method does + If the underlying configFS object does not exist, this method does nothing. If the underlying configFS object exists, this method attempts to delete it along with all MappedLUN objects referencing that LUN. ''' self._check_self() - [mlun.delete() for mlun in self._list_mapped_luns()] + + for mlun in self.mapped_luns: + mlun.delete() + try: link = self.alias except RTSLibBrokenLink: @@ -654,11 +663,7 @@ class MappedLUN(CFSNode): def _get_write_protect(self): self._check_self() path = "%s/write_protect" % self.path - write_protect = fread(path).strip() - if write_protect == "1": - return True - else: - return False + return bool(int(fread(path))) def _get_tpg_lun(self): self._check_self() @@ -757,12 +762,9 @@ class NodeACL(CFSNode): def _list_mapped_luns(self): self._check_self() - mapped_luns = [] - mapped_lun_dirs = glob.glob("%s/lun_*" % self.path) - for mapped_lun_dir in mapped_lun_dirs: + for mapped_lun_dir in glob.glob("%s/lun_*" % self.path): mapped_lun = int(os.path.basename(mapped_lun_dir).split("_")[1]) - mapped_luns.append(MappedLUN(self, mapped_lun)) - return mapped_luns + yield MappedLUN(self, mapped_lun) # NodeACL public stuff def has_feature(self, feature): @@ -937,8 +939,8 @@ class TPG(CFSNode): except ValueError: raise RTSLibError("Invalid Tag.") - if tag < 1: - raise RTSLibError("Invalig Tag, it must be >0.") + if tag < 0: + raise RTSLibError("Invalid Tag, it must be 0 or more.") if isinstance(parent_target, Target): self._parent_target = parent_target @@ -968,10 +970,9 @@ class TPG(CFSNode): def _list_network_portals(self): self._check_self() if not self.has_feature('nps'): - return [] - network_portals = [] - network_portal_dirs = os.listdir("%s/np" % self.path) - for network_portal_dir in network_portal_dirs: + return + + for network_portal_dir in os.listdir("%s/np" % self.path): if network_portal_dir.startswith('['): # IPv6 portals are [IPv6]:PORT (ip_address, port) = \ os.path.basename(network_portal_dir)[1:].split("]") port = int(port[1:]) else: # IPv4 portals are IP:PORT (ip_address, port) = \ os.path.basename(network_portal_dir).split(":") port = int(port) - network_portals.append( - NetworkPortal(self, ip_address, port, 'lookup')) - return network_portals + yield NetworkPortal(self, ip_address, port, 'lookup') def _get_enable(self): self._check_self() @@ -992,22 +991,22 @@ class TPG(CFSNode): # If the TPG does not have the enable attribute, then it is always # enabled.
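A note on the listing hunks above: the patch converts rtslib's listing accessors (RTSRoot.targets, backstores, storage_objects, tpgs, node_acls, network_portals, luns, FabricModule.targets, NodeACL.mapped_luns, and more below) from building full lists or sets to yielding objects lazily as generators. A minimal sketch of what that means for callers, assuming only the RTSRoot API shown in the rtslib/root.py hunk and a machine with the LIO configfs tree available:

    # Iteration is unchanged, but now lazy: each Target is instantiated
    # only as configfs is walked.
    from rtslib.root import RTSRoot

    root = RTSRoot()
    for target in root.targets:
        print(target.wwn)

    # Code that relied on list/set semantics (len(), indexing, reuse)
    # must now materialize the generator explicitly:
    targets = list(root.targets)
    print(len(targets))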
if os.path.isfile(path): - return int(fread(path)) + return bool(int(fread(path))) else: - return 1 + return True def _set_enable(self, boolean): ''' Enables or disables the TPG. Raises an error if trying to disable a TPG - without en enable attribute (but enabling works in that case). + without an enable attribute (but enabling works in that case). ''' self._check_self() path = "%s/enable" % self.path - if os.path.isfile(path): - if boolean and not self._get_enable(): - fwrite(path, "1") - elif not boolean and self._get_enable(): - fwrite(path, "0") + if os.path.isfile(path) and (boolean != self._get_enable()): + try: + fwrite(path, str(int(boolean))) + except IOError, e: + raise RTSLibError("Cannot change enable state: %s" % e) elif not boolean: raise RTSLibError("TPG cannot be disabled.") @@ -1051,24 +1050,21 @@ class TPG(CFSNode): def _list_node_acls(self): self._check_self() if not self.has_feature('acls'): - return [] - node_acls = [] + return + node_acl_dirs = [os.path.basename(path) for path in os.listdir("%s/acls" % self.path)] for node_acl_dir in node_acl_dirs: - node_acls.append(NodeACL(self, node_acl_dir, 'lookup')) - return node_acls + yield NodeACL(self, node_acl_dir, 'lookup') def _list_luns(self): self._check_self() - luns = [] lun_dirs = [os.path.basename(path) for path in os.listdir("%s/lun" % self.path)] for lun_dir in lun_dirs: lun = lun_dir.split('_')[1] lun = int(lun) - luns.append(LUN(self, lun)) - return luns + yield LUN(self, lun) def _control(self, command): self._check_self() @@ -1132,6 +1128,14 @@ class TPG(CFSNode): self._check_self() return LUN(self, lun=lun, storage_object=storage_object, alias=alias) + def has_enable(self): + ''' + Returns True if the TPG has the enable attribute, else False. + ''' + self._check_self() + path = "%s/enable" % self.path + return os.path.isfile(path) + alua_metadata_path = property(_get_alua_metadata_path, doc="Get the ALUA metadata directory path " \ + "for the TPG.") @@ -1210,13 +1214,10 @@ class Target(CFSNode): def _list_tpgs(self): self._check_self() - tpgs = [] - tpg_dirs = glob.glob("%s/tpgt*" % self.path) - for tpg_dir in tpg_dirs: + for tpg_dir in glob.glob("%s/tpgt*" % self.path): tag = os.path.basename(tpg_dir).split('_')[1] tag = int(tag) - tpgs.append(TPG(self, tag, 'lookup')) - return tpgs + yield TPG(self, tag, 'lookup') # Target public stuff @@ -1239,6 +1240,7 @@ class Target(CFSNode): tpgs = property(_list_tpgs, doc="Get the list of TPG for the Target.") def _test(): + from doctest import testmod testmod() if __name__ == "__main__": diff --git a/rtslib/tcm.py b/rtslib/tcm.py index 1b1b8c3..85e9b9b 100644 --- a/rtslib/tcm.py +++ b/rtslib/tcm.py @@ -2,7 +2,7 @@ Implements the RTS Target backstore and storage object classes. This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
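A note on the TPG enable hunks above: _get_enable() now returns a bool, _set_enable() wraps write failures in RTSLibError, and the new has_enable() lets callers check whether the fabric exposes an enable attribute before trying to disable a TPG, since disabling one without that attribute raises. A short sketch, assuming tpg is an rtslib TPG instance and that the class wires these methods into its usual enable property:

    if tpg.has_enable():
        tpg.enable = False   # raises RTSLibError only if the write fails
    else:
        # No enable attribute: the TPG is always enabled, and setting
        # enable = False would raise "TPG cannot be disabled."
        pass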
You may obtain @@ -47,24 +47,17 @@ class Backstore(CFSNode): self._index) self._create_in_cfs_ine(mode) - def _get_plugin(self): - return self._plugin - def _get_index(self): return self._index def _list_storage_objects(self): self._check_self() - storage_objects = [] storage_object_names = [os.path.basename(s) for s in os.listdir(self.path) if s not in set(["hba_info", "hba_mode"])] for storage_object_name in storage_object_names: - storage_objects.append(self._storage_object_class( - self, storage_object_name)) - - return storage_objects + yield self._storage_object_class(self, storage_object_name) def _create_in_cfs_ine(self, mode): try: @@ -113,8 +106,6 @@ class Backstore(CFSNode): doc="Get the list of StorageObjects attached to the backstore.") version = property(_get_version, doc="Get the Backstore plugin version string.") - plugin = property(_get_plugin, - doc="Get the Backstore plugin name.") name = property(_get_name, doc="Get the backstore name.") @@ -170,39 +161,6 @@ class PSCSIBackstore(Backstore): doc="Get the legacy mode flag. If True, the Vitualbackstore " + " index must match the StorageObjects real HBAs.") -class RDDRBackstore(Backstore): - ''' - This is an interface to rd_dr backstore plugin objects in configFS. - A RDDRBackstore object is identified by its backstore index. - ''' - - # RDDRBackstore private stuff - - def __init__(self, index, mode='any'): - ''' - @param index: The backstore index. - @type index: int - @param mode: An optionnal string containing the object creation mode: - - I{'any'} the configFS object will be either lookupd or created. - - I{'lookup'} the object MUST already exist configFS. - - I{'create'} the object must NOT already exist in configFS. - @type mode:string - @return: A RDDRBackstore object. - ''' - - super(RDDRBackstore, self).__init__("rd_dr", RDDRStorageObject, - index, mode) - - # RDDRBackstore public stuff - - def storage_object(self, name, size=None, gen_wwn=True): - ''' - Same as RDDRStorageObject() without specifying the backstore - ''' - self._check_self() - return RDDRStorageObject(self, name=name, - size=size, gen_wwn=gen_wwn) - class RDMCPBackstore(Backstore): ''' This is an interface to rd_mcp backstore plugin objects in configFS. @@ -228,13 +186,13 @@ class RDMCPBackstore(Backstore): # RDMCPBackstore public stuff - def storage_object(self, name, size=None, gen_wwn=True): + def storage_object(self, name, size=None, gen_wwn=True, nullio=False): ''' Same as RDMCPStorageObject() without specifying the backstore ''' self._check_self() - return RDMCPStorageObject(self, name=name, - size=size, gen_wwn=gen_wwn) + return RDMCPStorageObject(self, name=name, size=size, + gen_wwn=gen_wwn, nullio=nullio) class FileIOBackstore(Backstore): ''' @@ -420,13 +378,11 @@ class StorageObject(CFSNode): def _list_attached_luns(self): ''' - Just returns a set of all luns attached to a storage object. + Generates all luns attached to a storage object. 
''' self._check_self() - luns = set([]) for lun in self._gen_attached_luns(): - luns.add(lun) - return luns + yield lun # StorageObject public stuff @@ -472,7 +428,7 @@ class StorageObject(CFSNode): wwn = property(_get_wwn, _set_wwn, doc="Get or set the StorageObject T10 WWN Serial as a string.") status = property(_get_status, - doc="Get the storage object status, depending on wether or not it"\ + doc="Get the storage object status, depending on whether or not it"\ + "is used by any LUN") attached_luns = property(_list_attached_luns, doc="Get the list of all LUN objects attached.") @@ -656,97 +612,6 @@ class PSCSIStorageObject(StorageObject): lun = property(_get_lun, doc="Get the SCSI device LUN") -class RDDRStorageObject(StorageObject): - ''' - An interface to configFS storage objects for rd_dr backstore. - ''' - - # RDDRStorageObject private stuff - - def __init__(self, backstore, name, size=None, gen_wwn=True): - ''' - A RDDRStorageObject can be instantiated in two ways: - - B{Creation mode}: If I{size} is specified, the underlying - configFS object will be created with that parameter. - No RDDRStorageObject with the same I{name} can pre-exist in the - parent RDDRBackstore in that mode, or instantiation will fail. - - B{Lookup mode}: If I{size} is not set, then the RDDRStorageObject - will be bound to the existing configFS object in the parent - RDDRBackstore having the specified I{name}. - The underlying configFS object must already exist in that mode, - or instantiation will fail. - - @param backstore: The parent backstore of the RDDRStorageObject. - @type backstore: RDDRBackstore - @param name: The name of the RDDRStorageObject. - @type name: string - @param size: The size of the ramdrive to create: - - If size is an int, it represents a number of bytes - - If size is a string, the following units can be used : - - I{B} or no unit present for bytes - - I{k}, I{K}, I{kB}, I{KB} for kB (kilobytes) - - I{m}, I{M}, I{mB}, I{MB} for MB (megabytes) - - I{g}, I{G}, I{gB}, I{GB} for GB (gigabytes) - - I{t}, I{T}, I{tB}, I{TB} for TB (terabytes) - Example: size="1MB" for a one megabytes storage object. - - Note that the size will be rounded to the closest 4096 Bytes - RAM pages count. For instance, a size of 100000 Bytes will be - rounded to 24 pages, really 98304 Bytes. - - The base value for kilo is 1024, aka 1kB = 1024B. - Strictly speaking, we use kiB, MiB, etc. - @type size: string or int - @param gen_wwn: Should we generate a T10 WWN Unit Serial ? - @type gen_wwn: bool - @return: A RDDRStorageObject object. 
- ''' - - if size is not None: - super(RDDRStorageObject, self).__init__(backstore, RDDRBackstore, - name, 'create') - try: - self._configure(size, gen_wwn) - except: - self.delete() - raise - else: - super(RDDRStorageObject, self).__init__(backstore, RDDRBackstore, - name, 'lookup') - - def _configure(self, size, wwn): - self._check_self() - size = convert_human_to_bytes(size) - # convert to 4k pages - size = round(float(size)/4096) - if size == 0: - size = 1 - - self._control("rd_pages=%d" % size) - self._enable() - if wwn: - self.wwn = generate_wwn('unit_serial') - - def _get_page_size(self): - self._check_self() - return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[1]) - - def _get_pages(self): - self._check_self() - return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[0]) - - def _get_size(self): - self._check_self() - size = self._get_page_size() * self._get_pages() - return size - - # RDDRStorageObject public stuff - - page_size = property(_get_page_size, - doc="Get the ramdisk page size.") - pages = property(_get_pages, - doc="Get the ramdisk number of pages.") - size = property(_get_size, - doc="Get the ramdisk size in bytes.") - class RDMCPStorageObject(StorageObject): ''' An interface to configFS storage objects for rd_mcp backstore. @@ -754,7 +619,7 @@ class RDMCPStorageObject(StorageObject): # RDMCPStorageObject private stuff - def __init__(self, backstore, name, size=None, gen_wwn=True): + def __init__(self, backstore, name, size=None, gen_wwn=True, nullio=False): ''' A RDMCPStorageObject can be instantiated in two ways: - B{Creation mode}: If I{size} is specified, the underlying @@ -788,16 +653,17 @@ class RDMCPStorageObject(StorageObject): @type size: string or int @param gen_wwn: Should we generate a T10 WWN Unit Serial ? @type gen_wwn: bool + @param nullio: If rd should be created w/o backing page store. + @type nullio: bool @return: A RDMCPStorageObject object. ''' - if size is not None: super(RDMCPStorageObject, self).__init__(backstore, RDMCPBackstore, name, 'create') try: - self._configure(size, gen_wwn) + self._configure(size, gen_wwn, nullio) except: self.delete() raise @@ -807,7 +673,7 @@ class RDMCPStorageObject(StorageObject): name, 'lookup') - def _configure(self, size, wwn): + def _configure(self, size, wwn, nullio): self._check_self() size = convert_human_to_bytes(size) # convert to 4k pages @@ -816,6 +682,8 @@ class RDMCPStorageObject(StorageObject): size = 1 self._control("rd_pages=%d" % size) + if nullio: + self._control("rd_nullio=1") self._enable() if wwn: self.wwn = generate_wwn('unit_serial') @@ -833,6 +701,14 @@ class RDMCPStorageObject(StorageObject): size = self._get_page_size() * self._get_pages() return size + def _get_nullio(self): + self._check_self() + # nullio not present before 3.10 + try: + return bool(int(self._parse_info('nullio'))) + except AttributeError: + return False + # RDMCPStorageObject public stuff page_size = property(_get_page_size, @@ -841,7 +717,8 @@ class RDMCPStorageObject(StorageObject): doc="Get the ramdisk number of pages.") size = property(_get_size, doc="Get the ramdisk size in bytes.") - + nullio = property(_get_nullio, + doc="Get the nullio status.") class FileIOStorageObject(StorageObject): ''' @@ -949,7 +826,6 @@ class FileIOStorageObject(StorageObject): + "%s is already in use." 
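A note on the rd_mcp hunks above: RDMCPBackstore.storage_object() and RDMCPStorageObject grow a nullio flag that creates the ramdisk without a backing page store (rd_nullio=1), and the new nullio property reports the state back, falling back to False on kernels before 3.10 where the attribute is missing. A minimal creation sketch, assuming backstore index 0 is free and the object name is arbitrary:

    from rtslib.tcm import RDMCPBackstore

    bs = RDMCPBackstore(0, mode='create')
    # nullio=True: no backing page store, handy for pure I/O path tests
    so = bs.storage_object("test_nullio", size="10MB", nullio=True)
    print(so.nullio)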
% dev) if is_disk_partition(rdev): size = get_disk_size(rdev) - print "fd_dev_name=%s,fd_dev_size=%d" % (dev, size) self._control("fd_dev_name=%s,fd_dev_size=%d" % (dev, size)) else: self._control("fd_dev_name=%s" % dev) diff --git a/rtslib/utils.py b/rtslib/utils.py index 5dcaeda..dbd6364 100644 --- a/rtslib/utils.py +++ b/rtslib/utils.py @@ -2,7 +2,7 @@ Provides various utility functions. This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain @@ -49,42 +49,15 @@ class RTSLibNotInCFS(RTSLibError): ''' The underlying configfs object does not exist. Happens when calling methods of an object that is instantiated but have - been deleted from congifs, or when trying to lookup an + been deleted from configfs, or when trying to look up an object that does not exist. ''' pass -def flatten_nested_list(nested_list): - ''' - Function to flatten a nested list. - - >>> import rtslib.utils as utils - >>> utils.flatten_nested_list([[1,2,3,[4,5,6]],[7,8],[[[9,10]],[11,]]]) - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - - @param nested_list: A nested list (list of lists of lists etc.) - @type nested_list: list - @return: A list with only non-list elements - ''' - return list(gen_list_item(nested_list)) - -def gen_list_item(nested_list): - ''' - The generator for flatten_nested_list(). - It returns one by one items that are not a list, and recurses when - he finds an item that is a list. - ''' - for item in nested_list: - if type(item) is list: - for nested_item in gen_list_item(item): - yield nested_item - else: - yield item - def fwrite(path, string): ''' This function writes a string to a file, and takes care of - opening it and closing it. If the file does not exists, it + opening it and closing it. If the file does not exist, it will be created. >>> from rtslib.utils import * @@ -352,6 +325,7 @@ def get_block_type(path): 161, # Carmel 8-port SATA Disks 199, # Veritas volume manager (VxVM) volumes 201, # Veritas VxVM dynamic multipathing driver + 202, # Xen block device 230, # ZFS ZVols 240, # LOCAL/EXPERIMENTAL USE 241, # LOCAL/EXPERIMENTAL USE @@ -471,6 +445,14 @@ def convert_scsi_hctl_to_path(host, controller, target, lun): return os.path.realpath(path) return '' +def convert_bytes_to_human(size): + if not size: + return "" + for x in ['bytes','K','M','G','T']: + if size < 1024.0: + return "(%3.1f%s) " % (size, x) + size /= 1024.0 + def convert_human_to_bytes(hsize, kilo=1024): ''' This function converts human-readable amounts of bytes to bytes. @@ -621,7 +603,7 @@ def modprobe(module): else: return True else: - raise RTSLibError("Kernel module %s does not exists on disk " + raise RTSLibError("Kernel module %s does not exist on disk " % module + "and is not loaded.") else: return False diff --git a/setup.py b/setup.py index ebb22ad..34e5dfc 100755 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #! /usr/bin/env python ''' This file is part of RTSLib. -Copyright (c) 2011-2013 by Datera, Inc +Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
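A note on the utils.py hunks above: flatten_nested_list() and gen_list_item() are removed (the listing methods are now generators themselves), and a small convert_bytes_to_human() helper lands next to the existing convert_human_to_bytes(). As defined in the hunk, it returns an empty string for a falsy size and a parenthesized string with a trailing space otherwise:

    from rtslib.utils import convert_bytes_to_human, convert_human_to_bytes

    convert_human_to_bytes("1MB")    # 1048576, since kilo defaults to 1024
    convert_bytes_to_human(1048576)  # "(1.0M) ", trailing space included
    convert_bytes_to_human(0)        # "" for zero or None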
You may obtain diff --git a/specs/README b/specs/README deleted file mode 100644 index 91b0869..0000000 --- a/specs/README +++ /dev/null @@ -1,122 +0,0 @@ -This directory (normally /var/lib/target) contains the spec files for -RisingTide Systems's LIO SCSI target subsystem fabric modules. - -Each spec file should be named MODULE.spec, where MODULE is the name the fabric -module is to be referred as. It contains a series of KEY = VALUE pairs, one per -line. - -KEY is an alphanumeric (no spaces) string. -VALUE can be anything. Quotes can be used for strings, but are not mandatory. -Lists of VALUEs are comma-separated. - -Syntax ------- - -* Strings -String values can either be enclosed in double quotes or not. -Those examples are equivalent: -kernel_module = "my_module" -kernel_module = my_module - -* Lists -Lists are comma-separated lists of values. If you want to use a comma in a -string, use double quotes. Example: -my_string = value1, value2, "value3, with comma", value4 - -* Comments -All lines beginning with a pound sign (#) will be ignored. -Empty lines will be ignored too. - -Available keys --------------- - -* features -Lists the target fabric available features. Default value: -discovery_auth, acls, acls_auth, nps -exemple: features = discovery_auth, acls, acls_auth - -Detail of features: - - * tpgts - The target fabric module is using iSCSI-style target portal group tags. - - * discovery_auth - The target fabric module supports a fabric-wide authentication for - discovery. - - * acls - The target's TPGTs do support explicit initiator ACLs. - - * acls_auth - The target's TPGT's ACLs do support per-ACL initiator authentication. - - * nps - The TPGTs do support iSCSI-like IPv4/IPv6 network portals, using IP:PORT - group names. - - * nexus - The TPGTs do have a 'nexus' attribute that contains the local initiator - serial unit. This attribute must be set before being able to create any - LUNs. - -* wwn_type -Sets the type of WWN expected by the target fabric. Defaults to 'free'. -Example: wwn_type = iqn -Current valid types are: - - * free - Freeform WWN. - - * iqn - The fabric module targets are using iSCSI-type IQNs. - - * naa - NAA SAS address type WWN. - - * unit_serial - Disk-type unit serial. - -* wwn_from_files -In some cases, and independently from the wwn type, the target WWNs must be -picked from a list of existing ones, the most obvious case being hardware-set -WWNs. Only the WWNs both matching the wwn_type (after filtering if set, see -below) and fetched from the specified files will be allowed for targets. The -value of this key is a list (one or more, comma-separated) of UNIX style -pathname patterns: * and ? wildcards can be used, and character ranges -expressed with [] will be correctly expanded. Each file is assumed to contain -one or more WWNs, and line ends, spaces, tabs and null (\0) will be considered -as separators chars. -Example: wwn_from_files = /sys/class/fc_host/host[0-9]/port_name - -* wwn_from_files_filter -Empty by default, this one allows specifying a shell command to which each WWN -from files will be fed, and the output of the filter will be used as the final -WWN to use. Examples: -wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$//" -wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$// | tr [a-z] [A-Z]" -The first example transforms strings like '0x21000024ff314c48' into -'21:00:00:24:ff:31:4c:48', the second one also shifts lower cases into upper -case, demonstrating that you can pipe as many commands you want into another. 
- -* wwn_from_cmds -Same as wwn_from_files, but instead of taking a list of file patterns, takes a -list of shell commands. Each commands output will be considered as a list of -WWNs to be used, separated ny line ends, spaces, tabs and null (\0) -chararcters. - -* wwn_from_cmds_filter -Same as wwn_from_files_filter, but filters/transforms the WWNs gotten from the -results of the wwn_from_cmds shell commands. - -* kernel_module -Sets the name of the kernel module implementing the fabric modules. If not -specified, it will be assumed to be MODULE_target_mod, where MODNAME is the -name of the fabric module, as used to name the spec file. Note that you must -not specify any .ko or such extension here. -Example: kernel_module = my_module - -* configfs_group -Sets the name of the configfs group used by the fabric module. Defaults to the -name of the module as used to name the spec file. -Example: configfs_group = iscsi - diff --git a/specs/example.spec.txt b/specs/example.spec.txt deleted file mode 100644 index f729157..0000000 --- a/specs/example.spec.txt +++ /dev/null @@ -1,15 +0,0 @@ -# Example LIO target fabric module. -# - -# The example fabric module uses the default feature set. -# features = discovery_auth, acls, acls_auth, nps - -# This module uses anything as WWNs. -wwn_type = free - -# Convoluted kernel module name. Default would be example_target_mod -kernel_module = my_complex_kernel_module_name - -# The configfs group name. Defauklt would be "example" -configfs_group = "example_group" - diff --git a/specs/example_spec_file_for_fabric_modules.txt b/specs/example_spec_file_for_fabric_modules.txt new file mode 100644 index 0000000..5e4f0c6 --- /dev/null +++ b/specs/example_spec_file_for_fabric_modules.txt @@ -0,0 +1,15 @@ +# Example LIO target fabric module. +# + +# The example fabric module uses the default feature set. +# features = discovery_auth, acls, acls_auth, nps + +# This module uses anything as WWNs. +wwn_type = free + +# Convoluted kernel module name. Default would be example_target_mod +kernel_module = my_complex_kernel_module_name + +# The configfs group name. 
Default would be "example" +configfs_group = "example_group" + diff --git a/specs/ib_srpt.spec b/specs/ib_srpt.spec index 958322d..1496b1a 100644 --- a/specs/ib_srpt.spec +++ b/specs/ib_srpt.spec @@ -10,7 +10,7 @@ kernel_module = ib_srpt # The module uses hardware addresses from there wwn_from_files = /sys/class/infiniband/*/ports/*/gids/0 # Transform 'fe80:0000:0000:0000:0002:1903:000e:8acd' WWN notation to -# '0x00000000000000000002c903000e8acd' +# '0xfe800000000000000002c903000e8acd' wwn_from_files_filter = "sed -e s/fe80/0xfe80/ -e 's/\://g'" # The configfs group diff --git a/specs/usb_gadget.spec b/specs/usb_gadget.spec new file mode 100644 index 0000000..43506db --- /dev/null +++ b/specs/usb_gadget.spec @@ -0,0 +1,4 @@ +features = nexus +wwn_type = naa +kernel_module = tcm_usb_gadget +configfs_group = "usb_gadget" diff --git a/specs/vhost.spec b/specs/vhost.spec index ee7c516..5e50601 100644 --- a/specs/vhost.spec +++ b/specs/vhost.spec @@ -5,7 +5,7 @@ features = nexus, tpgts wwn_type = naa # Non-standard module naming scheme -kernel_module = tcm_vhost +kernel_module = vhost_scsi # The configfs group configfs_group = vhost diff --git a/specs/writing_spec_files_for_fabric_modules.txt b/specs/writing_spec_files_for_fabric_modules.txt new file mode 100644 index 0000000..7783282 --- /dev/null +++ b/specs/writing_spec_files_for_fabric_modules.txt @@ -0,0 +1,126 @@ +The /var/lib/target directory contains the spec files for +RisingTide Systems's LIO SCSI target subsystem fabric modules. + +To support a new fabric module, a spec file should be installed containing +information for RTSLib to use it: SCSI Target features supported, WWN scheme, +kernel module information, etc. + +Each spec file should be named MODULE.spec, where MODULE is the name the fabric +module is to be referred to as. It contains a series of KEY = VALUE pairs, one per +line. + +KEY is an alphanumeric (no spaces) string. +VALUE can be anything. Quotes can be used for strings, but are not mandatory. +Lists of VALUEs are comma-separated. + +Syntax +------ + +* Strings +String values can either be enclosed in double quotes or not. +Those examples are equivalent: +kernel_module = "my_module" +kernel_module = my_module + +* Lists +Lists are comma-separated lists of values. If you want to use a comma in a +string, use double quotes. Example: +my_string = value1, value2, "value3, with comma", value4 + +* Comments +All lines beginning with a pound sign (#) will be ignored. +Empty lines will be ignored too. + +Available keys +-------------- + +* features +Lists the target fabric available features. Default value: +discovery_auth, acls, acls_auth, nps +example: features = discovery_auth, acls, acls_auth + +Detail of features: + + * tpgts + The target fabric module is using iSCSI-style target portal group tags. + + * discovery_auth + The target fabric module supports a fabric-wide authentication for + discovery. + + * acls + The target's TPGTs do support explicit initiator ACLs. + + * acls_auth + The target's TPGT's ACLs do support per-ACL initiator authentication. + + * nps + The TPGTs do support iSCSI-like IPv4/IPv6 network portals, using IP:PORT + group names. + + * nexus + The TPGTs do have a 'nexus' attribute that contains the local initiator + serial unit. This attribute must be set before being able to create any + LUNs. + +* wwn_type +Sets the type of WWN expected by the target fabric. Defaults to 'free'. +Example: wwn_type = iqn +Current valid types are: + + * free + Freeform WWN.
+ + * iqn + The fabric module targets are using iSCSI-type IQNs. + + * naa + NAA SAS address type WWN. + + * unit_serial + Disk-type unit serial. + +* wwn_from_files +In some cases, and independently from the wwn type, the target WWNs must be +picked from a list of existing ones, the most obvious case being hardware-set +WWNs. Only the WWNs both matching the wwn_type (after filtering if set, see +below) and fetched from the specified files will be allowed for targets. The +value of this key is a list (one or more, comma-separated) of UNIX style +pathname patterns: * and ? wildcards can be used, and character ranges +expressed with [] will be correctly expanded. Each file is assumed to contain +one or more WWNs, and line ends, spaces, tabs and null (\0) will be considered +as separator chars. +Example: wwn_from_files = /sys/class/fc_host/host[0-9]/port_name + +* wwn_from_files_filter +Empty by default, this one allows specifying a shell command to which each WWN +from files will be fed, and the output of the filter will be used as the final +WWN to use. Examples: +wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$//" +wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$// | tr [a-z] [A-Z]" +The first example transforms strings like '0x21000024ff314c48' into +'21:00:00:24:ff:31:4c:48', the second one also shifts lower cases into upper +case, demonstrating that you can pipe as many commands you want into another. + +* wwn_from_cmds +Same as wwn_from_files, but instead of taking a list of file patterns, takes a +list of shell commands. Each command's output will be considered as a list of +WWNs to be used, separated by line ends, spaces, tabs and null (\0) +characters. + +* wwn_from_cmds_filter +Same as wwn_from_files_filter, but filters/transforms the WWNs obtained from the +results of the wwn_from_cmds shell commands. + +* kernel_module +Sets the name of the kernel module implementing the fabric module. If not +specified, it will be assumed to be MODULE_target_mod, where MODULE is the +name of the fabric module, as used to name the spec file. Note that you must +not specify any .ko or such extension here. +Example: kernel_module = my_module + +* configfs_group +Sets the name of the configfs group used by the fabric module. Defaults to the +name of the module as used to name the spec file. +Example: configfs_group = iscsi + diff --git a/tests/data/config_attribute_group.ast b/tests/data/config_attribute_group.ast new file mode 100644 index 0000000..1cc53f1 --- /dev/null +++ b/tests/data/config_attribute_group.ast @@ -0,0 +1,100 @@ +(lp0 +(lp1 +(dp2 +S'line' +p3 +I1 +sS'type' +p4 +S'obj' +p5 +sS'col' +p6 +I1 +sS'key' +p7 +(S'storage' +p8 +S'fileio' +p9 +tp10 +sa(dp11 +g3 +I1 +sg4 +g5 +sg6 +I16 +sg7 +(S'disk' +p12 +S'vm1' +p13 +tp14 +sa(dp15 +S'statements' +p16 +(lp17 +(lp18 +(dp19 +g3 +I2 +sg4 +S'attr' +p20 +sg6 +I5 +sg7 +(S'path' +p21 +S'/tmp/vm1.img' +p22 +tp23 +saa(lp24 +(dp25 +g3 +I3 +sg4 +g20 +sg6 +I5 +sg7 +(S'size' +p26 +S'1MB' +p27 +tp28 +saa(lp29 +(dp30 +g3 +I4 +sg4 +S'group' +p31 +sg6 +I5 +sg7 +(S'attribute' +p32 +tp33 +sa(dp34 +g3 +I4 +sg4 +g20 +sg6 +I15 +sg7 +(S'block_size' +p35 +S'512' +p36 +tp37 +saasg3 +I1 +sg4 +S'block' +p38 +sg6 +I25 +saa.
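A note on the tests/data files added from here on: each *.ast file looks to be a protocol-0 Python pickle of the parse tree expected for the matching *.lio config file, with each node carrying key, type, line and col entries, as visible in the dump above. A sketch of inspecting one, assuming the path below is relative to the repository root:

    import pickle

    # 'rb' keeps this working whether pickle expects text or bytes.
    with open("tests/data/config_attribute_group.ast", "rb") as f:
        ast = pickle.load(f)

    # First statement, first token: the 'storage fileio' object node.
    print(ast[0][0]['key'])   # ('storage', 'fileio')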
\ No newline at end of file diff --git a/tests/data/config_attribute_group.lio b/tests/data/config_attribute_group.lio new file mode 100644 index 0000000..aae8c5d --- /dev/null +++ b/tests/data/config_attribute_group.lio @@ -0,0 +1,5 @@ +storage fileio disk vm1 { + path /tmp/vm1.img + size 1MB + attribute block_size 512 +} diff --git a/tests/data/config_basic.ast b/tests/data/config_basic.ast new file mode 100644 index 0000000..fb02d66 --- /dev/null +++ b/tests/data/config_basic.ast @@ -0,0 +1,74 @@ +(lp0 +(lp1 +(dp2 +S'line' +p3 +I1 +sS'type' +p4 +S'obj' +p5 +sS'col' +p6 +I1 +sS'key' +p7 +(S'storage' +p8 +S'fileio' +p9 +tp10 +sa(dp11 +g3 +I1 +sg4 +g5 +sg6 +I16 +sg7 +(S'disk' +p12 +S'vm1' +p13 +tp14 +sa(dp15 +S'statements' +p16 +(lp17 +(lp18 +(dp19 +g3 +I2 +sg4 +S'attr' +p20 +sg6 +I5 +sg7 +(S'path' +p21 +S'/tmp/vm1.img' +p22 +tp23 +saa(lp24 +(dp25 +g3 +I3 +sg4 +g20 +sg6 +I5 +sg7 +(S'size' +p26 +S'1MB' +p27 +tp28 +saasg3 +I1 +sg4 +S'block' +p29 +sg6 +I25 +saa. \ No newline at end of file diff --git a/tests/data/config_basic.lio b/tests/data/config_basic.lio new file mode 100644 index 0000000..e27b8ae --- /dev/null +++ b/tests/data/config_basic.lio @@ -0,0 +1,4 @@ +storage fileio disk vm1 { + path /tmp/vm1.img + size 1MB +} diff --git a/tests/data/config_comments.ast b/tests/data/config_comments.ast new file mode 100644 index 0000000..dc35bc6 --- /dev/null +++ b/tests/data/config_comments.ast @@ -0,0 +1,228 @@ +(lp0 +(lp1 +(dp2 +S'line' +p3 +I2 +sS'type' +p4 +S'obj' +p5 +sS'col' +p6 +I1 +sS'key' +p7 +(S'storage' +p8 +S'fileio' +p9 +tp10 +sa(dp11 +S'statements' +p12 +(lp13 +(lp14 +(dp15 +g3 +I3 +sg4 +g5 +sg6 +I5 +sg7 +(S'disk' +p16 +S'vm1' +p17 +tp18 +sa(dp19 +g12 +(lp20 +(lp21 +(dp22 +g3 +I4 +sg4 +S'attr' +p23 +sg6 +I9 +sg7 +(S'path' +p24 +S'/tmp/disk1.img' +p25 +tp26 +saa(lp27 +(dp28 +g3 +I5 +sg4 +g23 +sg6 +I9 +sg7 +(S'size' +p29 +S'1MB' +p30 +tp31 +saa(lp32 +(dp33 +g3 +I7 +sg4 +S'group' +p34 +sg6 +I9 +sg7 +(S'attribute' +p35 +tp36 +sa(dp37 +g12 +(lp38 +(lp39 +(dp40 +g3 +I9 +sg4 +g23 +sg6 +I13 +sg7 +(S'block_size' +p41 +S'512' +p42 +tp43 +saa(lp44 +(dp45 +g3 +I10 +sg4 +g23 +sg6 +I13 +sg7 +(S'optimal_sectors' +p46 +S'1024' +p47 +tp48 +saa(lp49 +(dp50 +g3 +I11 +sg4 +g23 +sg6 +I13 +sg7 +(S'queue_depth' +p51 +S'32' +p52 +tp53 +saa(lp54 +(dp55 +g3 +I13 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_tas' +p56 +S'yes' +p57 +tp58 +saa(lp59 +(dp60 +S'comment' +p61 +S'EOL comment' +p62 +sg3 +I14 +sg4 +g23 +sg6 +I13 +sg7 +(S'enforce_pr_isids' +p63 +S'yes' +p64 +tp65 +saa(lp66 +(dp67 +g3 +I16 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_dpo' +p68 +S'no' +p69 +tp70 +saa(lp71 +(dp72 +g61 +S'Hello there!' +p73 +sg3 +I17 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_tpu' +p74 +S'no' +p75 +tp76 +saa(lp77 +(dp78 +g61 +S'Does what it says?' +p79 +sg3 +I18 +sg4 +g23 +sg6 +I13 +sg7 +(S'is_nonrot' +p80 +S'no' +p81 +tp82 +saasg3 +I7 +sg4 +S'block' +p83 +sg6 +I19 +saasg3 +I3 +sg4 +g83 +sg6 +I14 +saasg3 +I2 +sg4 +g83 +sg6 +I16 +saa. \ No newline at end of file diff --git a/tests/data/config_comments.lio b/tests/data/config_comments.lio new file mode 100644 index 0000000..89a09c9 --- /dev/null +++ b/tests/data/config_comments.lio @@ -0,0 +1,23 @@ +# This is a comment before the first statement +storage fileio { + disk vm1 { + path /tmp/disk1.img + size 1MB + # This is an indented comment after size and before a group + attribute { + # This is an indented comment after a group + block_size 512 + optimal_sectors 1024 + queue_depth 32 + + emulate_tas yes + enforce_pr_isids yes # EOL comment + + emulate_dpo no + emulate_tpu no # Hello there! 
+ is_nonrot no # Does what it says? + } + } +} + +# Last words comment diff --git a/tests/data/config_complete.ast b/tests/data/config_complete.ast new file mode 100644 index 0000000..76c4593 --- /dev/null +++ b/tests/data/config_complete.ast @@ -0,0 +1,1035 @@ +(lp1 +(lp2 +(dp3 +S'line' +p4 +I1 +sS'type' +p5 +S'obj' +p6 +sS'col' +p7 +I1 +sS'key' +p8 +(S'storage' +S'fileio' +tp9 +sa(dp10 +g4 +I1 +sg5 +g6 +sg7 +I16 +sg8 +(S'disk' +S'disk1' +tp11 +sa(dp12 +S'statements' +p13 +(lp14 +(lp15 +(dp16 +g4 +I2 +sg5 +S'attr' +p17 +sg7 +I5 +sg8 +(S'path' +S'/tmp/disk1.img' +tp18 +saa(lp19 +(dp20 +g4 +I3 +sg5 +g17 +sg7 +I5 +sg8 +(S'size' +S'1MB' +tp21 +saa(lp22 +(dp23 +g4 +I5 +sg5 +S'group' +p24 +sg7 +I5 +sg8 +(S'attribute' +tp25 +sa(dp26 +g13 +(lp27 +(lp28 +(dp29 +g4 +I6 +sg5 +g17 +sg7 +I9 +sg8 +(S'block_size' +S'512' +tp30 +saa(lp31 +(dp32 +g4 +I7 +sg5 +g17 +sg7 +I9 +sg8 +(S'optimal_sectors' +S'1024' +tp33 +saa(lp34 +(dp35 +g4 +I8 +sg5 +g17 +sg7 +I9 +sg8 +(S'queue_depth' +S'32' +tp36 +saa(lp37 +(dp38 +g4 +I10 +sg5 +g17 +sg7 +I9 +sg8 +(S'emulate_tas' +S'yes' +tp39 +saa(lp40 +(dp41 +g4 +I11 +sg5 +g17 +sg7 +I9 +sg8 +(S'enforce_pr_isids' +S'yes' +tp42 +saa(lp43 +(dp44 +g4 +I13 +sg5 +g17 +sg7 +I9 +sg8 +(S'emulate_dpo' +S'no' +tp45 +saa(lp46 +(dp47 +g4 +I14 +sg5 +g17 +sg7 +I9 +sg8 +(S'emulate_tpu' +S'no' +tp48 +saa(lp49 +(dp50 +g4 +I15 +sg5 +g17 +sg7 +I9 +sg8 +(S'is_nonrot' +S'no' +tp51 +saasg4 +I5 +sg5 +S'block' +p52 +sg7 +I15 +saasg4 +I1 +sg5 +g52 +sg7 +I27 +saa(lp53 +(dp54 +g4 +I19 +sg5 +g6 +sg7 +I1 +sg8 +(S'storage' +S'fileio' +tp55 +sa(dp56 +g4 +I19 +sg5 +g6 +sg7 +I16 +sg8 +(S'disk' +S'disk2' +tp57 +sa(dp58 +g13 +(lp59 +(lp60 +(dp61 +g4 +I20 +sg5 +g17 +sg7 +I5 +sg8 +(S'path' +S'/tmp/disk2.img' +tp62 +saa(lp63 +(dp64 +g4 +I21 +sg5 +g17 +sg7 +I5 +sg8 +(S'size' +S'1M' +tp65 +saa(lp66 +(dp67 +g4 +I22 +sg5 +g24 +sg7 +I5 +sg8 +(S'attribute' +tp68 +sa(dp69 +g4 +I22 +sg5 +g17 +sg7 +I15 +sg8 +(S'block_size' +S'512' +tp70 +saa(lp71 +(dp72 +g4 +I23 +sg5 +g24 +sg7 +I5 +sg8 +(S'attribute' +tp73 +sa(dp74 +g4 +I23 +sg5 +g17 +sg7 +I15 +sg8 +(S'optimal_sectors' +S'1024' +tp75 +saa(lp76 +(dp77 +g4 +I24 +sg5 +g24 +sg7 +I5 +sg8 +(S'attribute' +tp78 +sa(dp79 +g4 +I24 +sg5 +g17 +sg7 +I15 +sg8 +(S'queue_depth' +S'32' +tp80 +saasg4 +I19 +sg5 +g52 +sg7 +I27 +saa(lp81 +(dp82 +g4 +I28 +sg5 +g6 +sg7 +I1 +sg8 +(S'fabric' +S'iscsi' +tp83 +sa(dp84 +g4 +I28 +sg5 +g24 +sg7 +I14 +sg8 +(S'discovery_auth' +tp85 +sa(dp86 +g13 +(lp87 +(lp88 +(dp89 +g4 +I29 +sg5 +g17 +sg7 +I5 +sg8 +(S'enable' +S'yes' +tp90 +saa(lp91 +(dp92 +g4 +I30 +sg5 +g17 +sg7 +I5 +sg8 +(S'userid' +S'target1' +tp93 +saa(lp94 +(dp95 +g4 +I31 +sg5 +g17 +sg7 +I5 +sg8 +(S'password' +S'kjh45fDf_' +tp96 +saa(lp97 +(dp98 +g4 +I32 +sg5 +g17 +sg7 +I5 +sg8 +(S'mutual_userid' +S'no' +tp99 +saa(lp100 +(dp101 +g4 +I33 +sg5 +g17 +sg7 +I5 +sg8 +(S'mutual_password' +S'no' +tp102 +saasg4 +I28 +sg5 +g52 +sg7 +I29 +saa(lp103 +(dp104 +g4 +I36 +sg5 +g6 +sg7 +I1 +sg8 +(S'fabric' +S'iscsi' +tp105 +sa(dp106 +g13 +(lp107 +(lp108 +(dp109 +g4 +I37 +sg5 +g6 +sg7 +I5 +sg8 +(S'target' +S'iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.4699f8812c88' +tp110 +sa(dp111 +g13 +(lp112 +(lp113 +(dp114 +g4 +I38 +sg5 +g6 +sg7 +I9 +sg8 +(S'tpgt' +S'1' +tp115 +sa(dp116 +g13 +(lp117 +(lp118 +(dp119 +g4 +I39 +sg5 +g6 +sg7 +I13 +sg8 +(S'lun' +S'1' +tp120 +sa(dp121 +g4 +I39 +sg5 +g17 +sg7 +I19 +sg8 +(S'backend' +S'fileio:disk1' +tp122 +saa(lp123 +(dp124 +g4 +I40 +sg5 +g6 +sg7 +I13 +sg8 +(S'lun' +S'2' +tp125 +sa(dp126 +g4 +I40 +sg5 +g17 +sg7 +I19 +sg8 +(S'backend' +S'fileio:disk2' +tp127 +saa(lp128 +(dp129 +g4 +I41 
+sg5 +g6 +sg7 +I13 +sg8 +(S'portal' +S'0.0.0.0:3260' +tp130 +saa(lp131 +(dp132 +g4 +I43 +sg5 +g24 +sg7 +I13 +sg8 +(S'attribute' +tp133 +sa(dp134 +g13 +(lp135 +(lp136 +(dp137 +g4 +I44 +sg5 +g17 +sg7 +I17 +sg8 +(S'authentication' +S'no' +tp138 +saa(lp139 +(dp140 +g4 +I45 +sg5 +g17 +sg7 +I17 +sg8 +(S'cache_dynamic_acls' +S'no' +tp141 +saa(lp142 +(dp143 +g4 +I46 +sg5 +g17 +sg7 +I17 +sg8 +(S'default_cmdsn_depth' +S'16' +tp144 +saa(lp145 +(dp146 +g4 +I47 +sg5 +g17 +sg7 +I17 +sg8 +(S'demo_mode_write_protect' +S'no' +tp147 +saa(lp148 +(dp149 +g4 +I48 +sg5 +g17 +sg7 +I17 +sg8 +(S'generate_node_acls' +S'no' +tp150 +saa(lp151 +(dp152 +g4 +I49 +sg5 +g17 +sg7 +I17 +sg8 +(S'login_timeout' +S'15' +tp153 +saa(lp154 +(dp155 +g4 +I50 +sg5 +g17 +sg7 +I17 +sg8 +(S'netif_timeout' +S'2' +tp156 +saa(lp157 +(dp158 +g4 +I51 +sg5 +g17 +sg7 +I17 +sg8 +(S'prod_mode_write_protect' +S'no' +tp159 +saasg4 +I43 +sg5 +g52 +sg7 +I23 +saa(lp160 +(dp161 +g4 +I54 +sg5 +g24 +sg7 +I13 +sg8 +(S'parameter' +tp162 +sa(dp163 +g13 +(lp164 +(lp165 +(dp166 +g4 +I55 +sg5 +g17 +sg7 +I17 +sg8 +(S'MaxConnections' +S'12' +tp167 +saa(lp168 +(dp169 +g4 +I56 +sg5 +g17 +sg7 +I17 +sg8 +(S'MaxOutstandingR2T' +S'34' +tp170 +saa(lp171 +(dp172 +g4 +I57 +sg5 +g17 +sg7 +I17 +sg8 +(S'TargetAlias' +S'LIO Target' +tp173 +saa(lp174 +(dp175 +g4 +I58 +sg5 +g17 +sg7 +I17 +sg8 +(S'AuthMethod' +S'CHAP' +tp176 +saa(lp177 +(dp178 +g4 +I59 +sg5 +g17 +sg7 +I17 +sg8 +(S'ImmediateData' +S'yes' +tp179 +saa(lp180 +(dp181 +g4 +I60 +sg5 +g17 +sg7 +I17 +sg8 +(S'MaxBurstLength' +S'262144' +tp182 +saa(lp183 +(dp184 +g4 +I61 +sg5 +g17 +sg7 +I17 +sg8 +(S'MaxRecvDataSegmentLength' +S'8192' +tp185 +saa(lp186 +(dp187 +g4 +I62 +sg5 +g17 +sg7 +I17 +sg8 +(S'HeaderDigest' +S'CRC32C,None' +tp188 +saa(lp189 +(dp190 +g4 +I63 +sg5 +g17 +sg7 +I17 +sg8 +(S'OFMarker' +S'no' +tp191 +saasg4 +I54 +sg5 +g52 +sg7 +I23 +saa(lp192 +(dp193 +g4 +I66 +sg5 +g6 +sg7 +I13 +sg8 +(S'acl' +S'iqn.2003-01.org.linux-iscsi.targetcli.x8664:client1' +tp194 +sa(dp195 +g13 +(lp196 +(lp197 +(dp198 +g4 +I67 +sg5 +g24 +sg7 +I17 +sg8 +(S'attribute' +tp199 +sa(dp200 +g13 +(lp201 +(lp202 +(dp203 +g4 +I68 +sg5 +g17 +sg7 +I21 +sg8 +(S'dataout_timeout' +S'3' +tp204 +saa(lp205 +(dp206 +g4 +I69 +sg5 +g17 +sg7 +I21 +sg8 +(S'dataout_timeout_retries' +S'5' +tp207 +saa(lp208 +(dp209 +g4 +I70 +sg5 +g17 +sg7 +I21 +sg8 +(S'default_erl' +S'0' +tp210 +saa(lp211 +(dp212 +g4 +I71 +sg5 +g17 +sg7 +I21 +sg8 +(S'nopin_response_timeout' +S'30' +tp213 +saa(lp214 +(dp215 +g4 +I72 +sg5 +g17 +sg7 +I21 +sg8 +(S'nopin_timeout' +S'15' +tp216 +saa(lp217 +(dp218 +g4 +I73 +sg5 +g17 +sg7 +I21 +sg8 +(S'random_datain_pdu_offsets' +S'no' +tp219 +saa(lp220 +(dp221 +g4 +I74 +sg5 +g17 +sg7 +I21 +sg8 +(S'random_datain_seq_offsets' +S'no' +tp222 +saa(lp223 +(dp224 +g4 +I75 +sg5 +g17 +sg7 +I21 +sg8 +(S'random_r2t_offsets' +S'no' +tp225 +saasg4 +I67 +sg5 +g52 +sg7 +I27 +saa(lp226 +(dp227 +g4 +I77 +sg5 +g24 +sg7 +I17 +sg8 +(S'auth' +tp228 +sa(dp229 +g13 +(lp230 +(lp231 +(dp232 +g4 +I78 +sg5 +g17 +sg7 +I21 +sg8 +(S'userid' +S'jerome' +tp233 +saa(lp234 +(dp235 +g4 +I79 +sg5 +g17 +sg7 +I21 +sg8 +(S'password' +S'foobar' +tp236 +saa(lp237 +(dp238 +g4 +I80 +sg5 +g17 +sg7 +I21 +sg8 +(S'userid_mutual' +S'just_the2ofus' +tp239 +saa(lp240 +(dp241 +g4 +I81 +sg5 +g17 +sg7 +I21 +sg8 +(S'password_mutual' +S'mutupass' +tp242 +saasg4 +I77 +sg5 +g52 +sg7 +I22 +saa(lp243 +(dp244 +g4 +I83 +sg5 +g6 +sg7 +I17 +sg8 +(S'mapped_lun' +S'1' +tp245 +sa(dp246 +g4 +I83 +sg5 +g17 +sg7 +I30 +sg8 +(S'target_lun' +S'1' +tp247 +saa(lp248 +(dp249 +g4 +I84 +sg5 +g6 +sg7 +I17 +sg8 +(S'mapped_lun' 
+S'2' +tp250 +sa(dp251 +g4 +I84 +sg5 +g17 +sg7 +I30 +sg8 +(S'target_lun' +S'1' +tp252 +saa(lp253 +(dp254 +g4 +I85 +sg5 +g6 +sg7 +I17 +sg8 +(S'mapped_lun' +S'3' +tp255 +sa(dp256 +g4 +I85 +sg5 +g17 +sg7 +I30 +sg8 +(S'target_lun' +S'1' +tp257 +saasg4 +I66 +sg5 +g52 +sg7 +I71 +saasg4 +I38 +sg5 +g52 +sg7 +I16 +saasg4 +I37 +sg5 +g52 +sg7 +I74 +saasg4 +I36 +sg5 +g52 +sg7 +I14 +saa. diff --git a/tests/data/config_complete.lio b/tests/data/config_complete.lio new file mode 100644 index 0000000..c287271 --- /dev/null +++ b/tests/data/config_complete.lio @@ -0,0 +1,89 @@ +storage fileio disk disk1 { + path /tmp/disk1.img + size 1MB + + attribute { + block_size 512 + optimal_sectors 1024 + queue_depth 32 + + emulate_tas yes + enforce_pr_isids yes + + emulate_dpo no + emulate_tpu no + is_nonrot no + } +} + +storage fileio disk disk2 { + path /tmp/disk2.img + size 1M + attribute block_size 512 + attribute optimal_sectors 1024 + attribute queue_depth 32 +} + + +fabric iscsi discovery_auth { + enable yes + userid "target1" + password "kjh45fDf_" + mutual_userid no + mutual_password no +} + +fabric iscsi { + target "iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.4699f8812c88" { + tpgt 1 { + lun 1 backend fileio:disk1 + lun 2 backend fileio:disk2 + portal 0.0.0.0:3260 + + attribute { + authentication no + cache_dynamic_acls no + default_cmdsn_depth 16 + demo_mode_write_protect no + generate_node_acls no + login_timeout 15 + netif_timeout 2 + prod_mode_write_protect no + } + + parameter { + MaxConnections 12 + MaxOutstandingR2T 34 + TargetAlias "LIO Target" + AuthMethod "CHAP" + ImmediateData yes + MaxBurstLength 262144 + MaxRecvDataSegmentLength 8192 + HeaderDigest "CRC32C,None" + OFMarker no + } + + acl "iqn.2003-01.org.linux-iscsi.targetcli.x8664:client1" { + attribute { + dataout_timeout 3 + dataout_timeout_retries 5 + default_erl 0 + nopin_response_timeout 30 + nopin_timeout 15 + random_datain_pdu_offsets no + random_datain_seq_offsets no + random_r2t_offsets no + } + auth { + userid jerome + password foobar + userid_mutual just_the2ofus + password_mutual mutupass + } + mapped_lun 1 target_lun 1 + mapped_lun 2 target_lun 1 + mapped_lun 3 target_lun 1 + } + } + } +} diff --git a/tests/data/config_invalid_reference.lio b/tests/data/config_invalid_reference.lio new file mode 100644 index 0000000..117380b --- /dev/null +++ b/tests/data/config_invalid_reference.lio @@ -0,0 +1,89 @@ +storage fileio disk disk1 { + path /tmp/disk1.img + size 1MB + + attribute { + block_size 512 + optimal_sectors 1024 + queue_depth 32 + + emulate_tas yes + enforce_pr_isids yes + + emulate_dpo no + emulate_tpu no + is_nonrot no + } +} + +storage fileio disk disk2 { + path /tmp/disk2.img + size 1M + attribute block_size 512 + attribute optimal_sectors 1024 + attribute queue_depth 32 +} + + +fabric iscsi discovery_auth { + enable yes + userid "target1" + password "kjh45fDf_" + mutual_userid no + mutual_password no +} + +fabric iscsi { + target "iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.4699f8812c88" { + tpgt 1 { + lun 1 backend fileio:disk3 + lun 2 backend fileio:disk2 + portal 0.0.0.0:3260 + + attribute { + authentication no + cache_dynamic_acls no + default_cmdsn_depth 16 + demo_mode_write_protect no + generate_node_acls no + login_timeout 15 + netif_timeout 2 + prod_mode_write_protect no + } + + parameter { + MaxConnections 12 + MaxOutstandingR2T 34 + TargetAlias "LIO Target" + AuthMethod "CHAP" + ImmediateData yes + MaxBurstLength 262144 + MaxRecvDataSegmentLength 8192 + HeaderDigest "CRC32C,None" + OFMarker no + } + + acl 
"iqn.2003-01.org.linux-iscsi.targetcli.x8664:client1" { + attribute { + dataout_timeout 3 + dataout_timeout_retries 5 + default_erl no + nopin_response_timeout 30 + nopin_timeout 15 + random_datain_pdu_offsets no + random_datain_seq_offsets no + random_r2t_offsets no + } + auth { + userid jerome + password foobar + userid_mutual just_the2ofus + password_mutual mutupass + } + lun 1 target_lun 1 + lun 2 target_lun 1 + lun 3 target_lun 1 + } + } + } +} diff --git a/tests/data/config_nested_blocks.ast b/tests/data/config_nested_blocks.ast new file mode 100644 index 0000000..93b0713 --- /dev/null +++ b/tests/data/config_nested_blocks.ast @@ -0,0 +1,218 @@ +(lp0 +(lp1 +(dp2 +S'line' +p3 +I1 +sS'type' +p4 +S'obj' +p5 +sS'col' +p6 +I1 +sS'key' +p7 +(S'storage' +p8 +S'fileio' +p9 +tp10 +sa(dp11 +S'statements' +p12 +(lp13 +(lp14 +(dp15 +g3 +I2 +sg4 +g5 +sg6 +I5 +sg7 +(S'disk' +p16 +S'vm1' +p17 +tp18 +sa(dp19 +g12 +(lp20 +(lp21 +(dp22 +g3 +I3 +sg4 +S'attr' +p23 +sg6 +I9 +sg7 +(S'path' +p24 +S'/tmp/disk1.img' +p25 +tp26 +saa(lp27 +(dp28 +g3 +I4 +sg4 +g23 +sg6 +I9 +sg7 +(S'size' +p29 +S'1MB' +p30 +tp31 +saa(lp32 +(dp33 +g3 +I6 +sg4 +S'group' +p34 +sg6 +I9 +sg7 +(S'attribute' +p35 +tp36 +sa(dp37 +g12 +(lp38 +(lp39 +(dp40 +g3 +I7 +sg4 +g23 +sg6 +I13 +sg7 +(S'block_size' +p41 +S'512' +p42 +tp43 +saa(lp44 +(dp45 +g3 +I8 +sg4 +g23 +sg6 +I13 +sg7 +(S'optimal_sectors' +p46 +S'1024' +p47 +tp48 +saa(lp49 +(dp50 +g3 +I9 +sg4 +g23 +sg6 +I13 +sg7 +(S'queue_depth' +p51 +S'32' +p52 +tp53 +saa(lp54 +(dp55 +g3 +I11 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_tas' +p56 +S'yes' +p57 +tp58 +saa(lp59 +(dp60 +g3 +I12 +sg4 +g23 +sg6 +I13 +sg7 +(S'enforce_pr_isids' +p61 +S'yes' +p62 +tp63 +saa(lp64 +(dp65 +g3 +I14 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_dpo' +p66 +S'no' +p67 +tp68 +saa(lp69 +(dp70 +g3 +I15 +sg4 +g23 +sg6 +I13 +sg7 +(S'emulate_tpu' +p71 +S'no' +p72 +tp73 +saa(lp74 +(dp75 +g3 +I16 +sg4 +g23 +sg6 +I13 +sg7 +(S'is_nonrot' +p76 +S'no' +p77 +tp78 +saasg3 +I6 +sg4 +S'block' +p79 +sg6 +I19 +saasg3 +I2 +sg4 +g79 +sg6 +I14 +saasg3 +I1 +sg4 +g79 +sg6 +I16 +saa. \ No newline at end of file diff --git a/tests/data/config_nested_blocks.lio b/tests/data/config_nested_blocks.lio new file mode 100644 index 0000000..85117af --- /dev/null +++ b/tests/data/config_nested_blocks.lio @@ -0,0 +1,19 @@ +storage fileio { + disk vm1 { + path /tmp/disk1.img + size 1MB + + attribute { + block_size 512 + optimal_sectors 1024 + queue_depth 32 + + emulate_tas yes + enforce_pr_isids yes + + emulate_dpo no + emulate_tpu no + is_nonrot no + } + } +} diff --git a/tests/data/config_one_line.ast b/tests/data/config_one_line.ast new file mode 100644 index 0000000..304fd7f --- /dev/null +++ b/tests/data/config_one_line.ast @@ -0,0 +1,47 @@ +(lp0 +(lp1 +(dp2 +S'line' +p3 +I1 +sS'type' +p4 +S'obj' +p5 +sS'col' +p6 +I1 +sS'key' +p7 +(S'fabric' +p8 +S'iscsi' +p9 +tp10 +sa(dp11 +g3 +I1 +sg4 +S'group' +p12 +sg6 +I14 +sg7 +(S'discovery_auth' +p13 +tp14 +sa(dp15 +g3 +I1 +sg4 +S'attr' +p16 +sg6 +I29 +sg7 +(S'enable' +p17 +S'yes' +p18 +tp19 +saa. 
\ No newline at end of file diff --git a/tests/data/config_one_line.lio b/tests/data/config_one_line.lio new file mode 100644 index 0000000..8347352 --- /dev/null +++ b/tests/data/config_one_line.lio @@ -0,0 +1 @@ +fabric iscsi discovery_auth enable yes diff --git a/tests/data/config_ramdisk_fileio_iscsi.lio b/tests/data/config_ramdisk_fileio_iscsi.lio new file mode 100644 index 0000000..002f45d --- /dev/null +++ b/tests/data/config_ramdisk_fileio_iscsi.lio @@ -0,0 +1,194 @@ +storage fileio disk disk1 { + path /tmp/disk1.img + size 1.0M + buffered yes + attribute block_size 512 + attribute emulate_dpo 0 + attribute emulate_fua_read 0 + attribute emulate_fua_write 1 + attribute emulate_rest_reord 0 + attribute emulate_tas 1 + attribute emulate_tpu 0 + attribute emulate_tpws 0 + attribute emulate_ua_intlck_ctrl 0 + attribute emulate_write_cache 0 + attribute enforce_pr_isids 1 + attribute is_nonrot 0 + attribute max_unmap_block_desc_count 0 + attribute max_unmap_lba_count 0 + attribute optimal_sectors 1024 + attribute queue_depth 32 + attribute unmap_granularity 0 + attribute unmap_granularity_alignment 0 +} + +storage fileio disk disk2 { + path /tmp/disk2.img + size 1.0M + buffered yes + attribute block_size 512 + attribute emulate_dpo 0 + attribute emulate_fua_read 0 + attribute emulate_fua_write 1 + attribute emulate_rest_reord 0 + attribute emulate_tas 1 + attribute emulate_tpu 0 + attribute emulate_tpws 0 + attribute emulate_ua_intlck_ctrl 0 + attribute emulate_write_cache 0 + attribute enforce_pr_isids 1 + attribute is_nonrot 0 + attribute max_unmap_block_desc_count 0 + attribute max_unmap_lba_count 0 + attribute optimal_sectors 1024 + attribute queue_depth 32 + attribute unmap_granularity 0 + attribute unmap_granularity_alignment 0 +} + +storage rd_mcp disk test { + size 1.0M + attribute block_size 512 + attribute emulate_dpo 0 + attribute emulate_fua_read 0 + attribute emulate_fua_write 1 + attribute emulate_rest_reord 0 + attribute emulate_tas 1 + attribute emulate_tpu 0 + attribute emulate_tpws 0 + attribute emulate_ua_intlck_ctrl 0 + attribute emulate_write_cache 0 + attribute enforce_pr_isids 1 + attribute is_nonrot 0 + attribute max_unmap_block_desc_count 0 + attribute max_unmap_lba_count 0 + attribute optimal_sectors 1024 + attribute queue_depth 32 + attribute unmap_granularity 0 + attribute unmap_granularity_alignment 0 +} + +storage rd_mcp disk test2 { + size 1.0M + attribute block_size 512 + attribute emulate_dpo 0 + attribute emulate_fua_read 0 + attribute emulate_fua_write 1 + attribute emulate_rest_reord 0 + attribute emulate_tas 1 + attribute emulate_tpu 0 + attribute emulate_tpws 0 + attribute emulate_ua_intlck_ctrl 0 + attribute emulate_write_cache 0 + attribute enforce_pr_isids 1 + attribute is_nonrot 0 + attribute max_unmap_block_desc_count 0 + attribute max_unmap_lba_count 0 + attribute optimal_sectors 1024 + attribute queue_depth 32 + attribute unmap_granularity 0 + attribute unmap_granularity_alignment 0 +} + +fabric iscsi { + discovery_auth enable 1 + discovery_auth userid target1 + discovery_auth password kjh45fDf_ + discovery_auth mutual_userid no + discovery_auth mutual_password no +} + +fabric iscsi target iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.4699f8812c88 tpgt 1 { + attribute authentication 0 + attribute cache_dynamic_acls 0 + attribute default_cmdsn_depth 16 + attribute demo_mode_write_protect 0 + attribute generate_node_acls 0 + attribute login_timeout 15 + attribute netif_timeout 2 + attribute prod_mode_write_protect 0 + parameter AuthMethod CHAP 
+ parameter DataDigest "CRC32C,None" + parameter DataPDUInOrder Yes + parameter DataSequenceInOrder Yes + parameter DefaultTime2Retain 20 + parameter DefaultTime2Wait 2 + parameter ErrorRecoveryLevel No + parameter FirstBurstLength 65536 + parameter HeaderDigest "CRC32C,None" + parameter IFMarkInt "2048~65535" + parameter IFMarker No + parameter ImmediateData Yes + parameter InitialR2T Yes + parameter MaxBurstLength 262144 + parameter MaxConnections 12 + parameter MaxOutstandingR2T 34 + parameter MaxRecvDataSegmentLength 8192 + parameter OFMarkInt "2048~65535" + parameter OFMarker No + parameter TargetAlias "LIO Target" + lun 1 backend fileio:disk1 + lun 2 backend fileio:disk2 + acl iqn.2003-01.org.linux-iscsi.targetcli.x8664:client1 { + attribute dataout_timeout 3 + attribute dataout_timeout_retries 5 + attribute default_erl 0 + attribute nopin_response_timeout 30 + attribute nopin_timeout 15 + attribute random_datain_pdu_offsets 0 + attribute random_datain_seq_offsets 0 + attribute random_r2t_offsets 0 + auth password foobar + auth password_mutual mutupass + auth userid jerome + auth userid_mutual just_the2ofus + mapped_lun 1 { + target_lun 1 + write_protect 0 + } + mapped_lun 2 { + target_lun 1 + write_protect 0 + } + mapped_lun 3 { + target_lun 1 + write_protect 0 + } + } + portal 0.0.0.0:3260 + enable 1 +} + +fabric iscsi target iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.caa307436d89 tpgt 1 { + attribute authentication 1 + attribute cache_dynamic_acls 0 + attribute default_cmdsn_depth 16 + attribute demo_mode_write_protect 1 + attribute generate_node_acls 0 + attribute login_timeout 15 + attribute netif_timeout 2 + attribute prod_mode_write_protect 0 + parameter AuthMethod CHAP + parameter DataDigest "CRC32C,None" + parameter DataPDUInOrder Yes + parameter DataSequenceInOrder Yes + parameter DefaultTime2Retain 20 + parameter DefaultTime2Wait 2 + parameter ErrorRecoveryLevel 0 + parameter FirstBurstLength 65536 + parameter HeaderDigest "CRC32C,None" + parameter IFMarkInt "2048~65535" + parameter IFMarker No + parameter ImmediateData Yes + parameter InitialR2T Yes + parameter MaxBurstLength 262144 + parameter MaxConnections 1 + parameter MaxOutstandingR2T 1 + parameter MaxRecvDataSegmentLength 8192 + parameter OFMarkInt "2048~65535" + parameter OFMarker No + parameter TargetAlias "LIO Target" + lun 0 backend rd_mcp:test + lun 1 backend rd_mcp:test2 + enable 1 +} diff --git a/tests/data/config_sample_1.lio b/tests/data/config_sample_1.lio new file mode 100644 index 0000000..c7b16c0 --- /dev/null +++ b/tests/data/config_sample_1.lio @@ -0,0 +1,224 @@ +storage fileio disk test { + buffered no + path /tmp/test.img + size 1.0MB + attribute { + block_size 512 + emulate_3pc yes + emulate_caw yes + emulate_dpo no + emulate_fua_read no + emulate_fua_write yes + emulate_model_alias no + emulate_rest_reord no + emulate_tas yes + emulate_tpu no + emulate_tpws no + emulate_ua_intlck_ctrl no + emulate_write_cache no + enforce_pr_isids yes + fabric_max_sectors 8192 + is_nonrot no + max_unmap_block_desc_count 1 + max_unmap_lba_count 8192 + max_write_same_len 4096 + optimal_sectors 8192 + queue_depth 128 + unmap_granularity 1 + unmap_granularity_alignment 0 + } +} +storage rd_mcp { + disk test { + nullio no + size 10.0MB + attribute { + block_size 512 + emulate_3pc yes + emulate_caw yes + emulate_dpo no + emulate_fua_read no + emulate_fua_write no + emulate_model_alias no + emulate_rest_reord no + emulate_tas yes + emulate_tpu no + emulate_tpws no + emulate_ua_intlck_ctrl no + emulate_write_cache no 
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+    disk test2 {
+        nullio no
+        size 10.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+    disk test3 {
+        nullio no
+        size 10.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+    disk test_nullio {
+        nullio yes
+        size 10.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+}
+fabric iscsi {
+    discovery_auth {
+        enable no
+        mutual_password ""
+        mutual_userid ""
+        password ""
+        userid ""
+    }
+    target iqn.2003-01.org.linux-iscsi.ws0.x8664:sn.690f8dd50f79 tpgt 1 {
+        enable yes
+        attribute {
+            authentication yes
+            cache_dynamic_acls no
+            default_cmdsn_depth 64
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect yes
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 1
+            MaxOutstandingR2T 1
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 0 backend fileio:test
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client1 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 0 {
+                target_lun 0
+                write_protect no
+            }
+        }
+    }
+}
diff --git a/tests/data/config_sample_2.lio b/tests/data/config_sample_2.lio
new file mode 100644
index 0000000..b92f726
--- /dev/null
+++ b/tests/data/config_sample_2.lio
@@ -0,0 +1,208 @@
+storage fileio {
+    disk file_buffered_1MB {
+        buffered yes
+        path /tmp/file_buffered_1MB
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_no_option_2MB {
+        buffered yes
+        path /tmp/file_no_option_2MB
+        size 2.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_sparse_1MB {
+        buffered yes
+        path /tmp/file_sparse_1MB
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+}
+fabric iscsi {
+    discovery_auth {
+        enable no
+        mutual_password ""
+        mutual_userid ""
+        password ""
+        userid ""
+    }
+    target iqn.2003-01.org.linux-iscsi.ws0.x8664:sn.31631c361eba tpgt 1 {
+        enable yes
+        attribute {
+            authentication yes
+            cache_dynamic_acls no
+            default_cmdsn_depth 64
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect yes
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 1
+            MaxOutstandingR2T 1
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 0 backend fileio:file_buffered_1MB
+        lun 1 backend fileio:file_no_option_2MB
+        lun 2 backend fileio:file_sparse_1MB
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client1 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 0 {
+                target_lun 0
+                write_protect no
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client2 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 0 {
+                target_lun 0
+                write_protect no
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+    }
+}
diff --git a/tests/data/config_sample_3.lio b/tests/data/config_sample_3.lio
new file mode 100644
index 0000000..172455c
--- /dev/null
+++ b/tests/data/config_sample_3.lio
@@ -0,0 +1,204 @@
+storage fileio {
+    disk file_no_option_2MB {
+        buffered yes
+        path /tmp/file_no_option_2MB
+        size 2.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_sparse_1MB {
+        buffered yes
+        path /tmp/file_sparse_1MB
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_unbuffered_1MB {
+        buffered yes
+        path /tmp/file_unbuffered_1MB
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+}
+fabric iscsi {
+    discovery_auth {
+        enable no
+        mutual_password ""
+        mutual_userid ""
+        password ""
+        userid ""
+    }
+    target iqn.2003-01.org.linux-iscsi.ws0.x8664:sn.31631c361eba tpgt 1 {
+        enable yes
+        attribute {
+            authentication yes
+            cache_dynamic_acls no
+            default_cmdsn_depth 64
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect yes
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 1
+            MaxOutstandingR2T 1
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 1 backend fileio:file_no_option_2MB
+        lun 2 backend fileio:file_sparse_1MB
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client1 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client2 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+    }
+}
+fabric loopback target naa.60014054793b60dd {
+    lun 0 backend fileio:file_no_option_2MB
+    lun 1 backend fileio:file_sparse_1MB
+    lun 2 backend fileio:file_unbuffered_1MB
+}
diff --git a/tests/data/config_sample_4.lio b/tests/data/config_sample_4.lio
new file mode 100644
index 0000000..a06cd4a
--- /dev/null
+++ b/tests/data/config_sample_4.lio
@@ -0,0 +1,205 @@
+storage fileio {
+    disk file_no_option_2MB {
+        buffered yes
+        path /tmp/file_no_option_2mb
+        size 2.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_sparse_1MB {
+        buffered yes
+        path /tmp/file_sparse_1mb
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+    disk file_unbuffered_1MB {
+        buffered yes
+        path /tmp/file_unbuffered_1mb
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache yes
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 1
+            max_unmap_lba_count 8192
+            max_write_same_len 4096
+            optimal_sectors 8192
+            queue_depth 128
+            unmap_granularity 1
+            unmap_granularity_alignment 0
+        }
+    }
+}
+fabric iscsi {
+    discovery_auth {
+        enable no
+        mutual_password ""
+        mutual_userid ""
+        password ""
+        userid ""
+    }
+    target iqn.2003-01.org.linux-iscsi.ws0.x8664:sn.31631c361eba tpgt 1 {
+        enable yes
+        attribute {
+            authentication yes
+            cache_dynamic_acls no
+            default_cmdsn_depth 64
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect yes
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 1
+            MaxOutstandingR2T 1
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 1 backend fileio:file_no_option_2MB
+        lun 2 backend fileio:file_sparse_1MB
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client1 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+        acl iqn.2003-01.org.linux-iscsi.ws0.x8664:client2 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password ""
+                password_mutual ""
+                userid ""
+                userid_mutual ""
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 2
+                write_protect no
+            }
+        }
+    }
+}
+fabric loopback target naa.60014054793b60dd {
+    lun 0 backend fileio:file_no_option_2MB
+    lun 1 backend fileio:file_sparse_1MB
+    lun 2 backend fileio:file_unbuffered_1MB
+}
+fabric vhost target naa.6001405d7e35b513 tpgt 1 lun 0 backend fileio:file_no_option_2MB
\ No newline at end of file
diff --git a/tests/data/config_sample_5.lio b/tests/data/config_sample_5.lio
new file mode 100644
index 0000000..3a51644
--- /dev/null
+++ b/tests/data/config_sample_5.lio
@@ -0,0 +1,254 @@
+storage fileio {
+    disk disk1 {
+        buffered yes
+        path /tmp/disk1.img
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 0
+            max_unmap_lba_count 0
+            max_write_same_len 4096
+            optimal_sectors 1024
+            queue_depth 32
+            unmap_granularity 0
+            unmap_granularity_alignment 0
+        }
+    }
+    disk disk2 {
+        buffered yes
+        path /tmp/disk2.img
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count 0
+            max_unmap_lba_count 0
+            max_write_same_len 4096
+            optimal_sectors 1024
+            queue_depth 32
+            unmap_granularity 0
+            unmap_granularity_alignment 0
+        }
+    }
+}
+storage rd_mcp {
+    disk test {
+        nullio no
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 1024
+            queue_depth 32
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+    disk test2 {
+        nullio no
+        size 1.0MB
+        attribute {
+            block_size 512
+            emulate_3pc yes
+            emulate_caw yes
+            emulate_dpo no
+            emulate_fua_read no
+            emulate_fua_write yes
+            emulate_model_alias no
+            emulate_rest_reord no
+            emulate_tas yes
+            emulate_tpu no
+            emulate_tpws no
+            emulate_ua_intlck_ctrl no
+            emulate_write_cache no
+            enforce_pr_isids yes
+            fabric_max_sectors 8192
+            is_nonrot no
+            max_unmap_block_desc_count no
+            max_unmap_lba_count no
+            max_write_same_len 0
+            optimal_sectors 1024
+            queue_depth 32
+            unmap_granularity no
+            unmap_granularity_alignment no
+        }
+    }
+}
+fabric iscsi {
+    discovery_auth {
+        enable yes
+        mutual_password no
+        mutual_userid no
+        password kjh45fDf_
+        userid target1
+    }
+    target iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.4699f8812c88 tpgt 1 {
+        enable yes
+        attribute {
+            authentication no
+            cache_dynamic_acls no
+            default_cmdsn_depth 16
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect no
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 12
+            MaxOutstandingR2T 34
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 1 backend fileio:disk1
+        lun 2 backend fileio:disk2
+        acl iqn.2003-01.org.linux-iscsi.targetcli.x8664:client1 {
+            attribute {
+                dataout_timeout 3
+                dataout_timeout_retries 5
+                default_erl 0
+                nopin_response_timeout 30
+                nopin_timeout 15
+                random_datain_pdu_offsets no
+                random_datain_seq_offsets no
+                random_r2t_offsets no
+            }
+            auth {
+                password foobar
+                password_mutual mutupass
+                userid jerome
+                userid_mutual just_the2ofus
+            }
+            mapped_lun 1 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 2 {
+                target_lun 1
+                write_protect no
+            }
+            mapped_lun 3 {
+                target_lun 1
+                write_protect no
+            }
+        }
+        portal 0.0.0.0:3260
+    }
+    target iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.caa307436d89 tpgt 1 {
+        enable yes
+        attribute {
+            authentication yes
+            cache_dynamic_acls no
+            default_cmdsn_depth 16
+            default_erl 0
+            demo_mode_discovery yes
+            demo_mode_write_protect yes
+            generate_node_acls no
+            login_timeout 15
+            netif_timeout 2
+            prod_mode_write_protect no
+        }
+        auth {
+            password ""
+            password_mutual ""
+            userid ""
+            userid_mutual ""
+        }
+        parameter {
+            AuthMethod CHAP
+            DataDigest "CRC32C,None"
+            DataPDUInOrder yes
+            DataSequenceInOrder yes
+            DefaultTime2Retain 20
+            DefaultTime2Wait 2
+            ErrorRecoveryLevel no
+            FirstBurstLength 65536
+            HeaderDigest "CRC32C,None"
+            IFMarkInt "2048~65535"
+            IFMarker no
+            ImmediateData yes
+            InitialR2T yes
+            MaxBurstLength 262144
+            MaxConnections 1
+            MaxOutstandingR2T 1
+            MaxRecvDataSegmentLength 8192
+            MaxXmitDataSegmentLength 262144
+            OFMarkInt "2048~65535"
+            OFMarker no
+            TargetAlias "LIO Target"
+        }
+        lun 0 backend rd_mcp:test
+        lun 1 backend rd_mcp:test2
+    }
+}
diff --git a/tests/data/config_strings.ast b/tests/data/config_strings.ast
new file mode 100644
index 0000000..c969794
--- /dev/null
+++ b/tests/data/config_strings.ast
@@ -0,0 +1,242 @@
+(lp0
+(lp1
+(dp2
+S'line'
+p3
+I2
+sS'type'
+p4
+S'obj'
+p5
+sS'col'
+p6
+I1
+sS'key'
+p7
+(S'storage'
+p8
+S'fileio'
+p9
+tp10
+sa(dp11
+S'statements'
+p12
+(lp13
+(lp14
+(dp15
+g3
+I3
+sg4
+g5
+sg6
+I5
+sg7
+(S'disk'
+p16
+S'This:is:a_long_name_for_disk'
+p17
+tp18
+sa(dp19
+g12
+(lp20
+(lp21
+(dp22
+g3
+I4
+sg4
+S'attr'
+p23
+sg6
+I9
+sg7
+(S'path'
+p24
+S'/tmp/disk1.img'
+p25
+tp26
+saa(lp27
+(dp28
+g3
+I5
+sg4
+g23
+sg6
+I9
+sg7
+(S'size'
+p29
+S'1MB'
+p30
+tp31
+saa(lp32
+(dp33
+g3
+I7
+sg4
+S'group'
+p34
+sg6
+I9
+sg7
+(S'attribute'
+p35
+tp36
+sa(dp37
+g12
+(lp38
+(lp39
+(dp40
+g3
+I9
+sg4
+g23
+sg6
+I13
+sg7
+(S'block_size'
+p41
+S'512'
+p42
+tp43
+saa(lp44
+(dp45
+g3
+I10
+sg4
+g23
+sg6
+I13
+sg7
+(S'optimal_sectors'
+p46
+S'1024'
+p47
+tp48
+saa(lp49
+(dp50
+g3
+I11
+sg4
+g23
+sg6
+I13
+sg7
+(S'queue_depth'
+p51
+S'32'
+p52
+tp53
+saa(lp54
+(dp55
+g3
+I12
+sg4
+g23
+sg6
+I13
+sg7
+(S'fancy_attribute'
+p56
+S'This is a fancy attribute that takes a long value'
+p57
+tp58
+saa(lp59
+(dp60
+g3
+I14
+sg4
+g23
+sg6
+I13
+sg7
+(S'emulate_tas'
+p61
+S'yes_I_do_want_to_enable_this_%!?@_functionnality!'
+p62
+tp63
+saa(lp64
+(dp65
+S'comment'
+p66
+S'EOL comment'
+p67
+sg3
+I15
+sg4
+g23
+sg6
+I13
+sg7
+(S'enforce_pr_isids'
+p68
+S'yes'
+p69
+tp70
+saa(lp71
+(dp72
+g3
+I17
+sg4
+g23
+sg6
+I13
+sg7
+(S'emulate_dpo'
+p73
+S'no'
+p74
+tp75
+saa(lp76
+(dp77
+g66
+S'Hello there!'
+p78
+sg3
+I18
+sg4
+g23
+sg6
+I13
+sg7
+(S'emulate_tpu'
+p79
+S'no'
+p80
+tp81
+saa(lp82
+(dp83
+g66
+S'Does what it says?'
+p84
+sg3
+I19
+sg4
+g23
+sg6
+I13
+sg7
+(S'is_nonrot'
+p85
+S'no'
+p86
+tp87
+saasg3
+I7
+sg4
+S'block'
+p88
+sg6
+I19
+saasg3
+I3
+sg4
+g88
+sg6
+I39
+saasg3
+I2
+sg4
+g88
+sg6
+I16
+saa.
\ No newline at end of file
diff --git a/tests/data/config_strings.lio b/tests/data/config_strings.lio
new file mode 100644
index 0000000..41ea6e8
--- /dev/null
+++ b/tests/data/config_strings.lio
@@ -0,0 +1,24 @@
+# This is a comment before the first statement
+storage fileio {
+    disk This:is:a_long_name_for_disk {
+        path /tmp/disk1.img
+        size 1MB
+        # This is an indented comment after size and before a group
+        attribute {
+            # This is an indented comment after a group
+            block_size 512
+            optimal_sectors 1024
+            queue_depth 32
+            fancy_attribute "This is a fancy attribute that takes a long value"
+
+            emulate_tas yes_I_do_want_to_enable_this_%!?@_functionnality!
+            enforce_pr_isids yes # EOL comment
+
+            emulate_dpo no
+            emulate_tpu no # Hello there!
+            is_nonrot no # Does what it says?
+        }
+    }
+}
+
+# Last words comment
diff --git a/tests/safe/test_config.py b/tests/safe/test_config.py
new file mode 100644
index 0000000..b0e57a0
--- /dev/null
+++ b/tests/safe/test_config.py
@@ -0,0 +1,133 @@
+import sys, pprint, logging, unittest, tempfile
+from pyparsing import ParseException
+from rtslib import config
+
+logging.basicConfig()
+log = logging.getLogger('TestConfig')
+log.setLevel(logging.INFO)
+
+class TestConfig(unittest.TestCase):
+
+    samples_dir = '../data'
+
+    def test_load_basic(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_basic.lio" % self.samples_dir
+        lio = config.Config()
+
+        lio.load(filepath)
+
+        tests = [("storage fileio", 'obj', 1),
+                 ("storage fileio disk vm1 path /tmp/vm1.img", 'attr', 1),
+                 ("storage fileio disk vm1 size 1.0MB", 'attr', 1),
+                 ("storage .* disk .* .* .*", 'attr', 3)]
+
+        for pattern, node_type, arity in tests:
+            results = lio.search(pattern)
+            log.debug("config.current.search(%s) -> (%d) %s"
+                      % (pattern, len(results), results))
+            self.failUnless(len(results) == arity)
+            for result in results:
+                self.failUnless(result.data['type'] == node_type)
+
+        self.failUnless(lio.search("storage fileio disk vm1 path") == [])
+
+    def test_load_complete(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_complete.lio" % self.samples_dir
+        lio = config.Config()
+        lio.load(filepath)
+
+        tests = [("storage fileio", 'obj', 1),
+                 ("storage fileio disk disk1 path", None, 0),
+                 ("storage fileio disk disk1 path /tmp/disk1.img", 'attr', 1),
+                 ("storage fileio disk disk1 path /tmp/disk2.img", 'attr', 0),
+                 ("storage fileio disk disk1 size 1.0MB", 'attr', 1),
+                 ("storage fileio disk disk2 path /tmp/disk2.img", 'attr', 1),
+                 ("storage .* disk .* .* .* .*", 'attr', 46),
+                 ("storage .* disk .* attribute .* .*", 'attr', 46),
+                 ("storage .* disk .* .* .*", 'attr', 6)]
+
+        for pattern, node_type, arity in tests:
+            results = lio.search(pattern)
+            log.debug("config.current.search(%s) -> (%d) %s"
+                      % (pattern, len(results), results))
+            self.failUnless(len(results) == arity)
+            for result in results:
+                self.failUnless(result.data['type'] == node_type)
+
+    def test_clear_undo(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_complete.lio" % self.samples_dir
+        lio = config.Config()
+        log.info("Load config")
+        lio.load(filepath)
+        self.failUnless(len(lio.search("storage fileio disk disk2")) == 1)
+        lio.clear()
+        self.failUnless(len(lio.search("storage fileio disk disk2")) == 0)
+        lio.undo()
+        self.failUnless(len(lio.search("storage fileio disk disk2")) == 1)
+
+    def test_load_save(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_complete.lio" % self.samples_dir
+        lio = config.Config()
+        lio.load(filepath)
+
+        with tempfile.NamedTemporaryFile(delete=False) as temp:
+            log.debug("Saving initial config to %s" % temp.name)
+            dump1 = lio.save(temp.name)
+            lio.load(temp.name)
+
+        with tempfile.NamedTemporaryFile(delete=False) as temp:
+            log.debug("Saving reloaded config to %s" % temp.name)
+            dump2 = lio.save(temp.name)
+
+        self.failUnless(dump1 == dump2)
+
+    def test_set_delete(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_complete.lio" % self.samples_dir
+
+        lio = config.Config()
+        set1 = lio.search("storage fileio disk mydisk")
+        set2 = lio.search("fabric iscsi discovery_auth enable yes")
+        self.failUnless(len(set1) == len(set2) == 0)
+
+        iqn = '"iqn.2003-01.org.linux-iscsi.targetcli.x8664:sn.foo"'
+        lio.set("fabric iscsi target " + iqn)
+        self.assertRaises(ParseException, lio.set,
+                          "fabric iscsi discovery_auth")
+        lio.set("fabric iscsi discovery_auth enable yes")
+        lio.set("storage fileio disk vm1 {path /foo.img; size 1MB;}")
+        self.assertRaises(ParseException, lio.set,
+                          "storage fileio disk vm1 {path /foo.img; size 1MB}")
+        lio.set("storage fileio disk mydisk")
+        set1 = lio.search("storage fileio disk mydisk")
+        set2 = lio.search("fabric iscsi discovery_auth enable yes")
+        self.failUnless(len(set1) == len(set2) == 1)
+
+        lio.delete("storage fileio disk mydisk")
+        lio.delete("fabric iscsi discovery_auth enable yes")
+        set1 = lio.search("storage fileio disk mydisk")
+        set2 = lio.search("fabric iscsi discovery_auth enable yes")
+        self.failUnless(len(set1) == 0)
+        self.failUnless(len(set2) == 0)
+
+    def test_invalid_reference(self):
+        print
+        log.info(self._testMethodName)
+        filepath = "%s/config_invalid_reference.lio" % self.samples_dir
+        lio = config.Config()
+        self.assertRaisesRegexp(config.ConfigError,
+                                ".*Invalid.*disk3.*",
+                                lio.load, filepath)
+        lio = config.Config()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/safe/test_config_parser.py b/tests/safe/test_config_parser.py
new file mode 100644
index 0000000..b42c1a7
--- /dev/null
+++ b/tests/safe/test_config_parser.py
@@ -0,0 +1,108 @@
+import sys, pprint, logging, unittest, cPickle
+from rtslib import config_parser
+
+# TODO Add PolicyParser tests
+
+logging.basicConfig()
+log = logging.getLogger('TestConfigParser')
+log.setLevel(logging.INFO)
+
+class TestConfigParser(unittest.TestCase):
+
+    parser = config_parser.ConfigParser()
+    samples_dir = '../data'
+
+    def test_one_line(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_one_line.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_basic(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_basic.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_attribute_group(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_attribute_group.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_nested_blocks(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_nested_blocks.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_comments(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_comments.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_strings(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_strings.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+        self.failUnless(parse_tree == expected_tree)
+
+    def test_complete(self):
+        print
+        log.info(self._testMethodName)
+        config = "%s/config_complete.lio" % self.samples_dir
+        parse_tree = self.parser.parse_file(config)
+        for statement in parse_tree:
+            log.debug(pprint.pformat(statement))
+        # with open("%s.ast" % config[:-4], 'w') as f:
+        #     cPickle.dump(parse_tree, f)
+        with open("%s.ast" % config[:-4], 'r') as f:
+            expected_tree = cPickle.load(f)
+
+        self.failUnless(parse_tree == expected_tree)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/safe/test_config_tree.py b/tests/safe/test_config_tree.py
new file mode 100644
index 0000000..cadc296
--- /dev/null
+++ b/tests/safe/test_config_tree.py
@@ -0,0 +1,101 @@
+import re, sys, pprint, logging, unittest
+from rtslib import config_tree
+
+logging.basicConfig()
+log = logging.getLogger('TestConfigTree')
+log.setLevel(logging.INFO)
+
+class TestConfigTree(unittest.TestCase):
+
+    def test_create(self):
+        print
+        log.info(self._testMethodName)
+        tree = config_tree.ConfigTree()
+        self.failUnless(tree.get(None) is None)
+        self.failUnless(tree.get_path(None) is None)
+        self.failUnless(tree.get_path([]) is None)
+        self.failUnless(tree.get(()) is None)
+        self.failUnless(tree.delete(None) is None)
+        self.failUnless(tree.get_path(('a',)) is None)
+        self.failUnless(tree.get_path([('a',), ('b',), ('c',)]) is None)
+
+    def test_add_get_delete(self):
+        print
+        log.info(self._testMethodName)
+        tree = config_tree.ConfigTree()
+        n1 = tree.set(('1', '2'), {'info': 'n1'})
+        nA = tree.set(('a', 'b'), {'info': 'nA'})
+        n2 = n1.set(('3', '4'), {'info': 'n2'})
+        nB = nA.set(('c', 'd'), {'info': 'nB'})
+
+        node = tree.get([('1', '2'), ('3', '4')])
+        self.failUnless(node.data['info'] == 'n2')
+        node = tree.get([('1', '2')])
+        self.failUnless(node.data['info'] == 'n1')
+        node = tree.get([('a', 'b'), ('c', 'd')])
+        self.failUnless(node.data['info'] == 'nB')
+        self.failUnless(node.is_root == False)
+        self.failUnless(tree.is_root == True)
+
+    def test_add_get_delete(self):
+        print
+        log.info(self._testMethodName)
+        tree = config_tree.ConfigTree()
+        n1 = tree.set(('1', '2'), {'info': 'n1'})
+        nA = tree.set(('a', 'b'), {'info': 'nA'})
+        n2 = n1.set(('3', '4'), {'info': 'n2'})
+        nB = nA.set(('c', 'd'), {'info': 'nB'})
+        log.debug("root path: %s" % tree.path)
+        log.debug("Node [1 2] path: %s" % n1.path)
+        log.debug("Node [1 2 3 4] path: %s" % n2.path)
+        log.debug("Node [a b] path: %s" % nA.path)
+        log.debug("Node [a b c d] path: %s" % nB.path)
+
+    def test_search(self):
+        print
+        log.info(self._testMethodName)
+        tree = config_tree.ConfigTree()
+        fileio = tree.set(('storage', 'fileio'))
+        fileio.set(('disk', 'vm1'))
+        fileio.set(('disk', 'vm2'))
+        fileio.set(('disk', 'test1'))
+        fileio.set(('disk', 'test2'))
+        iblock = tree.set(('storage', 'iblock'))
+        iblock.set(('disk', 'vm3'))
+        iblock.set(('disk', 'vm4'))
+        iblock.set(('disk', 'test1'))
+        iblock.set(('disk', 'test2'))
+
+        tests = [([("storage", ".*"), ("disk", "vm1")], 1),
+                 ([("storage", ".*"), ("disk", "vm2")], 1),
+                 ([("storage", ".*"), ("disk", "vm1")], 1),
+                 ([("storage", "fileio"), ("disk", "vm[0-9]")], 2),
+                 ([("storage", "file.*"), ("disk", "vm[0-9]")], 2),
+                 ([("storage", ".*"), ("disk", "vm[0-9]")], 4),
+                 ([("storage", ".*"), ("disk", ".*[12]")], 6),
+                 ([("storage", ".*"), ("disk", ".*")], 8)]
+
+        for search_path, arity in tests:
+            nodes = tree.search(search_path)
+            self.failUnless(len(nodes) == arity)
+
+        log.debug("Deleting iblock subtree")
+        for node in tree.search([(".*", "iblock")]):
+            tree.delete(node.path)
+
+        tests = [([(".*", ".*"), ("disk", "vm1")], 1),
+                 ([(".*", ".*"), ("disk", "vm2")], 1),
+                 ([("storage", ".*"), ("disk", "vm1")], 1),
+                 ([(".*", "fileio"), ("disk", "vm[0-9]")], 2),
+                 ([(".*", "file.*"), ("disk", "vm[0-9]")], 2),
+                 ([(".*", ".*"), ("disk", "vm[0-9]")], 2),
+                 ([(".*", ".*"), (".*", ".*[12]")], 4),
+                 ([(".*", ".*"), (".*", ".*")], 4)]
+
+        for search_path, arity in tests:
+            nodes = tree.search(search_path)
+            log.debug("search(%s) -> %s" % (search_path, nodes))
+            self.failUnless(len(nodes) == arity)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/system/test_dump_restore.py b/tests/system/test_dump_restore.py
new file mode 100644
index 0000000..f7c5e87
--- /dev/null
+++ b/tests/system/test_dump_restore.py
@@ -0,0 +1,82 @@
+import os, sys, logging, unittest, tempfile, difflib, rtslib
+from pyparsing import ParseException
+
+logging.basicConfig()
+log = logging.getLogger('TestDumpRestore')
+log.setLevel(logging.INFO)
+
+def diffs(a, b):
+    differ = difflib.Differ()
+    context = []
+    result = []
+
+    for line in differ.compare(a.splitlines(), b.splitlines()):
+        if line[0] in "+-":
+            result.extend(context[-5:])
+            result.append(line)
+        elif line[0] == "?":
+            result.append(line[:-1])
+            context = []
+        else:
+            context.append(line)
+    return '\n'.join(result)
+
+class TestDumpRestore(unittest.TestCase):
+
+    samples_dir = '../data'
+
+    def setUp(self):
+        self.config_backup = rtslib.Config()
+        self.config_backup.load_live()
+        print
+        log.info(self._testMethodName)
+
+    def tearDown(self):
+        print("Restoring initial config...")
+        for step in self.config_backup.apply():
+            print(step)
+
+    def test_load_apply_config(self):
+        filepath = "%s/config_ramdisk_fileio_iscsi.lio" % self.samples_dir
+        config = rtslib.Config()
+        config.load(filepath)
+        for step in config.apply():
+            print(step)
+
+    def test_clear_apply_config(self):
+        config = rtslib.Config()
+        config.verify()
+        for step in config.apply():
+            print(step)
+
+    def test_config_samples(self):
+        samples = ["%s/%s" % (self.samples_dir, name)
+                   for name in sorted(os.listdir(self.samples_dir))
+                   if name.startswith("config_sample_")
+                   if name.endswith(".lio")]
+        for sample in samples:
+            with open(sample) as fd:
+                orig = fd.read()
+
+            config = rtslib.Config()
+            print("Loading %s" % sample)
+            config.load(sample)
+            diff = diffs(orig, config.dump())
+            print(diff)
+            self.failIf(diff)
+
+            print("Verifying %s" % sample)
+            config.verify()
+            print("Applying %s" % sample)
+            for step in config.apply():
+                print(step)
+
+            config = rtslib.Config()
+            print("Reloading %s from live" % sample)
+            config.load_live()
+            diff = diffs(orig, config.dump())
+            print(diff)
+            self.failIf(diff)
+
+if __name__ == '__main__':
+    unittest.main()