diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..e0416b01 --- /dev/null +++ b/.gitignore @@ -0,0 +1,15 @@ +#Ignore all .o or .a files +*.[oa] +#Ignore all files with ~ at the end +*~ +#Ignore all files with .flc suffix +*.flc +#Ignore files starting with #, which are the +# emergency saved emacs files. +\#* +#Ignore obj subdirectory +obj/ +#Ignore img subdirectory +img/ +#Ignore pkg +pkg diff --git a/README.md b/README.md new file mode 100644 index 00000000..d6849c71 --- /dev/null +++ b/README.md @@ -0,0 +1,111 @@ +IBM Data Engine for NoSQL Software Libraries +============================================ + +IBM Data Engine for NoSQL - Power Systems Edition creates a new tier of memory by attaching up to 57 Terabytes of auxiliary flash memory to the processor without the latency issues of traditional I/O storage. While not as fast as DRAM, the latency is within the acceptable limit of most applications especially when data is accessed over the network. Flash is also dramatically less expensive than DRAM, and helps reduce the deployment and operational cost for delivering the customer solution. Customers, MSPs, and ISPs all benefit from application of this new technology in the NoSQL application space. Exploiting hardware and software built-in to IBM’s flagship POWER8 open architecture means that clients no longer must choose between “big” or “fast” for their solutions. +Read more, including API guides at: + +* [IBM Data Engine for NoSQL Whitepaper](http://ibm.biz/capiflash) + +To configure a build environment on Ubuntu: +``` +#!/bin/bash +#a ready-to-go toolchain exists in the IBM Toolchain for Linux. Configure its repo and download it +sudo apt-get -y install software-properties-common #needed for the next few lines... 
+#set up at7.1 repo +wget ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/ubuntu/dists/precise/6976a827.gpg.key + +sudo apt-key add 6976a827.gpg.key +sudo add-apt-repository "deb ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/ubuntu trusty at7.1 " + +sudo apt-get -y update +sudo apt-get -y install advance-toolchain-at7.1-runtime advance-toolchain-at7.1-perf advance-toolchain-at7.1-devel advance-toolchain-at7.1-mcore-libs libudev1 + +``` + +To configure a build environment on RHEL or Fedora: +``` +#!/bin/bash +cat >/etc/yum.repos.d/atX.X.repo +# Beginning of configuration file +[atX.X] +name=Advance Toolchain Unicamp FTP +baseurl=ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/redhat/RHEL7 +failovermethod=priority +enabled=1 +gpgcheck=1 +gpgkey=ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b +# End of configuration file + + +yum install make cscope ctags doxygen git gitk links +yum install advance-toolchain-at7.1-runtime advance-toolchain-at7.1-devel advance-toolchain-at7.1-perf advance-toolchain-at7.1-mcore-libs + +``` + +### API Guide +The IBM Data Engine for NoSQL provides two major sets of public APIs. These are described in: +- [cflash - Block Layer APIs](src/block/README.md) +- [arkdb - Key/Value Layer APIs](src/kv/README.md) + + +### Building and installing + +Builds are configurable for different purposes. If no Data Engine for NoSQL Accelerator is available, you can still do active development using a "File Mode." See below for how to select the mode. Likewise, you may also select the endianness of your code (if needed). + +As a developer, to get started: +1. clone the repository +2. cd capiflash +3. (optional) modify / select a customrc file (see below) +4. source env.bash +5. make -j32 #build the code +6. make test && make unit #run unit tests +7. 
make fvt #run FVT tests - note you may need to set one or more env variables + +#### customrc - Targeting a specific platform or tuning + +Note: Developers have options to enable / disable specific targets (e.g. Big endian PPC64BE vs little endian PPC64EL) or P8 vs P7 tunings. See the customrc.p8be as an example. Creating a new component-specific environment variable is legal, however the env variable should be optional and provide a default that is safe for production. + +Current valid options: +|ENV Variable | Component | Usage (BOLD = default) | +|------------------------|-----------|-------------------------| +|TARGET_PLATFORM | (all) | PPC64BE - Big-Endian Structures | +| | | PPC64LE - Little-Endian Structures | +|CUSTOMFLAGS | (all) | Custom GCC flags. Used typically to enable P8 or P7 tunings, debug, optimization, etc. | +|BLOCK_FILEMODE_ENABLED | block | Forces Block Layer to redirect all IO to a file instead of a CAPI device. 1 = enabled, 0 = disabled | +|BLOCK_KERNEL_MC_ENABLED | block | Enables block layer to communicate with cxlflash driver built in to the Linux kernel. For more information, see https://www.kernel.org/doc/Documentation/powerpc/cxlflash.txt | + + +Prebuilt customrc files exist for different environments. Most users will want to use "customrc.p8elblkkermc" or "customrc.p8el": + +CustomRC files: +|Filename | Description +|------------------------|--------------------------------------| +|customrc.p8el | Little-endian Linux, P8 Tunings, Block FileMode enabled | +|customrc.p8elblkkermc | Little-endian Linux, P8 Tunings, Real IO to CXL Flash kernel driver | + + +Example on a POWER8 Little-endian system: +``` +ln -s customrc.p8elblkkermc customrc +source env.bash +make cleanall +make -j32 +``` + +#### Unit Tests + +The software package relies on Google Test. 
For more information, see src/test/framework/README.md + +Example of acquiring the test framework and running unit tests in file mode: +``` +pushd src/test/framework +git clone git@github.com:google/googletest.git +popd +ln -s customrc.p8el customrc +source env.bash +make cleanall +make -j32 +make -j32 tests +make run_unit #run the unit tests - note that certain test cases will create up to a 1GB file in /tmp during the test run + +``` diff --git a/config.aix.mk b/config.aix.mk new file mode 100644 index 00000000..9fd8d7f0 --- /dev/null +++ b/config.aix.mk @@ -0,0 +1,483 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: config.aix.mk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +SHELL=/bin/bash + +ship: + ${MAKE} code_pass + @if [[ $(notdir $(PWD)) = test ]]; then ${MAKE} test; fi + +docs: + @echo "WARNING: Skipping docs step" + +tests: + ${MAKE} ship + ${MAKE} test + +run_fvt: + ${MAKE} tests + ${MAKE} fvt + +run_unit: + ${MAKE} tests + ${MAKE} unit + +#generate VPATH based on these dirs. +VPATH_DIRS=. 
${ROOTPATH}/src/common ${ROOTPATH}/obj/lib/ ${ROOTPATH}/img /lib +#generate the VPATH, subbing :'s for spaces +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +VPATH += $(subst $(SPACE),:,$(VPATH_DIRS)) + +## output libs, objs for userdetails parsers +UD_DIR = ${ROOTPATH}/obj/modules/userdetails +UD_OBJS = ${UD_DIR}/*.o ${UD_DIR}/*.so ${UD_DIR}/*.a + +ARFLAGS =-X32_64 rv +LIBPATHS=-L${ROOTPATH}/img + +LDFLAGS_PROGRAMS = ${LIBPATHS} -lc ${LINKLIBS} +LDFLAGS64_PROGRAMS = -b64 ${LDFLAGS_PROGRAMS} + +PGMDIR = ${ROOTPATH}/obj/programs +TESTDIR = ${ROOTPATH}/obj/tests +GENDIR = ${ROOTPATH}/obj/genfiles +IMGDIR = ${ROOTPATH}/img +PKGDIR = ${ROOTPATH}/pkg + +ifdef MODULE +OBJDIR = ${ROOTPATH}/obj/modules/${MODULE} +BEAMDIR = ${ROOTPATH}/obj/beam/${MODULE} + +MEMBER = shr.o +MEMBER64 = shr_64.o + +#EXTRACOMMONFLAGS += -fPIC +ifdef STRICT + EXTRACOMMONFLAGS += -Weffc++ +endif +#CUSTOMFLAGS += -D__SURELOCK_MODULE=${MODULE} +#For AIX use the following instead. +#Suppress infinite loop warnings on AIX (1500-010) +CUSTOMFLAGS += -qcpluscmt -Dinline=__inline -D_AIX -D__SURELOCK_MODULE=${MODULE} -qsuppress=1500-010 +CUSTOMFLAGS64 += -q64 ${CUSTOMFLAGS} +LIBS += $(addsuffix .so, $(addprefix lib, ${MODULE})) +LIBS64 += $(addsuffix .64so, $(addprefix lib, ${MODULE})) +AR_LIBS += $(addsuffix .a, $(addprefix lib, ${MODULE})) +EXTRAINCDIR += ${GENDIR} ${CURDIR} +#EXPFLAGS = -bE:exportfile +LDFLAGS = -bnoentry -bM:SRE $(EXPFLAGS) ${LIBPATHS} -lc +#LDFLAGS = -bnoentry -bM:SRE -bexpall ${LIBPATHS} -lc +LDFLAGS64 = -b64 ${LDFLAGS} +LIBRARIES = $(addprefix ${OBJDIR}/, ${MEMBER}) +ifdef OBJS64 +LIBRARIES64 = $(addprefix ${OBJDIR}/, ${MEMBER64}) +endif +else +OBJDIR = ${ROOTPATH}/obj/surelock +BEAMDIR = ${ROOTPATH}/obj/beam/surelock + +CUSTOMFLAGS += -qcpluscmt -Dinline=__inline -D_AIX +CUSTOMFLAGS64 += -q64 -qcpluscmt -Dinline=__inline -D_AIX +EXTRAINCDIR += ${GENDIR} ${CURDIR} +LDFLAGS = ${LIBPATHS} -lc ${LINKLIBS} +LDFLAGS64 = -b64 ${LDFLAGS} +#GTESTFDIR=${ROOTPATH}/src/test +endif + 
+__internal__comma= , +__internal__empty= +__internal__space=$(__internal__empty) $(__internal__empty) +MAKE_SPACE_LIST = $(subst $(__internal__comma),$(__internal__space),$(1)) + +ifdef SURELOCK_DEBUG +ifeq ($(SURELOCK_DEBUG),1) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=1 +else +ifndef MODULE +ifneq (,$(filter kernel,$(call MAKE_SPACE_LIST, $(HOSTBOOT_DEBUG)))) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=kernel +endif +else +ifneq (,$(filter $(MODULE), $(call MAKE_SPACE_LIST, $(HOSTBOOT_DEBUG)))) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=$(MODULE) +endif +endif +endif +endif + +ifeq ($(USE_ADVANCED_TOOLCHAIN),yes) + CC_RAW = cc + CXX_RAW = xlC_r + CC = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CC_RAW} + CXX = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CXX_RAW} + LD = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/ld + +else + CC_RAW = cc + CXX_RAW = xlC_r + CC = ${CC_RAW} + CXX = ${CXX_RAW} + LD = ld + +endif + +#TODO: need to figure out if we can run beam... MCH +BEAMVER = beam-3.5.2 +BEAMPATH = /afs/rch/projects/esw/beam/${BEAMVER} +BEAMCMD = i686-mcp6-jail ${BEAMPATH}/bin/beam_compile +BEAMFLAGS = \ + --beam::source=${BEAMPATH}/tcl/beam_default_parms.tcl \ + --beam::source=${ROOTPATH}/src/build/beam/compiler_c_config.tcl \ + --beam::source=${ROOTPATH}/src/build/beam/compiler_cpp_config.tcl \ + --beam::exit0 \ + -o /dev/null + +#TODO: Find correct flags for surelock - copied from POWER7 Hostboot for now +COMMONFLAGS = ${EXTRACOMMONFLAGS} + +ifndef NO_O3 +COMMONFLAGS += -O3 +endif + +#add support for the rev ID header +GITREVISION:=$(shell git rev-list HEAD | wc -l | sed -e 's/^ *//')-$(shell git rev-parse --short HEAD) +CUSTOMFLAGS += -DGITREVISION='"${GITREVISION}"' + + +CFLAGS += ${COMMONFLAGS} -g \ + ${CUSTOMFLAGS} \ + ${ARCHFLAGS} \ + ${INCFLAGS} +CFLAGS64 += ${COMMONFLAGS} -g \ + ${CUSTOMFLAGS64} \ + ${ARCHFLAGS} \ + ${INCFLAGS} +ASMFLAGS = ${COMMONFLAGS} + +# TODO: avoid LD error for XCOFF64 with 32 bit objets +#CXXFLAGS += ${CFLAGS64} +CXXFLAGS += ${CFLAGS} +#LDFLAGS = --sort-common ${COMMONFLAGS} + + 
+ifdef USE_PYTHON + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.py +else + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.pl +endif + +INCDIR = /usr/include /usr/include/sys ${ROOTPATH}/src/include/ +_INCDIRS = ${INCDIR} ${EXTRAINCDIR} +INCFLAGS = $(addprefix -I, ${_INCDIRS} ) +ASMINCFLAGS = $(addprefix $(lastword -Wa,-I), ${_INCDIRS}) + +OBJECTS = $(addprefix ${OBJDIR}/, ${OBJS}) +OBJECTS64 = $(addprefix ${OBJDIR}/, ${OBJS64}) +AR_LIBRARIES = $(addprefix ${IMGDIR}/, ${AR_LIBS}) + +ifdef IMGS +IMGS_ = $(addprefix ${IMGDIR}/, ${IMGS}) +LIDS = $(foreach lid,$(addsuffix _LIDNUMBER, $(IMGS)),$(addprefix ${IMGDIR}/,$(addsuffix .ruhx, $($(lid))))) +IMAGES = $(addsuffix .bin, ${IMGS_}) $(addsuffix .elf, ${IMGS_}) ${LIDS} +#$(addsuffix .ruhx, ${IMGS_}) +IMAGE_EXTRAS = $(addprefix ${IMGDIR}/, hbotStringFile) +endif + + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.C + @mkdir -p ${OBJDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.c + @mkdir -p ${OBJDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${OBJDIR}/%.64o ${OBJDIR}/%.list : %.C + @mkdir -p ${OBJDIR} + ${CXX} -q64 -c ${CXXFLAGS64} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${OBJDIR}/%.64o ${OBJDIR}/%.list : %.c + @mkdir -p ${OBJDIR} + ${CC} -q64 -c ${CFLAGS64} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${OBJDIR}/%.o : %.S + @mkdir -p ${OBJDIR} + ${CC} -c ${ASMFLAGS} $< -o $@ ${ASMINCFLAGS} ${INCFLAGS} + +ifdef MODULE +${OBJDIR}/${MEMBER} : ${OBJECTS} + @mkdir -p ${IMGDIR} + ${LD} ${LDFLAGS} ${MODLIBS} -o $@ $(OBJECTS) + + +${OBJDIR}/${MEMBER64} : ${OBJECTS64} + @mkdir -p ${IMGDIR} + ${LD} ${LDFLAGS64} ${MODLIBS} -o $@ $(OBJECTS64) +endif + +${IMGDIR}/%.a : ${LIBRARIES} ${LIBRARIES64} + @mkdir -p ${IMGDIR} + $(AR) $(ARFLAGS) $@ $(LIBRARIES) $(LIBRARIES64) + -@ ($(RANLIB) -X32_64 $@ || true) >/dev/null 2>&1 + +${PGMDIR}/%.o : %.c + @mkdir -p ${PGMDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) 
+${PGMDIR}/%.o : %.C + @mkdir -p ${PGMDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) +${PGMDIR}/%.64o : %.c + @mkdir -p ${PGMDIR} + ${CC} -q64 -c ${CFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) +${PGMDIR}/%.64o : %.C + @mkdir -p ${PGMDIR} + ${CXX} -q64 -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${TESTDIR}/%.o : %.c + @mkdir -p ${TESTDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) +${TESTDIR}/%.o : %.C + @mkdir -p ${TESTDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) +${TESTDIR}/64obj/%.o : %.c + @mkdir -p ${TESTDIR}/64obj + ${CC} -q64 -c ${CFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) +${TESTDIR}/64obj/%.o : %.C + @mkdir -p ${TESTDIR}/64obj + ${CXX} -q64 -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -qmakedep -MF $(@:.o=.u) + +${BEAMDIR}/%.beam : %.C + @mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.c + @mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.S + echo Skipping ASM file. 
+ +%.d: + cd ${basename $@} && ${MAKE} code_pass +%.test: + cd ${basename $@} && ${MAKE} test +%.fvt: + cd ${basename $@} && ${MAKE} fvt +%.unit: + cd ${basename $@} && ${MAKE} unit +%.clean: + cd ${basename $@} && ${MAKE} clean +%.beamdir: + cd ${basename $@} && ${MAKE} beam + +#Build a C-file main, build the *_OFILES into OBJDIR, and link them together +define PROGRAMS_template + $(1)_PGM_OFILES = $(addprefix ${PGMDIR}/, $($(notdir $(1)_OFILES))) $(addprefix $(PGMDIR)/, $(notdir $1)).o + $(1): $$($(1)_PGM_OFILES) $(notdir $(1)).c + ALL_OFILES += $$($(1)_PGM_OFILES) $(1) +endef +$(foreach pgm,$(PROGRAMS),$(eval $(call PROGRAMS_template,$(pgm)))) + +$(PROGRAMS): + @mkdir -p ${PGMDIR} + $(CC) $(CFLAGS) $(LDFLAGS_PROGRAMS) $($(@)_PGM_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +define PROGRAMS64_template + $(1)_PGMS64_OFILES = $(addprefix ${PGMDIR}/, $($(notdir $(1)_OFILES))) $(addprefix $(PGMDIR)/, $(notdir $(1:64=))).64o + $(1): $$($(1)_PGMS64_OFILES) $(notdir $(1:64=)).c + ALL_OFILES += $$($(1)_PGMS64_OFILES) $(1) +endef + +$(foreach pgm64,$(PROGRAMS64),$(eval $(call PROGRAMS64_template,$(pgm64)))) + +$(PROGRAMS64): + $(CC) -q64 $(CFLAGS) $(LDFLAGS64_PROGRAMS) $($(@)_PGMS64_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +#Build a C-file main, build the *_OFILES into TESTDIR, and link them together +define BIN_TESTS_template + $(1)_BTEST_OFILES = $(addprefix ${TESTDIR}/, $($(notdir $(1)_OFILES))) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$($(1)_BTEST_OFILES) $(notdir $(1)).c + ALL_OFILES += $$($(1)_BTEST_OFILES) $(1) +endef +$(foreach bin_test,$(BIN_TESTS),$(eval $(call BIN_TESTS_template,$(bin_test)))) + +$(BIN_TESTS): + $(CC) $(CFLAGS) $(LDFLAGS_PROGRAMS) $($(@)_BTEST_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +define BIN_TESTS64_template + $(1)_BTEST_OFILES = $(addprefix $(TESTDIR)/64obj/, $($(notdir 
$(1:64=)_OFILES))) $(addprefix $(TESTDIR)/64obj/, $(notdir $(1:64=))).o + $(1): $$($(1)_BTEST_OFILES) $(notdir $(1:64=)).c + ALL_OFILES += $$($(1)_BTEST_OFILES) $(1) +endef + +$(foreach bin_test,$(BIN_TESTS64),$(eval $(call BIN_TESTS64_template,$(bin_test)))) + +$(BIN_TESTS64): + $(CC) -q64 $(CFLAGS) $(LDFLAGS64_PROGRAMS) $($(@)_BTEST_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +#Build a C++ file that uses gtest, build the *_OFILES defined, and link with gtest_main + +define GTESTS_template + GTESTS_DEPS = $(TESTDIR)/gtest-all.o $(TESTDIR)/gtest_main.o + $(1)_GTESTS_OFILES = $(addprefix $(TESTDIR)/,$($(notdir $(1))_OFILES)) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$(GTESTS_DEPS) $$($(1)_GTESTS_OFILES) $(notdir $(1)).C + ALL_OFILES += $$($(1)_GTESTS_OFILES) $(1) +endef +$(foreach _gtest,$(GTESTS_DIR),$(eval $(call GTESTS_template,$(_gtest)))) + +$(GTESTS_DIR): + $(CXX) $(CFLAGS) $(LDFLAGS_PROGRAMS) $($(@)_GTESTS_OFILES) $(GTESTS_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +define GTESTS64_template + GTESTS64_DEPS = $(TESTDIR)/64obj/gtest-all.o $(TESTDIR)/64obj/gtest_main.o + $(1)_GTESTS64_OFILES = $(addprefix $(TESTDIR)/64obj/,$($(notdir $(1:64=))_OFILES)) $(addprefix $(TESTDIR)/64obj/, $(notdir $(1:64=))).o + $(1): $$(GTESTS64_DEPS) $$($(1)_GTESTS64_OFILES) $(notdir $(1:64=)).C + ALL_OFILES += $$($(1)_GTESTS64_OFILES) $(1) +endef +$(foreach _gtest,$(GTESTS64_DIR),$(eval $(call GTESTS64_template,$(_gtest)))) + +$(GTESTS64_DIR): + $(CXX) -q64 $(CFLAGS) $(LDFLAGS64_PROGRAMS) $($(@)_GTESTS64_OFILES) $(GTESTS64_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +#Build a C++ file that uses gtest, build *_OFILES into TESTDIR, link with gtest +define GTESTS_NM_template + GTEST_NM_DEPS = $(TESTDIR)/gtest-all.o + $(1)_GTESTS_NM_OFILES = 
$(addprefix $(TESTDIR)/,$($(notdir $(1))_OFILES)) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$(GTEST_NM_DEPS) $$($(1)_GTESTS_NM_OFILES) $(notdir $(1)).C + ALL_OFILES += $$($(1)_GTESTS_NM_OFILES) $(1) +endef +$(foreach _gtest_nm,$(GTESTS_NM_DIR),$(eval $(call GTESTS_NM_template,$(_gtest_nm)))) + +$(GTESTS_NM_DIR): + $(CXX) $(CFLAGS) $(LDFLAGS_PROGRAMS) $($(@)_GTESTS_NM_OFILES) $(GTEST_NM_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +#Build a C++ file that uses gtest, build *_OFILES into TESTDIR, link with gtest +define GTESTS64_NM_template + GTESTS64_NM_DEPS = $(TESTDIR)/64obj/gtest-all.o + $(1)_GTESTS64_NM_OFILES = $(addprefix $(TESTDIR)/64obj/,$($(notdir $(1:64=))_OFILES)) $(addprefix $(TESTDIR)/64obj/, $(notdir $(1:64=))).o + $(1): $$(GTESTS64_NM_DEPS) $$($(1)_GTESTS64_NM_OFILES) $(notdir $(1:64=)).C + ALL_OFILES += $$($(1)_GTESTS64_NM_OFILES) $(1) +endef +$(foreach _gtest64_nm,$(GTESTS64_NM_DIR),$(eval $(call GTESTS64_NM_template,$(_gtest64_nm)))) + +$(GTESTS64_NM_DIR): + $(CXX) -q64 $(CFLAGS) $(LDFLAGS64_PROGRAMS) $($(@)_GTESTS64_NM_OFILES) $(GTESTS64_NM_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- + +DEPS += $(addsuffix .u, ${BIN_TESTS}) \ + $(addsuffix .u, ${BIN_TESTS64}) \ + $(addsuffix .u, ${PROGRAMS}) \ + $(addsuffix .u, ${PROGRAMS64}) \ + $(addsuffix .u, ${GTESTS_DIR}) \ + $(addsuffix .u, ${GTESTS64_DIR}) \ + $(addsuffix .u, ${GTESTS_NM_DIR}) \ + $(addsuffix .u, ${GTESTS64_NM_DIR}) \ + $(OBJECTS:.o=.u) \ + $(OBJECTS64:.64o=.u) + +BEAMOBJS = $(addprefix ${BEAMDIR}/, ${OBJS:.o=.beam}) +GENTARGET = $(addprefix %/, $(1)) + +${PROGRAMS} ${BIN_TESTS} ${GTESTS_DIR} ${GTESTS64_DIR} $(GTESTS_NM_DIR) ${OBJECTS} ${OBJECTS64} ${LIBRARIES} ${LIBRARIES64} ${AR_LIBRARIES}: makefile +${LIBRARIES} ${LIBRARIES64} ${AR_LIBRARIES}: ${OBJECTS} ${OBJECTS64} +${EXTRA_PARTS} ${PROGRAMS} ${PROGRAMS64} : ${LIBRARIES} 
${LIBRARIES64} ${AR_LIBRARIES} +$(GTESTS_DIR) $(GTESTS64_DIR) $(GTESTS_NM_DIR) $(GTESTS64_NM_DIR) $(BIN_TESTS) $(BIN_TESTS64): $(GTEST_TARGETS) + +code_pass: ${SUBDIRS} ${LIBRARIES} ${LIBRARIES64} ${AR_LIBRARIES} ${EXTRA_PARTS} ${PROGRAMS} ${PROGRAMS64} +test: ${SUBDIRS:.d=.test} ${BIN_TESTS} ${BIN_TESTS64} ${GTESTS_DIR} $(GTESTS64_DIR) $(GTESTS_NM_DIR) $(GTESTS64_NM_DIR) +fvt: ${SUBDIRS:.d=.fvt} +unit: ${SUBDIRS:.d=.unit} +beam: ${SUBDIRS:.d=.beamdir} ${BEAMOBJS} + +install: + rm -rf ${PKGDIR}/install_root/* + cd ${ROOTPATH}/src/build/install && ${MAKE} +# @echo "WARNING: Skipping install phase" +packaging: + cd ${ROOTPATH}/src/build/packaging && ${MAKE} +# @echo "WARNING: Skipping packaging phase" + +cscope: + @mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f cscope.* ; \ + find ../../ -name '*.[CHchS]' -type f -print > cscope.files; \ + cscope -bq) + +ctags: + @mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f tags ; \ + ctags ../../src) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),cleanall) +ifneq ($(MAKECMDGOALS),unit) +ifneq ($(MAKECMDGOALS),fvt) +ifneq ($(MAKECMDGOALS),run_unit) +ifneq ($(MAKECMDGOALS),run_fvt) +ifneq ($(MAKECMDGOALS),install) +ifneq ($(MAKECMDGOALS),packaging) + -include $(DEPS) +endif +endif +endif +endif +endif +endif +endif +endif + +cleanud : + rm -f ${UD_OBJS} + +clean: cleanud ${SUBDIRS:.d=.clean} + (rm -f ${OBJECTS} ${OBJECTS:.o=.u} ${OBJECTS:.o=.list} \ + ${OBJECTS64} ${OBJECTS64:.o=.u} ${OBJECTS64:.o=.list} \ + ${OBJECTS:.o=.o.hash} ${BEAMOBJS} ${LIBRARIES} ${LIBRARIES64} ${AR_LIBRARIES} \ + ${IMAGES} ${IMAGES:.bin=.list} ${IMAGES:.bin=.syms} \ + ${IMAGES:.bin=.bin.modinfo} ${IMAGES:.ruhx=.lid} \ + ${IMAGES:.ruhx=.lidhdr} ${IMAGES:.bin=_extended.bin} \ + ${IMAGE_EXTRAS} ${TESTDIR}/* \ + ${EXTRA_OBJS} ${_GENFILES} ${EXTRA_PARTS} ${EXTRA_CLEAN}\ + ${PROGRAMS} ${PROGRAMS64} ${ALL_OFILES} \ + *.a *.o *~* ) + +cleanall: + rm -Rf ${ROOTPATH}/obj/* + rm -Rf $(IMGDIR)/* + rm -Rf 
$(PKGDIR)/* + +ifdef IMAGES + ${MAKE} ${IMAGES} ${IMAGE_EXTRAS} +endif diff --git a/config.linux.mk b/config.linux.mk new file mode 100644 index 00000000..7cb60519 --- /dev/null +++ b/config.linux.mk @@ -0,0 +1,424 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: config.linux.mk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +SHELL=/bin/bash + +ship: + ${MAKE} dep + ${MAKE} code_pass + @if [[ $(notdir $(PWD)) = test ]]; then ${MAKE} test; fi + +docs: src/build/doxygen/doxygen.conf + rm -rf obj/doxygen/* + doxygen src/build/doxygen/doxygen.conf + + +tests: + ${MAKE} ship + ${MAKE} test + +run_fvt: + ${MAKE} tests + ${MAKE} fvt + +run_unit: + ${MAKE} tests + ${MAKE} unit + +#needed to provide linker rpath hints for installed code +DEFAULT_LIB_INSTALL_PATH = /opt/ibm/capikv/lib +#generate VPATH based on these dirs. +VPATH_DIRS=. 
${ROOTPATH}/src/common ${ROOTPATH}/obj/lib/ ${ROOTPATH}/img +#generate the VPATH, subbing :'s for spaces +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +VPATH += $(subst $(SPACE),:,$(VPATH_DIRS)) + +## output libs, objs for userdetails parsers +UD_DIR = ${ROOTPATH}/obj/modules/userdetails +UD_OBJS = ${UD_DIR}/*.o ${UD_DIR}/*.so ${UD_DIR}/*.a + +PGMDIR = ${ROOTPATH}/obj/programs +TESTDIR = ${ROOTPATH}/obj/tests +GENDIR = ${ROOTPATH}/obj/genfiles +IMGDIR = ${ROOTPATH}/img +PKGDIR = ${ROOTPATH}/pkg + +ifdef MODULE +OBJDIR = ${ROOTPATH}/obj/modules/${MODULE} +BEAMDIR = ${ROOTPATH}/obj/beam/${MODULE} + +EXTRACOMMONFLAGS += -fPIC +ifdef STRICT + EXTRACOMMONFLAGS += -Weffc++ +endif +CUSTOMFLAGS += -D__SURELOCK_MODULE=${MODULE} +#For AIX use the following instead. +#CUSTOMFLAGS += -D_AIX -D__SURELOCK_MODULE=${MODULE} +LIBS += $(addsuffix .so, $(addprefix lib, ${MODULE})) +EXTRAINCDIR += ${GENDIR} ${CURDIR} +else +OBJDIR = ${ROOTPATH}/obj/surelock +BEAMDIR = ${ROOTPATH}/obj/beam/surelock +EXTRAINCDIR += ${GENDIR} ${CURDIR} +endif + +__internal__comma= , +__internal__empty= +__internal__space=$(__internal__empty) $(__internal__empty) +MAKE_SPACE_LIST = $(subst $(__internal__comma),$(__internal__space),$(1)) + + +ifdef SURELOCK_DEBUG +ifeq ($(SURELOCK_DEBUG),1) + CUSTOMFLAGS += -DSURELOCK_DEBUG=1 +endif +endif + + +ifeq ($(USE_ADVANCED_TOOLCHAIN),yes) + CC_RAW = gcc + CXX_RAW = g++ + CC = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CC_RAW} + CXX = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CXX_RAW} + LD = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/gcc + OBJDUMP = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/objdump + #this line is very specifically-written to explicitly-place the ATx.x stuff at the FRONT of the VPATH dirs. + #this is a REQUIREMENT of the advanced toolchain for linux. + VPATH_DIRS:= ${ADV_TOOLCHAIN_PATH}/lib64 ${VPATH_DIRS} + #see the ld flags below (search for rpath). This puts the atx.x stuff on the front + #which is REQUIRED by the toolchain. 
+ RPATH_PREPEND = -rpath,${ADV_TOOLCHAIN_PATH}/lib64 + +else + CC_RAW = gcc + CXX_RAW = g++ + CC = ${CC_RAW} + CXX = ${CXX_RAW} + LD = ld + OBJDUMP = objdump + #if we are NOT using the atx.x stuff, prepend nothing. + RPATH_PREPEND = +endif + +#TODO: Find correct flags for surelock +#moved custom P8 tunings to customrc file. +COMMONFLAGS = ${EXTRACOMMONFLAGS} + +ifndef NO_O3 +COMMONFLAGS += -O3 +endif + +#add support for the rev ID header +GITREVISION:=$(shell git rev-list HEAD | wc -l)-$(shell git rev-parse --short HEAD) +CUSTOMFLAGS += -DGITREVISION='"${GITREVISION}"' + + +CFLAGS += ${COMMONFLAGS} \ + -Wall ${CUSTOMFLAGS} ${ARCHFLAGS} \ + -R '/opt/at7.1/lib64:$$ORIGIN/../lib:$$ORIGIN:/lib:/usr/lib:/opt/ibm/capikv/lib' \ + ${INCFLAGS} +#if ALLOW_WARNINGS is NOT defined, we assume we are compiling production code +#as such, we adhere to strict compile flags. If this is defined then we warn +#but allow the compile to continue. +ifndef ALLOW_WARNINGS + CFLAGS += -Werror + CXXFLAGS += -Werror +endif + +ifdef COVERAGE +COMMONFLAGS += -fprofile-arcs -ftest-coverage +LDFLAGS += -lgcov +LINKLIBS += -lgcov +endif + +ASMFLAGS = ${COMMONFLAGS} +CXXFLAGS += ${CFLAGS} -fno-rtti -fno-exceptions -Wall +#RPATH order is important here. the prepend var lets us throw the ATxx stuff in first if needed. 
+LDFLAGS = ${COMMONFLAGS} -Wl,${RPATH_PREPEND},-rpath,$$ORIGIN/../lib,-rpath,$$ORIGIN,-rpath,$(DEFAULT_LIB_INSTALL_PATH) + + +INCDIR = ${ROOTPATH}/src/include/ +_INCDIRS = ${INCDIR} ${EXTRAINCDIR} +INCFLAGS = $(addprefix -I, ${_INCDIRS} ) +ASMINCFLAGS = $(addprefix $(lastword -Wa,-I), ${_INCDIRS}) + +OBJECTS = $(addprefix ${OBJDIR}/, ${OBJS}) +LIBRARIES = $(addprefix ${IMGDIR}/, ${LIBS}) + +ifdef IMGS +IMGS_ = $(addprefix ${IMGDIR}/, ${IMGS}) +LIDS = $(foreach lid,$(addsuffix _LIDNUMBER, $(IMGS)),$(addprefix ${IMGDIR}/,$(addsuffix .ruhx, $($(lid))))) +IMAGES = $(addsuffix .bin, ${IMGS_}) $(addsuffix .elf, ${IMGS_}) ${LIDS} +#$(addsuffix .ruhx, ${IMGS_}) +endif + + +${OBJDIR}/%.o: %.C + @mkdir -p ${OBJDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + +${OBJDIR}/%.list : ${OBJDIR}/%.o + ${OBJDUMP} -dCS $@ > $(basename $@).list + +${OBJDIR}/%.o: %.c + @mkdir -p ${OBJDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . + +${OBJDIR}/%.o : %.S + @mkdir -p ${OBJDIR} + ${CC} -c ${ASMFLAGS} $< -o $@ ${ASMINCFLAGS} ${INCFLAGS} -iquote . + +${OBJDIR}/%.dep : %.C + @mkdir -p ${OBJDIR}; + @rm -f $@; + ${CXX} -M ${CXXFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.c + @mkdir -p ${OBJDIR}; + @rm -f $@; + ${CC} -M ${CFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.S + @mkdir -p ${OBJDIR}; + @rm -f $@; + ${CC} -M ${ASMFLAGS} $< -o $@.$$$$ ${ASMINCFLAGS} ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ + +${IMGDIR}/%.so : ${OBJECTS} + @mkdir -p ${IMGDIR} + ${LD} -shared -z now ${LDFLAGS} -o $@ $(OBJECTS) $(MODULE_LINKLIBS) ${LIBPATHS} +# ${LD} -shared -z now ${LDFLAGS} -o $@ $(OBJECTS) + +${PGMDIR}/%.o : %.c + @mkdir -p ${PGMDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . 
+ ${OBJDUMP} -dCS $@ > $(basename $@).list +${PGMDIR}/%.o : %.C + @mkdir -p ${PGMDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + ${OBJDUMP} -dCS $@ > $(basename $@).list +${PGMDIR}/%.dep : %.C + @mkdir -p ${PGMDIR}; + @rm -f $@; + ${CXX} -M ${CXXFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${PGMDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ +${PGMDIR}/%.dep : %.c + @mkdir -p ${PGMDIR}; + @rm -f $@; + ${CC} -M ${CFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${PGMDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ + +${TESTDIR}/%.o : %.c + @mkdir -p ${TESTDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . + ${OBJDUMP} -dCS $@ > $(basename $@).list +${TESTDIR}/%.o : %.C + @mkdir -p ${TESTDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + ${OBJDUMP} -dCS $@ > $(basename $@).list +${TESTDIR}/%.dep : %.C + @mkdir -p ${TESTDIR}; + @rm -f $@; + ${CXX} -M ${CXXFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${TESTDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ +${TESTDIR}/%.dep : %.c + @mkdir -p ${TESTDIR}; + @rm -f $@; + ${CC} -M ${CFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${TESTDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; + @rm -f $@.$$$$ + +${BEAMDIR}/%.beam : %.C + @mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.c + @mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.S + echo Skipping ASM file. 
+ +%.dep: + cd ${basename $@} && ${MAKE} dep +%.d: + cd ${basename $@} && ${MAKE} code_pass +%.test: + cd ${basename $@} && ${MAKE} test +%.fvt: + cd ${basename $@} && ${MAKE} fvt +%.unit: + cd ${basename $@} && ${MAKE} unit +%.clean: + cd ${basename $@} && ${MAKE} clean +%.beamdir: + cd ${basename $@} && ${MAKE} beam + +#Build a C-file main, build the *_OFILES into OBJDIR, and link them together +define PROGRAMS_template + $(1)_PGM_OFILES = $(addprefix ${PGMDIR}/, $($(notdir $(1)_OFILES))) $(addprefix $(PGMDIR)/, $(notdir $1)).o + $(1): $$($(1)_PGM_OFILES) $(notdir $(1)).c + ALL_OFILES += $$($(1)_PGM_OFILES) $(1) +endef +$(foreach pgm,$(PROGRAMS),$(eval $(call PROGRAMS_template,$(pgm)))) + +$(PROGRAMS): + @mkdir -p ${PGMDIR} + $(LINK.o) $(CFLAGS) $(LDFLAGS) $($(@)_PGM_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +#------------------------------------------------------------------------------- +#Build a C-file main, build the *_OFILES into TESTDIR, and link them together +define BIN_TESTS_template + $(1)_BTEST_OFILES = $(addprefix ${TESTDIR}/, $($(notdir $(1)_OFILES))) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$($(1)_BTEST_OFILES) $(notdir $(1)).c + ALL_OFILES += $$($(1)_BTEST_OFILES) $(1) +endef +$(foreach bin_test,$(BIN_TESTS),$(eval $(call BIN_TESTS_template,$(bin_test)))) + +$(BIN_TESTS): + $(LINK.o) $(CFLAGS) $(LDFLAGS) $($(@)_BTEST_OFILES) $(LINKLIBS) ${LIBPATHS} -o $@ + +#Build a C++ file that uses gtest, build *_OFILES into TESTDIR, link with gtest_main +define GTESTS_template + GTEST_DEPS = $(TESTDIR)/gtest-all.o $(TESTDIR)/gtest_main.o + $(1)_GTESTS_OFILES = $(addprefix $(TESTDIR)/,$($(notdir $(1))_OFILES)) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$(GTEST_DEPS) $$($(1)_GTESTS_OFILES) $(notdir $(1)).C + ALL_OFILES += $$($(1)_GTESTS_OFILES) $(1) +endef +$(foreach _gtest,$(GTESTS_DIR),$(eval $(call GTESTS_template,$(_gtest)))) + +#Find out if a header exists or not by creating a one-liner c program that includes it, +#compiling that, and passing 
the results back as a 'y' or 'n' - liberated from github.com/ibm-capi/libcxl's Makefile +CHECK_HEADER = $(shell echo \\\#include $(1) | \ + $(CC) $(CFLAGS) -E - > /dev/null 2>&1 && echo y || echo n) + +$(GTESTS_DIR): + $(CXX) $(CFLAGS) $(LDFLAGS) $($(@)_GTESTS_OFILES) $(GTEST_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ +#------------------------------------------------------------------------------- + +#Build a C++ file that uses gtest, build *_OFILES into TESTDIR, link with gtest_main +define GTESTS_NM_template + GTEST_NM_DEPS = $(TESTDIR)/gtest-all.o + $(1)_GTESTS_NM_OFILES = $(addprefix $(TESTDIR)/,$($(notdir $(1))_OFILES)) $(addprefix $(TESTDIR)/, $(notdir $1)).o + $(1): $$(GTEST_NM_DEPS) $$($(1)_GTESTS_NM_OFILES) $(notdir $(1)).C + ALL_OFILES += $$($(1)_GTESTS_NM_OFILES) $(1) +endef +$(foreach _gtest_nm,$(GTESTS_NM_DIR),$(eval $(call GTESTS_NM_template,$(_gtest_nm)))) + +$(GTESTS_NM_DIR): + $(CXX) $(CFLAGS) $(LDFLAGS) $($(@)_GTESTS_NM_OFILES) $(GTEST_NM_DEPS) $(LINKLIBS) ${LIBPATHS} -o $@ +#------------------------------------------------------------------------------- + +DEPS += $(addsuffix .dep, ${BIN_TESTS}) $(addsuffix .dep, ${PROGRAMS}) \ + $(addsuffix .dep, ${GTESTS_DIR}) $(OBJECTS:.o=.dep) \ + $(addsuffix .dep, ${GTESTS_NM_DIR}) + +BEAMOBJS = $(addprefix ${BEAMDIR}/, ${OBJS:.o=.beam}) +GENTARGET = $(addprefix %/, $(1)) + +${PROGRAMS} ${BIN_TESTS} ${GTESTS_DIR} $(GTESTS_NM_DIR) ${OBJECTS} ${LIBRARIES}: makefile +${LIBRARIES}: ${OBJECTS} +${EXTRA_PARTS} ${PROGRAMS}: ${LIBRARIES} +$(GTESTS_DIR) $(GTESTS_NM_DIR) $(BIN_TESTS): $(GTEST_TARGETS) + +dep: ${SUBDIRS:.d=.dep} ${DEPS} +code_pass: ${SUBDIRS} ${LIBRARIES} ${EXTRA_PARTS} ${PROGRAMS} +test: ${SUBDIRS:.d=.test} ${BIN_TESTS} ${GTESTS_DIR} ${GTESTS_NM_DIR} +fvt: ${SUBDIRS:.d=.fvt} +unit: ${SUBDIRS:.d=.unit} +beam: ${SUBDIRS:.d=.beamdir} ${BEAMOBJS} + +install: + rm -rf ${PKGDIR}/install_root/* + cd ${ROOTPATH}/src/build/install && ${MAKE} + +packaging: + cd ${ROOTPATH}/src/build/packaging && ${MAKE} + +cscope: + 
@mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f cscope.* ; \ + find ../../ -name '*.[CHchS]' -type f -fprint cscope.files; \ + cscope -bqk) + +ctags: + @mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f tags ; \ + ctags --recurse=yes --fields=+S ../../src) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),cleanall) +ifneq ($(MAKECMDGOALS),tests) +ifneq ($(MAKECMDGOALS),unit) +ifneq ($(MAKECMDGOALS),fvt) +ifneq ($(MAKECMDGOALS),run_unit) +ifneq ($(MAKECMDGOALS),run_fvt) +ifneq ($(MAKECMDGOALS),install) +ifneq ($(MAKECMDGOALS),packaging) + -include $(DEPS) +endif +endif +endif +endif +endif +endif +endif +endif +endif + +cleanud : + rm -f ${UD_OBJS} + +clean: cleanud ${SUBDIRS:.d=.clean} + (rm -f ${OBJECTS} ${OBJECTS:.o=.dep} ${OBJECTS:.o=.list} \ + ${OBJECTS:.o=.o.hash} ${BEAMOBJS} ${LIBRARIES} \ + ${IMAGES} ${IMAGES:.bin=.list} ${IMAGES:.bin=.syms} \ + ${IMAGES:.bin=.bin.modinfo} ${IMAGES:.ruhx=.lid} \ + ${IMAGES:.ruhx=.lidhdr} ${IMAGES:.bin=_extended.bin} \ + ${IMAGE_EXTRAS} ${TESTDIR}/* \ + ${EXTRA_OBJS} ${_GENFILES} ${EXTRA_PARTS} ${EXTRA_CLEAN}\ + ${PROGRAMS} ${ALL_OFILES} \ + *.a *.o *~* ) + +cleanall: + @if [[ -e ${ROOTPATH}/obj ]]; then rm -Rf ${ROOTPATH}/obj/*; fi + @if [[ -e $(IMGDIR) ]]; then rm -Rf $(IMGDIR)/*; fi + @if [[ -e $(PKGDIR) ]]; then rm -Rf $(PKGDIR)/*; fi + @echo "clean done" + +ifdef IMAGES + ${MAKE} ${IMAGES} ${IMAGE_EXTRAS} +endif diff --git a/config.mac.mk b/config.mac.mk new file mode 100644 index 00000000..9c373e38 --- /dev/null +++ b/config.mac.mk @@ -0,0 +1,289 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: config.mac.mk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +all: + ${MAKE} gen_pass + ${MAKE} code_pass + +#generate VPATH based on these dirs. +VPATH_DIRS=. ${ROOTPATH}/src/common ${ROOTPATH}/obj/lib/ ${ROOTPATH}/img /usr/lib +#generate the VPATH, subbing :'s for spaces +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +VPATH += $(subst $(SPACE),:,$(VPATH_DIRS)) + +## output libs, objs for userdetails parsers +UD_DIR = ${ROOTPATH}/obj/modules/userdetails +UD_OBJS = ${UD_DIR}*.o ${UD_DIR}/*.so ${UD_DIR}/*.a + +LIBPATHS=${ROOTPATH}/img + +ifdef MODULE +OBJDIR = ${ROOTPATH}/obj/modules/${MODULE} +BEAMDIR = ${ROOTPATH}/obj/beam/${MODULE} +GENDIR = ${ROOTPATH}/obj/genfiles +TESTDIR = ${ROOTPATH}/obj/tests +IMGDIR = ${ROOTPATH}/img +PKGDIR = ${ROOTPATH}/pkg +#GTESTFDIR=${ROOTPATH}/src/test +EXTRACOMMONFLAGS += +ifdef STRICT + EXTRACOMMONFLAGS += -Weffc++ +endif +CUSTOMFLAGS += -D_MACOSX -D__SURELOCK_MODULE=${MODULE} +#CUSTOMFLAGS += -D__SURELOCK_MODULE=${MODULE} +LIBS += $(addsuffix .so, $(addprefix lib, ${MODULE})) +EXTRAINCDIR += ${GENDIR} ${CURDIR} +else +OBJDIR = ${ROOTPATH}/obj/surelock +BEAMDIR = ${ROOTPATH}/obj/beam/surelock +GENDIR = ${ROOTPATH}/obj/genfiles +IMGDIR = ${ROOTPATH}/img +PKGDIR = ${ROOTPATH}/pkg +TESTDIR = ${ROOTPATH}/obj/tests +EXTRAINCDIR += ${GENDIR} ${CURDIR} +CUSTOMFLAGS += -D_MACOSX +#GTESTFDIR=${ROOTPATH}/src/test +endif + +__internal__comma= , +__internal__empty= +__internal__space=$(__internal__empty) $(__internal__empty) +MAKE_SPACE_LIST = $(subst $(__internal__comma),$(__internal__space),$(1)) + +ifdef SURELOCK_DEBUG +ifeq ($(SURELOCK_DEBUG),1) + CUSTOMFLAGS += 
-DSURELOCK_DEBUG=1 +endif +endif + +TRACEPP = ${ROOTPATH}/src/build/trace/tracepp +CUSTOM_LINKER = i686-mcp6-jail ${CUSTOM_LINKER_EXE} +JAIL = ppc64-mcp75-jail + +ifeq ($(USE_ADVANCED_TOOLCHAIN),yes) + CC_RAW = gcc + CXX_RAW = g++ + CC = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CC_RAW} + CXX = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CXX_RAW} + LD = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/ld + OBJDUMP = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/objdump +else + CC_RAW = gcc + CXX_RAW = g++ + CC = ${CC_RAW} + CXX = ${CXX_RAW} + LD = ld + OBJDUMP = objdump +endif + +#TODO: Find correct flags for surelock - copied from POWER7 Hostboot for now +COMMONFLAGS = -O3 ${EXTRACOMMONFLAGS} +CFLAGS += ${COMMONFLAGS} -g \ + ${CUSTOMFLAGS} ${ARCHFLAGS} \ + ${INCFLAGS} +ASMFLAGS = ${COMMONFLAGS} +CXXFLAGS = ${CFLAGS} -fno-rtti -fno-exceptions +LDFLAGS = -lc ${MODLIBS} -macosx_version_min 10.6 + +ifdef USE_PYTHON + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.py +else + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.pl +endif + +INCDIR = ${ROOTPATH}/src/include/ +_INCDIRS = ${INCDIR} ${EXTRAINCDIR} +INCFLAGS = $(addprefix -I, ${_INCDIRS} ) +ASMINCFLAGS = $(addprefix $(lastword -Wa,-I), ${_INCDIRS}) + +OBJECTS = $(addprefix ${OBJDIR}/, ${OBJS}) +LIBRARIES = $(addprefix ${IMGDIR}/, ${LIBS}) + +ifdef IMGS +IMGS_ = $(addprefix ${IMGDIR}/, ${IMGS}) +LIDS = $(foreach lid,$(addsuffix _LIDNUMBER, $(IMGS)),$(addprefix ${IMGDIR}/,$(addsuffix .ruhx, $($(lid))))) +IMAGES = $(addsuffix .bin, ${IMGS_}) $(addsuffix .elf, ${IMGS_}) ${LIDS} +#$(addsuffix .ruhx, ${IMGS_}) +endif + + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.C + mkdir -p ${OBJDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.c + mkdir -p ${OBJDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . + + +${OBJDIR}/%.o : %.S + mkdir -p ${OBJDIR} + ${CC} -c ${ASMFLAGS} $< -o $@ ${ASMINCFLAGS} ${INCFLAGS} -iquote . 
+ +${OBJDIR}/%.dep : %.C + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CXX} -M ${CXXFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.c + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CC} -M ${CFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.S + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CC} -M ${ASMFLAGS} $< -o $@.$$$$ ${ASMINCFLAGS} ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${IMGDIR}/%.so : ${OBJECTS} + mkdir -p ${IMGDIR} + ${LD} -dylib /usr/lib/dylib1.o -L${LIBPATHS} ${LDFLAGS} -o $@ $(OBJECTS) + +${TESTDIR}/%.o : %.c + mkdir -p ${TESTDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . + ${OBJDUMP} -dCS $@ > $(basename $@).list +${TESTDIR}/%.o : %.C + mkdir -p ${TESTDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + ${OBJDUMP} -dCS $@ > $(basename $@).list + +%.d: ${OBJECTS} + cd ${basename $@} && ${MAKE} code_pass + +%.gen_pass: + cd ${basename $@} && ${MAKE} gen_pass + +%.test: + cd ${basename $@} && ${MAKE} test + +#packaging generally requires the code pass to be complete +%.packaging: + cd ${basename $@} && ${MAKE} packaging + +#install generally requires the code pass to be complete +%.install: all + cd ${basename $@} && ${MAKE} install +%.clean: + cd ${basename $@} && ${MAKE} clean + +%.beamdir: + cd ${basename $@} && ${MAKE} beam + +#create a make function that we can use to generically create a program with extra libs +define PROGRAM_template + $(1): $$($(1)_OFILES) $$($(1)_EXTRA_LIBS:%=lib%.so) + ALL_OFILES += $$($(1)_OFILES) +endef + +$(foreach prog,$(PROGRAMS),$(eval $(call PROGRAM_template,$(prog)))) + +$(PROGRAMS): + $(LINK.o) $^ $(LDLIBS) -o $@ + +$(BIN_TESTS): + mkdir -p ${PGMDIR} + ${CC} -c ${CFLAGS} ${INCFLAGS} -iquote . 
-c $@.c -o $(PGMDIR)/$@.o + $(LINK.o) $(CFLAGS) $(LDFLAGS) $(LINKLIBS) ${LIBPATHS} $(PGMDIR)/$@.o -o $(PGMDIR)/$@ + +code_pass: ${OBJECTS} ${SUBDIRS} ${LIBRARIES} ${EXTRA_PARTS} ${PROGRAMS} +ifdef IMAGES + ${MAKE} ${IMAGES} ${IMAGE_EXTRAS} +endif + +gen_pass: + mkdir -p ${GENDIR} + ${MAKE} GEN_PASS + +_GENFILES = $(addprefix ${GENDIR}/, ${GENFILES}) +GEN_PASS: ${_GENFILES} ${SUBDIRS:.d=.gen_pass} + +GENTARGET = $(addprefix %/, $(1)) + +${BEAMDIR}/%.beam : %.C + mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.c + mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.S + echo Skipping ASM file. + +BEAMOBJS = $(addprefix ${BEAMDIR}/, ${OBJS:.o=.beam}) +beam: ${SUBDIRS:.d=.beamdir} ${BEAMOBJS} + +cleanud : + rm -f ${UD_OBJS} + +test: ${SUBDIRS:.d=.test} + + +.PHONY: install +install: ${SUBDIRS:.d=.install} + + +.PHONY: packaging +packaging: ${SUBDIRS:.d=.packaging} + +clean: cleanud ${SUBDIRS:.d=.clean} + (rm -rf ${OBJECTS} ${OBJECTS:.o=.dep} ${OBJECTS:.o=.list} \ + ${OBJECTS:.o=.o.hash} ${BEAMOBJS} ${LIBRARIES} \ + ${IMAGES} ${IMAGES:.bin=.list} ${IMAGES:.bin=.syms} \ + ${IMAGES:.bin=.bin.modinfo} ${IMAGES:.ruhx=.lid} \ + ${IMAGES:.ruhx=.lidhdr} ${IMAGES:.bin=_extended.bin} \ + ${IMAGE_EXTRAS} ${TESTDIR}/* \ + ${EXTRA_OBJS} ${_GENFILES} ${EXTRA_PARTS} ${EXTRA_CLEAN}\ + $gtest.a gtest_main.a *.o *unit_test-kv_results* *~* ) + +cscope: + mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f cscope.* ; \ + find ../../ -name '*.[CHchS]' -type f -print > cscope.files; \ + cscope -bqk) + +ctags: + mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f tags ; \ + ctags ../../src) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),gen_pass) +ifneq ($(MAKECMDGOALS),GEN_PASS) + -include $(OBJECTS:.o=.dep) +endif +endif +endif diff 
--git a/config.mk b/config.mk new file mode 100644 index 00000000..9a805827 --- /dev/null +++ b/config.mk @@ -0,0 +1,46 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: config.mk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +#select a default target architecture +#if no target arch is set, select PPC64BE for convenience. +ifndef TARGET_PLATFORM + TARGET_PLATFORM=PPC64BE +endif +#If the target architecture is set, pass an architecture +#down as a #define to the underlying code +ARCHFLAGS += -DTARGET_ARCH_${TARGET_PLATFORM} + +#Determine if this a linux or AIX system + +UNAME=$(shell uname) +ifeq ($(UNAME),AIX) +include ${ROOTPATH}/config.aix.mk +else +ifeq ($(UNAME),Darwin) +include ${ROOTPATH}/config.mac.mk +else +include ${ROOTPATH}/config.linux.mk +endif +endif diff --git a/config.mk.aix b/config.mk.aix new file mode 100644 index 00000000..62cc0f80 --- /dev/null +++ b/config.mk.aix @@ -0,0 +1,278 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: config.mk.aix $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. 
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+all:
+	${MAKE} gen_pass
+	${MAKE} code_pass
+
+## output libs, objs for userdetails parsers
+UD_DIR = ${ROOTPATH}/obj/modules/userdetails
+UD_OBJS = ${UD_DIR}*.o ${UD_DIR}/*.so ${UD_DIR}/*.a
+
+ifdef MODULE
+OBJDIR = ${ROOTPATH}/obj/modules/${MODULE}
+BEAMDIR = ${ROOTPATH}/obj/beam/${MODULE}
+GENDIR = ${ROOTPATH}/obj/genfiles
+TESTDIR = ${ROOTPATH}/obj/tests
+IMGDIR = ${ROOTPATH}/img
+PKGDIR = ${ROOTPATH}/pkg
+#GTESTFDIR=${ROOTPATH}/src/test
+
+EXTRACOMMONFLAGS += -fPIC -Bsymbolic -Bsymbolic-functions
+ifdef STRICT
+ EXTRACOMMONFLAGS += -Weffc++
+endif
+#CUSTOMFLAGS += -D__SURELOCK_MODULE=${MODULE}
+#For AIX use the following instead.
+CUSTOMFLAGS += -D_AIX -D__SURELOCK_MODULE=${MODULE} +LIBS += $(addsuffix .so, $(addprefix lib, ${MODULE})) +EXTRAINCDIR += ${GENDIR} ${CURDIR} +else +OBJDIR = ${ROOTPATH}/obj/surelock +BEAMDIR = ${ROOTPATH}/obj/beam/surelock +GENDIR = ${ROOTPATH}/obj/genfiles +IMGDIR = ${ROOTPATH}/img +PKGDIR = ${ROOTPATH}/pkg +TESTDIR = ${ROOTPATH}/obj/tests +EXTRAINCDIR += ${GENDIR} ${CURDIR} +#GTESTFDIR=${ROOTPATH}/src/test +endif + +__internal__comma= , +__internal__empty= +__internal__space=$(__internal__empty) $(__internal__empty) +MAKE_SPACE_LIST = $(subst $(__internal__comma),$(__internal__space),$(1)) + +ifdef SURELOCK_DEBUG +ifeq ($(SURELOCK_DEBUG),1) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=1 +else +ifndef MODULE +ifneq (,$(filter kernel,$(call MAKE_SPACE_LIST, $(HOSTBOOT_DEBUG)))) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=kernel +endif +else +ifneq (,$(filter $(MODULE), $(call MAKE_SPACE_LIST, $(HOSTBOOT_DEBUG)))) + CUSTOMFLAGS += -DHOSTBOOT_DEBUG=$(MODULE) +endif +endif +endif +endif + + +ifeq ($(USE_ADVANCED_TOOLCHAIN),yes) + CC_RAW = gcc + CXX_RAW = g++ + CC = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CC_RAW} + CXX = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/${CXX_RAW} + LD = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/ld + #OBJDUMP = ${JAIL} ${ADV_TOOLCHAIN_PATH}/bin/objdump +else + CC_RAW = gcc + CXX_RAW = g++ + CC = ${CC_RAW} + CXX = ${CXX_RAW} + LD = ld + #OBJDUMP = objdump +endif + +#TODO: need to figure out if we can run beam... 
MCH +BEAMVER = beam-3.5.2 +BEAMPATH = /afs/rch/projects/esw/beam/${BEAMVER} +BEAMCMD = i686-mcp6-jail ${BEAMPATH}/bin/beam_compile +BEAMFLAGS = \ + --beam::source=${BEAMPATH}/tcl/beam_default_parms.tcl \ + --beam::source=${ROOTPATH}/src/build/beam/compiler_c_config.tcl \ + --beam::source=${ROOTPATH}/src/build/beam/compiler_cpp_config.tcl \ + --beam::exit0 \ + -o /dev/null + + +#TODO: Find correct flags for surelock - copied from POWER7 Hostboot for now +COMMONFLAGS = -O3 ${EXTRACOMMONFLAGS} +CFLAGS = ${COMMONFLAGS} -g \ + -Wall -Werror ${CUSTOMFLAGS} +ASMFLAGS = ${COMMONFLAGS} +CXXFLAGS = ${CFLAGS} -fno-rtti -fno-exceptions -Wall +#LDFLAGS = --sort-common ${COMMONFLAGS} +LDFLAGS = -bnoentry -bM:SRE -bexpall -lc + + +ifdef USE_PYTHON + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.py +else + TESTGEN = ${ROOTPATH}/src/usr/cxxtest/cxxtestgen.pl +endif + +INCDIR = ${ROOTPATH}/src/include/ +_INCDIRS = ${INCDIR} ${EXTRAINCDIR} +INCFLAGS = $(addprefix -I, ${_INCDIRS} ) +ASMINCFLAGS = $(addprefix $(lastword -Wa,-I), ${_INCDIRS}) + +OBJECTS = $(addprefix ${OBJDIR}/, ${OBJS}) +T_OBJECTS = $(addprefix ${TESTDIR}/, ${TESTS}) +LIBRARIES = $(addprefix ${IMGDIR}/, ${LIBS}) + +ifdef IMGS +IMGS_ = $(addprefix ${IMGDIR}/, ${IMGS}) +LIDS = $(foreach lid,$(addsuffix _LIDNUMBER, $(IMGS)),$(addprefix ${IMGDIR}/,$(addsuffix .ruhx, $($(lid))))) +IMAGES = $(addsuffix .bin, ${IMGS_}) $(addsuffix .elf, ${IMGS_}) ${LIDS} +#$(addsuffix .ruhx, ${IMGS_}) +IMAGE_EXTRAS = $(addprefix ${IMGDIR}/, hbotStringFile) +endif + + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.C + mkdir -p ${OBJDIR} + ${CXX} -c ${CXXFLAGS} $< -o $@ ${INCFLAGS} -iquote . + #${OBJDUMP} -dCS $@ > $(basename $@).list + +${OBJDIR}/%.o ${OBJDIR}/%.list : %.c + mkdir -p ${OBJDIR} + ${CC} -c ${CFLAGS} $< -o $@ ${INCFLAGS} -iquote . + #${OBJDUMP} -dCS $@ > $(basename $@).list + +${OBJDIR}/%.o : %.S + mkdir -p ${OBJDIR} + ${CC} -c ${ASMFLAGS} $< -o $@ ${ASMINCFLAGS} ${INCFLAGS} -iquote . 
+ +${OBJDIR}/%.dep : %.C + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CXX} -M ${CXXFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.c + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CC} -M ${CFLAGS} $< -o $@.$$$$ ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${OBJDIR}/%.dep : %.S + mkdir -p ${OBJDIR}; \ + rm -f $@; \ + ${CC} -M ${ASMFLAGS} $< -o $@.$$$$ ${ASMINCFLAGS} ${INCFLAGS} -iquote .; \ + sed 's,\($*\)\.o[ :]*,${OBJDIR}/\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ + +${IMGDIR}/%.so : ${OBJECTS} + mkdir -p ${IMGDIR} + ${LD} ${LDFLAGS} -o $@ $(OBJECTS) + +ifdef TESTS +${TESTDIR}/% : %.c + mkdir -p ${TESTDIR} + ${CC} ${CFLAGS} $< -o $@ ${INCFLAGS} ${LIBPATHS} ${LINKLIBS} -iquote . +endif + +%.d: ${OBJECTS} + cd ${basename $@} && ${MAKE} code_pass + +%.gen_pass: + cd ${basename $@} && ${MAKE} gen_pass + +%.test: + cd ${basename $@} && ${MAKE} test + +#packaging generally requires the code pass to be complete +%.packaging: + cd ${basename $@} && ${MAKE} packaging + +#install generally requires the code pass to be complete +%.install: all + cd ${basename $@} && ${MAKE} install +%.clean: + cd ${basename $@} && ${MAKE} clean + +%.beamdir: + cd ${basename $@} && ${MAKE} beam + +code_pass: ${OBJECTS} ${SUBDIRS} ${LIBRARIES} ${EXTRA_PARTS} ${T_OBJECTS} +ifdef IMAGES + ${MAKE} ${IMAGES} ${IMAGE_EXTRAS} +endif + +gen_pass: + mkdir -p ${GENDIR} + ${MAKE} GEN_PASS + +_GENFILES = $(addprefix ${GENDIR}/, ${GENFILES}) +GEN_PASS: ${_GENFILES} ${SUBDIRS:.d=.gen_pass} + +GENTARGET = $(addprefix %/, $(1)) + +${BEAMDIR}/%.beam : %.C + mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ --beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.c + mkdir -p ${BEAMDIR} + ${BEAMCMD} -I ${INCDIR} ${CXXFLAGS} ${BEAMFLAGS} $< \ + --beam::complaint_file=$@ 
--beam::parser_file=/dev/null + +${BEAMDIR}/%.beam : %.S + echo Skipping ASM file. + +BEAMOBJS = $(addprefix ${BEAMDIR}/, ${OBJS:.o=.beam}) +beam: ${SUBDIRS:.d=.beamdir} ${BEAMOBJS} + +cleanud : + rm -f ${UD_OBJS} + +test: ${SUBDIRS:.d=.test} + + +.PHONY: install +install: ${SUBDIRS:.d=.install} + + +.PHONY: packaging +packaging: ${SUBDIRS:.d=.packaging} + +clean: cleanud ${SUBDIRS:.d=.clean} + (rm -f ${OBJECTS} ${OBJECTS:.o=.dep} ${OBJECTS:.o=.list} \ + ${OBJECTS:.o=.o.hash} ${BEAMOBJS} ${LIBRARIES} \ + ${IMAGES} ${IMAGES:.bin=.list} ${IMAGES:.bin=.syms} \ + ${IMAGES:.bin=.bin.modinfo} ${IMAGES:.ruhx=.lid} \ + ${IMAGES:.ruhx=.lidhdr} ${IMAGES:.bin=_extended.bin} \ + ${IMAGE_EXTRAS} \ + ${EXTRA_OBJS} ${_GENFILES} ${EXTRA_PARTS} ${EXTRA_CLEAN}\ + $gtest.a gtest_main.a *.o *unit_test-kv_results* *~* ) + +cscope: ${SUBDIRS} + mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f cscope.* ; \ + find ../../ -name '*.[CHchS]' -type f -fprint cscope.files; \ + cscope -bqk) + +ctags: ${SUBDIRS} + mkdir -p ${ROOTPATH}/obj/cscope + (cd ${ROOTPATH}/obj/cscope ; rm -f tags ; \ + ctags --recurse=yes --fields=+S ../../src) + + diff --git a/customrc.p8be b/customrc.p8be new file mode 100644 index 00000000..d479e4ab --- /dev/null +++ b/customrc.p8be @@ -0,0 +1,30 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8be $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=gmake +export CUSTOMFLAGS="" +export BLOCK_FILEMODE_ENABLED=1 +export TARGET_PLATFORM="PPC64BE" + diff --git a/customrc.p8beblk b/customrc.p8beblk new file mode 100644 index 00000000..e7bce347 --- /dev/null +++ b/customrc.p8beblk @@ -0,0 +1,30 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8beblk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=gmake +export CUSTOMFLAGS="" +export BLOCK_FILEMODE_ENABLED=0 +export TARGET_PLATFORM="PPC64BE" + diff --git a/customrc.p8beblkkermc b/customrc.p8beblkkermc new file mode 100644 index 00000000..00b6862f --- /dev/null +++ b/customrc.p8beblkkermc @@ -0,0 +1,32 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: customrc.p8beblkkermc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=gmake +export CUSTOMFLAGS="" +export BLOCK_FILEMODE_ENABLED=0 +export BLOCK_MC_ENABLED=1 +export BLOCK_KERNEL_MC_ENABLED=1 +export TARGET_PLATFORM="PPC64BE" + diff --git a/customrc.p8bemc b/customrc.p8bemc new file mode 100644 index 00000000..428a4d5f --- /dev/null +++ b/customrc.p8bemc @@ -0,0 +1,29 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8bemc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG + +export MAKECMD=gmake +export BLOCK_FILEMODE_ENABLED=1 +export TARGET_PLATFORM="PPC64BE" +export BLOCK_MC_ENABLED=1 diff --git a/customrc.p8el b/customrc.p8el new file mode 100644 index 00000000..85b46245 --- /dev/null +++ b/customrc.p8el @@ -0,0 +1,29 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8el $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=make +export CUSTOMFLAGS="-mcpu=power8 -mtune=power8" +export BLOCK_FILEMODE_ENABLED=1 +export TARGET_PLATFORM="PPC64EL" diff --git a/customrc.p8elblk b/customrc.p8elblk new file mode 100644 index 00000000..54d4d42d --- /dev/null +++ b/customrc.p8elblk @@ -0,0 +1,29 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8elblk $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=make +export CUSTOMFLAGS="-mcpu=power8 -mtune=power8" +export BLOCK_FILEMODE_ENABLED=0 +export TARGET_PLATFORM="PPC64EL" diff --git a/customrc.p8elblkkermc b/customrc.p8elblkkermc new file mode 100644 index 00000000..e64da3b0 --- /dev/null +++ b/customrc.p8elblkkermc @@ -0,0 +1,31 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8elblkkermc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG + +export MAKECMD=make +export CUSTOMFLAGS="-mcpu=power8 -mtune=power8" +export BLOCK_FILEMODE_ENABLED=0 +export BLOCK_MC_ENABLED=1 +export BLOCK_KERNEL_MC_ENABLED=1 +export TARGET_PLATFORM="PPC64EL" diff --git a/customrc.p8elblkmc b/customrc.p8elblkmc new file mode 100644 index 00000000..b1eea0d7 --- /dev/null +++ b/customrc.p8elblkmc @@ -0,0 +1,30 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8elblkmc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=make +export CUSTOMFLAGS="-mcpu=power8 -mtune=power8" +export BLOCK_FILEMODE_ENABLED=0 +export BLOCK_MC_ENABLED=1 +export TARGET_PLATFORM="PPC64EL" diff --git a/customrc.p8elmc b/customrc.p8elmc new file mode 100644 index 00000000..6ccd8a2a --- /dev/null +++ b/customrc.p8elmc @@ -0,0 +1,30 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: customrc.p8elmc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +export MAKECMD=make +export CUSTOMFLAGS="-mcpu=power8 -mtune=power8" +export BLOCK_FILEMODE_ENABLED=1 +export TARGET_PLATFORM="PPC64EL" +export BLOCK_MC_ENABLED=1 diff --git a/env.bash b/env.bash new file mode 100644 index 00000000..9356b9ec --- /dev/null +++ b/env.bash @@ -0,0 +1,103 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: env.bash $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + + + +#allow a user to specify a custom RC file if needed +#e.g. disable the advanced toolchain with "export USE_ADVANCED_TOOLCHAIN=no" +if [ -e ./customrc ]; then + echo "INFO: Running customrc" + set -x + . 
./customrc
+ set +x
+fi
+
+## setup git hooks for this session
+## adds prologs and Change-IDs for gerrit
+export SURELOCKROOT=`pwd`
+TOOLSDIR=${SURELOCKROOT}/src/build/tools
+if [ -e $TOOLSDIR/setupgithooks.sh ]; then
+ echo "Setting up gerrit hooks."
+ $TOOLSDIR/setupgithooks.sh
+fi
+
+
+export MCP_PATH=/opt/mcp/toolchains/fr_SL1_2014-05-12-194021
+
+#configure advanced toolchain for linux
+AT70PATH=/opt/at7.0
+AT71PATH=/opt/at7.1
+AT80PATH=/opt/at8.0
+
+if [ -d $MCP_PATH ]; then
+ echo "INFO: Found MCP: $MCP_PATH ."
+ echo "INFO: Enabling JAILchain for builds."
+ export JAIL=ppc64-mcp75-jail
+else
+ echo "INFO: MCP Jail disabled."
+fi
+
+
+
+if [ -d $AT70PATH ]; then
+ export ADV_TOOLCHAIN_PATH=$AT70PATH
+elif [ -d $AT71PATH ]; then
+ export ADV_TOOLCHAIN_PATH=$AT71PATH
+elif [ -d $AT80PATH ]; then
+ export ADV_TOOLCHAIN_PATH=$AT80PATH
+else
+ echo "WARNING: no toolchain was found. Will fall back to system defaults. YMMV."
+fi
+
+#don't add MCP path to the $PATH... this isn't absolutely necessary
+#export PATH=${MCP_PATH}/opt/mcp/bin:${MCP_PATH}/usr/bin:${PATH}
+export PATH=/opt/mcp/bin:${PATH}
+
+export PATH=${PATH}:`pwd`/src/build/tools
+
+
+
+#enable advanced toolchain, if no one has an opinion
+if [ -z "$USE_ADVANCED_TOOLCHAIN" ]; then
+ #enabling advanced toolchain by default. If you don't want this, set USE_ADVANCED_TOOLCHAIN in your environment
+ export USE_ADVANCED_TOOLCHAIN=yes
+fi
+if [ "$USE_ADVANCED_TOOLCHAIN" = "yes" ]; then
+ echo "INFO: Enabling Advanced Toolchain: $ADV_TOOLCHAIN_PATH"
+ export PATH=${ADV_TOOLCHAIN_PATH}/bin:${ADV_TOOLCHAIN_PATH}/sbin:${PATH}
+else
+ echo "INFO: Advanced Toolchain Disabled."
+fi + + +#fix up sandboxes in ODE, if we need to +if [ -n "${SANDBOXROOT}" ]; then + if [ -n "${SANDBOXNAME}" ]; then + export SANDBOXBASE="${SANDBOXROOT}/${SANDBOXNAME}" + fi +fi + +#set the default ulimit -c for a developer +ulimit -c unlimited diff --git a/makefile b/makefile new file mode 100644 index 00000000..b7faf028 --- /dev/null +++ b/makefile @@ -0,0 +1,31 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +SUBDIRS = src.d +ROOTPATH = . + +EXTRA_PARTS = + +include ./config.mk + diff --git a/src/block/Makefile.ade b/src/block/Makefile.ade new file mode 100644 index 00000000..1b86d3f8 --- /dev/null +++ b/src/block/Makefile.ade @@ -0,0 +1,56 @@ +# %Z%%M% %I% %W% %G% %U% +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/block/Makefile.ade $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +CFLAGS += -qcpluscmt #allow C++ style comments +CFLAGS += -Dinline=__inline #alloc inline functions +CFLAGS += -D_KERNEL_MASTER_CONTXT #Use kernel master context +CFLAGS += -D_MASTER_CONTXT #Use kernel master context +CFLAGS += -DTARGET_ARCH_PPC64BE #User POWER Big Endian +VPATH +=./common +INCFLAGS += -I./common + +EXPORTS = -bE:libcflsh_block.exp + +SHARED_LIBRARIES = libcflsh_block.a + +SHARED_OFILES = \ + cflash_scsi_user.o \ + cflash_tools_user.o \ + cflash_block.o \ + cflash_block_int.o \ + cflash_block_kern_mc.o \ + cflash_block_sisl.o + +SHARED64_OFILES = ${SHARED_OFILES:.o=.64o} + +LIBS = -lc -lpthreads + +EXPLIB_TARGETS = export_libcflsh_block.a + +ILIST = libcflsh_block.a +IDIR = /usr/lib/ + +.include <${RULES_MK}> diff --git a/src/block/README.md b/src/block/README.md new file mode 100755 index 00000000..970647e8 --- /dev/null +++ b/src/block/README.md @@ -0,0 +1,881 @@ +**CAPI Flash Block Layer API** + +**1.1 cblk\_init** + +***Purpose*** + +Initializes CAPI block library + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_int(void \*arg, int flags) + +***Parameters*** + +| | | +|---------------|---------------------------------------------------------| +| **Parameter** | **Description** | +| arg | Currently unused (set to NULL) | +| flags | Specifies flags for initialization. Currently set to 0. | + +***Description*** + +The cblk\_init API initializes the CAPI block library prior to use. cblk\_init must be called before any other API in the library is called. 
+ +***Return Values*** + +Returns 0 on success; otherwise it is an error. + +**1.2 cblk\_term** + +***Purpose*** + +Cleans up CAPI block library resources after the library is no longer used. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_term(void \*arg, int flags) + +***Parameters*** + +| | | +|---------------|---------------------------------------------------------| +| **Parameter** | **Description** | +| arg | Currently unused (set to NULL) | +| flags | Specifies flags for initialization. Currently set to 0. | + +***Description*** + +The cblk\_term API terminates the CAPI block library after use. + +***Return Values*** + +Returns 0 on success; otherwise it is an error. + +**1.3 cblk\_open** + +***Purpose*** + +Open a collection of contiguous blocks (currently called a “chunk”) on a CAPI flash storage device. for which I/O (read and writes) can be done. A chunk can be thought of as a lun, which the provides access to sectors 0 thru n-1 (where n is the size of the chunk in sectors). If virtual luns are specified then that chunk is a subset of sectors on a physical lun. 
+ +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +chunk\_id\_t chunk\_id = cblk\_open(const char \*path, int max\_num\_requests, int mode, uint64\_t ext\_arg, int flags) + +***Parameters*** + +***Parameters*** + +| | | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| path | This the CAPI disk special filename 
(i.e. /dev/sg0 for Linux, /dev/hdisk1 for AIX) | +| max\_num\_requests | This indicates the maximum number of commands that can be queued to the adapter for this chunk at a given time. If this value is 0, then block layer will choose a default size. If the value specified it too large then the cblk\_open request will fail with an ENOMEM errno. | +| mode | Specifies the access mode for the child process (O\_RDONLY, O\_WRONLY, O\_RDWR). | +| ext\_arg | Reserved for future use | +| flags | This is a collection of bit flags. The **CBLK\_OPN\_VIRT\_LUN**indicates a virtual lun on the one physical lun will be provisioned. If the **CBLK\_OPN\_VIRT\_LUN** is not specified, then direct access to the full physical lun will be provided. The **CBLK\_OPN\_VIRT\_LUN**flag must be set for Redis shards. The **CBLK\_OPN\_NO\_INTRP\_THREADS**flag indicates that the cflash block library will not start any back ground threads for processing/harvesting of asynchronous completions from the CAPI adapter. Instead the process using this library must either call cblk\_aresult, or cblk\_listio library calls to poll for I/O completions. The **CBLK\_OPN\_MPIO\_FO**flag (valid for only AIX) indicates that the cflash block library will use Multi-path I/O failover (i.e. one path will be used for all I/O unless path specific errors are encountered, in which case an alternate path will be used if available. To determine the paths for a CAPI flash disk, use the command lspath -l hdiskN). The **CBLK\_OPN\_RESERVE** flag (valid for only AIX) indicates the cflash block library will use the “reserve policy” attribute associated with the disk in terms of establishing disk reservations. 
The **CBLK\_OPN\_RESERVE** flag can not be used in conjunction with the **CBLK\_OPN\_MPIO\_FO.** The **CBLK\_OPN\_FORCED\_RESERVE** flag (valid for only AIX) has the same behavior as the **CBLK\_OPEN\_RESERVE** flag with the one addition, that when the device is opened it will break any outstanding disk reservations on the first open of this disk. The **CBLK\_OPN\_FORCED\_RESERVE**flag can not be used in conjunction with the **CBLK\_OPN\_MPIO\_FO.** | + +***Description*** + +The cblk\_open API creates a “chunk” of blocks on a CAPI flash lun. This chunk will be used for I/O (cblk\_read/cblk\_write) requests. The returned chunk\_id is assigned to a specific path via a specific adapter transparently to the caller. The underlying physical sectors used by a chunk will not be directly visible to users of the block layer. + +Upon successful completion, a chunk id representing the newly created chunk instance is returned to the caller to be used for future API calls. + +***Return Values*** + +Returns NULL\_CHUNK\_ID on error; otherwise it is a chunk\_id handle. + +**1.4 cblk\_close** + +***Purpose*** + +Closes a collection of contiguous blocks (called a “chunk”) on a CAPI flash storage device. for which I/O (read and writes) can be done. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_close(chunk\_id\_t chunk\_id, int flags)) + +***Description*** + +This service releases the blocks associated with a chunk to be reused by others. Prior to them being reused by others the data blocks will need to be “scrubbed” to remove user data if the **CBLK\_SCRUB\_DATA\_FLG** is set. 
+ +***Parameters*** + +| | | +|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk which is being closed (released for reuse) | +| flags | This is a collection of bit flags. The **CBLK\_SCRUB\_DATA\_FLG**indicates data blocks should be scrubbed before they can be reused by others,which is only valid for virtual luns (chunks opened with **CBLK\_OPN\_VIRT\_LUN**flag). | + +***Return Values*** + +Returns 0 on success; otherwise it is an error. + +**1.5 cblk\_get\_lun\_size** + +***Purpose*** + +Returns the “size” (number of blocks) of the physical lun to which this chunk is associated. This call is not valid for a virtual lun. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_get\_lun\_size(chunk\_id\_t chunk\_id, size\_t \*size, int flags)) + +***Description*** + +This service returns the number of blocks of the physical lun associated with this chunk. The cblk\_get\_lun\_size service requires one has done a cblk\_open to receive a valid chunk\_id. + +***Parameters*** + +| | | +|---------------|-------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk whose size is about to be changed. | +| size | Total number of 4K block for this physical lun. | +| flags | This is a collection of bit flags. | + +***Return Values*** + +Returns 0 on success; otherwise it is error. + +**1.6 cblk\_get\_size** + +***Purpose*** + +Returns the “size” (number of blocks) assigned to a specific (virtual lun) chunk id, which is a virtual lun (i.e. the cblk\_open call that returned this id, had the **CBLK\_OPN\_VIRT\_LUN** flag). This service is not valid for physical luns. 
+ +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_get\_size(chunk\_id\_t chunk\_id, size\_t \*size, int flags)) + +***Description*** + +This service returns the number of blocks allocated to this chunk. The cblk\_get\_size service requires one has done a cblk\_open to receive a valid chunk\_id. + +***Parameters*** + +| | | +|---------------|-------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk whose size is about to be changed. | +| size | Number of 4K block to be used for this chunk. | +| flags | This is a collection of bit flags. | + +***Return Values*** + +Returns 0 on success; otherwise it is error. + +**1.7 cblk\_set\_size** + +***Purpose*** + +Assign “size” blocks to a specific chunk id which is a virtual lun (i.e. the cblk\_open call that returned this id, had the **CBLK\_OPN\_VIRT\_LUN** flag). If blocks are already assigned to this chunk id, then one can increase/decrease the size by specifying a larger/smaller size, respectively. This service is not valid for physical luns. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_set\_size(chunk\_id\_t chunk\_id, size\_t size, int flags)) + +***Description*** + +This service allocates “size” blocks to this chunk. The cblk\_set\_size call must be done prior to any cblk\_read or cblk\_write calls to this chunk. The cblk\_set\_size service requires one has done a cblk\_open to receive a valid chunk\_id. + +If there were blocks originally assigned to this chunk and they are not being reused after cblk\_set\_size allocates the new blocks and the CBLK\_SCRUB\_DATA\_FLG is set in the flags parameter, then those originally blocks will be “scrubbed” prior to allowing them to be reused by other cblk\_set\_size operations. + +Upon successful completion, the chunk will has LBAs 0 thru size – 1 that can be read/written. 
+ +***Parameters*** + +| | | +|---------------|---------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk whose size is about to be changed. | +| size | Number of 4K block to be used for this chunk. | +| flags | This is a collection of bit flags. The CBLK\_SCRUB\_DATA\_FLG indicates data blocks should be scrubbed before they can be reused by others. | + +***Return Values*** + +Returns 0 on success; otherwise it is error. + +**1.8 cblk\_get\_stats** + +***Purpose*** + +Returns statistics for a specific chunk id. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +typedef struct chunk\_stats\_s { + +uint64\_t max\_transfer\_size; /\* Maximum transfer size in \*/ + +/\* blocks of this chunk. \*/ + +uint64\_t num\_reads; /\* Total number of reads issued \*/ + +/\* via cblk\_read interface \*/ + +uint64\_t num\_writes; /\* Total number of writes issued \*/ + +/\* via cblk\_write interface \*/ + +uint64\_t num\_areads; /\* Total number of async reads \*/ + +/\* issued via cblk\_aread interface \*/ + +uint64\_t num\_awrites; /\* Total number of async writes \*/ + +/\* issued via cblk\_awrite interface\*/ + +uint32\_t num\_act\_reads; /\* Current number of reads active \*/ + +/\* via cblk\_read interface \*/ + +uint32\_t num\_act\_writes; /\* Current number of writes active \*/ + +/\* via cblk\_write interface \*/ + +uint32\_t num\_act\_areads; /\* Current number of async reads \*/ + +/\* active via cblk\_aread interface \*/ + +uint32\_t num\_act\_awrites; /\* Current number of async writes \*/ + +/\* active via cblk\_awrite interface\*/ + +uint32\_t max\_num\_act\_writes; /\* High water mark on the maximum \*/ + +/\* number of writes active at once \*/ + +uint32\_t max\_num\_act\_reads; /\* High water mark on the maximum \*/ + +/\* number of reads active at once \*/ + +uint32\_t 
max\_num\_act\_awrites; /\* High water mark on the maximum \*/ + +/\* number of asyync writes active \*/ + +/\* at once. \*/ + +uint32\_t max\_num\_act\_areads; /\* High water mark on the maximum \*/ + +/\* number of asyync reads active \*/ + +/\* at once. \*/ + +uint64\_t num\_blocks\_read; /\* Total number of blocks read \*/ + +uint64\_t num\_blocks\_written; /\* Total number of blocks written \*/ + +uint64\_t num\_errors; /\* Total number of all error \*/ + +/\* responses seen \*/ + +uint64\_t num\_aresult\_no\_cmplt; /\* Number of times cblk\_aresult \*/ + +/\* returned with no command \*/ + +/\* completion \*/ + +uint64\_t num\_retries; /\* Total number of all commmand \*/ + +/\* retries. \*/ + +uint64\_t num\_timeouts; /\* Total number of all commmand \*/ + +/\* time-outs. \*/ + +uint64\_t num\_fail\_timeouts; /\* Total number of all commmand \*/ + +/\* time-outs that led to a command \*/ + +/\* failure. \*/ + +uint64\_t num\_no\_cmds\_free; /\* Total number of times we didm't \*/ + +/\* have free command available \*/ + +uint64\_t num\_no\_cmd\_room ; /\* Total number of times we didm't \*/ + +/\* have room to issue a command to \*/ + +/\* the AFU. \*/ + +uint64\_t num\_no\_cmds\_free\_fail; /\* Total number of times we didn't \*/ + +/\* have free command available and \*/ + +/\* failed a request because of this\*/ + +uint64\_t num\_fc\_errors; /\* Total number of all FC \*/ + +/\* error responses seen \*/ + +uint64\_t num\_port0\_linkdowns; /\* Total number of all link downs \*/ + +/\* seen on port 0. \*/ + +uint64\_t num\_port1\_linkdowns; /\* Total number of all link downs \*/ + +/\* seen on port 1. \*/ + +uint64\_t num\_port0\_no\_logins; /\* Total number of all no logins \*/ + +/\* seen on port 0. \*/ + +uint64\_t num\_port1\_no\_logins; /\* Total number of all no logins \*/ + +/\* seen on port 1. \*/ + +uint64\_t num\_port0\_fc\_errors; /\* Total number of all general FC \*/ + +/\* errors seen on port 0. 
\*/ + +uint64\_t num\_port1\_fc\_errors; /\* Total number of all general FC \*/ + +/\* errors seen on port 1. \*/ + +uint64\_t num\_cc\_errors; /\* Total number of all check \*/ + +/\* condition responses seen \*/ + +uint64\_t num\_afu\_errors; /\* Total number of all AFU error \*/ + +/\* responses seen \*/ + +uint64\_t num\_capi\_false\_reads; /\* Total number of all times \*/ + +/\* poll indicated a read was ready \*/ + +/\* but there was nothing to read. \*/ + +uint64\_t num\_capi\_adap\_resets; /\* Total number of all adapter \*/ + +/\* reset errors. \*/ + +uint64\_t num\_capi\_afu\_errors; /\* Total number of all \*/ + +/\* CAPI error responses seen \*/ + +uint64\_t num\_capi\_afu\_intrpts; /\* Total number of all \*/ + +/\* CAPI AFU interrupts for command \*/ + +/\* responses seen. \*/ + +uint64\_t num\_capi\_unexp\_afu\_intrpts; /\* Total number of all of \*/ + +/\* unexpected AFU interrupts \*/ + +uint64\_t num\_active\_threads; /\* Current number of threads \*/ + +/\* running. \*/ + +uint64\_t max\_num\_act\_threads; /\* Maximum number of threads \*/ + +/\* running simultaneously. \*/ + +uint64\_t num\_cache\_hits; /\* Total number of cache hits \*/ + +/\* seen on all reads \*/ + +} chunk\_stats\_t; + +int rc = cblk\_get\_stats(chunk\_id\_t chunk\_id, chunk\_stats\_t \*stats, int flags)) + +***Description*** + +This service returns statistics for a specific chunk\_id. + +***Parameters*** + +| | | +|---------------|-------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk whose size is about to be changed. | +| stats | Address of a chunk\_stats\_t structure. | +| flags | This is a collection of bit flags. | + +***Return Values*** + +Returns 0 on success; otherwise it is error. + +**1.9 cblk\_read** + +***Purpose*** + +Read 4K blocks from the chunk at the specified logical block address (LBA) into the buffer specified. 
It should be noted that his LBA is not the same as the LUNs LBA, since the chunk does not necessarily start at the lun's LBA 0 + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_read(chunk\_id\_t chunk\_id, void \*buf, off\_t lba, size\_t nblocks, int flags)); + +***Description*** + +This service reads data from the chunk and places that data into the supplied buffer. This call will block until the read completes with success or error. The cblk\_set\_size call must be done prior to any cblk\_read, cblk\_write, cblk\_aread, or cblk\_awrite calls to this chunk. + +***Parameters*** + +| | | +|---------------|----------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk which is being read. | +| buf | Buffer to which data is read into from the chunk must be aligned on 16 byte boundaries. | +| lba | Logical Block Address (4K offset) inside chunk. | +| nblocks | Specifies the size of the transfer in 4K sectors. Upper bound is 16 MB for physical lun. Upper bound for virtual lun is 4K | +| flags | This is a collection of bit flags. | + +***Return Values*** + +| | | +|------------------|-----------------------------------------| +| **Return Value** | **Description** | +| -1 | Error and errno is set for more details | +| 0 | No data was read. | +| n >0 | Number, n, of blocks read. | + +**1.10 cblk\_write** + +***Purpose*** + +Write 4K blocks to the chunk at the specified logical block address (LBA) using the data from the buffer. 
It should be noted that his LBA is not the same as the LUNs LBA, since the chunk does not start at LBA 0 + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +int rc = cblk\_write(chunk\_id\_t chunk\_id, void \*buf, off\_t lba, size\_t nblocks, int flags)); + +***Description*** + +This service writes data from the chunk and places that data into the supplied buffer. This call will block until the write completes with success or error. The cblk\_set\_size call must be done prior to any cblk\_write calls to this chunk. + +***Parameters*** + +| | | +|---------------|----------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk which is being written. | +| buf | Buffer to which data is written from onto the chunk must be aligned on 16 byte boundaries. | +| lba | Logical Block Address (4K offset) inside chunk. | +| nblocks | Specifies the size of the transfer in 4K sectors. Upper bound is 16 MB for physical lun. Upper bound for virtual lun is 4K | +| flags | This is a collection of bit flags. | + +***Return Values*** + +| | | +|------------------|-----------------------------------------| +| **Return Value** | **Description** | +| -1 | Error and errno is set for more details | +| 0 | No data was written | +| n >0 | Number, n, of blocks written. | + +**1.11 cblk\_aread** + +***Purpose*** + +Read 4K blocks from the chunk at the specified logical block address (LBA) into the buffer specified. 
It should be noted that his LBA is not the same as the LUNs LBA, since the chunk does not start at LBA 0 + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +typedef enum { + +CBLK\_ARW\_STATUS\_PENDING = 0, /\* Command has not completed \*/ + +CBLK\_ARW\_STATUS\_SUCCESS = 1 /\* Command completed successfully \*/ + +CBLK\_ARW\_STATUS\_INVALID = 2 /\* Caller's request is invalid \*/ + +CBLK\_ARW\_STATUS\_FAIL = 3 /\* Command completed with error \*/ + +} cblk\_status\_type\_t; + +typedef struct cblk\_arw\_status\_s { + +cblk\_status\_type\_t status; /\* Status of command \*/ + +/\* See errno field for additional \*/ + +/\* details on failure \*/ + +size\_t blocks\_transferred;/\* Number of block transferred by \*/ + +/\* this reqeuest. \*/ + +int errno; /\* Errno when status indicates \*/ + +/\* CBLK\_ARW\_STAT\_FAIL \*/ + +} cblk\_arw\_status\_t; + +int rc = cblk\_aread(chunk\_id\_t chunk\_id, void \*buf, off\_t lba, size\_t nblocks, int \*tag, cblk\_arw\_status\_t \*status, int flags)); + +***Description*** + +This service reads data from the chunk and places that data into the supplied buffer. This call will not block to wait for the read to complete. A subsequent cblk\_aresult call must be invoked to poll on completion. The cblk\_set\_size call must be done prior to any cblk\_aread calls to this chunk. + +***Parameters*** + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDescription
chunk_idThe handle for the chunk which is being read.
bufBuffer to which data is read into from the chunk must be aligned on 16 byte boundaries.
lbaLogical Block Address (4K offset) inside chunk.
nblocksSpecifies the size of the transfer in 4K sectors. Upper bound is 16 MB for physical lun. Upper bound for virtual lun is 4K
tagReturned Identifier that allows the caller to uniquely identify each command issued.
status

Address of a 64-bit field provided by the caller which the capiblock library will update when this command completes. This can be used by an application in place of using the cblk_aresult service.

+

It should be noted that the CAPI adapter can not do this directly; it would require software threads to update the status region. This field is not used if the CBLK_OPN_NO_INTRP_THREADS flag was specified for the cblk_open that returned this chunk_id.

flagsThis is a collection of bit flags. The CBLK_ARW_WAIT_CMD_FLAGSwill cause this service to block to wait for a free command to issue the request. Otherwise this service could return a value of -1 with an errno of EWOULDBLOCK (if there is no free command currently available). The CBLK_ARW_USER_TAG_FLAGSindicates the caller is specifying a user defined tag for this request. The caller would then need to use this tag with cblk_aresult and set its CBLK_ARESULT_USER_TAG flag. The CBLK_ARW_USER_STATUS_FLAGindicates the caller has set the status parameter which it expects will be updated when the command completes.
+ +***Return Values*** + +| | | +|------------------|----------------------------------------------------------------------------------| +| **Return Value** | **Description** | +| -1 | Error and errno is set for more details | +| 0 | Successfully issued | +| n >0 | Indicates read completed (possibly from cache) and Number, n, of blocks written. | + +**1.12 cblk\_awrite** + +***Purpose*** + +Write one 4K block to the chunk at the specified logical block address (LBA) using the data from the buffer. It should be noted that his LBA is not the same as the LUNs LBA, since the chunk does not start at LBA 0 + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +typedef enum { + +CBLK\_ARW\_STAT\_NOT\_ISSUED = 0, /\* Command is has not been issued \*/ + +CBLK\_ARW\_STAT\_PENDING = 1, /\* Command has not completed \*/ + +CBLK\_ARW\_STAT\_SUCCESS = 2 /\* Command completed successfully \*/ + +CBLK\_ARW\_STAT\_FAIL = 3 /\* Command completed with error \*/ + +} cblk\_status\_type\_t; + +typedef struct cblk\_arw\_status\_s { + +cblk\_status\_type\_t status; /\* Status of command \*/ + +/\* See errno field for additional \*/ + +/\* details on failure \*/ + +size\_t blocks\_transferred;/\* Number of block transferred by \*/ + +/\* this reqeuest. \*/ + +int errno; /\* Errno when status indicates \*/ + +/\* CBLK\_ARW\_STAT\_FAIL \*/ + +} cblk\_arw\_status\_t; + +int rc = cblk\_awrite(chunk\_id\_t chunk\_id, void \*buf, off\_t lba, size\_t nblocks, int \*tag, cblk\_arw\_status\_t \*status, int flags)); + +***Description*** + +This service writes data from the chunk and places that data into the supplied buffer. This call will not block waiting for the write to complete. A subsequent cblk\_aresult call must be invoked to poll on completion. The cblk\_set\_size call must be done prior to any cblk\_awrite calls to this chunk. + +***Parameters*** + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDescription
chunk_idThe handle for the chunk which is being written.
bufBuffer from which data is written onto the chunk must be aligned on 16 byte boundaries.
lbaLogical Block Address (4K offset) inside chunk.
nblocksSpecifies the size of the transfer in 4K sectors. Upper bound is 16 MB for physical lun. Upper bound for virtual lun is 4K
tagReturned identifier that allows the caller to uniquely identify each command issued.
status

Address of a 64-bit field provided by the caller which the capiblock library will update when this command completes. This can be used by an application in place of using the cblk_aresult service.

+

It should be noted that the CAPI adapter can not do this directly; it would require software threads to update the status region. This field is not used if the CBLK_OPN_NO_INTRP_THREADS flag was specified for the cblk_open that returned this chunk_id.

flagsThis is a collection of bit flags. The CBLK_ARW_WAIT_CMD_FLAGSwill cause this service to block to wait for a free command to issue the request. Otherwise this service could return a value of -1 with an errno of EWOULDBLOCK (if there is no free command currently available). The CBLK_ARW_USER_TAG_FLAGSindicates the caller is specifying a user defined tag for this request. The caller would then need to use this tag with cblk_aresult and set its CBLK_ARESULT_USER_TAG flag. The CBLK_ARW_USER_STATUS_FLAGindicates the caller has set the status parameter which it expects will be updated when the command completes.
+ +***Return Values*** + +Returns 0 on success; otherwise it returns -1 and errno is set. + +**1.13 cblk\_aresult** + +***Purpose*** + +Return status and completion information for asynchronous requests. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +rc = cblk\_aresult(chunk\_id\_t chunk\_id, int \*tag, uint64\_t \*status, int flags); + +***Description*** + +This service returns an indication if the pending request issued via cblk\_aread or cblk\_awrite, which may have completed and if so its status. + +***Parameters*** + +| | | +|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk which is being written. | +| tag | Pointer to tag caller is waiting for completion. If the CBLK\_ARESULT\_NEXT\_TAG is set, then this field returns the tag for the next asynchronous completion | +| status | Pointer to status. The status will be returned when a request completes. | +| flags | Flags passed from the caller to cblk\_aresult. The flag **CBLK\_ARESULT\_BLOCKING**is set by the caller if they want cblk\_aresult to block until a command completes (provided there are active commands). If the **CBLK\_ARESULT\_NEXT\_TAG**flag is set, then this call returns whenever any asynchronous I/O request completes. The **CBLK\_ARESULT\_USER\_TAG** flag indicates the caller checking for status of an asynchronous request that was issued with a user specified tag. 
| + +***Return Values*** + +| | | +|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| **Return Value** | **Description** | +| -1 | Error and errno is set for more details | +| 0 | Returned without error but no tag is set. This may occur if an I/O request has not yet completed and the CBLK\_ARESULT\_BLOCKING flag is not set. | +| n >0 | Number, n, of blocks read/written. | + +**1.14 cblk\_clone\_after\_fork** + +***Purpose*** + +Allows a child process to access the same virtual lun as the parent process. The child process must do this operation immediately after the fork, using the parent's chunk id in order to access that storage. If the child does not do this operation then it will not have any access to the parent's chunk ids. This service is not valid for physical luns. This is service is only valid for linux. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +rc = cblk\_clone\_after\_fork(chunk\_id\_t chunk\_id, int mode, int flags); + +***Description*** + +This service allows a child process to access data from the parents process. + +***Parameters*** + +| | | +|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| chunk\_id | The handle for the chunk which is in use by the parent process. If this call returns successfully, then this chunk id can also be used by the child process. | +| mode | Specifies the access mode for the child process (O\_RDONLY, O\_WRONLY, O\_RDWR). NOTE: child process can not have greater access than parent. cblk\_open is O\_RDWR for the initial parent. Descendant processes can have less access. | +| flags | Flags passed from the caller. 
| + +***Return Values*** + +| | | +|------------------|-----------------------------------------| +| **Return Value** | **Description** | +| 0 | The request completed successfully. | +| -1 | Error and errno is set for more details | + +**1.15 cblk\_listio** + +***Purpose*** + +Issues multiple I/O requests to CAPI flash disk with a single call and/or waits for multiple I/O requests from a CAPI flash disk to complete.. + +***Syntax*** + +\#include <capiblock.h> for linux or <sys/capiblock.h> for AIX + +typedef struct cblk\_io { + +uchar version; /\* Version of structure \*/ + +\#define CBLK\_IO\_VERSION\_0 “I” /\* Initial version 0 \*/ + +int flags; /\* Flags for request \*/ + +\#define CBLK\_IO\_USER\_TAG 0x0001 /\* Caller is specifying a user defined \*/ + +/\* tag. \*/ + +\#define CBLK\_IO\_USER\_STATUS 0x0002 /\* Caller is specifying a status location \*/ + +/\* to be updated \*/ + +\#define CBLK\_IO\_PRIORITY\_REQ 0x0004/\* This is (high) priority request that \*/ + +/\* should be expediated vs non-priority \*/ + +/\* requests \*/ + +uchar request\_type; /\* Type of request \*/ + +\#define CBLK\_IO\_TYPE\_READ 0x01 /\* Read data request \*/ + +\#define CBLK\_IO\_TYPE\_WRITE 0x02 /\* Write data request \*/ + +void \*buf; /\* Data buffer for request \*/ + +offset\_t lba; /\* Starting Logical block address for \*/ + +/\* request. \*/ + +size\_t nblocks; /\* Size of request based on number of \*/ + +/\* blocks. \*/ + +int tag; /\* Tag for request. 
\*/ + +cblk\_arw\_status\_t stat; /\* Status of request \*/ + +} cblk\_io\_t + +int rc = cblk\_listio(chunk\_id\_t chunk\_id,cblk\_io\_t \*issue\_io\_list\[\],int issue\_items,cblk\_io\_t \*pending\_io\_list\[\],int pending\_items, cblk\_io\_t \*wait\_io\_list\[\], int wait\_items, cblk\_io\_t \*completion\_io\_list\[\],int \*completion\_items, uint64\_t timeout, int flags)); + +***Description*** + +This service provides an interface to issue multiple I/O requests with one call and/or poll for completions of multiple I/O requests via one call. The individual requests are specified by the **cblk\_io\_t** type, which includes a chunk id, data buffer, starting Logical Block Address, a transfer size in 4K blocks. This service can update the I/O requests associated cblk\_io\_t (i.e. update status, tags and flags based on disposition of the I/O request). + +This service can not be used to check for completion on I/O requests issued via cblk\_aread or cblk\_awrite. + +***Parameters*** + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDescription
chunk_idThe handle for the chunk which these I/O requests are associated.
issue_io_list

This specifies an array of I/O requests to issue to CAPI flash disks. Each individual array element of type cblk_io_t specifies an individual I/O request containing chunk id, data buffer, starting Logical Block Address, a transfer size in 4K blocks. These array elements can be updated by this service to indicate completion status and/or tags. The status field of the individual cblk_io_t array elements will be initialized by this service.

+

If this field is null, then this service can be used to wait for completions of other requests issued from previous cblk_listio calls by setting the pending_io_list

issue_io_itemsSpecifies the number of array elements in the issue_io_list array
pending_io_listThis specifies an array of I/O requests that were issued via a previous cblk_listio request. This allows one to poll for I/O request completions, without requiring them to wait for all completions (i.e. setting the completion_io_list parameter).
pending_io_itemsSpecifies the number of array elements in the pending_io_list array
wait_io_listArray of I/O requests, which for which this service will block until these requests complete. These I/O requests must also be specified in the either the issue_io_list or the pending_io_list. If an I/O request in the issue_io_list fails to be issued due to invalid settings by the caller or no resources, then that I/O request's elements in the io_list will be updated to indicate this (its status will be left as CBLK_ARW_STAT_NOT_ISSUED) and this service will not wait on that I/O request. Thus all I/O requests in the wait_io_list which completed will have a status of CBLK_ARW_STAT_SUCCESS or CBLK_ARW_STAT_FAIL. I/O requests which did not complete will not have their status updated.
wait_itemsSpecifies the number of array elements in the wait_io_list array
completion_io_listThe caller will set this to an initialized (zeroed) array of I/O requests and set the completion_items to the number of array elements in this array. When this service returns the array will contain I/O requests specified in the issue_io_list and/or pending_io_list that were completed by the CAPI device, but which were not specified in the wait_io_list. If an I/O request in the io_list fails to be issued due to invalid settings by the caller or no resources, then that I/O requests element will not be copied to the completion_io_list and its status in the io_list will be updated to indicate this (its status will be left as CBLK_ARW_STAT_NOT_ISSUED) . Thus all I/O requests returned in this list will have a status of CBLK_ARW_STAT_SUCCESS or CBLK_ARW_STAT_FAIL.
completion_itemsThe caller sets this to the address of the number of array elements it placed in the completion_io_list. When this service returns, this value is updated to the number of I/O requests placed in the completion_io_list.
timeoutTimeout in microseconds to wait for all I/O requests in the wait_io_list. This is only valid if the wait_io_list is not null. If any of the I/O requests in the wait_io_list do not complete within the time-out value, then this service returns a value of -1 and sets errno to a value ETIMEDOUT (when this occurs some commands may have completed in the wait_io_list, Thus the caller needs to check each request in the wait_io_list to determine which ones completed.) It is the caller's responsibility to remove completed items from the pending_io_list before the next invocation of this service. A timeout value of 0, indicates that this service will block until requests in the wait_io_list complete.
flagsThis is a collection of bit flags. The CBLK_LISTIO_WAIT_ISSUE_CMD flagswill cause this service to block to wait for a free commands to issue all the requests even if the timeout value is exceeded and CBLK_LISTIO_WAIT_CMD_FLAG is set. Otherwise this service could return a value of -1 with an errno of EWOULDBLOCKif there are not enough free commands currently available (for this situation, some commands may have successfully queued. The caller would need to examine the individual I/O requests in the issue_io_list to determine which ones failed)
+ +***Return Values*** + +| | | +|------------------|-----------------------------------------| +| **Return Value** | **Description** | +| -1 | Error and errno is set for more details | +| 0 | This service completed without error. | + + diff --git a/src/block/cflash_block.c b/src/block/cflash_block.c new file mode 100644 index 00000000..b2a36c32 --- /dev/null +++ b/src/block/cflash_block.c @@ -0,0 +1,4547 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + + +//ident on linux - testing if we can see this in the build(s) +//to find out if filemode is enabled, run "ident *.so" + +#define CFLSH_BLK_FILENUM 0x0100 +#include "cflash_block_internal.h" +#include "cflash_block_inline.h" + +#ifndef _AIX +#include +REVISION_TAGS(block); +#ifdef BLOCK_FILEMODE_ENABLED +#ident "$Debug: BLOCK_FILEMODE_ENABLED $" +#endif +#ifdef CFLASH_LITTLE_ENDIAN_HOST +#ident "$Debug: LITTLE_ENDIAN $" +#else +#ident "$Debug: BIG_ENDIAN $" +#endif +#ifdef _MASTER_CONTXT +#ident "$Debug: MASTER_CONTEXT $" +#endif +#ifdef _KERNEL_MASTER_CONTXT +#ident "$Debug: KERNEL_MASTER_CONTEXT $" +#endif +#ifdef _COMMON_INTRPT_THREAD +#ident "$Debug: COMMON_INTRPT_THREAD $" +#endif +#else +#include +#endif + +#define CFLSH_BLK_FILENUM 0x0100 +#include "cflash_block_internal.h" +#include "cflash_block_inline.h" +cflsh_block_t cflsh_blk; + + +char *cblk_log_filename = NULL; /* Trace log filename */ + /* This traces internal */ + /* activities */ +int cblk_log_verbosity = 0; /* Verbosity of traces in */ + /* log. */ +FILE *cblk_logfp = NULL; /* File pointer for */ + /* trace log */ +FILE *cblk_dumpfp = NULL; /* File pointer for */ + /* dumpfile */ +int cblk_dump_level; /* Run time level threshold */ + /* required to initiate */ + /* live dump. */ +int dump_sequence_num; /* Dump sequence number */ + + +int cblk_notify_log_level; /* Run time level threshold */ + /* notify/error logging */ + + +pthread_mutex_t cblk_log_lock = PTHREAD_MUTEX_INITIALIZER; + +pthread_mutex_t cblk_init_lock = PTHREAD_MUTEX_INITIALIZER; + +uint32_t num_thread_logs = 0; /* Number of thread log files */ + + +size_t cblk_cache_size = CFLASH_CACHE_SIZE; +uint64_t cblk_lun_id = 0; +#ifndef _AIX +/* + * This __attribute constructor is not valid for the AIX xlc + * compiler. 
+ */ +static void _cflsh_blk_init(void) __attribute__((constructor)); +static void _cflsh_blk_free(void) __attribute__((destructor)); +#else +static int cflsh_blk_initialize = 0; +static int cflsh_blk_init_finish = 0; +static int cflsh_blk_term = 0; + +#endif + + +/* + * NAME: CBLK_GET_CHUNK_HASH + * + * FUNCTION: Find chunk from chunk id in the hash table + * + * + * INPUTS: + * chunk_id - Chunk identifier + * + * RETURNS: + * NULL = No chunk found. + * pointer = chunk found. + * + */ + +inline cflsh_chunk_t *CBLK_GET_CHUNK_HASH(chunk_id_t chunk_id, int check_rdy) + +{ + cflsh_chunk_t *chunk = NULL; + + + chunk = cflsh_blk.hash[chunk_id & CHUNK_HASH_MASK]; + + + while (chunk) { + + if ( (ulong)chunk & CHUNK_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"Corrupted chunk address = 0x%llx, index = 0x%x", + (uint64_t)chunk, (chunk_id & CHUNK_HASH_MASK)); + + cflsh_blk.num_bad_chunk_ids++; + chunk = NULL; + + break; + + } + + + if (chunk->index == chunk_id) { + + + /* + * Found the specified chunk. Let's + * validate it. + */ + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk + */ + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + chunk = NULL; + } else if ((!(chunk->flags & CFLSH_CHNK_RDY)) && + (check_rdy)) { + + + /* + * This chunk is not ready + */ + + CBLK_TRACE_LOG_FILE(1,"chunk not ready, chunk_id = %d", + chunk_id); + chunk = NULL; + } + break; + } + + chunk = chunk->next; + + } /* while */ + + + + + + + return (chunk); +} + + + +/* + * NAME: CBLK_VALIDATE_RW + * + * FUNCTION: Initial validation of read/write request. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * buf - Buffer for read/write data + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. 
+ * + * RETURNS: + * 0 for good completion + * -1 for error + * + */ + +inline int CBLK_VALIDATE_RW(chunk_id_t chunk_id, void *buf, cflash_offset_t lba,size_t nblocks) + +{ + + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + + if (buf == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null buf passed"); + + errno = EINVAL; + return -1; + } + + if (lba < 0) { + + CBLK_TRACE_LOG_FILE(1,"Invalid LBA = 0x%llx",lba); + + errno = EINVAL; + return -1; + + } + + return 0; +} + + +/* + * NAME: _cflsh_blk_init + * + * FUNCTION: Internal intializer/constructor + * for the CAPI flash block library. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NONE + * + */ + +void _cflsh_blk_init(void) +{ + char *cache_size = getenv("CFLSH_BLK_CACHE_SIZE"); + char *env_port_mask = getenv("CFLSH_BLK_PORT_SEL_MASK"); + char *env_timeout = getenv("CFLSH_BLK_TIMEOUT"); + char *env_timeout_units = getenv("CFLSH_BLK_TIMEOUT_UNITS"); + char *env_sigsegv = getenv("CFLSH_BLK_SIGSEGV_DUMP"); + char *env_sigusr1 = getenv("CFLSH_BLK_SIGUSR1_DUMP"); + char *env_dump_level = getenv("CFLSH_BLK_DUMP_LEVEL"); + char *env_notify_log_level = getenv("CFLSH_BLK_NOTIFY_LOG_LEVEL"); +#ifdef _SKIP_READ_CALL + char *env_adap_poll_delay = getenv("CFLSH_BLK_ADAP_POLL_DLY"); +#endif /* _SKIP_READ_CALL */ + int rc; + + /* + * Require that the cflash_cmd_mgm_t structure be a multiple + * of 64 bytes in size. Since we are looking at the sizeof + * of structure the preprocessor #error/#warning directives + * can not be used. 
+ */ + + CFLASH_COMPILE_ASSERT((sizeof(cflsh_cmd_mgm_t) % CFLASH_BLOCK_CMD_ALIGNMENT) == 0); + + +#ifdef _LINUX_MTRACE + mtrace(); +#endif + + + CFLASH_BLOCK_RWLOCK_INIT(cflsh_blk.global_lock); + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + + + rc = pthread_atfork(cblk_prepare_fork,cblk_parent_post_fork,cblk_child_post_fork); + + if (rc) { +#ifndef _AIX + fprintf(stderr,"pthread_atfork failed rc = %d, errno = %d\n",rc,errno); +#endif + } + + cblk_init_mc_interface(); + + + cflsh_blk.eyec = CFLSH_EYEC_CBLK; + + cflsh_blk.next_chunk_starting_lba = 0; + + cflsh_blk.timeout = CAPI_SCSI_IO_TIME_OUT; + cflsh_blk.timeout_units = CFLSH_G_TO_SEC; + + if (env_timeout) { + + cflsh_blk.timeout = atoi(env_timeout); + } + + if (env_timeout_units) { + + if (!strcmp(env_timeout_units,"MILLI")) { + + cflsh_blk.timeout_units = CFLSH_G_TO_MSEC; + + } else if (!strcmp(env_timeout_units,"MICRO")) { + + cflsh_blk.timeout_units = CFLSH_G_TO_USEC; + + } + } + + + if (env_port_mask) { + + /* + * If there is an environment variable specifying port + * selection mask then use it. + */ + cflsh_blk.port_select_mask = atoi(env_port_mask); + + } else { + + /* + * Allow both ports to be used + */ + + cflsh_blk.port_select_mask = 0x3; + } + + bzero((void *)cflsh_blk.hash,(sizeof(cflsh_chunk_t *) * MAX_NUM_CHUNKS_HASH)); + + if (cache_size) { + + cblk_cache_size = atoi(cache_size); + } +#ifdef _SKIP_READ_CALL + + + cflsh_blk.adap_poll_delay = CFLASH_BLOCK_ADAP_POLL_DELAY; + if (env_adap_poll_delay) { + + /* + * A poll delay has been specified. So use it. 
+ */ + + cflsh_blk.adap_poll_delay = atoi(env_adap_poll_delay); + } +#endif /* _SKIP_READ_CALL */ + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + + cblk_setup_trace_files(FALSE); + + + if (!cblk_valid_endianess()) { + + CBLK_TRACE_LOG_FILE(1,"This program is compiled for different endianess then the host is is running"); + } + + + CBLK_TRACE_LOG_FILE(2,"cflsh_blk = %p",&cflsh_blk); + + if (env_sigsegv) { + + /* + * The caller has requested that on SIGSEGV + * signals (segmentation fault), that we dump + * data just prior the application crash (and + * potentially coredump). This will provide + * more data to aid in analyzing the core dump. + */ + + if (cblk_setup_sigsev_dump()) { + CBLK_TRACE_LOG_FILE(1,"failed to set up sigsev dump handler"); + } + + } + + if (env_sigusr1) { + + /* + * The caller has requested that on SIGUSR1 + * signals that we dump data. + */ + + if (cblk_setup_sigusr1_dump()) { + CBLK_TRACE_LOG_FILE(1,"failed to set up sigusr1 dump handler"); + } + + } + + if (env_dump_level) { + + if (cblk_setup_dump_file()) { + CBLK_TRACE_LOG_FILE(1,"failed to set up dump file"); + + } else { + + cblk_dump_level = atoi(env_dump_level); + } + + } + + + if (env_notify_log_level) { + + cblk_notify_log_level = atoi(env_notify_log_level); + + } + + cflsh_blk.caller_pid = getpid(); + return; +} + +/* + * NAME: _cflsh_blk_fee + * + * FUNCTION: Free library resources. 
+ * + * + * INPUTS: + * NONE + * + * RETURNS: + * NONE + * + */ + +void _cflsh_blk_free(void) +{ + + + if (cflsh_blk.flags & CFLSH_G_SYSLOG) { + + closelog(); + + } + + + if (num_thread_logs) { + + free(cflsh_blk.thread_logs); + } + +#ifdef _LINUX_MTRACE + muntrace(); +#endif + + cblk_cleanup_mc_interface(); + + if (cflsh_blk.process_name) { + + free(cflsh_blk.process_name); + } + + + CBLK_TRACE_LOG_FILE(3,"\nLIBRARY STATISTICS ..."); + +#ifdef BLOCK_FILEMODE_ENABLED + CBLK_TRACE_LOG_FILE(3,"FILEMODE"); +#endif /* BLOCK_FILEMODE_ENABLED */ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + CBLK_TRACE_LOG_FILE(3,"Little Endian"); +#else + CBLK_TRACE_LOG_FILE(3,"Big Endian"); +#endif + +#ifdef _MASTER_CONTXT + CBLK_TRACE_LOG_FILE(3,"Master Context"); +#else + CBLK_TRACE_LOG_FILE(3,"No Master Context"); +#endif + CBLK_TRACE_LOG_FILE(3,"cblk_log_verbosity 0x%x",cblk_log_verbosity); + CBLK_TRACE_LOG_FILE(3,"flags 0x%x",cflsh_blk.flags); + CBLK_TRACE_LOG_FILE(3,"lun_id 0x%llx",cflsh_blk.lun_id); + CBLK_TRACE_LOG_FILE(3,"next_chunk_id 0x%llx",cflsh_blk.next_chunk_id); + CBLK_TRACE_LOG_FILE(3,"num_active_chunks 0x%x",cflsh_blk.num_active_chunks); + CBLK_TRACE_LOG_FILE(3,"num_max_active_chunks 0x%x",cflsh_blk.num_max_active_chunks); + CBLK_TRACE_LOG_FILE(3,"num_bad_chunk_ids 0x%x",cflsh_blk.num_bad_chunk_ids); + + return; + +} + + + +#ifdef _AIX + +/* + * NAME: cflsh_blk_init + * + * FUNCTION: Call library initializing code. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NONE + * + */ + +static void cflsh_blk_init(void) +{ + + /* + * The first thread will invoke this routine, but + * but subsequent threads must not be allowed to + * proceed until the _cflsh_blk_init has completed. + * + * NOTE: the use of both a lock and a fetch_and_or + * is overkill here. A better solution would be + * to find a way to use fetch_and_or and only using + * the lock while the _cflsh_blk_init. 
+ */ + + //pthread_mutex_lock(&cblk_init_lock); + + if (fetch_and_or(&cflsh_blk_initialize,1)) { + + + + /* + * If cflsh_blk_initialize is set, then + * we have done all the cflash block code + * initialization. As result we can return + * now provided we ensure any thread that is + * doing initializatoin for this library + * has completed. + */ + + while (!cflsh_blk_init_finish) { + + usleep(1); + + } + + return; + } + + + /* + * We get here if the cflash_block code has not been + * initialized yet. + */ + + _cflsh_blk_init(); + + fetch_and_or(&cflsh_blk_init_finish,1); + + //pthread_mutex_unlock(&cblk_init_lock); + + return; +} + + +#endif /* AIX */ + + + + + + +/* + * NAME: CBLK_IN_CACHE + * + * FUNCTION: Check if data to be read starting + * at lba is in_cache. + * If so then copy data to user's buffer + * + * + * INPUTS: + * chunk - Chunk the read is associated. + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. + * + * + * RETURNS: + * FALSE - Data is not in cache + * TRUE - Data is in cache. 
+ * + * + */ + +inline int CBLK_IN_CACHE(cflsh_chunk_t *chunk,void *buf, cflash_offset_t lba, size_t nblocks) +{ + int rc = FALSE; + cflsh_cache_line_t *line; + cflash_offset_t cur_lba, end_lba; + void *cur_buf; + uint64_t tag; + uint32_t inx; + int lru; + int mru; + int e; + + + if (chunk == NULL) { + + return rc; + } + + if ((chunk->cache == NULL) || + (chunk->cache_size == 0)) { + + return rc; + } + + end_lba = lba + nblocks; + + for (cur_lba = lba, cur_buf = buf; cur_lba < end_lba; cur_lba++, cur_buf += CAPI_FLASH_BLOCK_SIZE) { + + inx = CFLSH_BLK_GETINX (cur_lba,chunk->l2setsz); + tag = CFLSH_BLK_GETTAG (cur_lba,chunk->l2setsz); + line = &chunk->cache [inx]; + + rc = FALSE; + + for (e = 0; e < CFLSH_BLK_NSET; e++) { + if (line->entry[e].valid && line->entry[e].tag == tag) { + lru = line->lrulist; + if (lru == e) + line->lrulist = line->entry[e].next; + else { + mru = line->entry[lru].prev; + if (e != mru) { + line->entry[line->entry[e].prev].next = + line->entry[e].next; + line->entry[line->entry[e].next].prev = + line->entry[e].prev; + + line->entry[e].next = lru; + line->entry[e].prev = mru; + line->entry[lru].prev = e; + line->entry[mru].next = e; + } + } + + + bcopy(line->entry[e].data,cur_buf, nblocks * CAPI_FLASH_BLOCK_SIZE); + chunk->stats.num_cache_hits++; + rc = TRUE; + + break; + } + } /* for */ + + if (rc == FALSE) { + /* + * If any blocks are not in the cache the mark the entire. + * request as not being in the cache and force a full re-read of + * the data. + * + * In the future maybe we could improve this to just do the read for + * blocks not in the cache. + */ + break; + } + } /* outer for */ + + + + return rc; +} + + + + +/* + * NAME: CBLK_BUILD_ISSUE_RW_CMD + * + * FUNCTION: Builds and issues a READ16/WRITE16 command + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ + +inline int CBLK_BUILD_ISSUE_RW_CMD(cflsh_chunk_t *chunk, int *cmd_index, void *buf,cflash_offset_t lba, + size_t nblocks, int flags, int lib_flags,uint8_t op_code, + cblk_arw_status_t *status) +{ + int rc = 0; + int local_flags = 0; + scsi_cdb_t *cdb = NULL; + int pthread_rc; +#ifndef _COMMON_INTRPT_THREAD + cflsh_async_thread_cmp_t *async_data; +#endif + cflsh_cmd_mgm_t *cmd = NULL; + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk lba = 0xllx, chunk->index = %d", + lba,chunk->index); + errno = EINVAL; + return -1; + } + + if (chunk->flags & CFLSH_CHNK_HALTED) { + + /* + * This chunk is in a halted state. Wait + * the chunk to be resumed. + */ + CBLK_TRACE_LOG_FILE(5,"chunk in halted state lba = 0x%llx, chunk->index = %d", + lba,chunk->index); + + /* + * NOTE: Even if we have no background thread, this is still valid. + * If we are being used by a single threaded process, then there + * will never be anything waiting to wake up. If we are being used + * by a multi-thread process, then there could be threads blocked + * waiting to resume. + * + * The assumption here is that who ever halts the commands will + * resume them before exiting this library. 
+ */ + + pthread_rc = pthread_cond_wait(&(chunk->path[chunk->cur_path]->resume_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed for resume_event rc = %d errno = %d", + pthread_rc,errno); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EIO; + return -1; + } + } + + if (chunk->flags & CFLSH_CHNK_CLOSE) { + + /* + * If this chunk is closing then + * return EINVAL + */ + CBLK_TRACE_LOG_FILE(1,"chunk is closing lba = 0x%llx, chunk->index = %d", + lba,chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } + + + if (chunk->flags & CFLSH_CHUNK_FAIL_IO) { + + /* + * This chunk is in a failed state. All + * I/O needs to be failed. + */ + CBLK_TRACE_LOG_FILE(5,"chunk in failed state lba = 0x%llx, chunk->index = %d", + lba,chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EIO; + return -1; + } + + + + + + if (chunk->num_blocks < (lba + nblocks)) { + /* + * This request exceeds the capacity of + * this chunk. + */ + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CBLK_TRACE_LOG_FILE(5,"Data request is beyond end of chunk: num_blocks = 0x%llx lba = 0x%llx, chunk->index = %d", + chunk->num_blocks, lba,chunk->index); + errno = EINVAL; + return -1; + } + + + + if ((op_code == SCSI_READ_16) && + (CBLK_IN_CACHE(chunk,buf,lba,nblocks))) { + + /* + * Data is in cache. So no need + * to issue requests to hardware. 
+ */ + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CBLK_TRACE_LOG_FILE(5,"Data read from cache rc = %d,errno = %d, chunk->index = %d", + rc,errno,chunk->index); + + rc = nblocks; + + if (flags & CBLK_ARW_USER_STATUS_FLAG) { + + /* + * Set user status + */ + + status->blocks_transferred = rc; + status->status = CBLK_ARW_STATUS_SUCCESS; + } + + return rc; + } + + if ((lib_flags & CFLASH_ASYNC_OP) && + !(flags & CBLK_ARW_WAIT_CMD_FLAGS)) { + + /* + * If this is an async read/write that + * did not indicate we should wait for a command + * then call cblk_find_free_cmd, indicating + * we do not want to wait for a free command. + */ + + rc = cblk_find_free_cmd(chunk,&cmd,0); + + if (rc) { + + errno = EWOULDBLOCK; + + CBLK_TRACE_LOG_FILE(1,"could not find a free cmd, num_active_cmds = %d, errno = %d, chunk->index = %d", + chunk->num_active_cmds,errno,chunk->index); + + } + + } else { + rc = cblk_find_free_cmd(chunk,&cmd,CFLASH_WAIT_FREE_CMD); + + + if (rc) { + + errno = EBUSY; + + CBLK_TRACE_LOG_FILE(1,"could not find a free cmd, num_active_cmds = %d, errno = %d, chunk->index = %d", + chunk->num_active_cmds,errno,chunk->index); + } + } + + + if (rc) { + + + CBLK_TRACE_LOG_FILE(1,"could not find a free cmd, num_active_cmds = %d, errno = %d, chunk->index = %d", + chunk->num_active_cmds,errno,chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return -1; + } + + *cmd_index = cmd->index; + + if (flags & CBLK_ARW_USER_STATUS_FLAG) { + + /* + * Set user status + */ + chunk->cmd_info[cmd->index].flags |= CFLSH_CMD_INFO_USTAT; + chunk->cmd_info[cmd->index].user_status = status; + chunk->cmd_info[cmd->index].path_index = chunk->cur_path; + + status->status = CBLK_ARW_STATUS_PENDING; + } + + if (op_code == SCSI_READ_16) { + + local_flags = CFLASH_READ_DIR_OP; + } else if (op_code == SCSI_WRITE_16) { + + local_flags = CFLASH_WRITE_DIR_OP; + + } + + CBLK_BUILD_ADAP_CMD(chunk,cmd,buf,(CAPI_FLASH_BLOCK_SIZE * nblocks),local_flags); + + + cdb = CBLK_GET_CMD_CDB(chunk,cmd); + 
cdb->scsi_op_code = op_code; + + + /* + * TODO: ?? Currently we are requiring callers of this + * interface to to use 4K alignment, but the SCSI + * device maybe is using 512 or 4K sectors. However + * we will only officially support 4K sectors when this + * code ships. So this current code should only be needed + * internally. + */ + + + CFLASH_BUILD_RW_16(cdb,(chunk->start_lba + lba)*(chunk->blk_size_mult),nblocks*(chunk->blk_size_mult)); + +#ifndef _COMMON_INTRPT_THREAD + if (lib_flags & CFLASH_ASYNC_OP) { + + /* + * If this is an async read/write then we need to + * start a thread to wait for its completion, since the + * caller of this routine expects to issue the request and + * later collect the status. + * + * NOTE: + * + * We need to start the async completion thread now, because if it + * fails we can inform the user (via an errno EAGAIN) that we + * could not issue the request now before we issue the request + * to the adapter. If we waited to start this thread after + * we issued the request to the adapter, and the thread create + * failed, we could not block and wait for the command to complete. + * Since there is no mechanism to abort AFU commands, we would have no + * mechanism to properly clean up. The caller of this routine might think + * the issuing of the command failed (due to error seen for the the pthread_create), + * and never check back for completion. Furthermore since the AFU is now + * handling the request it could DMA data into the users buffers, but the + * user may believe the buffers are available for reuse (thus causing + * data corruption). Since we can cancel a thread, we can call + * pthread_cancel if we faile to issue the command to the adapter. 
+ * + */ + + async_data = &(cmd->cmdi->async_data); + async_data->chunk = chunk; + async_data->cmd_index = cmd->index; + + pthread_rc = pthread_create(&(cmd->cmdi->thread_id),NULL,cblk_async_recv_thread,async_data); + + if (pthread_rc) { + + chunk->stats.num_failed_threads++; + + CBLK_TRACE_LOG_FILE(5,"pthread_create failed rc = %d,errno = %d, cmd_index = %d,lba = 0x%llx, num_active_cmds = 0x%x, num_active_threads = 0x%x, chunk->index = %d", + pthread_rc,errno, cmd->index,chunk->start_lba + lba,chunk->num_active_cmds,chunk->stats.num_active_threads,chunk->index); + + CBLK_FREE_CMD(chunk,cmd); + + errno = EAGAIN; + CFLASH_BLOCK_UNLOCK(chunk->lock); + return -1; + } + + /* + * We successfully started the thread. + * Update statistics reflecting this. + */ + + chunk->stats.num_success_threads++; + + chunk->stats.num_active_threads++; + + chunk->stats.max_num_act_threads = MAX(chunk->stats.max_num_act_threads,chunk->stats.num_active_threads); + + } + +#endif /* !_COMMON_INTRPT_THREAD */ + + if (CBLK_ISSUE_CMD(chunk,cmd,buf,chunk->start_lba + lba,nblocks,0)) { + + /* + * Failed to issue the request. Let's clean up + * and fail this request. + */ + +#ifndef _COMMON_INTRPT_THREAD + if (lib_flags & CFLASH_ASYNC_OP) { + + /* + * If this is an async I/O then we just + * created a thread to wait for its completion. + * Since we failed here, we need to cancel + * that thread now. This can be some what + * tricky. First our async receive completion, + * will be cancelable (default), but with the + * a deferred type (default). Thus when we cancel + * it, the thread will only be canceled at + * cancelation points. The code for that thread + * was written to create safe cancelation points + * (i.e. calls to pthread_testcancel) where it + * should not be holding any resources-like locks. 
+ * As a result these cancelation points alone have some + * limitations (for example if the thread is waiting + * on a signal from us, we would would first need + * to signal it so that it could wake up. In + * addition since it would be locked on chunk->lock, + * it would be blocked from proceeding to a cancelation + * point until after we unlocked here. + * + * To resolve these issues, we will signal here + * (the same as we would in the case where + * CBLK_ISSUE_CMD succeeded) to wake it up. + * and then unlock. We will also set the + * CFLSH_ASYNC_IO_SNT prior to signaling and unlocking. + * Thus if it has not reached that point in the code + * yet will not sleep and proceed to unlocking + * and reaching a cancelation point. + */ + + + cmd->cmdi->flags |= CFLSH_ASYNC_IO_SNT; + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + + pthread_rc = pthread_cancel(cmd->cmdi->thread_id); + + if (pthread_rc) { + + chunk->stats.num_fail_canc_threads++; + + CBLK_TRACE_LOG_FILE(5,"pthread_cancel failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } else { + chunk->stats.num_canc_threads++; + + /* + * Since the pthread_ancel will only + * effect this thread at cancelation + * points, it is possible that it maybe + * waiting on the chunk->lock or the + * thread event signal above (actually + * because of the way this is currently + * implemented it should not be waiting on + * thread event, since we have not yet released + * the chunk->lock since creating this thread. + * Thus at most it could be waiting on the + * chunk->lock, However in the future + * this may change. So we will assume + * it is possible now and handle + * that case too). + * + * So we need to unlock here so that + * it can wake up and proceed to + * a cancellation point. 
This should + * be ok, since we have not yet + * cleared cmd->cmdi->in_use. Thus the command + * can not be reused. If we did not wake up + * here we could hang on the pthread_join, + * if that thread was waiting on the + * chunk->lock. + */ + CFLASH_BLOCK_UNLOCK(chunk->lock); + pthread_join(cmd->cmdi->thread_id,NULL); + CFLASH_BLOCK_LOCK(chunk->lock); + + cmd->cmdi->thread_id = 0; + + } + + + } + +#endif /* !_COMMON_INTRPT_THREAD */ + + CBLK_FREE_CMD(chunk,cmd); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CBLK_TRACE_LOG_FILE(5,"CBLK_ISSUE_CMD failed rc = %d,errno = %d, chunk->index = %d", + rc,errno,chunk->index); + return -1; + } + + if (lib_flags & CFLASH_ASYNC_OP) { + /* + * Update async I/O statistics + */ + + cmd->cmdi->flags |= CFLSH_ASYNC_IO; + + if (op_code == SCSI_READ_16) { + chunk->stats.num_areads++; + chunk->stats.num_act_areads++; + + chunk->stats.max_num_act_areads = MAX(chunk->stats.num_act_areads,chunk->stats.max_num_act_areads); + } else if (op_code == SCSI_WRITE_16) { + chunk->stats.num_awrites++; + chunk->stats.num_act_awrites++; + + chunk->stats.max_num_act_awrites = MAX(chunk->stats.num_act_awrites,chunk->stats.max_num_act_awrites); + + } + + /* + * If this is an async I/O then we just + * created a thread to wait for its completion. + * Since we just successfully issue the command to the + * AFU, we need to wakeup the async thread waiting for + * it to complete. + * + * We need to set the CFLSH_ASYNC_IO_SNT so that + * the receiver of this signal know we have just + * sent the signal and it should not do + * a pthread_cond_wait. We need to be locked + * here when we set this flag and send the signal + * to prevent the receiver from hanging. 
+ */ + +#ifndef _COMMON_INTRPT_THREAD + cmd->cmdi->flags |= CFLSH_ASYNC_IO_SNT; + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } +#endif /* COMMON_INTRPT_THREAD */ + + } else { + /* + * Update sync I/O statistics + */ + if (op_code == SCSI_READ_16) { + chunk->stats.num_reads++; + chunk->stats.num_act_reads++; + + chunk->stats.max_num_act_reads = MAX(chunk->stats.num_act_reads,chunk->stats.max_num_act_reads); + } else if (op_code == SCSI_WRITE_16) { + chunk->stats.num_writes++; + chunk->stats.num_act_writes++; + + chunk->stats.max_num_act_writes = MAX(chunk->stats.num_act_writes,chunk->stats.max_num_act_writes); + + } + } + + + + if (op_code == SCSI_READ_16) { + + cmd->cmdi->flags |= CFLSH_MODE_READ; + + } else if (op_code == SCSI_WRITE_16) { + + cmd->cmdi->flags |= CFLSH_MODE_WRITE; + + } + + + /* + * Unlock since we are waiting for + * a completions. Other threads may + * also be doing doing I/O to this chunk + * too. + */ + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + return rc; +} + + + +/* + * NAME: cblk_init + * + * FUNCTION: Initializes library resources. + * + * + * INPUTS: NONE + * + * + * RETURNS: NONE + * + */ + +int cblk_init(void *arg,uint64_t flags) +{ + + /* + * Today, this routine is a no-op, but it provides + * future expandability options (such as shared segments + * per context, should that ever be needed. + */ + + + return 0; +} +/* + * NAME: cblk_term + * + * FUNCTION: Free library resources. + * + * + * INPUTS: NONE + * + * + * RETURNS: NONE + * + */ + +int cblk_term(void *arg,uint64_t flags) +{ +#ifdef _AIX + if (fetch_and_or(&cflsh_blk_term,1)) { + + + + /* + * If cflsh_blk_term is set, then + * we have done all the cflash block code + * cleanup. As result we can return + * now. 
+ */ + return 0; + } + + /* + * We get here if the cflash_block code has not been + * terminated yet. + */ + + _cflsh_blk_free(); + +#endif /* AIX */ + return 0; +} + + + + + + +/* + * NAME: cblk_open + * + * FUNCTION: Opens a handle for a CAPI flash lun to + * a contiguous set of blocks (chunk). + * + * + * INPUTS: + * path - Path of device to open + * + * flags - Flags for open + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. + * + */ + +chunk_id_t cblk_open(const char *path, int max_num_requests, int mode, chunk_ext_arg_t ext, int flags) +{ + chunk_id_t ret_chunk_id = NULL_CHUNK_ID; + int open_flags; + int cleanup_depth; + cflsh_chunk_t *chunk = NULL; + +#ifdef _AIX + int ext_flags = 0; +#endif /* _AIX */ + errno = 0; + + + +#ifdef _AIX + cflsh_blk_init(); +#endif /* AIX */ + + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + CBLK_TRACE_LOG_FILE(5,"opening %s with max_num_requests = %d, mode = 0x%x, flags = 0x%x for pid = 0x%llx", + path,max_num_requests,mode,flags,(uint64_t)cflsh_blk.caller_pid); + + + if (strlen(path) > PATH_MAX) { + + CBLK_TRACE_LOG_FILE(1,"opening failed because filename too long"); + + errno = EINVAL; + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + return ret_chunk_id; + } + + + ret_chunk_id = cblk_get_chunk(CFLSH_BLK_CHUNK_SET_UP, max_num_requests); + + chunk = CBLK_GET_CHUNK_HASH(ret_chunk_id,FALSE); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + if (chunk) { + + + + CFLASH_BLOCK_LOCK(chunk->lock); + + switch (mode & O_ACCMODE) { + case O_RDONLY: + chunk->flags |= CFLSH_CHNK_RD_AC; + break; + case O_WRONLY: + chunk->flags |= CFLSH_CHNK_WR_AC; + break; + case O_RDWR: + chunk->flags |= CFLSH_CHNK_RD_AC | CFLSH_CHNK_WR_AC; + break; + default: + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Invalid access mode %d",mode); + + cblk_chunk_open_cleanup(chunk,20); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + + } + + /* + * For now set the starting LBA of + * 
this chunk to be the same as + * the physical LBA. For virtual + * luns, the caller is supposed to do + * a set size operation, which at that + * time we will set the chunk's + * actual starting lba. + */ + + chunk->start_lba = 0; + + + if (flags & CBLK_OPN_VIRT_LUN) { + + /* + * This is a virtual lun, + * not a physical lun. + */ + chunk->flags |= CFLSH_CHNK_VLUN; + + if (flags & CBLK_OPN_SCRUB_DATA) { + + chunk->flags |= CFLSH_CHNK_VLUN_SCRUB; + } + } + + + + if (flags & CBLK_OPN_NO_INTRP_THREADS) { + + /* + * This chunk is not allowed to use + * back ground threads. + */ + + chunk->flags |= CFLSH_CHNK_NO_BG_TD; + } + + /* + * Don't use libafu + */ + + strcpy(chunk->dev_name,path); + +#ifdef BLOCK_FILEMODE_ENABLED + open_flags = mode & O_ACCMODE; + + if (!strncmp("/dev/",path,5)) { + + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Can not use device special files for file mode"); + perror("cblk_open: Can not use device special files for file mode"); + + cblk_chunk_open_cleanup(chunk,20); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + } +#else + + + if (strncmp("/dev/",path,5)) { + + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Can not use non device special files for real block mode"); + + + cblk_chunk_open_cleanup(chunk,20); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + } + + +#endif + + if (flags & CBLK_OPN_SHARE_CTXT) { + + chunk->flags |= CFLSH_CHNK_SHARED; + } + + +#ifdef _AIX + + open_flags = (mode & O_ACCMODE) | O_NONBLOCK; + + ext_flags |= SC_CAPI_USER_IO; + + if ((flags & (CBLK_OPN_RESERVE|CBLK_OPN_FORCED_RESERVE)) && + (flags & CBLK_OPN_MPIO_FO)) { + + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Can not use MPIO with reservations"); + + + cblk_chunk_open_cleanup(chunk,20); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + + } + + + if (!(flags & CBLK_OPN_RESERVE) && + !(flags & CBLK_OPN_FORCED_RESERVE)) { + + + chunk->flags |= 
CFLSH_CHNK_NO_RESRV; + ext_flags |= SC_NO_RESERVE; + } + + if (flags & CBLK_OPN_FORCED_RESERVE) { + + ext_flags |= SC_FORCED_OPEN_LUN; + } + + + if (flags & CBLK_OPN_MPIO_FO) { + + + chunk->flags |= CFLSH_CHNK_MPIO_FO; + } + + + + chunk->fd = openx(chunk->dev_name,open_flags,0,ext_flags); +#else + + open_flags = O_RDWR | O_NONBLOCK; + + chunk->fd = open(chunk->dev_name,open_flags); +#endif /* !_AIX */ + + if (chunk->fd < 0) { + + CBLK_TRACE_LOG_FILE(1,"Unable to open device errno = %d",errno); + perror("cblk_open: Unable to open device"); + + cblk_chunk_open_cleanup(chunk,20); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + } + + cleanup_depth = 30; + + if (cblk_chunk_attach_process_map(chunk,mode,&cleanup_depth)) { + + CBLK_TRACE_LOG_FILE(1,"Unable to attach errno = %d",errno); + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + + } + + + + cleanup_depth = 40; + +#ifdef _COMMON_INTRPT_THREAD + + + if (cblk_start_common_intrpt_thread(chunk)) { + + + CBLK_TRACE_LOG_FILE(1,"cblk_start_common_intrpt thread failed with errno= %d", + errno); + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + } + + cleanup_depth = 45; +#endif + + if (cblk_chunk_get_mc_device_resources(chunk,&cleanup_depth)) { + + + CBLK_TRACE_LOG_FILE(5,"cblk_get_device_info failed errno = %d", + errno); + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + return NULL_CHUNK_ID; + + } + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + + cblk_chunk_init_cache(chunk,chunk->num_blocks); + + + } + + /* + * This chunk is ready for use. 
+ */ + chunk->flags |= CFLSH_CHNK_RDY; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + } + + + CBLK_TRACE_LOG_FILE(5,"opening for %s, returned rc = %d",path,ret_chunk_id); + + return ret_chunk_id; +} + +/* + * NAME: cblk_close + * + * FUNCTION: Closes a chunk. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * + * flags - Flags for close + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_close(chunk_id_t chunk_id, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + int loop_cnt = 0; + + errno = 0; + + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + + CBLK_TRACE_LOG_FILE(5,"closing chunk_id = %d",chunk_id); + + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"closing failed because chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + if (chunk->in_use == FALSE) { + + CBLK_TRACE_LOG_FILE(1,"closing failed because chunk not in use, rchunk_id = %d, path = %s", + chunk_id,chunk->dev_name); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. 
+ */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CBLK_TRACE_LOG_FILE(9,"closing chunk->dev_name = %s",chunk->dev_name); + + if (chunk->flags & CFLSH_CHNK_CLOSE) { + + CBLK_TRACE_LOG_FILE(1,"Trying to close chunk that someone else is also closing, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_UNLOCK(chunk->lock); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + + chunk->flags |= CFLSH_CHNK_CLOSE; + + while ((chunk->num_active_cmds > 0) && + (loop_cnt < CFLSH_BLK_WAIT_CLOSE_RETRIES)) { + + /* + * Wait for a limited time for + * active commands to complete. + * Unlock when we are sleeping. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + usleep(CFLSH_BLK_WAIT_CLOSE_DELAY); + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + CFLASH_BLOCK_LOCK(chunk->lock); + loop_cnt++; + + + } + + + if (chunk->num_active_cmds > 0) { + + + /* + * If this chunk still has active + * commands then fails this + * close. 
+ */ + + CBLK_TRACE_LOG_FILE(1,"closing failed because chunk in use, rchunk_id = %d, path = %s, num_active_cmds %d", + chunk_id,chunk->dev_name,chunk->num_active_cmds); + + + chunk->flags &= ~CFLSH_CHNK_CLOSE; + + cblk_display_stats(chunk,3); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EBUSY; + return -1; + } + + + if (chunk->cache) { + + cblk_chunk_free_cache(chunk); + + } + + /* + * Indicate chunk is not ready for use + */ + + chunk->flags &= ~CFLSH_CHNK_RDY; + + cblk_display_stats(chunk,3); + + cblk_chunk_open_cleanup(chunk,50); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + free(chunk); + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + CBLK_TRACE_LOG_FILE(5,"closing chunk_id = %d returned rc = %d",chunk_id, rc); + return rc; +} + +/* + * NAME: cblk_get_lun_size + * + * FUNCTION: Returns the number of blocks + * associated with the physical lun that + * contains this chunk + * + * + * INPUTS: + * chunk_id - Chunk identifier + * nblocks - Address of returned number of + * blocks for this lun + * flags - Flags for open + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_get_lun_size(chunk_id_t chunk_id, size_t *nblocks, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + + + errno = 0; + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + if (nblocks == NULL) { + + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + *nblocks = chunk->num_blocks_lun; + + CBLK_TRACE_LOG_FILE(5,"get_lun_size returned = 0x%llx, rc = %d",(uint64_t)(*nblocks), rc); + return rc; +} + + +/* + * NAME: cblk_get_size + * + * FUNCTION: Returns the number of blocks + * associated with this chunk. 
+ * + * + * INPUTS: + * chunk_id - Chunk identifier + * nblocks - Address of returned number of + * blocks for this chunk. + * flags - Flags for open + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_get_size(chunk_id_t chunk_id, size_t *nblocks, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + + + errno = 0; + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + if (nblocks == NULL) { + + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + *nblocks = chunk->num_blocks; + + CBLK_TRACE_LOG_FILE(5,"get_size returned = 0x%llx, rc = %d",(uint64_t)(*nblocks), rc); + + + return rc; +} + + +/* + * NAME: cblk_set_size + * + * FUNCTION: Specifies the number of blocks + * associated with this chunk. If blocks are already + * associated then this request can decrease/increase + * them. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * nblocks - Number of blocks for this chunk. 
+ * flags - Flags for open + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_set_size(chunk_id_t chunk_id, size_t nblocks, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + + + + errno = 0; + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + + CBLK_TRACE_LOG_FILE(5,"set_size size = 0x%llx",(uint64_t)nblocks); + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + + CFLASH_BLOCK_LOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + + + CBLK_TRACE_LOG_FILE(1,"set_size failed with EINVAL no VLUN"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } + + +#if defined(_KERNEL_MASTER_CONTXT) || defined(BLOCK_FILEMODE_ENABLED) + if (nblocks > chunk->num_blocks_lun) { + + + CBLK_TRACE_LOG_FILE(1,"set_size failed with EINVAL, nblocks = 0x%llx, num_blocks_lun = 0x%llx", + (uint64_t)nblocks,(uint64_t)chunk->num_blocks_lun); + CBLK_TRACE_LOG_FILE(1,"set_size failed with EINVAL, nblocks = 0x%llx",nblocks); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } +#endif /* !_MASTER_CONTXT */ + + + CBLK_TRACE_LOG_FILE(9,"set size for chunk->dev_name = %s",chunk->dev_name); + + + rc = cblk_chunk_set_mc_size(chunk,nblocks); + + if (rc) { + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return -1; + } + + + /* + * If we got this far, then the we succeeded in + * getting the space requested. 
+ */ + + + + if (chunk->cache) { + + cblk_chunk_free_cache(chunk); + + } + + + cblk_chunk_init_cache(chunk,nblocks); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; +} + + +/* + * NAME: cblk_read + * + * FUNCTION: Reads data from the specified offset in the chunk + * and places that data in the specified buffer. + * This request is a blocking read request (i.e. + * it will not return until either data is read or + * an error is encountered). + * + * + * INPUTS: + * chunk_id - Chunk identifier + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_read(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + int cmd_index = 0; + size_t transfer_size = 0; +#ifdef _COMMON_INTRPT_THREAD + cflsh_cmd_mgm_t *cmd = NULL; + int pthread_rc; +#endif + + + + errno = 0; + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, lba = 0x%llx, nblocks = 0x%llx, buf = %p",chunk_id,lba,(uint64_t)nblocks,buf); + + + if (CBLK_VALIDATE_RW(chunk_id,buf,lba,nblocks)) { + + return -1; + } + + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + if (nblocks > chunk->stats.max_transfer_size) { + + + CBLK_TRACE_LOG_FILE(1,"nblocks too large = 0x%llx",nblocks); + + errno = EINVAL; + return -1; + } + + if (!(chunk->flags & CFLSH_CHNK_RD_AC)) { + + + CBLK_TRACE_LOG_FILE(1,"chunk does not have read access",nblocks); + + errno = EINVAL; + return -1; + + } + + + rc = CBLK_BUILD_ISSUE_RW_CMD(chunk,&cmd_index,buf,lba,nblocks,flags,0,SCSI_READ_16,NULL); + + + if (rc) { + + return rc; + } + +#ifndef 
_COMMON_INTRPT_THREAD + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&cmd_index,&transfer_size,TRUE,0); + + +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&cmd_index,&transfer_size,TRUE,0); + + } else { + + if ((cmd_index >= chunk->num_cmds) || + (cmd_index < 0)) { + + errno = EINVAL; + + rc = -1; + CBLK_TRACE_LOG_FILE(1,"Invalid cmd_index = 0x%x",cmd_index); + + return rc; + } + + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } + + cmd = &(chunk->cmd_start[cmd_index]); + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"null cmdi for cmd_index = 0x%x",cmd_index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + errno = EINVAL; + + rc = -1; + + return rc; + + } + + + if (cmd->cmdi->state != CFLSH_MGM_CMP) { + + /* + * Only wait if the cmd is not completed. + */ + pthread_rc = pthread_cond_wait(&(cmd->cmdi->thread_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed cmd_index = 0x%x rc = %d errno = %d", + cmd->index,pthread_rc,errno); + } else { + + + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + + + } + + } else { + + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + } + +#endif + + if (!rc) { + + /* + * For good completion, indicate we read + * all the data requested. + */ + rc = transfer_size; + } + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx",rc,errno,lba); + + + return rc; +} + +/* + * NAME: cblk_write + * + * FUNCTION: Writes data to the specified offset in the chunk + * from the specified buffer. This request is + * a blocking write request (i.e.it will not + * return until either data is written or + * an error is encountered). 
+ * + * + * INPUTS: + * chunk_id - Chunk identifier + * buf - Buffer to write data from + * lba - starting LBA (logical Block Address) + * in chunk to write data to. + * nblocks - Number of blocks to write. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_write(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int flags) +{ + int rc = 0; + cflsh_chunk_t *chunk; + int cmd_index = 0; + size_t transfer_size = 0; +#ifdef _COMMON_INTRPT_THREAD + cflsh_cmd_mgm_t *cmd = NULL; + int pthread_rc; +#endif + + + errno = 0; + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, lba = 0x%llx, nblocks = 0x%llx, buf = %p",chunk_id,lba,(uint64_t)nblocks,buf); + + if (CBLK_VALIDATE_RW(chunk_id,buf,lba,nblocks)) { + + return -1; + } + + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + if (nblocks > chunk->stats.max_transfer_size) { + + CBLK_TRACE_LOG_FILE(1,"nblocks too large = 0x%llx",nblocks); + + errno = EINVAL; + return -1; + } + + if (!(chunk->flags & CFLSH_CHNK_WR_AC)) { + + + CBLK_TRACE_LOG_FILE(1,"chunk does not have write access",nblocks); + +#if defined(_AIX) + errno = EWRPROTECT; +#else + errno = EINVAL; +#endif /* !defined(_AIX) && !defined(_MACOSX) */ + + return -1; + + } + + rc = CBLK_BUILD_ISSUE_RW_CMD(chunk,&cmd_index,buf,lba,nblocks,flags,0,SCSI_WRITE_16,NULL); + + + if (rc) { + + return rc; + } + + +#ifndef _COMMON_INTRPT_THREAD + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&cmd_index,&transfer_size,TRUE,0); + +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&cmd_index,&transfer_size,TRUE,0); + + } else { + + if ((cmd_index >= chunk->num_cmds) || + (cmd_index < 0)) { + + errno = EINVAL; + + rc = -1; + 
CBLK_TRACE_LOG_FILE(1,"Invalid cmd_index = 0x%x",cmd_index); + + return rc; + } + + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } + + cmd = &(chunk->cmd_start[cmd_index]); + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"null cmdi for cmd_index = 0x%x",cmd_index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + errno = EINVAL; + + rc = -1; + + return rc; + + } + + + if (cmd->cmdi->state != CFLSH_MGM_CMP) { + + /* + * Only wait if the command is not completed. + */ + pthread_rc = pthread_cond_wait(&(cmd->cmdi->thread_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed cmd_index = 0x%x rc = %d errno = %d", + cmd->index,pthread_rc,errno); + } else { + + + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + + + } + + } else { + + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + } + +#endif + + if (!rc) { + + /* + * For good completion, indicate we read + * all the data requested. + */ + rc = transfer_size; + } + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx",rc,errno,lba); + + + return rc; +} + + +/* + * NAME: _cblk_aread + * + * FUNCTION: Internal implementation of async read + * + * + * INPUTS: + * chunk - Chunk associated with operation + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. + * tag - Tag associated with this request. + * + * RETURNS: + * 0 for good completion, + * >0 Read data was in cache and was read. 
+ * -1 for error with ERRNO set + * + */ + +static inline int _cblk_aread(cflsh_chunk_t *chunk,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, + cblk_arw_status_t *status, int flags) +{ + int rc = 0; + int cmd_index = 0; + + + + if (nblocks > chunk->stats.max_transfer_size) { + + CBLK_TRACE_LOG_FILE(1,"nblocks too large = 0x%llx",nblocks); + + errno = EINVAL; + return -1; + } + + if (!(chunk->flags & CFLSH_CHNK_RD_AC)) { + + + CBLK_TRACE_LOG_FILE(1,"chunk does not have read access",nblocks); + + errno = EINVAL; + return -1; + + } + + if ((flags & CBLK_ARW_USER_STATUS_FLAG) && + (status == NULL)) { + + + CBLK_TRACE_LOG_FILE(1,"status field is NULL"); + + errno = EINVAL; + return -1; + } + + + if ((flags & CBLK_ARW_USER_STATUS_FLAG) && + (chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If this chunk was opened with no background + * thread, then the caller can not expect + * to use a user specified status field to + * be updated/notified when a command is complete. + */ + + errno = EINVAL; + + return -1; + } + /* + * NOTE: If data was read from the cache, then the rc + * from CBLK_BUILD_ISSUE_RW_CMD will be greater + * than 0. If CBLK_BUILD_ISSUE_RW_CMD fails, then + * rc = -1. + */ + rc = CBLK_BUILD_ISSUE_RW_CMD(chunk,&cmd_index,buf,lba,nblocks,flags,CFLASH_ASYNC_OP,SCSI_READ_16,status); + + if (rc) { + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx",rc,errno,lba); + + return rc; + } else { + + /* + * TODO:?? Is this the best solution to set + * user defined tag? 
+ */ + + if (!(flags & CBLK_ARW_USER_TAG_FLAG)) { + + *tag = cmd_index; + } else { + + /* + * Set user defined tag + */ + + chunk->cmd_info[cmd_index].flags |= CFLSH_CMD_INFO_UTAG; + chunk->cmd_info[cmd_index].user_tag = *tag; + } + + + } + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx, tag = 0x%x",rc,errno,lba,*tag); + + return rc; + +} + + +/* + * NAME: cblk_aread + * + * FUNCTION: Reads data from the specified offset in the chunk + * and places that data in the specified buffer. + * This request is an asynchronous read request (i.e. + * it will return as soon as it has issued the request + * to the device. It will not wait for a response from + * the device.). + * + * + * INPUTS: + * chunk_id - Chunk identifier + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. + * tag - Tag associated with this request. + * + * RETURNS: + * 0 for good completion, + * >0 Read data was in cache and was read. 
+ * -1 for error with ERRNO set + * + */ + +int cblk_aread(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, + cblk_arw_status_t *status, int flags) +{ + cflsh_chunk_t *chunk; + + errno = 0; + + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, lba = 0x%llx, nblocks = 0x%llx, buf = %p",chunk_id,lba,(uint64_t)nblocks,buf); + + if (CBLK_VALIDATE_RW(chunk_id,buf,lba,nblocks)) { + + return -1; + } + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + return (_cblk_aread(chunk,buf,lba,nblocks,tag,status,flags)); +} + + +/* + * NAME: _cblk_awrite + * + * FUNCTION: Internal implementation of async write + * + * + * INPUTS: + * chunk - Chunk associated with operation + * buf - Buffer to write data from + * lba - starting LBA (logical Block Address) + * in chunk to write data to. + * nblocks - Number of blocks to write. + * tag - Tag associated with this request. 
+ * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +static inline int _cblk_awrite(cflsh_chunk_t *chunk,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, + cblk_arw_status_t *status, int flags) +{ + int rc = 0; + int cmd_index = 0; + + if (nblocks > chunk->stats.max_transfer_size) { + + CBLK_TRACE_LOG_FILE(1,"nblocks too large = 0x%llx",nblocks); + + errno = EINVAL; + return -1; + } + + if (!(chunk->flags & CFLSH_CHNK_WR_AC)) { + + + CBLK_TRACE_LOG_FILE(1,"chunk does not have write access",nblocks); + +#if defined(_AIX) + errno = EWRPROTECT; +#else + errno = EINVAL; +#endif /* !defined(_AIX) && !defined(_MACOSX) */ + return -1; + + } + + if ((flags & CBLK_ARW_USER_STATUS_FLAG) && + (status == NULL)) { + + + CBLK_TRACE_LOG_FILE(1,"status field is NULL"); + + errno = EINVAL; + return -1; + } + + if ((flags & CBLK_ARW_USER_STATUS_FLAG) && + (chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If this chunk was opened with no background + * thread, then the caller can not expect + * to use a user specified status field to + * be updated/notified when a command is complete. + */ + + errno = EINVAL; + + return -1; + } + + rc = CBLK_BUILD_ISSUE_RW_CMD(chunk,&cmd_index,buf,lba,nblocks,flags,CFLASH_ASYNC_OP,SCSI_WRITE_16,status); + + if (rc) { + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx",rc,errno,lba); + + return rc; + } else { + + /* + * TODO:?? Is this the best solution to set + * user defined tag? + */ + + if (!(flags & CBLK_ARW_USER_TAG_FLAG)) { + + *tag = cmd_index; + } else { + + /* + * Set user defined tag + */ + + chunk->cmd_info[cmd_index].flags |= CFLSH_CMD_INFO_UTAG; + chunk->cmd_info[cmd_index].user_tag = *tag; + } + + } + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d lba = 0x%llx, tag = 0x%x",rc,errno,lba,*tag); + return rc; +} + + + +/* + * NAME: cblk_awrite + * + * FUNCTION: Writes data to the specified offset in the chunk + * from the specified buffer. 
This request is + * an asynchronous write request (i.e.it will + * return as soon as it has issued the request + * to the device. It will not wait for a response from + * the device.). + * + * + * INPUTS: + * chunk_id - Chunk identifier + * buf - Buffer to write data from + * lba - starting LBA (logical Block Address) + * in chunk to write data to. + * nblocks - Number of blocks to write. + * tag - Tag associated with this request. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_awrite(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, + cblk_arw_status_t *status, int flags) +{ + + cflsh_chunk_t *chunk; + + + errno = 0; + + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, lba = 0x%llx, nblocks = 0x%llx, buf = %p",chunk_id,lba,(uint64_t)nblocks,buf); + + + if (CBLK_VALIDATE_RW(chunk_id,buf,lba,nblocks)) { + + return -1; + } + + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + return (_cblk_awrite(chunk,buf,lba,nblocks,tag,status,flags)); +} + + +/* + * NAME: _cblk_aresult + * + * FUNCTION: Internal implementation of async result + * + * + * INPUTS: + * chunk - Chunk associated with operation + * tag - Tag associated with this request that + * completed. + * flags - Flags on this request. + * harvest - 0 to skip harvest call, else do harvest + * + * RETURNS: + * 0 for good completion, but requested tag has not yet completed. + * -1 for error and errno is set. + * > 0 for good completion where the return value is the + * number of blocks transferred. 
+ * + */ + +static inline int _cblk_aresult(cflsh_chunk_t *chunk,int *tag, uint64_t *status, + int flags, int harvest) +{ + + int rc = 0; + cflsh_cmd_mgm_t *cmd = NULL; + size_t transfer_size = 0; + int pthread_rc; + cflsh_cmd_info_t *cmdi; + int i; + + + + + if (tag == NULL) { + + errno = EINVAL; + return -1; + + } + + if (status == NULL) { + + errno = EINVAL; + return -1; + + } + + + *status = 0; + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD && harvest) { + + /* + * Check if any commands completed since the last time we + * checked, but do not wait for any to complete. + */ + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,tag,&transfer_size,FALSE,CFLASH_ASYNC_OP); + + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + + if (flags & CBLK_ARESULT_NEXT_TAG) { + + /* + * The caller has asked to be + * notified of the first tag + * (commmand) to complete. + */ + + + + if (chunk->num_active_cmds == 0) { + + /* + * No commands are active to wait on. + */ + + errno = EINVAL; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return -1; + } + + *tag = -1; + + /* + * In the case where someone has all commands active, + * and they are issue new requests to us as soone as one + * completes, we need to ensure fairness in how command responses + * are returned. We can not just return the same command tag + * and allow another to issued using that uses same command, because + * the other commands would be starved out. So we will start with the + * head of the active queue, which will be the oldest command outstanding. + * In most cases it should be more likely to have completed and be ready + * for completion that newer commands. The tail of the active queue is the newest + * command. Thus we'll traverse from the oldest to the newest looking for completions. + */ + + + + cmdi = chunk->head_act; + + while (cmdi) { + + + if ((cmdi->in_use) && + (cmdi->flags & CFLSH_ASYNC_IO) && + (cmdi->state == CFLSH_MGM_CMP)) { + + /* + * This is an async command that completed + * but has not yet been seen by a caller. 
+ */ + + *tag = cmdi->index; + break; + } + + + cmdi = cmdi->act_next; + } + + + if (*tag == -1) { + if (!(flags & CBLK_ARESULT_BLOCKING)) { + + /* + * No commands have completed that + * are waiting for processing and + * the caller does not want to block + * to wait for any. So return + * with an indication we did not find + * any commands. + */ + CFLASH_BLOCK_UNLOCK(chunk->lock); + return 0; + } else { + + *tag = -1; + +#ifndef _COMMON_INTRPT_THREAD + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,tag,&transfer_size,TRUE,CFLASH_ASYNC_OP); + + + CFLASH_BLOCK_LOCK(chunk->lock); +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,tag,&transfer_size,TRUE,CFLASH_ASYNC_OP); + + CFLASH_BLOCK_LOCK(chunk->lock); + + } else { + + + /* + * Wait for any command completion. + */ + pthread_rc = pthread_cond_wait(&(chunk->cmd_cmplt_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed rc = %d errno = %d", + pthread_rc,errno); + + + } + } + +#endif + if (*tag == -1) { + + /* + * No command has been found above. Check + * to see if a command was processed by the back ground + * threads. + */ + + cmdi = chunk->head_act; + + while (cmdi) { + + + if ((cmdi->in_use) && + (cmdi->flags & CFLSH_ASYNC_IO) && + (cmdi->state == CFLSH_MGM_CMP)) { + + /* + * This is an async command that completed + * but has not yet been seen by a caller. + */ + + *tag = cmdi->index; + break; + } + + + cmdi = cmdi->act_next; + } + + if (*tag == -1) { + + /* + * No command was still found that completed. + */ + + + if (!rc) { + + /* + * If no error was reported for no + * command, then return an error + * here. 
+ */ + + errno = ETIMEDOUT; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return(-1); + + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return 0; + + } + + } + + } + } + + + + } + + + + /* + * Since we unlocked above it is possible + * another thread tried to close this thread + * As a result we'll double check the chunk] + * again. There is one issue with this approach. + * If the chunk is invalid then is locking + * chunk->locking chunk->lock valid. + */ + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk->index); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EINVAL; + return -1; + } + + if (flags & CBLK_ARESULT_USER_TAG) { + + cmd = NULL; + + for (i = 0; i < chunk->num_cmds;i++) { + + if (chunk->cmd_info[i].user_tag == *tag) { + + /* + * We found the user tag provided + */ + + cmd = &(chunk->cmd_start[i]); + + break; + } + } + + if (cmd == NULL) { + + errno = EINVAL; + + rc = -1; + + /* + * TODO: ?? add stats for user defined tag not found + */ + + CBLK_TRACE_LOG_FILE(3," user defined tag not found. rc = %d,errno = %d, tag = %d", + rc, errno,*tag); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + } else { + + if ((*tag >= chunk->num_cmds) || + (*tag < 0)) { + + errno = EINVAL; + + rc = -1; + CBLK_TRACE_LOG_FILE(1,"Invalid cmd_index = 0x%x",*tag); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + cmd = &(chunk->cmd_start[*tag]); + } + + + + if (cmd->cmdi == NULL) { + + errno = EINVAL; + + rc = -1; + CBLK_TRACE_LOG_FILE(1,"null cmdi for cmd_index = 0x%x",cmd->index); + + return rc; + + } + + if (!cmd->cmdi->in_use) { + + errno = ENOMSG; + + rc = -1; + CBLK_TRACE_LOG_FILE(3," tag not in use. 
rc = %d,errno = %d, tag = %d, cmd = 0x%llx", + rc, errno,*tag,(uint64_t)cmd); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + +#ifndef _COMMON_INTRPT_THREAD + if (cmd->cmdi->thread_id && + cmd->cmdi->in_use && + (flags & CBLK_ARESULT_BLOCKING)) { + + /* + * The caller is indicating they + * want to wait for completion of + * this tag. So lets do that now. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + pthread_join(cmd->cmdi->thread_id,NULL); + CFLASH_BLOCK_LOCK(chunk->lock); + + cmd->cmdi->thread_id = 0; + } +#else + + + if ((cmd->cmdi->state != CFLSH_MGM_CMP) && + cmd->cmdi->in_use && + (flags & CBLK_ARESULT_BLOCKING)) { + + + /* + * The caller is indicating they + * want to wait for completion of + * this tag. So lets do that now. + */ + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,tag,&transfer_size,TRUE,CFLASH_ASYNC_OP); + + } else { + /* + * Only wait if the command is not completed + */ + pthread_rc = pthread_cond_wait(&(cmd->cmdi->thread_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed cmd_index = 0x%x rc = %d errno = %d", + cmd->index,pthread_rc,errno); + } else { + + + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + + if ((rc == 0) && + (transfer_size == 0)) { + + + /* + * If this routine returns a 0 return code, then + * the caller will believe the command has not yet + * completed. So we need to map this to an error. + */ + + rc = -1; + + errno = EIO; + + } + } + + } + } + + +#endif + + + if (cmd->cmdi->state == CFLSH_MGM_CMP) { + + /* + * This command completed, + * clean it up. + */ + + + if (!(cmd->cmdi->status)) { + + /* + * Good completion + */ + + rc = cmd->cmdi->transfer_size; + + errno = 0; + + if (rc == 0) { + + /* + * If this routine returns a 0 return code, then + * the caller will believe the command has not yet + * completed. So we need to map this to an error. 
+ */ + + rc = -1; + + errno = EIO; + + } + + } else { + + /* + * We encountered an error. + */ + + rc = -1; + + errno = cmd->cmdi->status & 0xffffffff; + + } + + if (cmd->cmdi->flags & CFLSH_ASYNC_IO) { + + if (cmd->cmdi->flags & CFLSH_MODE_READ) { + + chunk->stats.num_blocks_read += cmd->cmdi->transfer_size; + if (chunk->stats.num_act_areads) { + chunk->stats.num_act_areads--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + + } else if (cmd->cmdi->flags & CFLSH_MODE_WRITE) { + + + chunk->stats.num_blocks_written += cmd->cmdi->transfer_size; + if (chunk->stats.num_act_awrites) { + chunk->stats.num_act_awrites--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + } else { + + + + // This can happen in mixed mode (both async and sync) and if the caller waits for next tag. + if (cmd->cmdi->flags & CFLSH_MODE_READ) { + + chunk->stats.num_blocks_read += cmd->cmdi->transfer_size; + if (chunk->stats.num_act_reads) { + chunk->stats.num_act_reads--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + + } else if (cmd->cmdi->flags & CFLSH_MODE_WRITE) { + + chunk->stats.num_blocks_written += cmd->cmdi->transfer_size; + if (chunk->stats.num_act_writes) { + chunk->stats.num_act_writes--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x", + cmd->cmdi->flags); + } + } + + chunk->num_active_cmds--; + + +#ifndef _COMMON_INTRPT_THREAD + /* + * The caller may have told us to not wait + * so we do not want to wait for the cancel to complete. + * Instead will check flag in the command to + * to see if the thread was exiting the last + * time it held the lock. 
If so then no cancel is + * needed here, since that thread is done accessing + * this command. Thus we can mark it free. Otherwise + * we need to set a flag so that the async thread + * marks this command as available (clears in_use) when it + * exits. + */ + + if (cmd->cmdi->flags & CFLSH_ATHRD_EXIT) { + + CBLK_TRACE_LOG_FILE(7,"cmd = 0x%llx, tag = 0x%x",cmd,*tag); + + + CBLK_FREE_CMD(chunk,cmd); + + } else { + + CBLK_TRACE_LOG_FILE(7,"cmd = 0x%llx, tag = 0x%x",cmd,*tag); + cmd->cmdi->state = CFLSH_MGM_ASY_CMP; + } + + + /* + * Inform system to reclaim space for this thread when it + * terminates. + */ + + if (cmd->cmdi->thread_id) { + + pthread_rc = pthread_detach(cmd->cmdi->thread_id); + + + cmd->cmdi->thread_id = 0; + + if (pthread_rc) { + + chunk->stats.num_fail_detach_threads++; + CBLK_TRACE_LOG_FILE(5,"pthread_detach failed rc = %d errno = %d, cmd_index = %d,lba = 0x%llx, num_active_cmds = 0x%x, num_active_threads = 0x%x", + pthread_rc,errno, cmd->index,cmd->cmdi->lba,chunk->num_active_cmds,chunk->stats.num_active_threads); + + } + } +#else + + CBLK_FREE_CMD(chunk,cmd); + +#endif /* _COMMON_INTRPT_THREAD */ + } + + if (rc == 0) { + + chunk->stats.num_aresult_no_cmplt++; + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d, tag = %d",rc,errno,*tag); + + return rc; +} + + +/* + * NAME: cblk_aresult + * + * FUNCTION: Waits for asynchronous read or writes to complete + * and returns the status of them. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * tag - Tag associated with this request that + * completed. + * flags - Flags on this request. + * + * RETURNS: + * 0 for good completion, but requested tag has not yet completed. + * -1 for error and errno is set. + * > 0 for good completion where the return value is the + * number of blocks transferred. 
+ * + */ + +int cblk_aresult(chunk_id_t chunk_id,int *tag, uint64_t *status, int flags) +{ + cflsh_chunk_t *chunk; + + + errno = 0; + + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, flags = 0x%x, tag = 0x%x",chunk_id,flags,*tag); + + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + return (_cblk_aresult(chunk,tag,status,flags,1)); +} + + +/* + * NAME: cblk_listio + * + * FUNCTION: Issues and waits for multiple I/O requests. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * flags - Flags on this request. + * + * RETURNS: + * 0 for good completion, but requested tag has not yet completed. + * -1 for error and errno is set. + * > 0 for good completion where the return value is the + * number of blocks transferred. 
+ * + */ + +int cblk_listio(chunk_id_t chunk_id, + cblk_io_t *issue_io_list[],int issue_items, + cblk_io_t *pending_io_list[], int pending_items, + cblk_io_t *wait_io_list[],int wait_items, + cblk_io_t *completion_io_list[],int *completion_items, + uint64_t timeout,int flags) +{ + + int rc = 0; + int wait_item_found; + int item_found; + cflsh_chunk_t *chunk; + cblk_io_t *io; + cblk_io_t *tmp_io; + cblk_io_t *wait_io; + cblk_io_t *complete_io; + int io_flags; + int i,j; /* General counters */ + uint64_t status; + struct timespec start_time; + struct timespec last_time; + uint64_t uelapsed_time = 0; /* elapsed time in microseconds */ + int cmd_not_complete; + int avail_completions; + int harvest=1;/* send TRUE to aresult for the first cmd in the list only */ + + errno = 0; + + CBLK_TRACE_LOG_FILE(5,"chunk_id = %d, issue_items = 0x%x, pending_items = 0x%x, wait_items = 0x%x, timeout = 0x%llx,flags = 0x%x", + chunk_id,issue_items,pending_items,wait_items,timeout,flags); + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + + /* + * Do some validation on lists past before we do any processing. + * This allows us to completely fail the request before some I/O + * requests may have been issued. + */ + + if (cblk_listio_arg_verify(chunk_id,issue_io_list,issue_items,pending_io_list,pending_items, + wait_io_list,wait_items,completion_io_list,completion_items,timeout,flags)) { + + + return -1; + } + + complete_io = completion_io_list[0]; + + avail_completions = *completion_items; + + + /* + * Reset complete_items to 0 completed. + */ + + *completion_items = 0; + + + /* + * TODO: ?? This API is ugly in that a command may be in both the wait_io_list + * and one of the other list (pending, or issue). 
Since commands in the wait_io_list + * are not copied int the complete_io_list. Each pending or issue completion needs to first be checked + * via the waiting list and ensure it is not there already. Otherwise the entry in the wait_io_list should be + * updated. + */ + + + if (issue_items) { + + /* + * Caller is requesting I/Os to issued. + */ + + + if (issue_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list array is a null pointer for chunk_id = %d and issue_items = %d", + chunk_id,issue_items); + errno = EINVAL; + + return -1; + + } else if (issue_io_list[0] == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[0] is a null pointer for chunk_id = %d and issue_items = %d", + chunk_id,issue_items); + errno = EINVAL; + + return -1; + } + + + for (i=0; i< issue_items;i++) { + + io = issue_io_list[i]; + + + if (io == NULL) { + + + continue; + + } + + io->stat.blocks_transferred = 0; + io->stat.fail_errno = 0; + io->stat.status = CBLK_ARW_STATUS_PENDING; + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.fail_errno = EINVAL; + io->stat.status = CBLK_ARW_STATUS_INVALID; + + continue; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.fail_errno = EINVAL; + io->stat.status = CBLK_ARW_STATUS_INVALID; + continue; + } + + + + /* + * Process this I/O request + */ + + io_flags = 0; + + if (io->flags & CBLK_IO_USER_TAG) { + + io_flags |= CBLK_ARW_USER_TAG_FLAG; + + } else if (io->flags & CBLK_IO_USER_STATUS) { + + io_flags |= CBLK_ARW_USER_STATUS_FLAG; + + } + + if (flags & CBLK_LISTIO_WAIT_ISSUE_CMD) { + + io_flags |= CBLK_ARW_WAIT_CMD_FLAGS; + } + + + // TODO:?? Is it correct to give address of io->tag, since it is not in wait_list? + + // TODO:?? 
Should we specify io->stat address here. Does the caller expect this to be updated? + + + if (io->request_type == CBLK_IO_TYPE_READ) { + + rc = _cblk_aread(chunk,io->buf,io->lba,io->nblocks,&(io->tag),&(io->stat),io_flags); + } else { + + rc = _cblk_awrite(chunk,io->buf,io->lba,io->nblocks,&(io->tag),&(io->stat),io_flags); + } + + if (rc < 0) { + + CBLK_TRACE_LOG_FILE(1,"Request failed for chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + // TODO:?? Should we filter on EINVAL and uses a different status? + io->stat.fail_errno = errno; + io->stat.blocks_transferred = 0; + io->stat.status = CBLK_ARW_STATUS_FAIL; + + } else if (rc) { + + CBLK_TRACE_LOG_FILE(9,"Request chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + io->stat.fail_errno = 0; + io->stat.blocks_transferred = rc; + io->stat.status = CBLK_ARW_STATUS_SUCCESS; + } + + if (rc) { + + + /* + * For a non-zero status update wait list + * if this request is on that list. 
+ */ + + CBLK_TRACE_LOG_FILE(9,"Request chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + wait_item_found = FALSE; + + + if (wait_items) { + + for (j=0; j < wait_items; j++) { + + wait_io = wait_io_list[j]; + + if ((wait_io->buf == io->buf) && + (wait_io->lba == io->lba) && + (wait_io->nblocks == io->nblocks)) { + + wait_io->stat.fail_errno = io->stat.fail_errno; + wait_io->stat.blocks_transferred = rc; + wait_io->stat.status = io->stat.status; + wait_io->tag = io->tag; + + wait_item_found = TRUE; + + break; + + } + + } /* inner for */ + + } + + if (!wait_item_found) { + + if ((complete_io) && + (*completion_items stat.fail_errno = io->stat.fail_errno; + complete_io->stat.blocks_transferred = rc; + complete_io->stat.status = io->stat.status; + complete_io->tag = io->tag; + complete_io++; + (*completion_items)++; + } else { + + + CBLK_TRACE_LOG_FILE(1,"Request chunk_id = %d and index = %d no complete_io entry found", + chunk_id,i); + } + } + + } else { + + + /* + * Command did not complete yet. Check to see if this + * item is in the wait list and if necessary update + * its io_tag, since it may be a copy of this issue io item. + */ + + + if (wait_items) { + + for (j=0; j < wait_items; j++) { + + wait_io = wait_io_list[j]; + + if ((wait_io->buf == io->buf) && + (wait_io->lba == io->lba) && + (wait_io->nblocks == io->nblocks)) { + + wait_io->tag = io->tag; + + break; + + } + + } /* inner for */ + + } + + } + + } /* for */ + + + } + + + /* + * TODO: ?? Look for a common routine that could do logic required of both both pending and + * waiting list. Thus this routine could be invoked twice here. Once for pending and then again + * for waiting list. + */ + + + if (pending_items) { + + /* + * Caller is requesting I/Os to issued. 
+ */ + + + if (pending_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"pending_io_list array is a null pointer for chunk_id = %d and pending_items = %d", + chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + + for (i=0; i< pending_items;i++) { + + io = pending_io_list[i]; + + + if (io == NULL) { + + continue; + + } + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.fail_errno = EINVAL; + io->stat.status = CBLK_ARW_STATUS_INVALID; + + continue; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.fail_errno = EINVAL; + io->stat.status = CBLK_ARW_STATUS_INVALID; + continue; + } + + if (io->stat.status != CBLK_ARW_STATUS_PENDING) { + + + /* + * Is not an error, since a caller may pass the same + * list over and over again. In that case we should + * only process commands that are pending. + */ + + continue; + + } + + /* + * Process this I/O request + */ + + + + io_flags = 0; + + if (io->flags & CBLK_IO_USER_TAG) { + + io_flags |= CBLK_ARESULT_USER_TAG; + + } + + rc = _cblk_aresult(chunk,&(io->tag),&status,io_flags,harvest); + + if (harvest) { + harvest=0; + } + + if (rc < 0) { + + CBLK_TRACE_LOG_FILE(1,"Request failed for chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + + // TODO:?? Should we filter on EINVAL and uses a different status? 
+ + if (errno == EAGAIN) { + + io->stat.fail_errno = 0; + io->stat.blocks_transferred = rc; + io->stat.status = CBLK_ARW_STATUS_PENDING; + + } else { + io->stat.fail_errno = errno; + io->stat.blocks_transferred = 0; + io->stat.status = CBLK_ARW_STATUS_FAIL; + } + + } else if (rc) { + + CBLK_TRACE_LOG_FILE(9,"Request chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + io->stat.fail_errno = 0; + io->stat.blocks_transferred = rc; + io->stat.status = CBLK_ARW_STATUS_SUCCESS; + } + + + + if (rc) { + + /* + * For a non-zero status update wait list + * if this request is on that list. + */ + + wait_item_found = FALSE; + + + if (wait_items) { + + /* + * If there are wait_items, then see + * if this item is one of them. If so + * update the associated wait_item. + */ + + for (j=0; j < wait_items; j++) { + + wait_io = wait_io_list[j]; + + if ((wait_io->buf == io->buf) && + (wait_io->lba == io->lba) && + (wait_io->nblocks == io->nblocks) && + (wait_io->tag == io->tag)) { + + wait_io->stat.fail_errno = io->stat.fail_errno; + wait_io->stat.blocks_transferred = rc; + wait_io->stat.status = io->stat.status; + + wait_item_found = TRUE; + + break; + + } + + } /* inner for */ + + } + + if (!wait_item_found) { + + if ((complete_io) && + (*completion_items stat.fail_errno = io->stat.fail_errno; + complete_io->stat.blocks_transferred = rc; + complete_io->stat.status = io->stat.status; + complete_io->tag = io->tag; + complete_io++; + (*completion_items)++; + } else { + + + CBLK_TRACE_LOG_FILE(1,"Request chunk_id = %d and index = %d no complete_io entry found", + chunk_id,i); + } + } + + } + + + } /* for */ + + } + + if (wait_items) { + + /* + * Caller is requesting to wait for these I/O + */ + + + if (wait_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"wait_io_list array is a null pointer for chunk_id = %d and pending_items = %d", + chunk_id,wait_items); + errno = EINVAL; + + return -1; + + } + + clock_gettime(CLOCK_MONOTONIC,&start_time); + 
clock_gettime(CLOCK_MONOTONIC,&last_time); + + + // TODO: ?? Add macros to replace this. + + uelapsed_time = ((last_time.tv_sec - start_time.tv_sec) * 1000000) + ((last_time.tv_nsec - start_time.tv_nsec)/1000); + + + + while ((timeout == 0) || + (uelapsed_time < timeout)) { + + /* + * If no time out is specified then only go thru this loop + * once. Otherwise continue thru this loop until + * our time has elapsed. + */ + + cmd_not_complete = FALSE; + + + for (i=0; i< wait_items;i++) { + + io = wait_io_list[i]; + + + if (io == NULL) { + + continue; + + } + + if (io->stat.status != CBLK_ARW_STATUS_PENDING) { + + /* + * This I/O request has already completed. + * continue to the next wait I/O request. + */ + + continue; + } + + + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.fail_errno = EINVAL; + io->stat.status = CBLK_ARW_STATUS_INVALID; + + continue; + + } + + + /* + * Process this I/O request, always wait for request. + */ + + + // TODO:?? Need mechanism to specify time-out + + io_flags = 0; + + + if (timeout == 0) { + + io_flags |= CBLK_ARESULT_BLOCKING; + } + + if (io->flags & CBLK_IO_USER_TAG) { + + io_flags |= CBLK_ARESULT_USER_TAG; + + } + + rc = cblk_aresult(chunk_id,&(io->tag),&status,io_flags); + + if (rc < 0) { + + CBLK_TRACE_LOG_FILE(1,"Request failed for chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + + // TODO:?? Should we filter on EINVAL and uses a different status? + io->stat.fail_errno = errno; + io->stat.blocks_transferred = 0; + io->stat.status = CBLK_ARW_STATUS_FAIL; + + } else if (rc) { + + + io->stat.fail_errno = 0; + io->stat.blocks_transferred = rc; + io->stat.status = CBLK_ARW_STATUS_SUCCESS; + } else { + + /* + * This command has not completed yet. + */ + + cmd_not_complete = TRUE; + } + + if (rc) { + + /* + * For a non-zero status update + * associated issue/pending list. 
+ */ + + item_found = FALSE; + + + if (issue_items) { + + /* + * If there are wait_items, then see + * if this item is one of them. If so + * update the associated wait_item. + */ + + for (j=0; j < issue_items; j++) { + + tmp_io = issue_io_list[j]; + + if ((tmp_io->buf == io->buf) && + (tmp_io->lba == io->lba) && + (tmp_io->nblocks == io->nblocks) && + (tmp_io->tag == io->tag)) { + + tmp_io->stat.fail_errno = io->stat.fail_errno; + tmp_io->stat.blocks_transferred = rc; + tmp_io->stat.status = io->stat.status; + + item_found = TRUE; + + break; + + } + + } /* inner for */ + + } + + if ((pending_items) && (!item_found)) { + + /* + * If there are wait_items, then see + * if this item is one of them. If so + * update the associated wait_item. + */ + + for (j=0; j < pending_items; j++) { + + tmp_io = pending_io_list[j]; + + if ((tmp_io->buf == io->buf) && + (tmp_io->lba == io->lba) && + (tmp_io->nblocks == io->nblocks) && + (tmp_io->tag == io->tag)) { + + tmp_io->stat.fail_errno = io->stat.fail_errno; + tmp_io->stat.blocks_transferred = rc; + tmp_io->stat.status = io->stat.status; + + item_found = TRUE; + + break; + + } + + } /* inner for */ + + } + + } + + + } /* for */ + + if (timeout == 0) { + + /* + * Only go thru the while loop one time if + * no time out is specified, since we will block until + * command completion. + */ + + break; + } + + if (cmd_not_complete) { + + /* + * Sleep for one microsecond + */ + + usleep(1); + } else { + + /* + * All I/O has completed. So exit this loop. + */ + + break; + } + + clock_gettime(CLOCK_MONOTONIC,&last_time); + + + // TODO: ?? Add macros to replace this. 
+ uelapsed_time = ((last_time.tv_sec - start_time.tv_sec) * 1000000) + ((last_time.tv_nsec - start_time.tv_nsec)/1000); + + + + } /* while */ + + } + + rc = 0; + + + if ((timeout) && (uelapsed_time >= timeout)) { + + errno = ETIMEDOUT; + rc = -1; + } + + CBLK_TRACE_LOG_FILE(5,"rc = %d, errno = %d",rc,errno); + return rc; +} + + + + + + + +/* + * NAME: cblk_clone_after_fork + * + * FUNCTION: clone a (virtual lun) chunk with an existing (original) + * chunk. This is useful if a process is forked and the + * child wants to read data from the parents chunk. + * + * + * INPUTS: + * chunk_id - Chunk id to be cloned. + * mode - Access mode for the chunk + * (O_RDONLY, O_WRONLY, O_RDWR) + * flags - Flags on this request. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ +int cblk_clone_after_fork(chunk_id_t chunk_id, int mode, int flags) +{ + + int rc = 0; + cflsh_chunk_t *chunk; + + errno = 0; + + + + CBLK_TRACE_LOG_FILE(5,"orig_chunk_id = %d mode = 0x%x, flags = 0x%x", + chunk_id,mode,flags); + + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + + CFLASH_BLOCK_LOCK(chunk->lock); + + /* + * Since we forked the child, get its new PID + * and clear its old process name if known. 
+ */ + + cflsh_blk.caller_pid = getpid(); + + + if (cflsh_blk.process_name) { + + free(cflsh_blk.process_name); + } + + + + /* + * Since we forked the child, if we have tracing turned + * on for a trace file per process, then we need to + * open the new file for this child's PID. The routine + * cblk_setup_trace_files will handle the situation + * where multiple chunks are cloned and using the same + * new trace file. + */ + + cblk_setup_trace_files(TRUE); + + + if (chunk->num_active_cmds > 0) { + + + /* + * If this chunk still has active + * commands then fails this + * close. + */ + + CBLK_TRACE_LOG_FILE(1,"cloning failed because chunk in use, rchunk_id = %d, path = %s, num_active_cmds %d", + chunk_id,chunk->dev_name,chunk->num_active_cmds); + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EBUSY; + return -1; + } + + /* + * Ensure child does not have more read/write access + * than the parent. + */ + + switch (mode & O_ACCMODE) { + case O_RDONLY: + + if (chunk->flags & CFLSH_CHNK_RD_AC) { + + /* + * Clear write access if it was previously + * enabled. + */ + + chunk->flags &= ~CFLSH_CHNK_WR_AC; + } else { + + CBLK_TRACE_LOG_FILE(1,"cloning failed because parent does not have read access, rchunk_id = %d, path = %s", + chunk_id,chunk->dev_name); + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + break; + case O_WRONLY: + if (chunk->flags & CFLSH_CHNK_WR_AC) { + /* + * Clear read access if it was previously + * enabled. 
+ */ + chunk->flags &= ~CFLSH_CHNK_RD_AC; + } else { + + CBLK_TRACE_LOG_FILE(1,"cloning failed because parent does not have write access, rchunk_id = %d, path = %s", + chunk_id,chunk->dev_name); + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + + } + + break; + case O_RDWR: + if (!((chunk->flags & CFLSH_CHNK_WR_AC) && + (chunk->flags & CFLSH_CHNK_RD_AC))) { + /* + * If caller is specifying read/write + * access, but this chunk did not have that + * permission then fail this + * request. + */ + CBLK_TRACE_LOG_FILE(1,"cloning failed because parent does not have both read and write access, rchunk_id = %d, path = %s", + chunk_id,chunk->dev_name); + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + break; + default: + + + CBLK_TRACE_LOG_FILE(1,"Invalid access mode %d",mode); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + + + + + } + + + if (chunk->path[chunk->cur_path] == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Null path"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EIO; + return -1; + + + + } + + if (chunk->path[chunk->cur_path]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null afu"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EIO; + return -1; + } + + + rc = cblk_mc_clone(chunk,mode,flags); + + if (!rc) { + + /* + * If this worked, then reinitialize RRQ and toggle. + */ + + + // TODO: ?? 
this assumes rrq size matches this chunk's num_cmds + bzero((void *)chunk->path[chunk->cur_path]->afu->p_hrrq_start , + (sizeof(*(chunk->path[chunk->cur_path]->afu->p_hrrq_start)) * chunk->path[chunk->cur_path]->afu->num_rrqs)); + + chunk->path[chunk->cur_path]->afu->p_hrrq_curr = chunk->path[chunk->cur_path]->afu->p_hrrq_start; + + /* + * Since the host RRQ is + * bzeroed. The toggle bit in the host + * RRQ that initially indicates we + * have a new RRQ will need to be 1. + */ + + + chunk->path[chunk->cur_path]->afu->toggle = 1; + + chunk->path[chunk->cur_path]->afu->num_issued_cmds = 0; + + chunk->cmd_curr = chunk->cmd_start; + + } + + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d",rc,errno); + + return rc; +} + + +/* + * NAME: cblk_get_stats + * + * FUNCTION: Return statistics for this chunk. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * tag - Pointer to stats returned + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cblk_get_stats(chunk_id_t chunk_id, chunk_stats_t *stats, int flags) +{ + + int rc = 0; + cflsh_chunk_t *chunk; + + + + errno = 0; + + CBLK_TRACE_LOG_FILE(6,"flags = 0x%x",flags); + + + if ((chunk_id <= NULL_CHUNK_ID) || + (chunk_id >= cflsh_blk.next_chunk_id)) { + + errno = EINVAL; + return -1; + } + + + if (stats == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null stats passed"); + + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_RD_RWLOCK(cflsh_blk.global_lock); + chunk = CBLK_GET_CHUNK_HASH(chunk_id,TRUE); + + if (chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"chunk not found, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. 
+ */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk, chunk_id = %d", + chunk_id); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + errno = EINVAL; + return -1; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + + /* + * Copy stats back to caller + */ + + bcopy((void *)&(chunk->stats),(void *)stats, sizeof(chunk->stats)); + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d ",rc,errno); + return rc; +} + diff --git a/src/block/cflash_block_aix.h b/src/block/cflash_block_aix.h new file mode 100644 index 00000000..f0a23e67 --- /dev/null +++ b/src/block/cflash_block_aix.h @@ -0,0 +1,179 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_internal.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _H_CFLASH_BLOCK_AIX +#define _H_CFLASH_BLOCK_AIX + +#define CFLASH_BLOCK_EXCEP_QDEPTH 32768 /* Number of exceptions--including */ + /* page faults--for this context, */ + /* before we hold off all interrupts*/ + /* for this context */ + +/*****************************************************************************/ +/* Error logging information */ +/*****************************************************************************/ + + +#define CFLASH_BLOCK_LOG_DATA_LEN 224 + +struct cflash_block_log { + + /*---------------start of first line of detail data----------------*/ + + uint app_indicator; /* Indicates the application/library */ + /* associated with this. */ +#define CFLSH_BLK_LIB 0x10 /* Indicates this library will be used */ + /* for this error log. */ + uint errnum; /* 4 bytes */ + uint64_t rc; /* Return code from the failed calls */ + uchar type; /* Type of data in detail data */ + uchar afu_type; /* Type of AFU. */ + ushort version; /* Version of detail data */ +#define CFLSH_BLK_LIB_VER_0 0x0 /* Version 0 */ + + int cflsh_blk_flags; /* Global library flags */ + int num_active_chunks; /* Number of active chunks */ + int num_max_active_chunks; /* Maximum number of active */ + /* chunks seen at a time. 
*/ + +/*---------------start of second line of detail data----------------*/ + + int chunk_flags; /* Flags for this chunk */ + uint32_t num_active_cmds; /* Number of active commands */ + int num_cmds; /* Number of commands in */ + /* in command queue */ + int num_paths; /* Number of paths */ + int path_flags; /* Flags for this path */ + int path_index; /* Path to issue command */ + int afu_flags; /* Flags for this AFU */ + int num_rrqs; /* Number of RRQ elements */ + + +/*---------------start of third line of detail data----------------*/ + + uint32_t num_act_reads; /* Current number of reads active */ + /* via cblk_read interface */ + uint32_t num_act_writes; /* Current number of writes active */ + /* via cblk_write interface */ + uint32_t num_act_areads; /* Current number of async reads */ + /* active via cblk_aread interface */ + uint32_t num_act_awrites; /* Current number of async writes */ + /* active via cblk_awrite interface */ + uint32_t max_num_act_writes; /* High water mark on the maximum */ + /* number of writes active at once */ + uint32_t max_num_act_reads; /* High water mark on the maximum */ + /* number of reads active at once */ + uint32_t max_num_act_awrites; /* High water mark on the maximum */ + /* number of asyync writes active */ + /* at once. */ + uint32_t max_num_act_areads; /* High water mark on the maximum */ + /* number of asyync reads active */ + /* at once. 
*/ +/*---------------start of fourth line of detail data----------------*/ + + uint64_t num_cc_errors; /* Total number of all check */ + /* condition responses seen */ + uint64_t num_afu_errors; /* Total number of all AFU error */ + /* responses seen */ + uint64_t num_fc_errors; /* Total number of all FC */ + /* error responses seen */ + uint64_t num_errors; /* Total number of all error */ + /* responses seen */ + + + +/*---------------start of fifth line of detail data----------------*/ + + uint64_t num_reset_contexts; /* Total number of reset contexts */ + /* done */ + + uint64_t num_reset_contxt_fails;/* Total number of reset context */ + /* failures */ + uint64_t num_path_fail_overs; /* Total number of times a request */ + /* has failed over to another path. */ + + uint32_t block_size; /* Block size of this chunk. */ + uint32_t primary_path_id; /* Primary path id */ + + +/*---------------start of sixth line of detail data----------------*/ + + uint64_t num_no_cmd_room; /* Total number of times we didm't */ + /* have room to issue a command to */ + /* the AFU. */ + uint64_t num_no_cmds_free; /* Total number of times we didm't */ + /* have free command available */ + uint64_t num_no_cmds_free_fail; /* Total number of times we didn't */ + /* have free command available and */ + /* failed a request because of this */ + uint64_t num_fail_timeouts; /* Total number of all commmand */ + /* time-outs that led to a command */ + /* failure. */ + +/*---------------start of seventh line of detail data----------------*/ + + uint64_t num_capi_adap_chck_err;/* Total number of all check */ + /* adapter errors. */ + + + uint64_t num_capi_adap_resets; /* Total number of all adapter */ + /* reset errors. */ + + uint64_t num_capi_data_st_errs; /* Total number of all */ + + /* CAPI data storage event */ + /* responses seen. 
*/ + + uint64_t num_capi_afu_errors; /* Total number of all */ + /* CAPI error responses seen */ + +/*---------------start of eighth line of detail data----------------*/ + + scsi_cdb_t failed_cdb; /* CDB that failed */ + uint64_t data_ea; /* Effective address of data buffer */ + uint64_t data_len; /* Length of data for this CDB */ + + + + +/*---------------start of nineth line of detail data----------------*/ + + + uint64_t lba; /* Starting LBA */ + uint64_t mmio; /* Start address of MMIO space */ + uint64_t hrrq_start; /* Start address of HRRQ */ + uint64_t cmd_start; /* Start address of commands */ + + + +/*---------------start of tentth line of detail data until end ---------*/ + + uchar data[CFLASH_BLOCK_LOG_DATA_LEN]; + + +}; + +#endif /* _H_CFLASH_BLOCK_AIX */ + diff --git a/src/block/cflash_block_inline.h b/src/block/cflash_block_inline.h new file mode 100644 index 00000000..0b62212b --- /dev/null +++ b/src/block/cflash_block_inline.h @@ -0,0 +1,3985 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_inline.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +//#define CFLSH_BLK_FILENUM 0x0300 +#ifndef _H_CFLASH_BLOCK_INLINE +#define _H_CFLASH_BLOCK_INLINE + + +#include "cflash_block_internal.h" +#include "cflash_block_protos.h" + + + + + + +/* + * NAME: CBLK_SETUP_BAD_MMIO_SIGNAL + * + * FUNCTION: Sets up a signal handler to catch + * MMIO failure due to adapter reset + * or uncorrectable error. + * + * NOTES: This routine assumes the caller is holding + * the chunk lock. + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * upper_offset - Upper offset of MMIO. + * + * RETURNS: + * + * 0 - Good completion, otherwise error + * + */ + +#ifdef DEBUG +static inline int CBLK_SETUP_BAD_MMIO_SIGNAL(cflsh_chunk_t *chunk, + int path_index, + uint64_t upper_offset) +{ + int rc = 0; + struct sigaction action; + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + return -1; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + return -1; + + } + + bzero((void *)&action,sizeof(action)); + + action.sa_sigaction = cblk_chunk_sigsev_handler; + action.sa_flags = SA_SIGINFO; + + if (sigaction(SIGSEGV, &action,&(chunk->path[path_index]->old_action))) { + + CBLK_TRACE_LOG_FILE(1,"Failed to set up SIGSEGV handler with errno = %d\n", + errno); + + return rc; + } + + + chunk->path[path_index]->flags |= CFLSH_CHNK_SIGH; + + chunk->path[path_index]->upper_mmio_addr = chunk->path[path_index]->afu->mmio + upper_offset; + + + + if (setjmp(chunk->path[path_index]->jmp_mmio)) { + + /* + * We only get here if a longjmp occurred, + * which indicates we failed doing the MMIO + * operation. 
+ */ + rc = TRUE; + if (sigaction(SIGSEGV, &(chunk->path[path_index]->old_action),NULL)) { + + CBLK_TRACE_LOG_FILE(1,"Failed to restore SIGSEGV handler with errno = %d\n", + errno); + } + chunk->path[path_index]->flags &= ~CFLSH_CHNK_SIGH; + CBLK_TRACE_LOG_FILE(1,"MMIO failure with upper_offset = 0x%llx",upper_offset); + } + + + return rc; +} +#else +static inline int CBLK_SETUP_BAD_MMIO_SIGNAL(cflsh_chunk_t *chunk, + int path_index, + uint64_t upper_offset) +{ + + return 0; +} +#endif /* !DEBUG */ + +/* + * NAME: CBLK_CLEANUP_BAD_MMIO_SIGNAL + * + * FUNCTION: Removes the signal handler to catch + * MMIO failures ad restores the previous + * signal handler.. + * + * NOTES: This routine assumes the caller is holding + * the chunk lock. + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * + * RETURNS: + * 0 - Good completion, otherwise error. + * + * + */ + +#ifdef DEBUG +static inline void CBLK_CLEANUP_BAD_MMIO_SIGNAL(cflsh_chunk_t *chunk, + int path_index) +{ + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + return; + + } + + if (sigaction(SIGSEGV, &(chunk->path[path_index]->old_action),NULL)) { + + CBLK_TRACE_LOG_FILE(1,"Failed to restore SIGSEGV handler with errno = %d\n", + errno); + } + + chunk->path[path_index]->flags &= ~CFLSH_CHNK_SIGH; + return; +} +#else +static inline void CBLK_CLEANUP_BAD_MMIO_SIGNAL(cflsh_chunk_t *chunk, + int path_index) +{ + return; +} +#endif /* !DEBUG */ + + + +/* + * NAME: CBLK_INVALD_CHUNK_PATH_AFU + * + * FUNCTION: Perform + * MMIO failures ad restores the previous + * signal handler.. + * + * NOTES: This routine assumes the caller is holding + * the chunk lock. + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ + + +static inline int CBLK_INVALID_CHUNK_PATH_AFU(cflsh_chunk_t *chunk, + int path_index, + const char *fcn_name) +{ + int rc = 0; + + if (chunk == NULL) { + + CBLK_TRACE_LOG_FILE(1,"chunk is null for fcn = %s",fcn_name); + + return -1; + } + + if ( (ulong)chunk & CHUNK_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"chunk has invalid address for fcn = %s",fcn_name); + + return -1; + + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk eyecatcher"); + + return -1; + } + + + if (path_index < 0) { + + CBLK_TRACE_LOG_FILE(1,"path_index is invald for fcn = %s",fcn_name); + return -1; + } + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null for fcn = %s",fcn_name); + + errno = EINVAL; + return -1; + + } + + if ( (ulong)(chunk->path[path_index]) & PATH_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"path has invalid address for fcn = %s",fcn_name); + + return -1; + + } + + + if (CFLSH_EYECATCH_PATH(chunk->path[path_index])) { + + CBLK_TRACE_LOG_FILE(1,"Invalid path eyecatcher for fcn = %s",fcn_name); + + return -1; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null for fcn = %s",fcn_name); + + return (-1); + + } + + if ( (ulong)(chunk->path[path_index]->afu) & AFU_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"AFU has invalid addressfor fcn = %s ",fcn_name); + + return -1; + + } + + if (CFLSH_EYECATCH_AFU(chunk->path[path_index]->afu)) { + + CBLK_TRACE_LOG_FILE(1,"Invalid AFU eyecatcher for %s",fcn_name); + + return -1; + } + return rc; +} + +/************************************************************************/ +/* Adapter Specific Inline Functions */ +/************************************************************************/ + + +/* + * NAME: CBLK_GET_NUM_INTERRUPTS + * + * FUNCTION: This routine is called whenever one needs to issue + * an IOARCB to see if there is room for another command + * to be accepted by the AFU from this context. 
+ * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The number of commands that can currently be issued to the AFU + * for this context. + * + * + */ + +static inline int CBLK_GET_NUM_INTERRUPTS(cflsh_chunk_t *chunk, + int path_index) +{ + int rc; + + if (chunk == NULL) { + + errno = EINVAL; + + return 0; + } + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->fcn_ptrs.get_num_interrupts == NULL) { + + errno = EINVAL; + + return 0; + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then return 0 (no command room) + * from this routine + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return 0; + + } + + rc = chunk->path[path_index]->fcn_ptrs.get_num_interrupts(chunk,path_index); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + + return rc; +} + +/* + * NAME: CBLK_GET_CMD_ROOM + * + * FUNCTION: This routine is called whenever one needs to issue + * an IOARCB to see if there is room for another command + * to be accepted by the AFU from this context. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The number of commands that can currently be issued to the AFU + * for this context. 
+ * + * + */ + +static inline uint64_t CBLK_GET_CMD_ROOM(cflsh_chunk_t *chunk, + int path_index) +{ + uint64_t rc; + + if (chunk == NULL) { + + errno = EINVAL; + + return 0; + } + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->fcn_ptrs.get_cmd_room == NULL) { + + errno = EINVAL; + + return 0; + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then return 0 (no command room) + * from this routine + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return 0; + + } + + rc = chunk->path[path_index]->fcn_ptrs.get_cmd_room(chunk,path_index); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + if (rc == 0xffffffffffffffffLL) { + + CBLK_TRACE_LOG_FILE(1,"Potential UE encountered for command room\n"); + + + cblk_check_os_adap_err(chunk,path_index); + + /* + * Tell caller there is no command room + */ + + rc = 0; + } + + + return rc; +} + +/* + * NAME: CBLK_GET_INTRPT_STATUS + * + * FUNCTION: This routine is called whenever one needs to read + * the status of the adapter. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The interrupt status register. 
+ * + * + */ + +static inline uint64_t CBLK_GET_INTRPT_STATUS(cflsh_chunk_t *chunk, int path_index) +{ + uint64_t rc; + + if (chunk == NULL) { + + errno = EINVAL; + + return 0; + } + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return 0; + + } + + if (chunk->path[path_index]->fcn_ptrs.get_intrpt_status == NULL) { + + errno = EINVAL; + + return 0; + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then return 0 + * from this routine + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return 0; + + } + + rc = chunk->path[path_index]->fcn_ptrs.get_intrpt_status(chunk,path_index); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + if (rc == 0xffffffffffffffffLL) { + + CBLK_TRACE_LOG_FILE(1,"Potential UE encountered for interrupt status\n"); + + cblk_check_os_adap_err(chunk,path_index); + } + + + return rc; +} + +/* + * NAME: CBLK_INC_RRQ + * + * FUNCTION: This routine is called whenever an RRQ has been processed and + * the path lock is needed. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ + +static inline void CBLK_INC_RRQ(cflsh_chunk_t *chunk, int path_index) +{ + + + + if (chunk == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + if (chunk->path[path_index]->fcn_ptrs.inc_rrq == NULL) { + + errno = EINVAL; + + return; + } + + + + chunk->path[path_index]->fcn_ptrs.inc_rrq(chunk,path_index); + + + + return ; +} + +/* + * NAME: CBLK_INC_RRQ_LOCK + * + * FUNCTION: This routine is called whenever an RRQ has been processed. 
+ * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ + +static inline void CBLK_INC_RRQ_LOCK(cflsh_chunk_t *chunk, int path_index) +{ + + + + if (chunk == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return; + + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then return + * from this routine + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return; + + } + + CBLK_INC_RRQ(chunk,path_index); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return; +} + +/* + * NAME: CBLK_GET_CMD_DATA_LENGTH + * + * FUNCTION: Returns the data length associated with a command + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ +static inline uint32_t CBLK_GET_CMD_DATA_LENGTH(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.get_cmd_data_length == NULL) { + + errno = EINVAL; + + return 0; + } + + return (chunk->path[cmdi->path_index]->fcn_ptrs.get_cmd_data_length(chunk,cmd)); +} + +/* + * NAME: CBLK_GET_CMD_CDB + * + * FUNCTION: Returns the offset of the CDB in the command. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: + * + * + */ +static inline scsi_cdb_t * CBLK_GET_CMD_CDB(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return NULL; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return NULL; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return NULL; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.get_cmd_cdb == NULL) { + + errno = EINVAL; + + return NULL; + } + + return (chunk->path[cmdi->path_index]->fcn_ptrs.get_cmd_cdb(chunk,cmd)); +} + +/* + * NAME: CBLK_GET_CMD_RSP + * + * FUNCTION: Returns the offset of the command this response is for. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: + * + * + */ +static inline cflsh_cmd_mgm_t *CBLK_GET_CMD_RSP(cflsh_chunk_t *chunk, int path_index) +{ + + cflsh_cmd_mgm_t *cmd = NULL; + + if (chunk == NULL) { + + errno = EINVAL; + + return NULL; + } + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return NULL; + + } + + if (chunk->path[path_index]->fcn_ptrs.get_cmd_rsp == NULL) { + + errno = EINVAL; + + return NULL; + } + + + + + cmd = chunk->path[path_index]->fcn_ptrs.get_cmd_rsp(chunk,path_index); + + + + + return cmd; + +} + +/* + * NAME: CBLK_ADAP_SETUP + * + * FUNCTION: Builds and adapter specific command/request. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ +static inline int CBLK_ADAP_SETUP(cflsh_chunk_t *chunk, int path_index) +{ + int rc = 0; + + if (chunk == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[path_index]->fcn_ptrs.adap_setup == NULL) { + + errno = EINVAL; + + return -1; + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then fail + * from this routine + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return -1; + + } + + rc = chunk->path[path_index]->fcn_ptrs.adap_setup(chunk,path_index); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return rc; +} + + +/* + * NAME: CBLK_BUILD_ADAP_CMD + * + * FUNCTION: Builds and adapter specific command/request. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ +static inline int CBLK_BUILD_ADAP_CMD(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd, + void *buf, size_t buf_len, int flags) +{ + cflsh_cmd_info_t *cmdi; + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.build_adap_cmd == NULL) { + + errno = EINVAL; + + return -1; + } + + return (chunk->path[cmdi->path_index]->fcn_ptrs.build_adap_cmd(chunk,cmdi->path_index,cmd,buf,buf_len,flags)); +} + +/* + * NAME: CBLK_UPDATE_PATH_ADAP_CMD + * + * FUNCTION: Builds and adapter specific command/request. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ +static inline int CBLK_UPDATE_PATH_ADAP_CMD(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd, + int flags) +{ + cflsh_cmd_info_t *cmdi; + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.update_adap_cmd == NULL) { + + errno = EINVAL; + + return -1; + } + + return (chunk->path[cmdi->path_index]->fcn_ptrs.update_adap_cmd(chunk,cmdi->path_index,cmd,flags)); +} + +/* + * NAME: CBLK_ISSUE_ADAP_CMD + * + * FUNCTION: Issues a command to the adapter specific command/request + * to the adapter. The first implementation will issue IOARCBs. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ +static inline int CBLK_ISSUE_ADAP_CMD(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.issue_adap_cmd == NULL) { + + errno = EINVAL; + + return -1; + } + + + return (chunk->path[cmdi->path_index]->fcn_ptrs.issue_adap_cmd(chunk,cmdi->path_index,cmd)); + +} + +/* + * NAME: CBLK_COMPLETE_STATUS_ADAP_CMD + * + * FUNCTION: Indicates at high level of command completed with error or not. + * + * + * + * RETURNS: None + * + * + */ +static inline int CBLK_COMPLETE_STATUS_ADAP_CMD(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.complete_status_adap_cmd == NULL) { + + errno = EINVAL; + + return -1; + } + return (chunk->path[cmdi->path_index]->fcn_ptrs.complete_status_adap_cmd(chunk,cmd)); + +} + +/* + * NAME: CBLK_INIT_ADAP_CMD + * + * FUNCTION: Initialize command area so that it can be retried. 
+ * + * + * + * RETURNS: None + * + * + */ +static inline void CBLK_INIT_ADAP_CMD(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + + if (chunk->path[cmdi->path_index]->fcn_ptrs.init_adap_cmd == NULL) { + + errno = EINVAL; + + return; + } + chunk->path[cmdi->path_index]->fcn_ptrs.init_adap_cmd(chunk,cmd); + + return; + +} + +/* + * NAME: CBLK_INIT_ADAP_CMD_RESP + * + * FUNCTION: Initialize command's response area so that it can be retried. + * + * + * + * RETURNS: None + * + * + */ +static inline void CBLK_INIT_ADAP_CMD_RESP(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd) +{ + cflsh_cmd_info_t *cmdi; + + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + + if (chunk->path[cmdi->path_index]->fcn_ptrs.init_adap_cmd_resp == NULL) { + + errno = EINVAL; + + return; + } + chunk->path[cmdi->path_index]->fcn_ptrs.init_adap_cmd_resp(chunk,cmd); + + return; + +} + +/* + * NAME: CBLK_COPY_ADAP_CMD_RESP + * + * FUNCTION: Copy command's response area to specified buffer. 
+ * + * + * + * RETURNS: None + * + * + */ +static inline void CBLK_COPY_ADAP_CMD_RESP(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t *cmd, + void *buffer, int buffer_size) +{ + cflsh_cmd_info_t *cmdi; + + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + + if (chunk->path[cmdi->path_index]->fcn_ptrs.copy_adap_cmd_resp == NULL) { + + errno = EINVAL; + + return; + } + chunk->path[cmdi->path_index]->fcn_ptrs.copy_adap_cmd_resp(chunk,cmd,buffer,buffer_size); + + return; + +} + +/* + * NAME: CBLK_SET_ADAP_CMD_RSP_STATUS + * + * FUNCTION: Set command's response status for emulation. + * + * + * + * RETURNS: None + * + * + */ +static inline void CBLK_SET_ADAP_CMD_RSP_STATUS(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd,int success) +{ + cflsh_cmd_info_t *cmdi; + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return; + } + + cmdi = &chunk->cmd_info[cmd->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return; + } + + if (chunk->path[cmdi->path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return; + + } + + if (chunk->path[cmdi->path_index]->fcn_ptrs.set_adap_cmd_resp_status == NULL) { + + errno = EINVAL; + + return; + } + + chunk->path[cmdi->path_index]->fcn_ptrs.set_adap_cmd_resp_status(chunk,cmd,success); + + return; + +} + + + +/* + * NAME: CBLK_PROCESS_ADAP_INTRPT + * + * FUNCTION: Process adapter interrupts + * + * + * + * RETURNS: None + * + * + */ +static inline int CBLK_PROCESS_ADAP_INTRPT(cflsh_chunk_t *chunk, + cflsh_cmd_mgm_t **cmd, + int intrpt_num, + int *cmd_complete,size_t *transfer_size) +{ + cflsh_cmd_info_t *cmdi; + int path_index; + int rc = 0; + + + + if ((chunk == NULL) || (cmd == NULL)) { + + errno = EINVAL; + + return -1; + } + + + if 
( (ulong)chunk & CHUNK_BAD_ADDR_MASK ) { + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk address"); + + return -1; + + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk eyecatcher"); + + return -1; + } + + if (*cmd) { + cmdi = &chunk->cmd_info[(*cmd)->index]; + + if (cmdi == NULL) { + + errno = EINVAL; + + return -1; + } + + path_index = cmdi->path_index; + + } else { + + path_index = chunk->cur_path; + } + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path is null"); + + errno = EINVAL; + + return -1; + + } + + if ( (ulong)(chunk->path[path_index]) & PATH_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"path has invalid address"); + + errno = EINVAL; + return -1; + + } + + + if (CFLSH_EYECATCH_PATH(chunk->path[path_index])) { + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk eyecatcher"); + + errno = EINVAL; + return -1; + } + + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu is null"); + + errno = EINVAL; + + return -1; + + } + + if (chunk->path[path_index]->fcn_ptrs.process_adap_intrpt == NULL) { + + errno = EINVAL; + + return -1; + } + + if ( (ulong)(chunk->path[path_index]->afu) & AFU_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"AFU has invalid address"); + + errno = EINVAL; + return -1; + + } + + if (CFLSH_EYECATCH_AFU(chunk->path[path_index]->afu)) { + + CBLK_TRACE_LOG_FILE(1,"Invalid chunk eyecatcher"); + + errno = EINVAL; + return -1; + } + + + + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then fail + * from this routine + */ + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + errno = EINVAL; + + return -1; + + } + + rc = chunk->path[path_index]->fcn_ptrs.process_adap_intrpt(chunk,path_index,cmd,intrpt_num,cmd_complete,transfer_size); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return rc; + +} + +/* + * NAME: 
CBLK_PROCESS_ADAP_CONVERT_INTRPT
 *
 * FUNCTION: Process adapter interrupts, but first converting from
 *           the generic library type to the AFU specific type.
 *           Validates the chunk/path/AFU pointers, then dispatches to
 *           the AFU-specific handler via the path's function pointer
 *           table while holding the AFU share lock.
 *
 * RETURNS:  0 on success, -1 with errno set to EINVAL on any
 *           validation failure, otherwise the AFU handler's rc.
 *
 *
 */
static inline int CBLK_PROCESS_ADAP_CONVERT_INTRPT(cflsh_chunk_t *chunk,
                                                   cflsh_cmd_mgm_t **cmd,
                                                   int intrpt_num,
                                                   int *cmd_complete,size_t *transfer_size)
{
    cflsh_cmd_info_t *cmdi;
    int path_index;
    int rc = 0;


    if ((chunk == NULL) || (cmd == NULL)) {

        errno = EINVAL;

        return -1;
    }

    if (*cmd) {
        /* A specific command was supplied: route via the path it was issued on */
        cmdi = &chunk->cmd_info[(*cmd)->index];

        if (cmdi == NULL) {

            errno = EINVAL;

            return -1;
        }

        path_index = cmdi->path_index;

    } else {

        /* No command supplied: use the chunk's current path */
        path_index = chunk->cur_path;
    }

    if (chunk->path[path_index] == NULL) {

        CBLK_TRACE_LOG_FILE(1,"path is null");

        errno = EINVAL;

        return -1;

    }


    if (chunk->path[path_index]->afu == NULL) {

        CBLK_TRACE_LOG_FILE(1,"afu is null");

        errno = EINVAL;

        return -1;

    }

    if (chunk->path[path_index]->fcn_ptrs.process_adap_convert_intrpt == NULL) {

        errno = EINVAL;

        return -1;
    }




    CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu);

    if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) {

        /*
         * If path is in a halted state then fail
         * from this routine
         */

        CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu);

        errno = EINVAL;

        return -1;

    }

    rc = chunk->path[path_index]->fcn_ptrs.process_adap_convert_intrpt(chunk,path_index,cmd,intrpt_num,cmd_complete,transfer_size);

    CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu);

    return rc;

}

/*
 * NAME: CBLK_PROCESS_ADAP_CMD_ERR
 *
 * FUNCTION: Process adapter error on this command.
 *           Dispatches to the path's process_adap_err handler after
 *           validating the chunk, command, and path pointers.
 *
 * RETURNS:  -1 with errno = EINVAL on validation failure, otherwise
 *           the AFU-specific handler's return code.
 *
 *
 */
static inline int CBLK_PROCESS_ADAP_CMD_ERR(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd)
{
    cflsh_cmd_info_t *cmdi;


    if ((chunk == NULL) || (cmd == NULL)) {

        errno = EINVAL;

        return -1;
    }

    cmdi = &chunk->cmd_info[cmd->index];

    if (cmdi == NULL) {

        errno = EINVAL;

        return -1;
    }

    if (chunk->path[cmdi->path_index] == NULL) {

        CBLK_TRACE_LOG_FILE(1,"path is null");

        errno = EINVAL;

        return -1;

    }

    if (chunk->path[cmdi->path_index]->fcn_ptrs.process_adap_err == NULL) {

        errno = EINVAL;

        return -1;
    }

    return (chunk->path[cmdi->path_index]->fcn_ptrs.process_adap_err(chunk,cmdi->path_index,
                                                                     cmd));

}

/*
 * NAME: CBLK_RESET_ADAP_CONTEXT
 *
 * FUNCTION: This will reset the adapter context so that
 *           any active commands will never be returned to the host.
 *           The AFU is not reset and new requests can be issued.
 *           This routine assumes the caller has the chunk lock.
 *
 * RETURNS:  -1 with errno = EINVAL on validation failure, otherwise
 *           the return code of the AFU's reset_adap_contxt handler.
 *
 *
 */
static inline int CBLK_RESET_ADAP_CONTEXT(cflsh_chunk_t *chunk,int path_index)
{
    int rc = 0;
    cflsh_path_t *tmp_path;


    if (chunk == NULL) {

        errno = EINVAL;

        return -1;
    }

    if (chunk->path[path_index] == NULL) {

        CBLK_TRACE_LOG_FILE(1,"path is null");

        errno = EINVAL;

        return -1;

    }

    if (chunk->path[path_index]->afu == NULL) {

        CBLK_TRACE_LOG_FILE(1,"afu is null");

        errno = EINVAL;

        return -1;

    }

    if (chunk->path[path_index]->fcn_ptrs.reset_adap_contxt == NULL) {

        errno = EINVAL;

        return -1;
    }

    CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu);

    chunk->stats.num_reset_contexts++;

    rc = chunk->path[path_index]->fcn_ptrs.reset_adap_contxt(chunk,path_index);

    if (rc) {

        chunk->stats.num_reset_contxt_fails++;
    }

    /*
     * Update other paths that a context reset was done
     */


    tmp_path = chunk->path[path_index]->afu->head_path;

    while (tmp_path) {


        if (tmp_path != chunk->path[path_index]) {

            /* Atomically flag sibling paths sharing this AFU */
            fetch_and_or(&(tmp_path->flags),CFLSH_PATH_RST);

        }

        tmp_path = tmp_path->next;

    }


    CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu);

    return rc;

}

+/************************************************************************/ +/* End of Adapter Specific Inline Functions */ +/************************************************************************/ + + + +/* + * The code below is mostly (but not completely) adapter and + * OS agnostic. + */ + +/* + * NAME: CBLK_SAVE_IN_CACHE + * + * FUNCTION: Save data in cache tagged by lba. + * + * + * INPUTS: + * chunk - Chunk the read is associated. + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. + * + * + * RETURNS: + * None + * + * + */ + +static inline void CBLK_SAVE_IN_CACHE(cflsh_chunk_t *chunk,void *buf, cflash_offset_t lba, size_t nblocks) +{ + cflsh_cache_line_t *line; + cflash_offset_t cur_lba, end_lba; + void *cur_buf; + uint64_t tag; + unsigned inx; + int lru; + + if (chunk == NULL) { + + return; + } + + if ((chunk->cache == NULL) || + (chunk->cache_size == 0)) { + + return; + } + + if (buf == NULL) { + + return; + + } + + end_lba = lba + nblocks; + + for (cur_lba = lba, cur_buf = buf; cur_lba < end_lba; cur_lba++, cur_buf += CAPI_FLASH_BLOCK_SIZE) { + + inx = CFLSH_BLK_GETINX (cur_lba,chunk->l2setsz); + tag = CFLSH_BLK_GETTAG (cur_lba,chunk->l2setsz); + line = &chunk->cache [inx]; + lru = line->lrulist; + + if ((line) && (line->entry[lru].data)) { + + /* + * Only update cache if data pointer is non-NULL + */ + + line->entry[lru].valid = 1; + line->entry[lru].tag = tag; + + + bcopy(cur_buf, line->entry[lru].data,nblocks * CAPI_FLASH_BLOCK_SIZE); + + line->lrulist = line->entry[lru].next; + } + + } /* for loop */ + + + return; +} + + + + +/* + * NAME: cblk_find_free_cmd + * + * FUNCTION: Finds the first free command. Assumes caller has chunk lock. + * + * + * INPUTS: + * chunk - The chunk to which a free + * command is needed. + * + * cmd - Pointer to found command. + * + * RETURNS: + * 0 - Command was found. 
+ * otherwise - error + * + */ + +static inline int cblk_find_free_cmd(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t **cmd,int flags) +{ + int rc = -1; + int found = FALSE; + int num_in_use = 0; + int pthread_rc; + int loop_cnt = 0; + cflsh_cmd_info_t *cmdi; + + + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk"); + errno = EINVAL; + return -1; + } + + /* + * The head of the free queue will be the command + * on the free list the longest. So use that if + * it is available. + */ + + cmdi = chunk->head_free; + + + + if (cmdi == NULL) { + + chunk->stats.num_no_cmds_free++; + + if (flags & CFLASH_WAIT_FREE_CMD) { + + /* + * We do not have any free commands + * available. So we need to wait for + * a free command. + */ + + + while ((!found) && (loop_cnt < CFLASH_BLOCK_MAX_CMD_WAIT_RETRIES)) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + usleep(CFLASH_BLOCK_FREE_CMD_WAIT_DELAY); + + CFLASH_BLOCK_LOCK(chunk->lock); + + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"Invalid chunk"); + errno = EINVAL; + return -1; + } + + CBLK_TRACE_LOG_FILE(1,"No free command found num_active_cmds = %d, num_in_use = %d", + chunk->num_active_cmds,num_in_use); + + + cmdi = chunk->head_free; + + + if (cmdi == NULL) { + chunk->stats.num_no_cmds_free++; + + rc = -1; + + errno = EBUSY; + + + CBLK_TRACE_LOG_FILE(1,"No free command found num_active_cmds = %d",chunk->num_active_cmds); + + } else { + + found = TRUE; + } + + + } /* while */ + + if (!found) { + + + rc = -1; + + errno = EBUSY; + + + chunk->stats.num_no_cmds_free_fail++; + CBLK_TRACE_LOG_FILE(1,"Giving up No free command found num_active_cmds = %d",chunk->num_active_cmds); + return rc; + } + } else { + + + /* + * The caller does not want us + * wait for a command. So fail now. 
+ */ + rc = -1; + + errno = EBUSY; + + chunk->stats.num_no_cmds_free_fail++; + + CBLK_TRACE_LOG_FILE(1,"No free command found num_active_cmds = %d num_in_use = %d", + chunk->num_active_cmds,num_in_use); + return rc; + } + + } + + if (chunk->cmd_start == NULL) { + CBLK_TRACE_LOG_FILE(1,"cmd_start is NULL"); + errno = EINVAL; + return -1; + } + + *cmd = &(chunk->cmd_start[cmdi->index]); + + if (*cmd == NULL) { + CBLK_TRACE_LOG_FILE(1,"cmd is NULL"); + errno = EINVAL; + return -1; + } + + + + bzero((void *)(*cmd),sizeof (**cmd)); + + pthread_rc = pthread_cond_init(&(cmdi->thread_event),NULL); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(1,"pthread_cond_init failed rc = %d errno= %d", + pthread_rc,errno); + rc = -1; + + return rc; + + } + +#ifndef _COMMON_INTRPT_THREAD + cmdi->async_data = NULL; +#endif /* !_COMMON_INTRPT_THREAD */ + + cmdi->flags = 0; + cmdi->path_index = chunk->cur_path; + cmdi->retry_count = 0; + cmdi->state = 0; + cmdi->transfer_size = 0; + cmdi->transfer_size_bytes = 0; + cmdi->thread_id = 0; + cmdi->buf = NULL; + cmdi->status = 0; + cmdi->nblocks = 0; + cmdi->lba = 0; + cmdi->user_status = NULL; + + + cmdi->cmd_time = time(NULL); + (*cmd)->cmdi = cmdi; + + + /* + * Remove command from free list + */ + + CBLK_DQ_NODE(chunk->head_free,chunk->tail_free,cmdi,free_prev,free_next); + + /* + * place command on active list + */ + + CBLK_Q_NODE_TAIL(chunk->head_act,chunk->tail_act,cmdi,act_prev,act_next); + + + cmdi->in_use = 1; + (*cmd)->index = cmdi->index; + + + return 0; +} + +/* + * NAME: CBLK_FREE_CMD + * + * FUNCTION: Marks command as free and ready for reuse + * + * + * INPUTS: + * chunk - The chunk to which a free + * command is needed. + * + * cmd - Pointer to found command. + * + * RETURNS: + * 0 - Command was found. 
 *         otherwise - error
 *
 */

static inline void CBLK_FREE_CMD(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd)
{
    if ((chunk == NULL) ||
        (cmd == NULL)) {


        return;
    }


    if (cmd->cmdi == NULL) {


        return;
    }

    cmd->cmdi->in_use = 0;

    /*
     * Remove command from active list
     */

    CBLK_DQ_NODE(chunk->head_act,chunk->tail_act,&(chunk->cmd_info[cmd->index]),act_prev,act_next);

    /*
     * Place command on free list
     */

    CBLK_Q_NODE_TAIL(chunk->head_free,chunk->tail_free,&(chunk->cmd_info[cmd->index]),free_prev,free_next);

    return;
}


/*
 * NAME: CBLK_ISSUE_CMD
 *
 * FUNCTION: Issues a command to the adapter.
 *
 *
 * NOTE: This routine assumes the caller is holding chunk->lock.
 *
 * RETURNS: 0 on success, -1 on failure.
 *
 *
 */
static inline int CBLK_ISSUE_CMD(cflsh_chunk_t *chunk,
                                 cflsh_cmd_mgm_t *cmd,void *buf,
                                 cflash_offset_t lba,size_t nblocks, int flags)
{
    int rc = 0;
    cflsh_cmd_info_t *cmdi;
#ifdef _COMMON_INTRPT_THREAD
    int pthread_rc;


    if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) {

        /*
         * Notify common async interrupt thread, that it
         * needs to wait for this command's completion.
         */

        chunk->thread_flags |= CFLSH_CHNK_POLL_INTRPT;

        pthread_rc = pthread_cond_signal(&(chunk->thread_event));

        if (pthread_rc) {

            CBLK_TRACE_LOG_FILE(1,"pthread_cond_signall failed rc = %d,errno = %d",
                                pthread_rc,errno);

            /*
             * If we are unable to signal the interrupt thread,
             * then fail this request now, since we have no guarantee
             * its completion will be handled.
             */
            return -1;


        }
    }

#endif /* COMMON_INTRPT_THREAD */


    /* Ensure all prior stores are visible to the AFU before issuing */
    CBLK_LWSYNC();

    if (CBLK_ISSUE_ADAP_CMD(chunk,cmd)) {

        return -1;
    }



    cmdi = &chunk->cmd_info[cmd->index];

    if (cmdi == NULL) {

        /*
         * NOTE(review): execution falls through after this trace and
         * dereferences cmdi below — confirm cmd_info entries can never
         * be NULL here, or add an error return.
         */
        CBLK_TRACE_LOG_FILE(1,"cmd info is NULL for index = %d,chunk->index = %d",
                            cmd->index,chunk->index);

    }

    cmdi->state = CFLSH_MGM_WAIT_CMP;



    if (!(flags & CFLASH_ISSUE_RETRY)) {
        chunk->num_active_cmds++;

        /*
         * Save off information
         * about this request in the
         * command management structure
         */

        cmdi->buf = buf;

        cmdi->lba = lba;

        cmdi->nblocks = nblocks;

    }

    return rc;
}

/*
 * NAME: CBLK_COMPLETE_CMD
 *
 * FUNCTION: Cleans up and potentially frees a command,
 *           which has had its returned status processed
 *
 *
 * Environment: This routine assumes the chunk mutex
 *              lock is held by the caller.
 *
 * INPUTS:
 *         chunk - Chunk the cmd is associated.
 *
 *         cmd   - Cmd which just completed
 *
 * RETURNS:
 *         0   - Good completion
 *         -1  - Error
 *
 *
 */

static inline int CBLK_COMPLETE_CMD(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd, size_t *transfer_size)
{

    int rc = 0;
    cflsh_cmd_info_t *cmdi;



    if (transfer_size == NULL) {

        return (-1);
    }

    if (cmd == NULL) {

        return (-1);
    }


    cmdi = &chunk->cmd_info[cmd->index];

    if (cmdi == NULL) {

        return (-1);
    }

    if (cmdi->in_use == 0) {

        /* Already completed/freed: report zero transfer, no error */
        *transfer_size = 0;

        return (rc);

    }


    *transfer_size = cmdi->transfer_size;


    if (cmdi->status) {

        errno = cmdi->status;

        rc = -1;
    }


    /*
     * This command completed,
     * clean it up.
     */

    if ((!(cmdi->flags & CFLSH_ASYNC_IO)) ||
        (cmdi->flags & CFLSH_CMD_INFO_USTAT)) {

        /*
         * For async I/O that are not associated
         * with user specified status areas don't mark the
         * command as available yet.
Instead + * let the caller do this via cblk_aresult + */ + chunk->num_active_cmds--; + + if (cmdi->flags & CFLSH_ASYNC_IO) { + + if (cmdi->flags & CFLSH_MODE_READ) { + + + chunk->stats.num_blocks_read += cmdi->transfer_size; + if (chunk->stats.num_act_areads) { + chunk->stats.num_act_areads--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x, chunk->index = %d", + cmdi->flags,chunk->index); + } + + + } else if (cmdi->flags & CFLSH_MODE_WRITE) { + + chunk->stats.num_blocks_written += cmdi->transfer_size; + if (chunk->stats.num_act_awrites) { + chunk->stats.num_act_awrites--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x, chunk->index = %d", + cmdi->flags,chunk->index); + } + } + + if (cmdi->flags & CFLSH_CMD_INFO_USTAT) { + + /* + * If this is command has a user defined status + * area, then update that now before freeing up + * the command. + */ + + + /* + * TODO: ?? Do we need to do anything like lwsync here? + */ + + cmdi->user_status->blocks_transferred = cmdi->transfer_size; + cmdi->user_status->fail_errno = cmdi->status; + + if (cmdi->status == 0) { + cmdi->user_status->status = CBLK_ARW_STATUS_SUCCESS; + } else { + cmdi->user_status->status = CBLK_ARW_STATUS_FAIL; + } + } + + + } else { + if (cmdi->flags & CFLSH_MODE_READ) { + + + chunk->stats.num_blocks_read += cmdi->transfer_size; + if (chunk->stats.num_act_reads) { + chunk->stats.num_act_reads--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! flags = 0x%x, chunk->index = %d", + cmdi->flags,chunk->index); + } + + + } else if (cmdi->flags & CFLSH_MODE_WRITE) { + + chunk->stats.num_blocks_written += cmdi->transfer_size; + if (chunk->stats.num_act_writes) { + chunk->stats.num_act_writes--; + } else { + CBLK_TRACE_LOG_FILE(1,"!! ----- ISSUE PROBLEM ----- !! 
flags = 0x%x, chunk->index = %d", + cmdi->flags,chunk->index); + } + } + + } + + CBLK_FREE_CMD(chunk,cmd); + } + + + CBLK_TRACE_LOG_FILE(8,"cmdi->in_use= 0x%x cmdi->lba = 0x%llx, rc = %d, chunk->index = %d, cmdi->flags = 0x%x", + cmdi->in_use,cmdi->lba,rc,chunk->index,cmdi->flags); + + return (rc); +} + + + +/* + * NAME: CBLK_PROCESS_CMD + * + * FUNCTION: Processes the status of a command + * that the AFU has completed. + * + * Environment: This routine assumes the chunk lock is held + * and afu mutex + * lock was conditionally taken by + * CFLASH_BLOCK_AFU_SHARE_LOCK. + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd which just completed + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * + * + */ + +static inline cflash_cmd_err_t CBLK_PROCESS_CMD(cflsh_chunk_t *chunk,int path_index, cflsh_cmd_mgm_t *cmd) +{ + cflash_cmd_err_t rc = CFLASH_CMD_IGNORE_ERR; + int rc2 = 0; + size_t transfer_size = 0; + int pthread_rc; + + + if (cmd == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd is null"); + + errno = EINVAL; + rc = CFLASH_CMD_FATAL_ERR; + return rc; + + + } + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmdi is null for cmd->index",cmd->index); + + errno = EINVAL; + rc = CFLASH_CMD_FATAL_ERR; + return rc; + + + } + + if (!cmd->cmdi->in_use) { + + /* + * This command has already been + * processed. 
+ */ + CBLK_TRACE_LOG_FILE(1,"!!---CMD WAS ALREADY COMPLETED: cmd->cmdi->lba = 0x%llx, cmd->cmdi->retry_count %d, flags = 0x%x",cmd->cmdi->lba,cmd->cmdi->retry_count,cmd->cmdi->flags); + CBLK_TRACE_LOG_FILE(1,"!!---CMD WAS ALREADY COMPLETED2: cmd = 0x%llx on chunk->index = %d",(uint64_t)cmd,chunk->index); + return CFLASH_CMD_IGNORE_ERR; + } + + CBLK_TRACE_LOG_FILE(8,"cmd = 0x%llx",(uint64_t)cmd); + + if (CBLK_INVALID_CHUNK_PATH_AFU(chunk,path_index,"CBLK_PROCESS_CMD")) { + + CBLK_TRACE_LOG_FILE(1,"chunk/path/AFU is bad"); + + errno = EINVAL; + rc = CFLASH_CMD_FATAL_ERR; + return rc; + + + } + + if (chunk->path[path_index]->afu->num_issued_cmds) { + fetch_and_add(&(chunk->path[cmd->cmdi->path_index]->afu->num_issued_cmds),-1); + + } else { + CBLK_TRACE_LOG_FILE(1,"bad num_issued_cmds field, cmd = 0x%llx,path_index = %d, ", + (uint64_t)cmd,path_index); + } + + if (CBLK_COMPLETE_STATUS_ADAP_CMD(chunk,cmd)) { + + /* + * Command completed with an error + */ + + cmd->cmdi->transfer_size = 0; + + rc = CBLK_PROCESS_ADAP_CMD_ERR(chunk,cmd); + + if ((rc == CFLASH_CMD_RETRY_ERR) || + (rc == CFLASH_CMD_DLY_RETRY_ERR)) { + + CBLK_TRACE_LOG_FILE(5,"retry recommended for cmd->cmdi->lba = 0x%llx, cmd->cmdi->retry_count = %d on chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->retry_count,chunk->index); + + if (cmd->cmdi->retry_count < CAPI_CMD_MAX_RETRIES) { + + /* + * Retry command + */ + + if (rc == CFLASH_CMD_DLY_RETRY_ERR) { + /* + * This is a retry after a delay. + * + * NOTE: Currently we are just + * sleeping here for the delay. + * Under the current implementation, this is + * approach is acceptable. However if we + * we may need to change this in the future. + * + * For this current approach we need + * to unlock (and allow other threads + * to progress) while we sleep. 
+ */ + + CBLK_TRACE_LOG_FILE(5,"retry with delayrecommended for cmd->cmdi->lba = 0x%llx, cmd->cmdi->retry_count %d chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->retry_count,chunk->index); + CFLASH_BLOCK_UNLOCK(chunk->lock); + sleep(CAPI_SCSI_IO_RETRY_DELAY); + CFLASH_BLOCK_LOCK(chunk->lock); + + + } + + CBLK_INIT_ADAP_CMD_RESP(chunk,cmd); + + /* + * Update command possibly for new path or context + */ + + if (CBLK_UPDATE_PATH_ADAP_CMD(chunk,cmd,0)) { + + CBLK_TRACE_LOG_FILE(1,"CBLK_UPDATE_PATH_ADAP_CMD failed"); + + rc = CFLASH_CMD_FATAL_ERR; + } + + cmd->cmdi->retry_count++; + + + /* + * Since the caller used CFLASH_BLOCK_AFU_SHARE_LOCK, + * we need to use the CFLASH_BLOCK_AFU_SHARE_UNLOCK here. + */ + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + rc2 = CBLK_ISSUE_CMD(chunk,cmd,cmd->cmdi->buf, + cmd->cmdi->lba,cmd->cmdi->nblocks,CFLASH_ISSUE_RETRY); + + + /* + * Since we are going to potentially wait on a resume event, + * we need to explicitly take the afu->lock, instead + * of conditionally taking the lock as is done in + * CFLASH_BLOCK_AFU_SHARE_LOCK. We will explicitly release + * it after the waiting and if necessary do the conditional + * taking of it later via CFLASH_BLOCK_AFU_SHARE_LOCK, + * Since this error recovery, this should not impact good path + */ + + CFLASH_BLOCK_LOCK(chunk->path[path_index]->afu->lock); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then wait for it to + * resume. Since we are waiting for the AFU resume + * event, that afu-lock will be released, but our chunk + * lock will not be released. So do that now. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + pthread_rc = pthread_cond_wait(&(chunk->path[path_index]->afu->resume_event), + &(chunk->path[path_index]->afu->lock.plock)); + + /* + * Chunk lock must be acquired first to prevent deadlock. 
+ */ + + CFLASH_BLOCK_UNLOCK(chunk->path[path_index]->afu->lock); + CFLASH_BLOCK_LOCK(chunk->lock); + + /* + * Conditionally acquire the afu lock to match the + * lock state we entered this routine. + */ + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (pthread_rc) { + + + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed for resume_event rc = %d errno = %d", + pthread_rc,errno); + + cmd->cmdi->status = EIO; + + errno = EIO; + + rc = CFLASH_CMD_FATAL_ERR; + return rc; + } + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * Give up if the AFU is still halted. + */ + + + CBLK_TRACE_LOG_FILE(5,"afu halted again afu->flag = 0x%x", + chunk->path[path_index]->afu->flags); + + + cmd->cmdi->status = EIO; + + errno = EIO; + + rc = CFLASH_CMD_FATAL_ERR; + return rc; + } + + } else { + + + /* + * Conditionally acquire the afu lock to match the + * lock state we entered this routine. + */ + CFLASH_BLOCK_UNLOCK(chunk->path[path_index]->afu->lock); + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + } + + + + if (rc2) { + + /* + * If we failed to issue this command for + * retry then give up on it now. + */ + + CBLK_TRACE_LOG_FILE(8,"retry issue failed with rc2 = 0x%x cmd->cmdi->lba = 0x%llx chunk->index = %d", + rc2,cmd->cmdi->lba,chunk->index); + cmd->cmdi->status = EIO; + + errno = EIO; + + rc = CFLASH_CMD_FATAL_ERR; + return rc; + } else { + + chunk->stats.num_retries++; + CBLK_TRACE_LOG_FILE(8,"retry issue succeeded cmd->cmdi->in_use= 0x%x cmd->cmdi->lba = 0x%llx chunk->index = %d", + cmd->cmdi->in_use,cmd->cmdi->lba,chunk->index); + return rc; + } + + + + } else { + + + /* + * If we exceeded retries then + * give up on it now. + */ + + errno = EIO; + cmd->cmdi->status = EIO; + + rc = CFLASH_CMD_FATAL_ERR; + } + + } /* rc == CFLASH_CMD_RETRY_ERR */ + + + } else { + + /* + * No serious error was seen, but we could + * have an underrun. 
+ */ + + + cmd->cmdi->status = 0; + + if (!cmd->cmdi->transfer_size_bytes) { + + /* + * If this transfer is not in bytes, then it will + * be in blocks, which indicate this is a read/write. + * As result, if all data was transferred, then + * we should save this to the cache. + */ + + + if (cmd->cmdi->transfer_size_bytes == cmd->cmdi->nblocks) { + + CBLK_SAVE_IN_CACHE(chunk,cmd->cmdi->buf,cmd->cmdi->lba, + cmd->cmdi->nblocks); + } + } + + } + + + cmd->cmdi->flags |= CFLSH_PROC_CMD; + + +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + + /* + * Signal any one waiting for this specific command + */ + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for hread_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + /* + * Signal any one waiting for any command to complete. + */ + + pthread_rc = pthread_cond_signal(&(chunk->cmd_cmplt_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for cmd_cmplt_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + } + + +#endif + + CBLK_TRACE_LOG_FILE(8,"cmd->cmdi->in_use= 0x%x cmd->cmdi->lba = 0x%llx, chunk->index = %d cmd->cmdi->flags = 0x%x", + cmd->cmdi->in_use,cmd->cmdi->lba,chunk->index,cmd->cmdi->flags); + + CBLK_TRACE_LOG_FILE(8,"chunk->dev_name = %s, chunk->flags = 0x%x, path_index = %d", + chunk->dev_name,chunk->flags,path_index); + + if (((rc != CFLASH_CMD_RETRY_ERR) && + (rc != CFLASH_CMD_DLY_RETRY_ERR)) && + (chunk->cmd_info[cmd->index].flags & CFLSH_CMD_INFO_USTAT)) { + + CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + + } + + + return rc; +} + + +/* + * NAME: CBLK_CHECK_COMPLETE_PATH + * + * FUNCTION: Checks for commands received on this AFU, but + * via a different chunk that have not been processed. + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. + * + * + */ + +static inline int CBLK_CHECK_COMPLETE_PATH(cflsh_chunk_t *chunk, int path_index, + cflsh_cmd_mgm_t *cmd, + size_t *transfer_size, + int *cmd_complete) +{ + + cflsh_cmd_info_t *p_cmdi; + cflsh_cmd_info_t *cmdi; + cflsh_cmd_mgm_t *p_cmd; + int rc = 0; + time_t timeout; + + + if (chunk == NULL) { + + CBLK_TRACE_LOG_FILE(8,"chunk is null"); + errno = EINVAL; + return -1; + } + + if ( (ulong)chunk & CHUNK_BAD_ADDR_MASK ) { + + CBLK_TRACE_LOG_FILE(1,"Corrupted chunk address = %p", + chunk); + + errno = EINVAL; + return -1; + + } + + if (cmd) { + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(8,"cmdi is nulll for cmd = 0x%llx chunk->index = %d", + (uint64_t)cmd,chunk->index); + errno = EINVAL; + return -1; + } + + if (cmd->cmdi->state == CFLSH_MGM_CMP) { + + /* + * Our command completed. + */ + CBLK_TRACE_LOG_FILE(8,"check cmd lba = 0x%llx, cmd = 0x%llx cmd_index = %d, chunk->index = %d", + cmd->cmdi->lba,(uint64_t)cmd,cmd->index,chunk->index); + rc = CBLK_COMPLETE_CMD(chunk,cmd,transfer_size); + + *cmd_complete = TRUE; + + return rc; + } else if (cmd->cmdi->state == CFLSH_MGM_ASY_CMP) { + + /* + * We have already return status + * back to caller for this + * command and are just waiting + * for the async interrupt thread + * (most likely this thread) to complete + * before the command can be freed. + */ + + *cmd_complete = TRUE; + + + return rc; + } + + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + cmdi = cmd->cmdi; + + if (cmdi) { + + + /* + * TODO: This code is duplicated in cblk_intrpt_thread. We + * need to modularize this. 
+ */ + + if (cflsh_blk.timeout_units != CFLSH_G_TO_SEC) { + + /* + * If the time-out units are not in seconds + * then only give the command only 1 second to complete + */ + timeout = time(NULL) - 1; + } else { + timeout = time(NULL) - (10 * cflsh_blk.timeout); + } + + if (cmdi->cmd_time < timeout) { + + CBLK_TRACE_LOG_FILE(1,"Timeout for for cmd lba = 0x%llx, cmd = 0x%llx cmd_index = %d, chunk->index = %d", + cmdi->lba,(uint64_t)cmd,cmd->index,chunk->index); + + + cmdi->status = ETIMEDOUT; + cmdi->transfer_size = 0; + + + chunk->stats.num_fail_timeouts++; + + + CBLK_GET_INTRPT_STATUS(chunk,path_index); + + + cblk_notify_mc_err(chunk,cmdi->path_index,0x300,0, + CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + cblk_reset_context_shared_afu(chunk->path[path_index]->afu); + + CFLASH_BLOCK_LOCK(chunk->lock); + + *cmd_complete = TRUE; + + errno = ETIMEDOUT; + + rc = -1; + } + + } else { + + CBLK_TRACE_LOG_FILE(1,"invalid cmdi for cmd cmd = 0x%llx cmd_index = %d, chunk->index = %d", + (uint64_t)cmd,cmd->index,chunk->index); + } + } + + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then return + * from this routine + */ + + + CBLK_TRACE_LOG_FILE(5,"afu halted exiting, afu->flags = 0x%x", + chunk->path[path_index]->afu->flags); + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + return rc; + + + + } + + p_cmdi = chunk->path[path_index]->afu->head_complete; + while (p_cmdi) { + + cmdi = p_cmdi->complete_next; + + if (CFLSH_EYECATCH_CMDI(p_cmdi)) { + + CBLK_TRACE_LOG_FILE(1,"invalid eyecatcher for cmdi = 0x%x", + cmdi->eyec); + + break; + } + + + if (p_cmdi->chunk == chunk) { + + /* + * This is a command that has completed + * while waiting for commands on another chunk. + * Let's handle it now. 
+ */ + + p_cmd = &chunk->cmd_start[p_cmdi->index]; + + + CBLK_DQ_NODE(chunk->path[path_index]->afu->head_complete,chunk->path[path_index]->afu->tail_complete,p_cmdi, + complete_prev,complete_next); + + p_cmdi->state = CFLSH_MGM_CMP; + + + rc = CBLK_PROCESS_CMD(chunk,path_index,p_cmd); + + + if (p_cmd == cmd) { + + /* + * Our command completed, Let's process it. + */ + + + if ((rc != CFLASH_CMD_RETRY_ERR) && + (rc != CFLASH_CMD_DLY_RETRY_ERR)) { + + /* + * Since we found our command completed and + * we are not retrying it, lets + * set the flag so we can avoid polling for any + * more interrupts. However we need to process + * all responses posted to the RRQ for this + * interrupt before exiting. + */ +#ifndef _COMMON_INTRPT_THREAD + + CBLK_COMPLETE_CMD(chunk,cmd,transfer_size); +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + CBLK_COMPLETE_CMD(chunk,cmd,transfer_size); + } + +#endif + + *cmd_complete = TRUE; + + } + + } + + + } + + p_cmdi = cmdi; + } + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + + return rc; +} + + + +/* + * NAME: CBLK_WAIT_FOR_IO_COMPLETE_PATH + * + * FUNCTION: Waits for the specified cmd to receive + * a completion or time-out. + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ + +static inline int CBLK_WAIT_FOR_IO_COMPLETE_PATH(cflsh_chunk_t *chunk, int path_index, + cflsh_cmd_mgm_t *cmd, + int *cmd_index, size_t *transfer_size, + int wait,int *cmd_complete, + int *poll_retry, int *poll_fail_retries) +{ + int rc = 0; +#ifndef _SKIP_POLL_CALL + time_t timeout; + cflsh_cmd_info_t *cmdi = NULL; + int poll_ret; + CFLASH_POLL_LIST_INIT(chunk,(chunk->path[path_index]),poll_list); +#endif /* _SKIP_POLL_CALL */ + + + + + if (CBLK_INVALID_CHUNK_PATH_AFU(chunk,path_index,"CBLK_WAIT_FOR_IO_COMPLETE_PATH")) { + + errno = EINVAL; + return -1; + + } + + + CBLK_TRACE_LOG_FILE(9,"waiting for cmd with cmd_index = 0x%x on chunk->index = %d", + *cmd_index,chunk->index); + CBLK_TRACE_LOG_FILE(9,"chunk->fd = %d, poll_fd= %d", + chunk->fd,chunk->path[path_index]->afu->poll_fd); + +#ifndef BLOCK_FILEMODE_ENABLED + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (cmd) { + CBLK_TRACE_LOG_FILE(8,"check cmd lba = 0x%llx, cmd = 0x%llx cmd_index = %d, chunk->index = %d", + cmd->cmdi->lba,(uint64_t)cmd,cmd->index,chunk->index); + } else if (chunk->num_active_cmds == 0) { + + /* + * If we do not have a specific command and there + * are no commands active, then let's give up. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + return (-1); + + } + + /* + * Check if our command has already been + * completed. 
+ */ + + rc = CBLK_CHECK_COMPLETE_PATH(chunk,path_index,cmd,transfer_size,cmd_complete); + if (rc) { + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + if ((rc) || (*cmd_complete)) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + +#ifndef _SKIP_POLL_CALL + + if (wait) { + + CFLASH_CLR_POLL_REVENTS(chunk,(chunk->path[path_index]),poll_list); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + if (cmd) { + CBLK_TRACE_LOG_FILE(8,"poll for cmd lba = 0x%llx",cmd->cmdi->lba); + } + + poll_ret = CFLASH_POLL(poll_list,CAPI_POLL_IO_TIME_OUT); + + CFLASH_BLOCK_LOCK(chunk->lock); + + CBLK_TRACE_LOG_FILE(8,"poll_ret = 0x%x, chunk->index = %d",poll_ret,chunk->index); + + + + + /* + * Check if our command has already been + * completed. + */ + + rc = CBLK_CHECK_COMPLETE_PATH(chunk,path_index,cmd,transfer_size,cmd_complete); + + + if ((rc) || (*cmd_complete)) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + if (chunk->num_active_cmds == 0) { + + /* + * If we do not have a specific command and there + * are no commands active, then let's give up. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + + } + + } else { + + // TODO: ?? Should we still continue here if we received a poll? + + /* + * Simulate POLLIN event + */ + + CFLASH_SET_POLLIN(chunk,(chunk->path[path_index]),poll_list); + poll_ret = 1; + } + +#endif /* !_SKIP_POLL_CALL */ + + + +#else + /* + * This is BLOCK_FILEMODE_ENABLED simulation + */ + + poll_ret = 1; + + CFLASH_BLOCK_LOCK(chunk->lock); + +#ifndef _COMMON_INTRPT_THREAD + + if ((*cmd_index == -1) || + (cmd->cmdi == NULL)) { + + /* + * FILE_MODE can not work if no tag is specified + * Thus fail now. 
+ */ + + rc = -1; + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(5,"Invalid cmd_index"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + } + + + if ((cmd->cmdi->in_use == 0) || (cmd->cmdi->state == CFLSH_MGM_ASY_CMP)) { + + CBLK_TRACE_LOG_FILE(1,"cmd->cmdi->in_use = 0 flags = 0x%x lba = 0x%llx, chunk->index = %d", + cmd->cmdi->flags,cmd->cmdi->lba,chunk->index); + rc = -1; + + errno = EINVAL; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + } + + cblk_filemode_io(chunk,cmd); +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + if ((*cmd_index == -1) || + (cmd->cmdi == NULL)) { + + /* + * FILE_MODE can not work if no tag is specified + * Thus fail now. + */ + + rc = -1; + + errno = EINVAL; + + CBLK_TRACE_LOG_FILE(5,"Invalid cmd_index"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + } + + + if ((cmd->cmdi->in_use == 0) || (cmd->cmdi->state == CFLSH_MGM_ASY_CMP)) { + + CBLK_TRACE_LOG_FILE(1,"cmd->cmdi->in_use = 0 flags = 0x%x lba = 0x%llx, chunk->index = %d", + cmd->cmdi->flags,cmd->cmdi->lba,chunk->index); + rc = -1; + + errno = EINVAL; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + } + + cblk_filemode_io(chunk,cmd); + + } + +#endif /* _COMMON_INTRPT_THREAD */ + +#endif /* BLOCK_FILEMODE_ENABLED */ + +#ifndef _SKIP_POLL_CALL + + if ((poll_ret == 0) && (wait)) { + + /* + * We timed-out waiting for a command + * to complete. First let's check to see if + * perhaps our command has already completed (possibly + * via another thread). If so then we can process it + * now. Otherwise this is is an error. 
+ */ + + + if ((cmd) && ((!cmd->cmdi->in_use) || (cmd->cmdi->state == CFLSH_MGM_ASY_CMP))) { + + + CBLK_TRACE_LOG_FILE(5,"cmd time-out unnecessary since cmd not in use cmd = 0x%llx, chunk->index = %d", + (uint64_t)cmd,chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + + rc = CBLK_CHECK_COMPLETE_PATH(chunk,path_index,cmd,transfer_size,cmd_complete); + if (rc) { + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } + + + + + if (chunk->num_active_cmds) { + + + + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,&cmd, + CFLSH_BLK_INTRPT_CMD_CMPLT, + cmd_complete, transfer_size); +#ifndef _ERROR_INTR_MODE + + /* + * Only use these traces and error logs if + * we are not in error interrupt mode (i.e + * if we are relying on command completion + * interrupts + */ + + if ((cmd) && (*cmd_complete)) { + + + CBLK_NOTIFY_LOG_THRESHOLD(9,chunk,path_index,0x302,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + + + CBLK_TRACE_LOG_FILE(5,"Poll time-out, but cmd complete cmd with lba = 0x%llx, with rc = %d, errno = %d in_use = %d, chunk->index = %d", + cmd->cmdi->lba, rc, errno,cmd->cmdi->in_use,chunk->index); + + + CBLK_TRACE_LOG_FILE(5,"Poll time-out, but cmd = %p", + cmd); + } +#endif /* ! _ERROR_INTR_MODE */ + } + + + if ((rc) || (*cmd_complete)) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + return rc; + } else { + + /* + * This appears to be a a potential time-out + */ + +#ifdef _COMMON_INTRPT_THREAD + if ((*poll_retry) && + (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) && + (cmd == NULL) && + (chunk->num_active_cmds == 0)) { + + /* + * When using a single common interrupt + * thread for all interrupts, we do not + * detect time-outs from poll time outs + * Instead they are detected in the common + * interrupt thread that looks at command time + * stamps. So if there have no active commands + * then we want to exit this loop. 
We are allowing + * at least on poll time-out iteraction in case + * something gets issued when we unlock and + * num_active_cmds is about to increase. + * + * NOTE: Currently for common interrupt + * thread one other caller (cblk_aresult) + * can also call this routine if one + * asked to wait for the next tag. + * In that case it will also specify + * no initial tag, but it needs to wait + * for commands to complete. + */ + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return rc; + + + } + +#else + + CBLK_GET_INTRPT_STATUS(chunk,path_index); + + +#endif /* _COMMON_INTRPT_THREAD */ + + (*poll_retry)++; + + + rc = -1; + + errno = ETIMEDOUT; + + if (chunk->path[path_index]->afu->p_hrrq_curr) { + + CBLK_TRACE_LOG_FILE(7,"*(chunk->path[path_index]->afu->p_hrrq_curr) = 0x%llx, chunk->path[path_index]->toggle = 0x%llx , chunk->index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),chunk->path[path_index]->afu->toggle, + chunk->index); + + } + + if (cmd) { + + + cmd->cmdi->status = errno; + + cmd->cmdi->transfer_size = 0; + + CBLK_TRACE_LOG_FILE(6,"potential cmd time-out lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + } else { + + CBLK_TRACE_LOG_FILE(6,"potential cmd time-out no command specified, chunk->index = %d",chunk->index); + } + + } + + chunk->stats.num_timeouts++; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + if (*poll_retry < CFLASH_MAX_POLL_RETRIES) { + + + return rc; + } + + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + /* + * If we are not using a back ground thread + * for polling, then get interrupt status + */ + + CBLK_GET_INTRPT_STATUS(chunk,path_index); + } + if (cmd) { + + + CFLASH_BLOCK_LOCK(chunk->lock); + + chunk->stats.num_fail_timeouts++; + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD){ + + /* + * Mark command as complete (failed). + */ + + + cmdi = cmd->cmdi; + + if (cmdi) { + + + /* + * TODO: This code is duplicated in cblk_intrpt_thread. We + * need to modularize this. 
+ */ + + if (cflsh_blk.timeout_units != CFLSH_G_TO_SEC) { + + /* + * If the time-out units are not in seconds + * then only give the command only 1 second to complete + */ + timeout = time(NULL) - 1; + } else { + timeout = time(NULL) - (10 * cflsh_blk.timeout); + } + + if (cmdi->cmd_time < timeout) { + + CBLK_TRACE_LOG_FILE(1,"Timeout for for cmd lba = 0x%llx, cmd = 0x%llx cmd_index = %d, chunk->index = %d", + cmdi->lba,(uint64_t)cmd,cmd->index,chunk->index); + + + cmdi->status = ETIMEDOUT; + cmdi->transfer_size = 0; + + + + CBLK_GET_INTRPT_STATUS(chunk,path_index); + + cblk_notify_mc_err(chunk,cmdi->path_index,0x301,0, + CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + cblk_reset_context_shared_afu(chunk->path[path_index]->afu); + + CFLASH_BLOCK_LOCK(chunk->lock); + + *cmd_complete = TRUE; + + errno = ETIMEDOUT; + + rc = -1; + } + + /* + * The caller should retry this until either time-out is + * exceeded or the command completes. So even for N_BG_TD, + * we just return here (a few lines below). + */ + + } + + + + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + } + + + return rc; + + } else if (poll_ret < 0) { + + + /* + * Poll failed, Give up + */ + + + if (errno == EIO) { + + + CBLK_TRACE_LOG_FILE(1,"Potential UE encountered for command room\n"); + + cblk_check_os_adap_err(chunk,path_index); + } + + + CBLK_TRACE_LOG_FILE(1,"poll failed, with errno = %d, chunk->index = %d",errno,chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + (*poll_fail_retries)++; + + if (*poll_fail_retries < CFLASH_MAX_POLL_FAIL_RETRIES) { + + /* + * Retry + */ + + return rc; + } + + if (cmd) { + + + cmd->cmdi->status = errno; + + cmd->cmdi->transfer_size = 0; + + CBLK_TRACE_LOG_FILE(6,"Poll failure lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + } + return rc; + + } else { +#endif /* !_SKIP_POLL_CALL */ + /* + * We may have received events for this file descriptor. 
Let's
+ * first read the events and then process them accordingly.
+ */
+#ifndef _SKIP_READ_CALL
+
+ rc = cblk_read_os_specific_intrpt_event(chunk,path_index,&cmd,cmd_complete,transfer_size,poll_list);
+
+#else
+ /*
+ * Process command completion interrupt.
+ */
+ rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,&cmd,CFLSH_BLK_INTRPT_CMD_CMPLT,cmd_complete, transfer_size);
+
+#ifdef _PERF_TEST
+ CFLASH_BLOCK_UNLOCK(chunk->lock);
+ usleep(cflsh_blk.adap_poll_delay);
+
+ CFLASH_BLOCK_LOCK(chunk->lock);
+#endif /* _PERF_TEST */
+
+#endif
+
+
+#ifndef _SKIP_POLL_CALL
+ }
+
+#endif /* !_SKIP_POLL_CALL */
+
+ CFLASH_BLOCK_UNLOCK(chunk->lock);
+
+
+ if (cmd) {
+
+ CBLK_TRACE_LOG_FILE(5,"waiting returned for cmd with lba = 0x%llx, with rc = %d, errno = %d in_use = %d, chunk->index = %d",
+ cmd->cmdi->lba, rc, errno,cmd->cmdi->in_use,chunk->index);
+ }
+
+
+
+ return rc;
+}
+
+
+
+/*
+ * NAME: CBLK_INTR_DELAY_WAIT
+ *
+ * FUNCTION: Ensures that we wait long enough
+ * to allow a command to complete or time-out
+ * based on the environment it is invoked in.
+ *
+ *
+ * INPUTS:
+ * chunk - Chunk the cmd is associated.
+ *
+ * lib_flags, rc, loop_cnt - library flags, prior return code, and loop count.
+ *
+ * RETURNS:
+ * 0 - Continue waiting; 1 - Give up (retry limit exceeded).
+ *
+ *
+ */
+static inline int CBLK_INTR_DELAY_WAIT(cflsh_chunk_t *chunk, int lib_flags, int rc, int loop_cnt)
+{
+
+
+
+#ifdef _ERROR_INTR_MODE
+
+ /*
+ * If we are operating in error interrupt
+ * mode (i.e. block library is doing a poll
+ * to return immediately and block library
+ * does not expect poll to notify it about
+ * command completions), then we need special
+ * handling to avoid spinning too quickly
+ * on waiting for completions.
+ *
+ * When no back ground thread exists
+ * for a synchronous command we need to ensure
+ * we do not fail as a time-out too soon.
+ *
+ * When using a back ground thread, we do not want to spin
+ * too frequently either. 
+ * + * The non-error interrupt case did not have this + * issue since it did a poll with a 1 millisecond + * time-out and we waited until the accumulation + * of all these before giving up. + */ + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + + /* + * If there is a back ground thread for + * processing interrupts. + */ + + if (loop_cnt > CFLASH_MIN_POLL_RETRIES) { + + /* + * Do not start delaying until we have done + * some checking for completion + */ + + usleep(CFLASH_DELAY_NO_CMD_INTRPT); + } + + if ( (rc) && (loop_cnt > CFLASH_MAX_POLL_RETRIES)) { + + /* + * Give up if we have looped too many + * times and are seeing errors. + */ + + + return 1; + } + + + + } else { + + /* + * If there is no background thread for processing + * interrupts (i.e. each caller into the library + * must do the polling either implicitly (for synchronous + * I/O) or explicitly for asynchronous I/O). + */ + + if (!(lib_flags & CFLASH_ASYNC_OP)) { + + + /* + * If this is a synchronous I/O request, + * then we need to delay for the caller + * until a command completes or we time-out. + */ + + if (loop_cnt > CFLASH_MIN_POLL_RETRIES) { + + /* + * Do not start delaying until we have done + * some checking for completion + */ + + usleep(CFLASH_DELAY_NO_CMD_INTRPT); + } + + + + } else if ( (rc) && (loop_cnt > CFLASH_MAX_POLL_RETRIES)) { + + /* + * Give up if we have looped too many + * times and are seeing errors. + */ + + + return 1; + } + } + +#else + + + + + if ( (rc) && (loop_cnt > CFLASH_MAX_POLL_RETRIES)) { + + /* + * Give up if we have looped too many + * times and are seeing errors. + */ + + + return 1; + } + + + +#endif /* !_ERROR_INTR_MODE */ + + + + return 0; +} + +/* + * NAME: CBLK_WAIT_FOR_IO_COMPLETE + * + * FUNCTION: Waits for the specified cmd to receive + * a completion or time-out. + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ + +static inline int CBLK_WAIT_FOR_IO_COMPLETE(cflsh_chunk_t *chunk, + int *cmd_index, size_t *transfer_size, + int wait, int lib_flags) +{ + int rc = 0; + int loop_cnt = 0; + int cmd_complete = FALSE; + cflsh_cmd_mgm_t *cmd = NULL; + int path_index = -1; + int poll_retry = 0; + int poll_fail_retries = 0; + + + + + if (chunk == NULL) { + + CBLK_TRACE_LOG_FILE(1,"chunk is null"); + + errno = EINVAL; + return (-1); + } + + if (cmd_index == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd_index is null"); + errno = EINVAL; + return (-1); + } + + CBLK_TRACE_LOG_FILE(5,"waiting for cmd with cmd_index = 0x%x on chunk->index = %d", + *cmd_index,chunk->index); + + + + + if (*cmd_index != -1) { + + /* + * A tag of -1, indicates the caller wants + * this routine to return when any command completes. + */ + + if ((*cmd_index >= chunk->num_cmds) || + (*cmd_index < 0)) { + + CBLK_TRACE_LOG_FILE(1,"Invalid cmd_index = 0x%x, chunk->index = %d",*cmd_index,chunk->index); + + errno = EINVAL; + return (-1); + } + + + cmd = &(chunk->cmd_start[*cmd_index]); + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Invalid cmd_index = 0x%x, chunk->index = %d, cmdi is null",*cmd_index,chunk->index); + + errno = EINVAL; + return (-1); + } + + if ( (cmd->cmdi->in_use == 0) || (cmd->cmdi->state == CFLSH_MGM_ASY_CMP)) { + + CBLK_TRACE_LOG_FILE(1,"cmd->cmdi->in_use = 0 flags = 0x%x lba = 0x%llx, chunk->index = %d", + cmd->cmdi->flags,cmd->cmdi->lba,chunk->index); + + errno = EINVAL; + + return -1; + } + + path_index = chunk->cmd_info[*cmd_index].path_index; + + + CBLK_TRACE_LOG_FILE(7,"waiting for cmd with lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + } + + + if (path_index >= 0) { + + poll_retry = 0; + poll_fail_retries = 0; + + while ((!cmd_complete) && + (loop_cnt < CFLASH_MAX_WAIT_LOOP_CNT)) { + + + rc = CBLK_WAIT_FOR_IO_COMPLETE_PATH(chunk, path_index,cmd,cmd_index, transfer_size,wait, + &cmd_complete,&poll_retry,&poll_fail_retries); + 
+ loop_cnt++; + + + + if (chunk->num_active_cmds == 0) { + + /* + * If there are no commands active, then let's give up. + */ + + break; + } + + + + if (CBLK_INTR_DELAY_WAIT(chunk,lib_flags,rc,loop_cnt)) { + + break; + } + + + +#ifdef BLOCK_FILEMODE_ENABLED +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + /* + * TODO ?? Is this the right loop count + */ + + if (loop_cnt > 5) { + + break; + } + } + +#endif /* _COMMON_INTRPT_THREAD */ +#endif /* BLOCK_FILEMODE_ENABLED */ + + } /* while */ + + } else { + + for (path_index=0;path_index < chunk->num_paths;path_index++) { + + + + if (chunk->path[path_index]->afu->num_issued_cmds) { + + /* + * If there are commands for this path then check for completion. + */ + + poll_retry = 0; + poll_fail_retries = 0; + + + + while ((!cmd_complete) && + (loop_cnt < CFLASH_MAX_WAIT_LOOP_CNT)) { + + rc = CBLK_WAIT_FOR_IO_COMPLETE_PATH(chunk, path_index,cmd,cmd_index, transfer_size,wait, + &cmd_complete,&poll_retry,&poll_fail_retries); + + loop_cnt++; + + + if (chunk->num_active_cmds == 0) { + + /* + * If there are no commands active, then let's give up. + */ + + break; + } + + + + if (CBLK_INTR_DELAY_WAIT(chunk,lib_flags,rc,loop_cnt)) { + + break; + } + + + +#ifdef BLOCK_FILEMODE_ENABLED +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + /* + * TODO ?? 
Is this the right loop count + */ + + if (loop_cnt > 5) { + + break; + } + } + +#endif /* _COMMON_INTRPT_THREAD */ +#endif /* BLOCK_FILEMODE_ENABLED */ + } + + } + + } + + } + + if (cmd) { + + CBLK_TRACE_LOG_FILE(5,"waiting returned for cmd with lba = 0x%llx, with rc = %d, errno = %d in_use = %d, chunk->index = %d", + cmd->cmdi->lba, rc, errno,cmd->cmdi->in_use,chunk->index); + } + + + + return rc; + +} + + + +#endif /* _H_CFLASH_BLOCK_INLINE */ diff --git a/src/block/cflash_block_int.c b/src/block/cflash_block_int.c new file mode 100644 index 00000000..96a218cd --- /dev/null +++ b/src/block/cflash_block_int.c @@ -0,0 +1,6324 @@ + +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_int.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/
+/* */
+/* IBM_PROLOG_END_TAG */
+
+
+#define CFLSH_BLK_FILENUM 0x0200
+#include "cflash_block_internal.h"
+#include "cflash_block_inline.h"
+
+#ifdef BLOCK_FILEMODE_ENABLED
+#include <sys/stat.h>
+#endif
+
+
+char cblk_filename[PATH_MAX];
+
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_setup_trace_files
+ *
+ * FUNCTION: Set up trace files
+ *
+ *
+ *
+ *
+ *
+ * RETURNS: NONE
+ *
+ * ----------------------------------------------------------------------------
+ */
+void cblk_setup_trace_files(int new_process)
+{
+ int i;
+ char *env_verbosity = getenv("CFLSH_BLK_TRC_VERBOSITY");
+ char *env_use_syslog = getenv("CFLSH_BLK_TRC_SYSLOG");
+ char *env_trace_append = getenv("CFLSH_BLK_TRC_APPEND");
+ char *log_pid = getenv("CFLSH_BLK_TRACE_PID");
+ char *env_num_thread_logs = getenv("CFLSH_BLK_TRACE_TID");
+ char *env_user = getenv("USER");
+ uint32_t thread_logs = 0;
+ char filename[PATH_MAX];
+ char *filename_ptr = filename;
+ int open_flags = 0;
+
+
+#ifndef _AIX
+ cflsh_blk.flags |= CFLSH_G_SYSLOG;
+
+ if (env_use_syslog) {
+
+ if (strcmp(env_use_syslog,"ON")) {
+
+ /*
+ * Don't use syslog tracing. Instead
+ * use tracing to a file.
+ */
+ cflsh_blk.flags &= ~CFLSH_G_SYSLOG;
+
+
+ /*
+ * By default for linux, enable appending
+ * of the log file if someone turns off the default
+ * of syslog.
+ */
+
+ open_flags |= TRACE_LOG_EXT_OPEN_APPEND_FLG;
+ }
+ }
+#endif /* !_AIX */
+
+
+
+
+ if (new_process) {
+
+ if ((log_pid == NULL) ||
+ (cflsh_blk.flags & CFLSH_G_SYSLOG)) {
+
+ /*
+ * If this is a new process (forked process)
+ * and we are not using traces per process,
+ * or we are logging via syslog
+ * then continue to use the tracing
+ * in place for the parent process.
+ */
+
+ return;
+ }
+
+ strcpy(filename,cblk_log_filename);
+ }
+
+ if (env_trace_append) {
+
+
+ if (!strcmp(env_trace_append,"ON")) {
+
+ /*
+ * Append to existing trace file. The default
+ * is to truncate and overwrite. 
+ */ + open_flags |= TRACE_LOG_EXT_OPEN_APPEND_FLG; + } else { + + open_flags &= ~TRACE_LOG_EXT_OPEN_APPEND_FLG; + } + + } + + if (env_verbosity) { + cblk_log_verbosity = atoi(env_verbosity); + + } else { +#ifdef _AIX + cblk_log_verbosity = 0; +#else + cblk_log_verbosity = 1; +#endif + } + + cblk_log_filename = getenv("CFLSH_BLK_TRACE"); + if (cblk_log_filename == NULL) + { + sprintf(cblk_filename, "/tmp/%s.cflash_block_trc", env_user); + cblk_log_filename = cblk_filename; + } + + if ((log_pid) && !(cflsh_blk.flags & CFLSH_G_SYSLOG)) { + + /* + * Use different filename for each process, when + * not using syslogging. + */ + + sprintf(cblk_filename,"%s.%d",cblk_log_filename,getpid()); + + if ((new_process) && + !strcmp(cblk_log_filename,filename)) { + + /* + * If this is a new process (forked process) + * and the process trace filename is same as before, + * then return here, since we are already set up. + * This situation can occur if there are multiple chunks + * that are cloned after a fork. Only the first + * one would change the trace file. + */ + + return; + } + + cblk_log_filename = cblk_filename; + + } + + bzero((void *)&(cflsh_blk.trace_ext),sizeof(trace_log_ext_arg_t)); + + /* + * We need to serialize access to this log file + * while we are setting it up. + */ + + pthread_mutex_lock(&cblk_log_lock); + + if (cflsh_blk.flags & CFLSH_G_SYSLOG) { + + openlog("CXLBLK",LOG_PID,LOG_USER); + + + } else if (cblk_log_verbosity) { + + if (setup_trace_log_file_ext(&cblk_log_filename,&cblk_logfp,cblk_log_filename,open_flags)) { + + //fprintf(stderr,"Failed to set up tracing for filename = %s\n",cblk_log_filename); + + /* + * Turn off tracing if this fails. + */ + cblk_log_verbosity = 0; + } + } + + + + if ((env_num_thread_logs) && !(cflsh_blk.flags & CFLSH_G_SYSLOG)) { + + /* + * This indicates they want a trace log file per thread + * and we are not using syslog. + * We will still trace all threads in one common file, + * but also provide a thread log per thread too. 
+ */ + + if ((new_process) && (num_thread_logs)) { + + /* + * If this is a new process (i.e. forked + * process), then we need to free up + * the resources from parent first. + */ + + free(cflsh_blk.thread_logs); + } + + num_thread_logs = atoi(env_num_thread_logs); + + num_thread_logs = MIN(num_thread_logs, MAX_NUM_THREAD_LOGS); + + if (num_thread_logs) { + + /* + * Allocate there array of thread_log file pointers: + */ + + cflsh_blk.thread_logs = (cflsh_thread_log_t *) malloc(num_thread_logs * sizeof(cflsh_thread_log_t)); + + if (cflsh_blk.thread_logs) { + + bzero((void *)cflsh_blk.thread_logs, num_thread_logs * sizeof(cflsh_thread_log_t)); + + + for (i=0; i< num_thread_logs;i++) { + + sprintf(filename,"%s.%d",cblk_log_filename,i); + + + if (setup_trace_log_file(&filename_ptr,&cflsh_blk.thread_logs[i].logfp,filename)) { + + fprintf(stderr,"Failed to set up tracing for filename = %s\n",filename); + free(cflsh_blk.thread_logs); + + num_thread_logs = 0; + break; + } + + cflsh_blk.thread_logs[i]. ext_arg.flags |= TRACE_LOG_NO_USE_LOG_NUM; + + } /* for */ + + /* + * We need to create a mask to allow us to hash + * thread ids into the our various thread log files. + * Thus we need mask that is based on the number_thread_log + * files. We'll create a mask that is contains a 1 for + * every bit up to the highest bit used to represent the number + * thread log files. + */ + + thread_logs = num_thread_logs; + cflsh_blk.thread_log_mask = 0; + + while (thread_logs) { + + cflsh_blk.thread_log_mask = (cflsh_blk.thread_log_mask << 1) | 1; + thread_logs >>= 1; + + } /* while */ + + } else { + + /* + * If we fail to allocate the thread trace log, then + * set num_thread_logs back to 0. 
+ */ + num_thread_logs = 0; + } + } + + } + + pthread_mutex_unlock(&cblk_log_lock); + + + return; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_valid_endianess + * + * FUNCTION: Determines the Endianess of the host that + * the binary is running on. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 1 Host endianess matches compile flags + * 0 Host endianess is invalid based on compile flags + * + * ---------------------------------------------------------------------------- + */ +int cblk_valid_endianess(void) +{ + int rc = FALSE; + short test_endian = 0x0102; + char *ptr; + char byte; + + ptr = (char *) &test_endian; + + byte = ptr[0]; + + if (byte == 0x02) { + + /* + * In a Little Endian host, the first indexed + * byte will be 0x2 + */ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + rc = TRUE; +#else + rc = FALSE; +#endif /* !CFLASH_LITTLE_ENDIAN_HOST */ + + + } else { + + /* + * In a Big Endian host, the first indexed + * byte will be 0x1 + */ + +#ifdef CFLASH_LITTLE_ENDIAN_HOST + rc = FALSE; +#else + rc = TRUE; +#endif /* !CFLASH_LITTLE_ENDIAN_HOST */ + + + } + + + + return rc; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_sigsev_handler + * + * FUNCTION: Since a failing CAPI adapter, can generate SIGSEV + * for a now invalid MMIO address, let us collect some + * debug information here in this SIGSEGV hanndler + * to determine this. 
+ * + * + * + * + * + * RETURNS: NONE + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_sigsev_handler (int signum, siginfo_t *siginfo, void *uctx) +{ + cflsh_chunk_t *chunk; + int i,j; + + CBLK_TRACE_LOG_FILE(1,"si_code = %d, si_addr = 0x%p", + siginfo->si_code,siginfo->si_addr); + + switch (siginfo->si_code) { +#ifdef _AIX + case SEGV_MAPERR: + CBLK_TRACE_LOG_FILE(1,"Address not mapped, address = 0x%p", + siginfo->si_addr); + + break; +#endif /* _AIX */ + case SEGV_ACCERR: + + CBLK_TRACE_LOG_FILE(1,"Invalid permissions, address = 0x%p", + siginfo->si_addr); + + break; + default: + + CBLK_TRACE_LOG_FILE(1,"Unknown si_code = %d, address = 0x%p", + siginfo->si_code,siginfo->si_addr); + } + + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + while (chunk) { + + + for (j=0; j < chunk->num_paths;j++) { + if ((chunk->flags & CFLSH_CHNK_SIGH) && + (chunk->path[j]) && + (chunk->path[j]->afu) && + (chunk->path[j]->afu->mmio <= siginfo->si_addr) && + (chunk->path[j]->upper_mmio_addr >= siginfo->si_addr)) { + + longjmp(chunk->path[j]->jmp_mmio,1); + } + + } /* for */ + + chunk = chunk->next; + + } /* while */ + + + } /* for */ + + + /* + * If we get here then SIGSEGV is mostly + * likely not associated with a bad MMIO + * address (due to adapter reset or + * UE. Issue default signal. + */ + + signal(signum,SIG_DFL); + kill(getpid(),signum); + + return; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_prepare_fork + * + * FUNCTION: If a process using this library does a fork, then + * this routine will be invoked + * prior to fork to the library into a consistent state + * that will be preserved across fork. 
+ * + * + * + * + * + * RETURNS: NONE + * + * ---------------------------------------------------------------------------- + */ +void cblk_prepare_fork (void) +{ + cflsh_chunk_t *chunk = NULL; + int i; + + + pthread_mutex_lock(&cblk_log_lock); + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + while (chunk) { + + CFLASH_BLOCK_LOCK(chunk->lock); + chunk = chunk->next; + + } /* while */ + + + } /* for */ + + + return; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_parent_post_fork + * + * FUNCTION: If a process using this library does a fork, then + * this routine will be run on the parent after fork + * to release locks. + * + * + * + * + * + * RETURNS: NONE + * + * ---------------------------------------------------------------------------- + */ +void cblk_parent_post_fork (void) +{ + cflsh_chunk_t *chunk = NULL; + int i; + int rc; + + + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + while (chunk) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + chunk = chunk->next; + + } /* while */ + + + } /* for */ + + + + rc = pthread_mutex_unlock(&cblk_log_lock); + + if (rc) { + + /* + * Find the first chunk do a notify + * against it. + */ + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + if (chunk) { + + break; + } + + + } /* for */ + + if (chunk) { + + cblk_notify_mc_err(chunk,0,0x205,rc, CFLSH_BLK_NOTIFY_SFW_ERR,NULL); + + } + } + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + return; +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_child_post_fork + * + * FUNCTION: If a process using this library does a fork, then + * this routine will be run on the child after fork + * to release locks. 
+ * + * + * + * + * + * RETURNS: NONE + * + * ---------------------------------------------------------------------------- + */ +void cblk_child_post_fork (void) +{ + cflsh_chunk_t *chunk = NULL; + int i; + int rc; + + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + while (chunk) { + + CFLASH_BLOCK_UNLOCK(chunk->lock); + chunk = chunk->next; + + } /* while */ + + + } /* for */ + + + + rc = pthread_mutex_unlock(&cblk_log_lock); + + if (rc) { + + + /* + * Find the first chunk do a notify + * against it. + */ + + for (i=0; i < MAX_NUM_CHUNKS_HASH; i++) { + + chunk = cflsh_blk.hash[i]; + + + if (chunk) { + + break; + } + + + } /* for */ + + if (chunk) { + + cblk_notify_mc_err(chunk,0,0x206,rc, CFLSH_BLK_NOTIFY_SFW_ERR,NULL); + + } + } + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + return; +} + + + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_init_cache + * + * FUNCTION: Initialize cache for a chunk + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_init_cache (cflsh_chunk_t *chunk, size_t nblocks) +{ + cflsh_cache_line_t *line; + uint n; + + + if (chunk == NULL) { + + return; + } + + if (nblocks == 0) { + + + return; + } + + + chunk->cache_size = MIN(nblocks,cblk_cache_size); + + if (chunk->cache_size == 0) { + + return; + } + + + CBLK_TRACE_LOG_FILE(5,"cache_size",chunk->cache_size); + + chunk->cache = (cflsh_cache_line_t *) malloc(chunk->cache_size * sizeof(cflsh_cache_line_t)); + + if (chunk->cache == (cflsh_cache_line_t *) NULL) { + + CBLK_TRACE_LOG_FILE(1,"Could not allocate cache with size = %d\n", + chunk->cache_size); + fprintf (stderr, + "Could not allocate cache with size = %d\n", + chunk->cache_size); + return; + } + + bzero(chunk->cache,(chunk->cache_size * 
sizeof(cflsh_cache_line_t))); + + chunk->cache_buffer = NULL; + if ( posix_memalign((void *)&(chunk->cache_buffer),4096, + (CAPI_FLASH_BLOCK_SIZE * chunk->cache_size))) { + + CBLK_TRACE_LOG_FILE(1,"posix_memalign failed cache_size = %d,errno = %d", + chunk->cache_size,errno); + + + free(chunk->cache); + + chunk->cache = NULL; + + return; + + + } + + bzero(chunk->cache_buffer,(CAPI_FLASH_BLOCK_SIZE * chunk->cache_size)); + + for (line = chunk->cache; line < &chunk->cache[chunk->cache_size]; line++) { + for (n = 0; n < CFLSH_BLK_NSET; n++) { + + line->entry[n].data = chunk->cache_buffer + (n * CAPI_FLASH_BLOCK_SIZE); + + line->entry[n].valid = 0; + line->entry[n].next = n + 1; + line->entry[n].prev = n - 1; + } + } + + return; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_free_cache + * + * FUNCTION: free cache for a chunk + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_free_cache (cflsh_chunk_t *chunk) +{ + + if (chunk == NULL) { + + return; + } + + if (chunk->cache_size == 0) { + + + return; + } + + + CBLK_TRACE_LOG_FILE(5,"cache_size",chunk->cache_size); + + free(chunk->cache_buffer); + + free(chunk->cache); + + return; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_flush_cache + * + * FUNCTION: Flush a chunk's cache. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_flush_cache (cflsh_chunk_t *chunk) +{ + cflsh_cache_line_t *line; + int n; + + + + CBLK_TRACE_LOG_FILE(5,"cache_size",chunk->cache_size); + + for (line = chunk->cache; line < &chunk->cache[chunk->cache_size]; line++) { + for (n = 0; n < CFLSH_BLK_NSET; n++) { + line->entry[n].valid = 0; + } + } + + + return; +} + + +/* + * NAME: cblk_get_chunk_type + * + * FUNCTION: Returns chunk type of the specified device. + * + * + * + * + * INPUTS: + * device path name + * + * RETURNS: + * command type + * + */ +cflsh_block_chunk_type_t cblk_get_chunk_type(const char *path, int arch_type) +{ + + cflsh_block_chunk_type_t chunk_type; + + + if (arch_type) { + + /* + * If architecture type is set, then + * evaluate it for the specific OS.. + */ + + return (cblk_get_os_chunk_type(path,arch_type)); + + } + + + + /* + * For now we only support one chunk type: + * the SIS lite type. + */ + + chunk_type = CFLASH_BLK_CHUNK_SIS_LITE; + + return chunk_type; +} + + +/* + * NAME: cblk_set_fcn_ptrs + * + * FUNCTION: Sets function pointers for a chunk based on + * it chunk type. + * + * + * + * + * INPUTS: + * chunk + * + * RETURNS: + * 0 - Good completion + * Otherwise an erro. + * + */ +int cblk_set_fcn_ptrs(cflsh_path_t *path) +{ + int rc = 0; + + if (path == NULL) { + + errno = EFAULT; + + return -1; + } + + switch (path->type) { + case CFLASH_BLK_CHUNK_SIS_LITE: + + /* + * SIS Lite adapter/AFU type + */ + + rc = cblk_init_sisl_fcn_ptrs(path); + + break; + + default: + errno = EINVAL; + + rc = -1; + + } + + return rc; +} + + +/* + * NAME: cblk_alloc_hrrq_afu + * + * FUNCTION: This routine allocates and initializes + * the RRQ per AFU. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. 
+ * + * + * INPUTS: + * NONE + * + * RETURNS: + * 0 - Success + * nonzero - Failure + * + */ +int cblk_alloc_hrrq_afu(cflsh_afu_t *afu, int num_cmds) +{ + int rc = 0; + + + if (num_cmds == 0) { + + CBLK_TRACE_LOG_FILE(1,"num_cmds = 0"); + + return -1; + + } + + if (afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"No AFU provided"); + + return -1; + + } + + + if (afu->p_hrrq_start) { + + //TODO:?? CBLK_TRACE_LOG_FILE(5,"RRQ allocated already"); + + // TODO:?? Maybe this should be failed eventually. + return 0; + } + + /* + * Align RRQ on cacheline boundary. + */ + + if ( posix_memalign((void *)&(afu->p_hrrq_start),128, + (sizeof(*(afu->p_hrrq_start)) * num_cmds))) { + + + CBLK_TRACE_LOG_FILE(1,"Failed posix_memalign for rrq errno= %d",errno); + + return -1; + + } + + bzero((void *)afu->p_hrrq_start , + (sizeof(*(afu->p_hrrq_start)) * num_cmds)); + + + afu->p_hrrq_end = afu->p_hrrq_start + (num_cmds - 1); + + afu->p_hrrq_curr = afu->p_hrrq_start; + + + /* + * Since the host RRQ is + * bzeroed. The toggle bit in the host + * RRQ that initially indicates we + * have a new RRQ will need to be 1. + */ + + + afu->toggle = 1; + + return rc; +} + + +/* + * NAME: cblk_free_hrrq_afu + * + * FUNCTION: This routine frees the + * RRQ per AFU. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * 0 - Success + * nonzero - Failure + * + */ +int cblk_free_hrrq_afu(cflsh_afu_t *afu) +{ + + + if (afu == NULL) { + + return 0; + + } + + + if (afu->p_hrrq_start == NULL) { + + return 0; + } + + + free(afu->p_hrrq_start); + + afu->p_hrrq_start = NULL; + afu->p_hrrq_end = NULL; + + return 0; +} + + + +/* + * NAME: cblk_get_afu + * + * FUNCTION: This routine checks if an AFU structure + * already exists for the specified AFU. Otherwise + * it will allocate one. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. 
+ * For MPIO we allow sharing of AFUs from the same + * same chunk but different paths to the same disk. + * Otherwise sharing of contexts is needed. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. + * + */ + +cflsh_afu_t *cblk_get_afu(cflsh_path_t *path, char *dev_name, dev64_t adap_devno,cflsh_block_chunk_type_t type,int num_cmds, + cflsh_afu_in_use_t *in_use, int share) +{ + cflsh_afu_t *afu = NULL; + char *afu_name = NULL; + cflsh_path_t *tmp_path; + int mpio_shared; /* MPIO shared AFU */ + int pthread_rc; + + + + if (in_use == NULL) { + + return NULL; + } + + + afu_name = cblk_find_parent_dev(dev_name); + + *in_use = CFLASH_AFU_NOT_INUSE; + + afu = cflsh_blk.head_afu; + + + while (afu) { + +#ifdef _AIX + + + if ((adap_devno) && + (afu->adap_devno == adap_devno) && + (afu->type == type)) { + + + + tmp_path = afu->head_path; + + mpio_shared = FALSE; + + while (tmp_path) { + + if (tmp_path->chunk == path->chunk) { + + + mpio_shared = TRUE; + break; + } + tmp_path = tmp_path->next; + } + + + if (mpio_shared) { + + + afu->ref_count++; + + *in_use = CFLASH_AFU_MPIO_INUSE; + + break; + } else if ((share) && + (afu->flags & CFLSH_AFU_SHARED)) { + + afu->ref_count++; + + *in_use = CFLASH_AFU_SHARE_INUSE; + + break; + } + } + +#else + +#ifdef _KERNEL_MASTER_CONTXT + + if (afu_name == NULL) { + + /* + * Fail if there is no AFU name + */ + + return NULL; + } +#endif /* _KERNEL_MASTER_CONTXT */ + + if ((share) && + (afu_name) && + (!strcmp(afu->master_name,afu_name)) && + (afu->flags & CFLSH_AFU_SHARED) && + (afu->type == type)) { + + + afu->ref_count++; + + *in_use = CFLASH_AFU_SHARE_INUSE; + break; + } + + + if ((afu_name) && + (!strcmp(afu->master_name,afu_name)) && + (afu->type == type)) { + + + + tmp_path = afu->head_path; + + mpio_shared = FALSE; + + while (tmp_path) { + + if (tmp_path->chunk == path->chunk) { + + + mpio_shared = TRUE; + } + tmp_path = tmp_path->next; + } + + if (mpio_shared) { + + + 
afu->ref_count++; + + *in_use = CFLASH_AFU_MPIO_INUSE; + + break; + } else if ((share) && + (afu->flags & CFLSH_AFU_SHARED)) { + + afu->ref_count++; + + *in_use = CFLASH_AFU_SHARE_INUSE; + + break; + } + + + } +#endif + + + + afu = afu->next; + } + + if (afu == NULL) { + + /* + * If no path was found, then + * allocate one. + */ + + /* + * Align on 4K boundary to make it easier + * for debug purposes. + */ + + if ( posix_memalign((void *)&afu,4096, + (sizeof(*afu)))) { + + + CBLK_TRACE_LOG_FILE(1,"Failed posix_memalign for afu, errno= %d",errno); + + + + afu = NULL; + + } else { + + + bzero((void *) afu,sizeof (*afu)); + afu->ref_count++; + + if (share) { + + afu->flags |= CFLSH_AFU_SHARED; + } + + afu->type = type; + + afu->num_rrqs = num_cmds; + +#ifdef _AIX + afu->adap_devno = adap_devno; +#endif /* AIX */ + + if (cblk_alloc_hrrq_afu(afu,num_cmds)) { + free(afu); + + return NULL; + } + + + pthread_rc = pthread_cond_init(&(afu->resume_event),NULL); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(1,"pthread_cond_init failed for resume_event rc = %d errno= %d", + pthread_rc,errno); + + cblk_free_hrrq_afu(afu); + free(afu); + + return NULL; + + } + + + + + if (afu_name) { + strncpy(afu->master_name,afu_name,MIN(strlen(afu_name), + PATH_MAX)); + } + + CFLASH_BLOCK_LOCK_INIT(afu->lock); + + afu->eyec = CFLSH_EYEC_AFU; + + CBLK_Q_NODE_TAIL(cflsh_blk.head_afu,cflsh_blk.tail_afu,afu,prev,next); + + } + + } + + CBLK_Q_NODE_TAIL(afu->head_path,afu->tail_path,path,prev,next); + + return afu; +} + + +/* + * NAME: cblk_update_afu_type + * + * FUNCTION: This routine updates the AFU type if + * possible. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. 
+ * + * + * INPUTS: + * + * + * RETURNS: + * 0 - Success + * otherwise - Failure + * + */ + +int cblk_update_afu_type(cflsh_afu_t *afu,cflsh_block_chunk_type_t type) +{ + int rc = 0; + + if (afu == NULL) { + + errno = EINVAL; + + return -1; + + } + + + if (afu->ref_count == 1) { + + /* + * Update AFU only if ref_count + * is 1. Otherwise, it most likely + * is already in use. + */ + + + if (cblk_alloc_hrrq_afu(afu,afu->num_rrqs)) { + + + errno = ENOMEM; + + return -1; + } + + afu->type = type; + } + + return rc; + +} + +/* + * NAME: cblk_release_afu + * + * FUNCTION: This routine checks if the afu is used by others + * if not, then it frees the afu structure; + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. + * + */ + +void cblk_release_afu(cflsh_path_t *path,cflsh_afu_t *afu) +{ + if ((afu) && (path)) { + + CBLK_DQ_NODE(afu->head_path,afu->tail_path,path,prev,next); + + afu->ref_count--; + if (!(afu->ref_count)) { + + CBLK_DQ_NODE(cflsh_blk.head_afu,cflsh_blk.tail_afu,afu,prev,next); + + if (afu->head_complete) { + + CBLK_TRACE_LOG_FILE(1,"AFU has commands on completion list"); + + } + cblk_free_hrrq_afu(afu); + free(afu); + } + + } + + return; + +} + + +/* + * NAME: cblk_get_path + * + * FUNCTION: This routine checks if a path structure + * already exists for the specified path. Otherwise + * it will allocate one. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. + * + */ + +cflsh_path_t *cblk_get_path(cflsh_chunk_t *chunk, dev64_t adap_devno,cflsh_block_chunk_type_t type,int num_cmds, + cflsh_afu_in_use_t *in_use, int share) +{ + cflsh_path_t *path = NULL; + int pthread_rc; + int j; + + + + + /* + * Align on 4K boundary to make it easier + * for debug purposes. 
+ */ + + if ( posix_memalign((void *)&path,4096, + (sizeof(*path)))) { + + + CBLK_TRACE_LOG_FILE(1,"Failed posix_memalign for path, errno= %d",errno); + + + + path = NULL; + + } else { + + + bzero((void *) path,sizeof (*path)); + + path->type = type; + + path->chunk = chunk; + + if (cblk_set_fcn_ptrs(path)) { + + CBLK_TRACE_LOG_FILE(1,"Failed to set up function pointers. errno= %d",errno); + + free(path); + + return NULL; + + } + + + path->afu = cblk_get_afu(path,chunk->dev_name,adap_devno,type,num_cmds,in_use,share); + + if (path->afu == NULL) { + + free(path); + + return NULL; + } + + pthread_rc = pthread_cond_init(&(path->resume_event),NULL); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(1,"pthread_cond_init failed for resume_event rc = %d errno= %d", + pthread_rc,errno); + + cblk_release_afu(path,path->afu); + free(path); + + return NULL; + + } + + + path->eyec = CFLSH_EYEC_PATH; + + /* + * Ensure the chunk's num_cmds does + * not exceed the AFU's number of rrq + * elements. If num_cmds is larger than + * num_rrqs, then set num_cmds to num_rrqs, + * This also requires that there are only + * num_rrqs command sin the chunk free list. + */ + + + if (chunk->num_cmds > path->afu->num_rrqs) { + + + for (j = chunk->num_cmds; j < path->afu->num_rrqs; j++) { + + CBLK_DQ_NODE(chunk->head_free,chunk->tail_free,&(chunk->cmd_info[j]),free_prev,free_next); + } + + + chunk->num_cmds = path->afu->num_rrqs; + } + + chunk->num_paths++; + + } + + + + + return path; +} + + +/* + * NAME: cblk_release_path + * + * FUNCTION: This routine checks if the path is used by others + * if not, then it frees the path structure; + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. 
+ * + */ + +void cblk_release_path(cflsh_chunk_t *chunk, cflsh_path_t *path) +{ + if (path) { + + + cblk_release_afu(path,path->afu); + path->afu = NULL; + path->eyec = 0; + free(path); + chunk->num_paths--; + + } + + return; + +} + +/* + * NAME: cblk_update_path_type + * + * FUNCTION: This routine updates a path's type when + * more information is known. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * + * + * RETURNS: + * 0 - Success + * otherwise - Failure + * + */ + +int cblk_update_path_type(cflsh_chunk_t *chunk, cflsh_path_t *path, cflsh_block_chunk_type_t type) +{ + int rc = 0; + + + if (path == NULL) { + + + errno = EINVAL; + + return -1; + } + + rc = cblk_update_afu_type(path->afu,type); + + + return rc; +} + + + +/* + * NAME: cblk_get_chunk + * + * FUNCTION: This routine gets chunk of the specified + * command type. + * + * NOTE: This routine assumes the caller + * has the cflsh_blk.global_lock. + * + * + * INPUTS: + * NONE + * + * RETURNS: + * NULL_CHUNK_ID for error. + * Otherwise the chunk_id is returned. + * + */ +chunk_id_t cblk_get_chunk(int flags,int max_num_cmds) +{ + chunk_id_t ret_chunk_id = NULL_CHUNK_ID; + cflsh_chunk_t *chunk = NULL; + cflsh_chunk_t *tmp_chunk; + int j; + + +#ifdef BLOCK_FILEMODE_ENABLED + char *max_transfer_size_blocks = getenv("CFLSH_BLK_MAX_XFER"); +#endif /* BLOCK_FILEMODE_ENABLED */ + + + + if (max_num_cmds <= 0) { + /* + * If max_num_cmds not passed + * then use our default size. + */ + max_num_cmds = NUM_CMDS; + } else if (max_num_cmds > MAX_NUM_CMDS) { + /* + * If max_num_cmds is larger than + * our upper limit then fail this request. + */ + + errno = ENOMEM; + return ret_chunk_id; + } + + /* + * Align on 4K boundary, so that we can use + * the low order bits for eyecatcher ir hashing + * if we decide to pass back a modified pointer + * to the user. 
Currently we are not doing this, + * but depending on the efficiency of the hash table + * we may need to in the future. + */ + + if ( posix_memalign((void *)&chunk,4096, + (sizeof(*chunk)))) { + + + CBLK_TRACE_LOG_FILE(1,"Failed posix_memalign for chunk, errno= %d",errno); + + + return ret_chunk_id; + + } + + /* + * Initialize chunk for use; + */ + + + if (flags & CFLSH_BLK_CHUNK_SET_UP) { + + + bzero((void *) chunk,sizeof (*chunk)); + + + CFLASH_BLOCK_LOCK_INIT(chunk->lock); + + chunk->num_cmds = max_num_cmds; + + /* + * Align commands on cacheline boundary. + */ + + if ( posix_memalign((void *)&(chunk->cmd_start),128, + (sizeof(*(chunk->cmd_start)) * chunk->num_cmds))) { + + + CBLK_TRACE_LOG_FILE(1,"Failed posix_memalign for cmd_start errno= %d",errno); + + // ?? TODO maybe should return special type of error. + free(chunk); + + return ret_chunk_id; + } + + bzero((void *)chunk->cmd_start , + (sizeof(*(chunk->cmd_start)) * chunk->num_cmds)); + + chunk->cmd_curr = chunk->cmd_start; + + chunk->cmd_end = chunk->cmd_start + chunk->num_cmds; + + chunk->in_use = TRUE; + + /* + * Alocate command infos for each command + */ + + chunk->cmd_info = malloc(sizeof(cflsh_cmd_info_t) * chunk->num_cmds); + + if (chunk->cmd_info == NULL) { + + + // ?? TODO maybe should return special type of error. 
+ + free(chunk->cmd_start); + chunk->cmd_start = NULL; + free(chunk); + + return ret_chunk_id; + + } + + bzero((void *)chunk->cmd_info,(sizeof(cflsh_cmd_info_t) * chunk->num_cmds)); + + for (j = 0; j < chunk->num_cmds; j++) { + chunk->cmd_start[j].index = j; + chunk->cmd_start[j].cmdi = &chunk->cmd_info[j]; + chunk->cmd_info[j].index = j; + chunk->cmd_info[j].chunk = chunk; + chunk->cmd_info[j].eyec = CFLSH_EYEC_INFO; + + CBLK_Q_NODE_TAIL(chunk->head_free,chunk->tail_free,&(chunk->cmd_info[j]),free_prev,free_next); + } + + + + CFLASH_BLOCK_LOCK_INIT((chunk->lock)); + + + + + cflsh_blk.num_active_chunks++; + cflsh_blk.num_max_active_chunks = MAX(cflsh_blk.num_active_chunks,cflsh_blk.num_max_active_chunks); + + + chunk->index = cflsh_blk.next_chunk_id++; + + ret_chunk_id = chunk->index; + + chunk->stats.block_size = CAPI_FLASH_BLOCK_SIZE; + + chunk->stats.max_transfer_size = 1; + +#ifdef BLOCK_FILEMODE_ENABLED + + /* + * For filemode let user adjust maximum transfer size + */ + + if (max_transfer_size_blocks) { + chunk->stats.max_transfer_size = atoi(max_transfer_size_blocks); + } +#endif /* BLOCK_FILEMODE_ENABLED */ + + + /* + * Insert chunk into hash list + */ + + chunk->eyec = CFLSH_EYEC_CHUNK; + + + if (cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK] == NULL) { + + cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK] = chunk; + } else { + + tmp_chunk = cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK]; + + while (tmp_chunk) { + + if ((ulong)tmp_chunk & CHUNK_BAD_ADDR_MASK ) { + + /* + * Chunk addresses are allocated + * on certain alignment. If this + * potential chunk address does not + * have the correct alignment then fail + * this request. 
+ */ + + cflsh_blk.num_bad_chunk_ids++; + + CBLK_TRACE_LOG_FILE(1,"Corrupted chunk address = 0x%p, hash[] = 0x%p index = 0x%x", + tmp_chunk, cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK], + (chunk->index & CHUNK_HASH_MASK)); + + CBLK_LIVE_DUMP_THRESHOLD(5,"0x200"); + + free(chunk->cmd_info); + free(chunk->cmd_start); + + chunk->eyec = 0; + + free(chunk); + + errno = EFAULT; + return NULL_CHUNK_ID; + } + + + if (tmp_chunk->next == NULL) { + + tmp_chunk->next = chunk; + + chunk->prev = tmp_chunk; + break; + } + + tmp_chunk = tmp_chunk->next; + + } /* while */ + + } + + } + + + if (ret_chunk_id == NULL_CHUNK_ID) { + + CBLK_TRACE_LOG_FILE(1,"no chunks found , num_active = 0x%x",cflsh_blk.num_active_chunks); + errno = ENOSPC; + } + + return ret_chunk_id; +} + + + + + +/* + * NAME: cblk_get_buf_cmd + * + * FUNCTION: Finds free command and allocates data buffer for command + * + * + * + * INPUTS: + * chunk - Chunk the read is associated. + * buf - Buffer to read data into + * lba - starting LBA (logical Block Address) + * in chunk to read data from. + * nblocks - Number of blocks to read. 
+ * + * + * RETURNS: + * None + * + * + */ + +int cblk_get_buf_cmd(cflsh_chunk_t *chunk,void **buf, size_t buf_len, + cflsh_cmd_mgm_t **cmd) +{ + int rc = 0; + + + /* + * AFU requires data buffer to have 16 byte alignment + */ + + if ( posix_memalign((void *)buf,64,buf_len)) { + + CBLK_TRACE_LOG_FILE(1,"posix_memalign failed for buffer size = %d,errno = %d", + chunk->cache_size,errno); + + + return -1; + + + } + + CFLASH_BLOCK_LOCK(chunk->lock); + rc = cblk_find_free_cmd(chunk,cmd,CFLASH_WAIT_FREE_CMD); + + if (rc) { + + + free(*buf); + CBLK_TRACE_LOG_FILE(1,"could not find a free cmd, num_active_cmds = %d",chunk->num_active_cmds); + CFLASH_BLOCK_UNLOCK(chunk->lock); + errno = EBUSY; + + return -1; + } + + chunk->cmd_info[(*cmd)->index].path_index = chunk->cur_path; + CBLK_BUILD_ADAP_CMD(chunk,*cmd,*buf,buf_len,CFLASH_READ_DIR_OP); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + return rc; +} + +#ifdef _COMMON_INTRPT_THREAD +/* + * NAME: cblk_start_common_intrpt_thread + * + * FUNCTION: Starts common interrupt thread. + * When the block library is compiled + * in this mode, there is exactly one + * dedicated thread for processing all + * interrupts. The alternate mode to compile + * the block library is cooperative interrupt + * processing, where multiple threads can + * coordinate the processing of interrupts. + * + * NOTE: This routine assumes the caller + * is holding both the chunk lock and + * the global lock. 
 *
 *
 * INPUTS:
 *     chunk - Chunk associated with a lun
 *
 *
 * RETURNS:
 *     0 - Success
 *     -1 - Error/failure
 *
 */

int cblk_start_common_intrpt_thread(cflsh_chunk_t *chunk)
{
    int rc = 0;
    int pthread_rc;
    cflsh_async_thread_cmp_t *async_data;


    if (chunk->flags & CFLSH_CHNK_NO_BG_TD) {

	/*
	 * Background threads are not allowed
	 */

	return rc;
    }

    chunk->thread_flags = 0;

    /* Condition used to wake the interrupt thread. */

    pthread_rc = pthread_cond_init(&(chunk->thread_event),NULL);

    if (pthread_rc) {

	CBLK_TRACE_LOG_FILE(1,"pthread_cond_init failed for thread_event rc = %d errno= %d",
			    pthread_rc,errno);


	errno = EAGAIN;
	return -1;

    }

    /*
     * Condition used to signal command completion.
     *
     * NOTE(review): if this second init fails, thread_event is not
     * destroyed before returning — confirm whether cleanup elsewhere
     * covers this.
     */

    pthread_rc = pthread_cond_init(&(chunk->cmd_cmplt_event),NULL);

    if (pthread_rc) {

	CBLK_TRACE_LOG_FILE(1,"pthread_cond_init failed for cmd_cmplt_event rc = %d errno= %d",
			    pthread_rc,errno);


	errno = EAGAIN;
	return -1;

    }

    /* Hand the chunk to the interrupt thread via its completion block. */

    async_data = &(chunk->intrpt_data);
    async_data->chunk = chunk;
    async_data->cmd_index = 0;

    pthread_rc = pthread_create(&(chunk->thread_id),NULL,cblk_intrpt_thread,async_data);

    if (pthread_rc) {

	chunk->stats.num_failed_threads++;

	CBLK_TRACE_LOG_FILE(5,"pthread_create failed rc = %d,errno = %d num_active_cmds = 0x%x",
			    pthread_rc,errno, chunk->num_active_cmds);

	errno = EAGAIN;
	return -1;
    }

    /*
     * We successfully started the thread.
     * Update statistics reflecting this.
     */

    chunk->stats.num_success_threads++;

    chunk->stats.num_active_threads++;

    chunk->stats.max_num_act_threads = MAX(chunk->stats.max_num_act_threads,chunk->stats.num_active_threads);


    return rc;
}

#endif /* _COMMON_INTRPT_THREAD */



/*
 * NAME: cblk_get_lun_id
 *
 * FUNCTION: Gets the lun id of the physical
 *           lun associated with this chunk.
 *
 * NOTE:    This routine assumes the caller
 *          is holding both the chunk lock and
 *          the global lock.
+ * + * + * INPUTS: + * chunk - Chunk associated with a lun + * + * + * RETURNS: + * NONE + * + */ + +int cblk_get_lun_id(cflsh_chunk_t *chunk) +{ + int rc = 0; + void *raw_lun_list = NULL; + int list_size = 4096; + uint64_t *lun_ids; + int num_luns = 0; + cflsh_cmd_mgm_t *cmd; + size_t transfer_size = 0; +#ifdef BLOCK_FILEMODE_ENABLED + struct lun_list_hdr *list_hdr; +#else + int cmd_index = 0; +#endif /* BLOCK_FILEMODE_ENABLED */ + + + + + if (cflsh_blk.flags & CFLSH_G_LUN_ID_VAL) { + + /* + * We have alread determined the lun id. + * So just set it for the chunk + * and return. + */ + + chunk->path[chunk->cur_path]->lun_id = cflsh_blk.lun_id; + + CBLK_TRACE_LOG_FILE(5,"rc = %d,lun_id = 0x%llx", + rc,cflsh_blk.lun_id); + return rc; + } + + + if (cblk_get_buf_cmd(chunk,&raw_lun_list,list_size, + &cmd)) { + + + return -1; + + } + + bzero(raw_lun_list,list_size); + + + /* + * This command will use transfer size in bytes + */ + + cmd->cmdi->transfer_size_bytes = 1; + + if (cflash_build_scsi_report_luns(CBLK_GET_CMD_CDB(chunk,cmd), + list_size)) { + + CBLK_TRACE_LOG_FILE(5,"build_scsi_report_luns failed rc = %d,", + rc); + CBLK_FREE_CMD(chunk,cmd); + free(raw_lun_list); + return -1; + + } + + + + + + + + + if (CBLK_ISSUE_CMD(chunk,cmd,raw_lun_list,0,0,0)) { + + + CBLK_FREE_CMD(chunk,cmd); + free(raw_lun_list); + return -1; + + } + +#ifdef BLOCK_FILEMODE_ENABLED + + /* + * For BLOCK_FILEMODE_ENABLED get the size of this file that was + * just opened + */ + + + list_hdr = raw_lun_list; + + list_hdr->lun_list_length = CFLASH_TO_ADAP32((sizeof(uint64_t) + sizeof(*list_hdr))); + lun_ids = (uint64_t *) ++list_hdr; + + lun_ids[0] = cblk_lun_id; + + /* + * This command completed, + * clean it up. 
+ */ + + chunk->num_active_cmds--; + + CBLK_FREE_CMD(chunk,cmd); + + + transfer_size = sizeof (struct lun_list_hdr ); + +#else + + cmd_index = cmd->index; + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&(cmd_index),&transfer_size,TRUE,0); + +#ifdef _COMMON_INTRPT_THREAD + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&(cmd_index),&transfer_size,TRUE,0); + } else { + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + } + +#endif /* _COMMON_INTRPT_THREAD */ + +#endif /* BLOCK_FILEMODE_ENABLED */ + + if (!rc) { + + /* + * For good completion, extract the first + * lun_id + */ + + if (transfer_size < sizeof (struct lun_list_hdr )) { + + CBLK_TRACE_LOG_FILE(1,"Report Luns returned data size is too small = 0x%x",transfer_size); + + errno = ENOMSG; + return -1; + + } + + rc = cflash_process_scsi_report_luns(raw_lun_list,list_size, + &lun_ids,&num_luns); + + if (rc) { + + /* + * Failed to process returned lun list + */ + + + + CBLK_TRACE_LOG_FILE(1,"cflash_process_scsi_report_luns failed rc = %d",rc); + + errno = ENOMSG; + rc = -1; + + } else { + + + /* + * We successfully processed the returned + * lun list. + */ + + + + + if (num_luns) { + + /* + * Report luns found some luns. + * Let's choose the first lun + * in the lun list. + */ + + if ((lun_ids[0] == 0) && + (num_luns > 1)) { + + /* + * If more than 1 lun was returned and + * the first lun is 0, then choose + * the second lun. + */ + cflsh_blk.lun_id = lun_ids[1]; + + } else { + cflsh_blk.lun_id = lun_ids[0]; + + } + + + cflsh_blk.flags |= CFLSH_G_LUN_ID_VAL; + + chunk->path[chunk->cur_path]->lun_id = cflsh_blk.lun_id; + + + } else { + + /* + * No luns found fail this request. + */ + + rc = -1; + + errno = ENXIO; + +#ifndef BLOCK_FILEMODE_ENABLED + + CBLK_TRACE_LOG_FILE(5,"no luns found. 
hardcode lun_id"); + + chunk->path[chunk->cur_path]->lun_id = cblk_lun_id; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + } + + } + + } + + + free(raw_lun_list); + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d,lun_id = 0x%llx, num_luns = %d", + rc,errno,cflsh_blk.lun_id, num_luns); + return rc; +} + +/* + * NAME: cblk_get_lun_capacity + * + * FUNCTION: Gets the capacity (number of + * blocks) for a lun associated with + * a specific chunk. + * + * NOTE: This routine assumes the caller + * is holding both the chunk lock and + * the global lock. + * + * + * INPUTS: + * chunk - Chunk associated with a lun + * + * + * RETURNS: + * 0 - Success, otherwise error. + * + */ + +int cblk_get_lun_capacity(cflsh_chunk_t *chunk) +{ + int rc = 0; +#ifdef BLOCK_FILEMODE_ENABLED + struct stat stats; + uint64_t st_size; +#else + int cmd_index = 0; +#endif /* !BLOCK_FILEMODE_ENABLED */ + struct readcap16_data *readcap16_data = NULL; + cflsh_cmd_mgm_t *cmd; + size_t transfer_size = 0; + uint32_t block_size = 0; + uint64_t last_lba = 0; + + + + + + if (cblk_get_buf_cmd(chunk,(void **)&readcap16_data,sizeof(struct readcap16_data), + &cmd)) { + + + return -1; + + } + + bzero(readcap16_data,sizeof(*readcap16_data)); + + + /* + * This command will use transfer size in bytes + */ + + cmd->cmdi->transfer_size_bytes = 1; + + if (cflash_build_scsi_read_cap16(CBLK_GET_CMD_CDB(chunk,cmd), + sizeof(struct readcap16_data))) { + + + CBLK_TRACE_LOG_FILE(5,"build_scsi_read_cap16 failed rc = %d,", + rc); + free(readcap16_data); + return -1; + + } + + + + if (CBLK_ISSUE_CMD(chunk,cmd,readcap16_data,0,0,0)) { + + + + CBLK_FREE_CMD(chunk,cmd); + + free(readcap16_data); + return -1; + + } + +#ifdef BLOCK_FILEMODE_ENABLED + + /* + * For BLOCK_FILEMODE_ENABLED get the size of this file that was + * just opened + */ + + /* + * This command completed, + * clean it up. 
+ */ + + chunk->num_active_cmds--; + + CBLK_FREE_CMD(chunk,cmd); + + bzero((void *) &stats,sizeof(struct stat)); + + rc = fstat(chunk->fd,&stats); + + if (rc) { + + + CBLK_TRACE_LOG_FILE(1,"fstat failed with rc = %d, errno = %d",rc, errno); + free(readcap16_data); + return rc; + } + + if (S_ISBLK(stats.st_mode) || S_ISCHR(stats.st_mode)) { + + /* + * Do not allow special files for file mode + */ + + errno = EINVAL; + CBLK_TRACE_LOG_FILE(1,"fstat failed with rc = %d, errno = %d",rc, errno); + free(readcap16_data); + perror("cblk_open: Can not use device special files for file mode"); + return -1; + + + } + + st_size = stats.st_size; + CBLK_TRACE_LOG_FILE(5,"st_size = 0x%llx",st_size); + st_size /= CAPI_FLASH_BLOCK_SIZE; + CBLK_TRACE_LOG_FILE(5,"number of blocks from stat = 0x%llx",st_size); + /* + * LBA is the last valid sector on the disk, not the number + * blocks on the disk. So decrement to get last LBA. + */ + st_size--; + + + CBLK_TRACE_LOG_FILE(5,"last blocks from stat = 0x%llx",st_size); + + readcap16_data->len = CFLASH_TO_ADAP32(CAPI_FLASH_BLOCK_SIZE); + + readcap16_data->lba = CFLASH_TO_ADAP64(st_size); + + if (readcap16_data->lba <= 1) { + + + free(readcap16_data); + CBLK_TRACE_LOG_FILE(1,"fstat returned size of 0 blocks"); + perror("cblk_open: file too small"); + + return -1; + } + + transfer_size = sizeof(*readcap16_data); +#else + + + cmd_index = cmd->index; + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&(cmd_index),&transfer_size,TRUE,0); + +#ifdef _COMMON_INTRPT_THREAD + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&(cmd_index),&transfer_size,TRUE,0); + } else { + rc = CBLK_COMPLETE_CMD(chunk,cmd,&transfer_size); + } + +#endif /* _COMMON_INTRPT_THREAD */ + +#endif /* BLOCK_FILEMODE_ENABLED */ + + if (!rc) { + + /* + * For good completion, extract number of + * 4K blocks.. 
+ */ + + if (transfer_size < sizeof(*readcap16_data)) { + + CBLK_TRACE_LOG_FILE(1,"Read capacity 16 returned data size is too small = 0x%x",transfer_size); + + errno = ENOMSG; + return -1; + + } + + + if (cflash_process_scsi_read_cap16(readcap16_data,&block_size,&last_lba) == 0) { + + CBLK_TRACE_LOG_FILE(5,"block_size = 0x%x,capacity = 0x%llx", + block_size,last_lba); + + + if (block_size == CAPI_FLASH_BLOCK_SIZE) { + /* + * If the device is reporting back 4K block size, + * then use the number of blocks specified as its + * capacity. + */ + chunk->num_blocks_lun = last_lba + 1; + chunk->blk_size_mult = 1; + } else { + /* + * If the device is reporting back an non-4K block size, + * then then convert it capacity to the number of 4K + * blocks. + */ + + chunk->num_blocks_lun = + ((last_lba + 1) * block_size)/CAPI_FLASH_BLOCK_SIZE; + + if (block_size) { + chunk->blk_size_mult = CAPI_FLASH_BLOCK_SIZE/block_size; + } else { + chunk->blk_size_mult = 8; + } + } + } + } + + + free(readcap16_data); + + + if (chunk->num_blocks_lun == 0) { + + errno = EIO; + rc = -1; + } + + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + + /* + * If this is a physical lun + * (not a virtual lun) then assign + * the lun's capacity to this chunk. + */ + + chunk->num_blocks = chunk->num_blocks_lun; + + } + + + CBLK_TRACE_LOG_FILE(5,"rc = %d,errno = %d,capacity = 0x%llx", + rc,errno,chunk->num_blocks_lun); + return rc; +} + +/* + * NAME: cblk_open_cleanup_wait_thread + * + * FUNCTION: If we are running with a single common interrupt thread + * per chunk, then this routine terminates that thread + * and waits for completion. + * + * + * INPUTS: + * chunk - Chunk to be cleaned up. 
+ * + * RETURNS: + * NONE + * + */ +void cblk_open_cleanup_wait_thread(cflsh_chunk_t *chunk) +{ +#ifdef _COMMON_INTRPT_THREAD + int pthread_rc = 0; + + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + + /* + * Background threads are not allowed + */ + + return; + } + + + chunk->thread_flags |= CFLSH_CHNK_EXIT_INTRPT; + + pthread_rc = pthread_cond_signal(&(chunk->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed rc = %d,errno = %d", + pthread_rc,errno); + } + + /* + * Since we are going to do pthread_join we need to unlock here. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + pthread_join(chunk->thread_id,NULL); + + CFLASH_BLOCK_LOCK(chunk->lock); + + chunk->stats.num_active_threads--; + + chunk->thread_flags &= ~CFLSH_CHNK_EXIT_INTRPT; + +#endif /* _COMMON_INTRPT_THREAD */ + + return; +} + + +/* + * NAME: cblk_chunk_open_cleanup + * + * FUNCTION: Cleans up a chunk and resets it + * for reuse. This routine assumes + * the caller has the chunk's lock. + * + * + * INPUTS: + * chunk - Chunk to be cleaned up. 
+ * + * RETURNS: + * NONE + * + */ + +void cblk_chunk_open_cleanup(cflsh_chunk_t *chunk, int cleanup_depth) +{ + int i; + + CBLK_TRACE_LOG_FILE(5,"cleanup = %d",cleanup_depth); + + switch (cleanup_depth) { + + case 50: + + cblk_chunk_free_mc_device_resources(chunk); + /* Fall through */ + case 45: + + cblk_open_cleanup_wait_thread(chunk); + /* Fall through */ + case 40: + + cblk_chunk_unmap(chunk,FALSE); + + case 35: + + cblk_chunk_detach(chunk,FALSE); + + + case 30: + + close(chunk->fd); + /* Fall through */ + case 20: + + + free(chunk->cmd_start); + + free(chunk->cmd_info); + + chunk->cmd_start = NULL; + chunk->cmd_curr = NULL; + chunk->cmd_end = NULL; + chunk->num_cmds = 0; + /* Fall through */ + case 10: + + for (i=0;i < chunk->num_paths;i++) { + cblk_release_path(chunk,(chunk->path[i])); + + chunk->path[i] = NULL; + } + + /* Fall through */ + + default: + + + + + + if (cflsh_blk.num_active_chunks > 0) { + cflsh_blk.num_active_chunks--; + } + +#ifndef _MASTER_CONTXT + + if (chunk->flags & CFLSH_CHNK_VLUN) { + + if (cflsh_blk.num_active_chunks == 0) { + + /* + * If this is the last chunk then + * the next cblk_open could use the physical + * lun. + */ + + + cflsh_blk.next_chunk_starting_lba = 0; + } else if (cflsh_blk.next_chunk_starting_lba == + (chunk->start_lba + chunk->num_blocks)) { + + /* + * If chunk is the using physical LBAs + * at the end of the disk, then release them. + * Thus another chunk could use them. + */ + + cflsh_blk.next_chunk_starting_lba = chunk->start_lba; + + } + } + +#endif + + chunk->eyec = 0; + + bzero(chunk->dev_name,PATH_MAX); + + for (i=0;i < chunk->num_paths;i++) { + if (chunk->path[i]) { + chunk->path[i]->lun_id = 0; + } + } + + chunk->num_blocks = 0; + chunk->flags = 0; + chunk->in_use = FALSE; + chunk->num_paths = 0; + + /* + * Remove chunk from hash list + */ + + if (((ulong)chunk->next & CHUNK_BAD_ADDR_MASK ) || + ((ulong)chunk->prev & CHUNK_BAD_ADDR_MASK )) { + + /* + * Chunk addresses are allocated + * on certain alignment. 
If these + * potential chunk addresses do not + * have the correct alignment then + * print an error to the trace log. + */ + + cflsh_blk.num_bad_chunk_ids++; + /* + * Try continue in this case. + */ + + cblk_notify_mc_err(chunk,0,0x208,0, CFLSH_BLK_NOTIFY_SFW_ERR,NULL); + + CBLK_TRACE_LOG_FILE(1,"Corrupted chunk next address = 0x%p, prev address = 0x%p, hash[] = 0x%p", + chunk->next, chunk->prev, cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK]); + + } + + if (chunk->prev) { + chunk->prev->next = chunk->next; + + } else { + + cflsh_blk.hash[chunk->index & CHUNK_HASH_MASK] = chunk->next; + } + + if (chunk->next) { + chunk->next->prev = chunk->prev; + } + + } + + return; +} + + +/* + * NAME: cblk_listio_arg_verify + * + * FUNCTION: Verifies arguments to cblk_listio API + * + * + * INPUTS: + * chunk_id - Chunk identifier + * flags - Flags on this request. + * + * RETURNS: + * 0 for good completion + * -1 for error and errno is set. + * + */ + +int cblk_listio_arg_verify(chunk_id_t chunk_id, + cblk_io_t *issue_io_list[],int issue_items, + cblk_io_t *pending_io_list[], int pending_items, + cblk_io_t *wait_io_list[],int wait_items, + cblk_io_t *completion_io_list[],int *completion_items, + uint64_t timeout,int flags) +{ + + int rc = 0; + cblk_io_t *io; + int i; /* General counter */ + + + + if ((issue_items == 0) && + (pending_items == 0) && + (wait_items == 0)) { + + CBLK_TRACE_LOG_FILE(1,"No items specified for chunk_id = %d", + chunk_id); + errno = EINVAL; + return -1; + + } + + if ((wait_items) && + (wait_io_list == NULL)) { + + CBLK_TRACE_LOG_FILE(1,"No waiting list items specified for chunk_id = %d, with wait_items = %d", + chunk_id,wait_items); + errno = EINVAL; + return -1; + + } + + if (completion_items == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"No completion list items specified for chunk_id = %d", + chunk_id); + errno = EINVAL; + return -1; + } + + + if ((*completion_items) && + (completion_io_list == NULL)) { + + CBLK_TRACE_LOG_FILE(1,"No completion list items 
specified for chunk_id = %d with completion_items = %d", + chunk_id,*completion_items); + errno = EINVAL; + return -1; + + } + + if ((wait_items + *completion_items) < (issue_items + pending_items)) { + + /* + * Completion list needs to have enough space to place + * all requests completing in this invocation. + */ + + CBLK_TRACE_LOG_FILE(1,"completion list too small chunk_id = %d completion_items = %d, wait_items = %d", + chunk_id,*completion_items,wait_items); + errno = EINVAL; + return -1; + + } + + + + // TODO:?? This should be modularized into one or more subroutines. + if (issue_items) { + + + if (issue_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list array is a null pointer for chunk_id = %d and issue_items = %d", + chunk_id,issue_items); + errno = EINVAL; + + return -1; + + } + + + for (i=0; i< issue_items;i++) { + + io = issue_io_list[i]; + + + if (io == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[%d] is a null pointer for chunk_id = %d and issue_items = %d", + i,chunk_id,issue_items); + errno = EINVAL; + + return -1; + + } + + io->stat.blocks_transferred = 0; + io->stat.fail_errno = 0; + io->stat.status = CBLK_ARW_STATUS_PENDING; + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[%d] is invalid for chunk_id = %d and issue_items = %d", + i,chunk_id,issue_items); + errno = EINVAL; + + return -1; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[%d] is invalid for chunk_id = %d and issue_items = %d", + i,chunk_id,issue_items); + errno = EINVAL; + 
+ return -1; + + } + + } /* for */ + + } + + + + if (pending_items) { + + + if (pending_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"pending_io_list array is a null pointer for chunk_id = %d and pending_items = %d", + chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + for (i=0; i< pending_items;i++) { + + io = pending_io_list[i]; + + + if (io == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"pending_io_list[%d] is a null pointer for chunk_id = %d and pending_items = %d", + i,chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Pending_io_list[%d] is invalid for chunk_id = %d and pending_items = %d", + i,chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[%d] is invalid for chunk_id = %d and pending_items = %d", + i,chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + } /* for */ + + } + + + + + if (wait_items) { + + + if (wait_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"wait_io_list array is a null pointer for chunk_id = %d and wait_items = %d", + chunk_id,wait_items); + errno = EINVAL; + + return -1; + + } + + for (i=0; i< wait_items;i++) { + + io = wait_io_list[i]; + + + if (io == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"wait_io_list[%d] is a null pointer for chunk_id = %d and wait_items = %d", + i,chunk_id,wait_items); + errno = EINVAL; + + return -1; + + } + + if (io->buf == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer 
for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Pending_io_list[%d] is invalid for chunk_id = %d and pending_items = %d", + i,chunk_id,wait_items); + errno = EINVAL; + + return -1; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + CBLK_TRACE_LOG_FILE(1,"Issue_io_list[%d] is invalid for chunk_id = %d and issue_items = %d", + i,chunk_id,wait_items); + errno = EINVAL; + + return -1; + + } + + if (io->flags & CBLK_IO_USER_STATUS) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid to wait when user status supplied type e = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + errno = EINVAL; + + return -1; + } + + } /* for */ + + } + + + if (*completion_items) { + + + if (completion_io_list == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"completion_io_list array is a null pointer for chunk_id = %d and completion_items = %d", + chunk_id,*completion_items); + errno = EINVAL; + + return -1; + + } + + for (i=0; i< wait_items;i++) { + + io = wait_io_list[i]; + + + if (io == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"wait_io_list[%d] is a null pointer for chunk_id = %d and wait_items = %d", + i,chunk_id,pending_items); + errno = EINVAL; + + return -1; + + } + + } /* for */ + + } + + return rc; +} + +#ifdef _NOT_YET + +/* + * NAME: cblk_listio_result + * + * FUNCTION: Checks for results on the specified list supplied + * cblk_listio. + * + * + * INPUTS: + * chunk_id - Chunk identifier + * flags - Flags on this request. + * + * RETURNS: + * 0 for good completion + * -1 for error and errno is set. 
+ * + */ + +int cblk_listio_result(cflsh_chunk_t *chunk,chunk_id_t chunk_id, + cblk_io_t *io_list[],int io_items + cblk_io_t *wait_io_list[],int wait_items, + int waiting,int *completion_items, + uint64_t timeout,int flags) +{ + int rc = 0; + int i,j; /* General counters */ + cblk_io_t *io; + struct timespec start_time; + struct timespec last_time; + uint64_t uelapsed_time = 0; /* elapsed time in microseconds */ + int cmd_not_complete; + cblk_io_t *wait_io; + int wait_item_found; + + + if (io_items) { + + /* + * Caller is requesting I/Os to issued. + */ + + + if (io_list == NULL) { + + + + CBLK_TRACE_LOG_FILE(1,"io_list array is a null pointer for chunk_id = %d and num_items = %d, waiting = %d", + chunk_id,io_items,waiting); + errno = EINVAL; + + return -1; + + } + + + if (waiting) { + + // TODO:?? Can this be moved to caller. + clock_gettime(CLOCK_MONOTONIC,&start_time); + clock_gettime(CLOCK_MONOTONIC,&last_time); + + + // TODO: ?? Add macros to replace this. + + // TODO:?? Can this be moved to caller. + uelapsed_time = ((last_time.tv_sec - start_time.tv_sec) * 1000000) + ((last_time.tv_nsec - start_time.tv_nsec)/1000); + + } + + while ((timeout == 0) || + (uelapsed_time < timeout)) { + + /* + * If no time out is specified then only go thru this loop + * once. Otherwise continue thru this loop until + * our time has elapsed. 
+ */ + + if (waiting) { + cmd_not_complete = FALSE; + } + + + for (i=0; i< io_items;i++) { + + io = io_list[i]; + + + if (io == NULL) { + + continue; + + } + + + if ((io->buf == NULL) && (!waiting)) { + + + + CBLK_TRACE_LOG_FILE(1,"data buffer is a null pointer for chunk_id = %d and index = %d", + chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + + continue; + + } + + if ((io->request_type != CBLK_IO_TYPE_READ) && + (io->request_type != CBLK_IO_TYPE_WRITE)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid request_type = %d chunk_id = %d and index = %d", + io->request_type,chunk_id,i); + + + io->stat.status = CBLK_ARW_STATUS_INVALID; + io->stat.fail_errno = EINVAL; + continue; + } + + if (io->stat.status != CBLK_ARW_STATUS_PENDING) { + + /* + * This I/O request has already completed. + * continue to the next wait I/O request. + */ + + continue; + } + + + + /* + * Process this I/O request + */ + + + // TODO:?? Need mechanism to specify time-out + + io_flags = 0; + + + if ((timeout == 0) && (waiting)) { + + io_flags |= CBLK_ARESULT_BLOCKING; + } + + if (io->flags & CBLK_IO_USER_TAG) { + + io_flags |= CBLK_ARESULT_USER_TAG; + + } + + rc = cblk_aresult(chunk_id,&(io->tag),&status,io_flags); + + if (rc < 0) { + + CBLK_TRACE_LOG_FILE(1,"Request failed for chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + + // TODO:?? Should we filter on EINVAL and uses a different status? + io->stat.status = CBLK_ARW_STATUS_FAIL; + io->stat.fail_errno = errno; + io->stat.blocks_transferred = 0; + + } else if (rc) { + + if (!waiting) { + + CBLK_TRACE_LOG_FILE(9,"Request chunk_id = %d and index = %d with rc = %d, errno = %d", + chunk_id,i,rc,errno); + + wait_item_found = FALSE; + + + if (wait_items) { + + /* + * If there are wait_items, then see + * if this item is one of them. If so + * update the associated wait_item. 
+ */ + + + for (j=0; j < wait_items; j++) { + + wait_io = wait_io_list[j]; + + if ((wait_io->buf == io->buf) && + (wait_io->lba == io->lba) && + (wait_io->nblocks == io->nblocks) && + (wait_io->tag == io->tag)) { + + wait_io->stat.status = CBLK_ARW_STATUS_SUCCESS; + wait_io->stat.fail_errno = errno; + wait_io->stat.blocks_transferred = rc; + + wait_item_found = TRUE; + + break; + + } + + } /* inner for */ + + } + + if (!wait_item_found) { + + if ((complete_io) && + (*completion_items stat.status = CBLK_ARW_STATUS_SUCCESS; + complete_io->stat.fail_errno = errno; + complete_io->stat.blocks_transferred = rc; + complete_io++; + (*completion_items)++; + } else { + + + CBLK_TRACE_LOG_FILE(1,"Request chunk_id = %d and index = %d no complete_io entry found", + chunk_id,i); + } + } + + } else { + io->stat.status = CBLK_ARW_STATUS_SUCCESS; + io->stat.fail_errno = errno; + io->stat.blocks_transferred = rc; + } + } else if (waiting) { + + /* + * This command has not completed yet. + */ + + cmd_not_complete = TRUE; + } + + + } /* for */ + + if (timeout == 0) { + + /* + * Only go thru the while loop one time if + * no time out is specified, since we will block until + * command completion. + */ + + break; + } + + if ((cmd_not_complete) && (waiting)) { + + /* + * Sleep for one microsecond + */ + + usleep(1); + } else { + + /* + * All I/O has completed. So exit this loop. + */ + + break; + } + + if (waiting) { + + clock_gettime(CLOCK_MONOTONIC,&last_time); + + + // TODO: ?? Add macros to replace this. + uelapsed_time = ((last_time.tv_sec - start_time.tv_sec) * 1000000) + ((last_time.tv_nsec - start_time.tv_nsec)/1000); + + } + + + } /* while */ + + } + + return rc; + +} + +#endif /* _NOT_YET */ + +/* + * NAME: cblk_chk_cmd_bad_page + * + * FUNCTION: This routine checks if the bad page fault address is + * associated with a specific command. If so then that + * command is failed. + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. 
+ * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * None + * + */ +int cblk_chk_cmd_bad_page(cflsh_chunk_t *chunk, uint64_t bad_page_addr) +{ + int i; + int found_cmd = FALSE; + cflsh_cmd_mgm_t *cmd = NULL; +#ifdef _COMMON_INTRPT_THREAD + int pthread_rc = 0; +#endif /* _COMMON_INTRPT_THREAD */ + + + if (chunk->num_active_cmds) { + + for (i=0; i < chunk->num_cmds; i++) { + if ((chunk->cmd_info[i].in_use) && + ((chunk->cmd_info[i].state == CFLSH_MGM_WAIT_CMP) || + (chunk->cmd_info[i].state == CFLSH_MGM_HALTED))) { + + /* + * cmd_info and cmd_start array entries + * with the same index correspond to the + * same command. + */ + + cmd = &chunk->cmd_start[i]; + + if ((bad_page_addr >= (uint64_t)cmd) && + (bad_page_addr <= ((uint64_t)(cmd) + sizeof(*cmd)))) { + + /* + * Bad page fault is associated with this command + */ + found_cmd = TRUE; + + cblk_notify_mc_err(chunk,cmd->cmdi->path_index,0x209,bad_page_addr, CFLSH_BLK_NOTIFY_SFW_ERR,cmd); + + CBLK_TRACE_LOG_FILE(5,"Bad page addr = 0x%llx is command",bad_page_addr); + + } else if ((bad_page_addr >= (uint64_t) chunk->cmd_info[i].buf) && + (bad_page_addr <= ((uint64_t) (chunk->cmd_info[i].buf) + (chunk->cmd_info[i].nblocks * CAPI_FLASH_BLOCK_SIZE)))) { + + /* + * Bad page fault is associated with this command's data buffer: user/caller error + */ + found_cmd = TRUE; + + cblk_notify_mc_err(chunk,cmd->cmdi->path_index,0x20a,bad_page_addr, CFLSH_BLK_NOTIFY_SFW_ERR,cmd); + + CBLK_TRACE_LOG_FILE(5,"Bad page addr = 0x%llx is data buffer",bad_page_addr); + + } + + + if (found_cmd) { + + /* + * Fail this command. + */ + + + cmd->cmdi->status = EIO; + cmd->cmdi->transfer_size = 0; + + CBLK_TRACE_LOG_FILE(6,"cmd failed lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + + + /* + * Fail command back. 
+ */ + + cmd->cmdi->state = CFLSH_MGM_CMP; + +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If we are using a common interrupt thread + */ + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + /* + * Signal any one waiting for any command to complete. + */ + + pthread_rc = pthread_cond_signal(&(chunk->cmd_cmplt_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for cmd_cmplt_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + } + +#endif /* _COMMON_INTRPT_THREAD */ + + break; + + } /* if found_cmd */ + + + } + + } /* for */ + + } + + + return found_cmd; +} + + +/* + * NAME: cblk_fail_all_cmds + * + * FUNCTION: This routine fails all commands + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. + * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * None + * + */ +void cblk_fail_all_cmds(cflsh_chunk_t *chunk) +{ + int i; + cflsh_cmd_mgm_t *cmd = NULL; +#ifdef _COMMON_INTRPT_THREAD + int pthread_rc = 0; +#endif /* _COMMON_INTRPT_THREAD */ + + if (chunk->num_active_cmds) { + + + + CBLK_LIVE_DUMP_THRESHOLD(9,"0x201"); + + + for (i=0; i < chunk->num_cmds; i++) { + if ((chunk->cmd_info[i].in_use) && + ((chunk->cmd_info[i].state == CFLSH_MGM_WAIT_CMP) || + (chunk->cmd_info[i].state == CFLSH_MGM_HALTED))) { + + /* + * cmd_info and cmd_start array entries + * with the same index correspond to the + * same command. + */ + + cmd = &chunk->cmd_start[i]; + + /* + * Fail this command. + */ + + + cmd->cmdi->status = EIO; + cmd->cmdi->transfer_size = 0; + + CBLK_TRACE_LOG_FILE(6,"cmd failed lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + + + /* + * Fail command back. 
+ */ + + cmd->cmdi->state = CFLSH_MGM_CMP; + +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If we are using a common interrupt thread + */ + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + /* + * Signal any one waiting for any command to complete. + */ + + pthread_rc = pthread_cond_signal(&(chunk->cmd_cmplt_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for cmd_cmplt_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + } + +#endif /* _COMMON_INTRPT_THREAD */ + + + } + + } /* for */ + + } + + return; +} + +/* + * NAME: cblk_halt_all_cmds + * + * FUNCTION: This routine halts all commands. It assumes + * the AFU is being reset or about to reset. + * Thus it can mark all active commands in a halt + * state. + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. + * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * None + * + */ +void cblk_halt_all_cmds(cflsh_chunk_t *chunk, int path_index, int all_paths) +{ + int i; + + + chunk->flags |= CFLSH_CHNK_HALTED; + + + /* + * TODO:?? There maybe a race condition here. If we have an AFU shared + * by multiple chunks/paths, then the first one reset will correctly + * halt and resume. However other paths will not halt prior to context reset + * and thus when this routine is called for them, they may not get + * flushed again. + */ + + if (chunk->num_active_cmds) { + + for (i=0; i < chunk->num_cmds; i++) { + if ((chunk->cmd_info[i].in_use) && + ((chunk->cmd_info[i].state == CFLSH_MGM_PENDFREE) || + (chunk->cmd_info[i].state == CFLSH_MGM_WAIT_CMP)) && + (all_paths || (chunk->cmd_info[i].path_index == path_index))) { + + /* + * Halt this command. 
+ */ + + chunk->cmd_info[i].state = CFLSH_MGM_HALTED; + + + } + + } /* for */ + + } + + return; +} + +/* + * NAME: cblk_resume_all_halted_cmds + * + * FUNCTION: This routine resumes all haltdd commands. It assumes + * the AFU reset is complete. + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. + * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * None + * + */ +void cblk_resume_all_halted_cmds(cflsh_chunk_t *chunk, int increment_retries, + int path_index, int all_paths) +{ + int i,j; + int rc = 0; + cflsh_cmd_mgm_t *cmd = NULL; + cflsh_cmd_info_t *cmdi; + int pthread_rc = 0; + + + /* + * TODO:?? There maybe a race condition here. If we have an AFU shared + * by multiple chunks/paths, then the first one reset will correctly + * halt and resume. However other paths will not halt prior to context reset + * and thus when this routine is called for them, they may not get + * flushed again. + */ + + + chunk->flags &= ~CFLSH_CHNK_HALTED; + + if (chunk->num_active_cmds) { + + for (i=0; i < chunk->num_cmds; i++) { + if ((chunk->cmd_info[i].in_use) && + (chunk->cmd_info[i].state == CFLSH_MGM_HALTED) && + (all_paths || (chunk->cmd_info[i].path_index == path_index))) { + + /* + * cmd_info and cmd_start array entries + * with the same index correspond to the + * same command. + */ + + cmd = &chunk->cmd_start[i]; + + /* + * Resume this command. + */ + + + + CBLK_INIT_ADAP_CMD_RESP(chunk,cmd); + + + cmdi = &chunk->cmd_info[cmd->index]; + + + cmdi->cmd_time = time(NULL); + + if (increment_retries) { + cmdi->retry_count++; + } + + if (cmdi->retry_count < CAPI_CMD_MAX_RETRIES) { + + + if (chunk->num_paths > 1) { + + /* + * If we have more than one path, then + * allow retry down another path. 
+ */ + + CBLK_TRACE_LOG_FILE(9,"multiple paths, num_paths = %d path_index = %d",chunk->num_paths,path_index); + for (j=0;j< chunk->num_paths; j++) { + + if ((chunk->path[j]) && + (chunk->path[j]->flags & CFLSH_PATH_ACT) && + (j != path_index)) { + + /* + * This is a valid path. So select it + * and specify a retry for this path. + */ + + cmdi->path_index = j; + + CBLK_TRACE_LOG_FILE(9,"Retry path_index = %d",j); + + chunk->stats.num_path_fail_overs++; + + + break; + + } + + } + + } + + + /* + * Update command possibly for new path or updated context id. + */ + + if (CBLK_UPDATE_PATH_ADAP_CMD(chunk,cmd,0)) { + + CBLK_TRACE_LOG_FILE(1,"CBLK_UPDATE_PATH_ADAP_CMD failed"); + + rc = -1; + } + + if (!rc) { + + rc = CBLK_ISSUE_CMD(chunk,cmd,cmdi->buf, + cmd->cmdi->lba,cmd->cmdi->nblocks,CFLASH_ISSUE_RETRY); + } + + if (rc) { + + /* + * If we failed to issue this command, then fail it + */ + + CBLK_TRACE_LOG_FILE(8,"resume issue failed with rc = 0x%x cmd->cmdi->lba = 0x%llx chunk->index = %d", + rc,cmd->cmdi->lba,chunk->index); + cmd->cmdi->status = EIO; + + cmd->cmdi->transfer_size = 0; + + + + /* + * Fail command back. + */ + + cmd->cmdi->state = CFLSH_MGM_CMP; + +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If we are using a common interrupt thread + */ + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + /* + * Signal any one waiting for any command to complete. + */ + + pthread_rc = pthread_cond_signal(&(chunk->cmd_cmplt_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for cmd_cmplt_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + } + +#endif /* _COMMON_INTRPT_THREAD */ + } + + } else { + + + /* + * If we exceeded retries then + * give up on it now. 
+ */ + + if (!(cmd->cmdi->status)) { + + cmd->cmdi->status = EIO; + } + + cmd->cmdi->transfer_size = 0; + + + + /* + * Fail command back. + */ + + cmd->cmdi->state = CFLSH_MGM_CMP; + +#ifdef _COMMON_INTRPT_THREAD + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * If we are using a common interrupt thread + */ + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + + + /* + * Signal any one waiting for any command to complete. + */ + + pthread_rc = pthread_cond_signal(&(chunk->cmd_cmplt_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for cmd_cmplt_event rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + } + +#endif /* _COMMON_INTRPT_THREAD */ + } + } + + } /* for */ + + } + + // TOD0:?? What about reseting one AFU and using the other? + + + + /* + * NOTE: Even if we have no background thread, this is still valid. + * If we are being used by a single threaded process, then there + * will never be anything waiting to wake up. If we are being used + * by a multi-thread process, then there could be threads blocked + * waiting to resume. + * + * The assumption here is that who ever halts the commands will + * resume them before exiting this library. + */ + + pthread_rc = pthread_cond_broadcast(&(chunk->path[chunk->cur_path]->resume_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for resume_event rc = %d,errno = %d", + pthread_rc,errno); + } + + + return; +} + +/* + * NAME: cblk_reset_context_shared_afu + * + * FUNCTION: This routine will first determine which chunk's paths + * are associated with this shared AFU, then halt all commands + * that are active on this AFU. then reset the context and + * resume-retry the halted commands. + * + * + * Environment: This routine assumes no locks are taken. 
+ * + * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * None + * + */ +void cblk_reset_context_shared_afu(cflsh_afu_t *afu) +{ + cflsh_chunk_t *chunk = NULL; + cflsh_path_t *path; + int reset_context_success = TRUE; + int detach = FALSE; + int pthread_rc = 0; + int path_index = 0; + time_t timeout; + + if (afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"AFU is null"); + } + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + timeout = time(NULL) - 1; + + CFLASH_BLOCK_LOCK(afu->lock); + + if (afu->reset_time > timeout) { + + CBLK_TRACE_LOG_FILE(5,"afu reset done in the last second"); + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + CFLASH_BLOCK_UNLOCK(afu->lock); + + return; + } + + afu->flags |= CFLSH_AFU_HALTED; + + CFLASH_BLOCK_UNLOCK(afu->lock); + + + path = afu->head_path; + + while (path) { + + + chunk = path->chunk; + + if (chunk == NULL) { + CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index); + continue; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + path_index = path->path_index; + cblk_halt_all_cmds(chunk,path_index, FALSE); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + path = path->next; + } + + + if (chunk) { + + /* + * We found at least one chunk associated with this afu. + * Thus we'll reset the context and then all commands + * need to be failed/resumed. + */ + + + + if (CBLK_RESET_ADAP_CONTEXT(chunk,path_index)) { + + /* + * Reset context failed + */ + CBLK_TRACE_LOG_FILE(1,"reset context failed for path_index of %d",path->path_index); + reset_context_success = FALSE; + + // TODO:?? should we do unmap detach here instead of below. + } + + /* + * We need to explicitly take the afu + * lock here, since we are sending a broadcast + * to other threads. So we can not use + * the conditional taking of the afu lock + * via CFLASH_BLOCK_AFU_SHARE_LOCK. 
We'll + * explicitly release after the broadcast + */ + + CFLASH_BLOCK_LOCK(afu->lock); + + + afu->reset_time = time(NULL); + + afu->flags &= ~CFLSH_AFU_HALTED; + + afu->num_issued_cmds = 0; + + pthread_rc = pthread_cond_broadcast(&(afu->resume_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for AFU resume_event rc = %d,errno = %d", + pthread_rc,errno); + } + + CFLASH_BLOCK_UNLOCK(afu->lock); + + path = afu->head_path; + + while (path) { + + + chunk = path->chunk; + + if (chunk == NULL) { + CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index); + continue; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + + if (reset_context_success) { + + cblk_resume_all_halted_cmds(chunk, TRUE, + path->path_index, FALSE); + } else { + /* + * If any context reset failed, then fail all + * I/O. + */ + + cblk_notify_mc_err(chunk,path_index,0x200,0, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + chunk->flags |= CFLSH_CHUNK_FAIL_IO; + + cblk_chunk_free_mc_device_resources(chunk); + + if (!detach) { + + /* + * Only unmap and detach this AFU once + */ + + detach = TRUE; + cblk_chunk_unmap(chunk,TRUE); + + cblk_chunk_detach(chunk,TRUE); + + + } + + close(chunk->fd); + + + /* + * Fail all other commands. We are allowing the commands + * that saw the time out to be failed with ETIMEDOUT. + * All other commands are failed here with EIO. + */ + + cblk_fail_all_cmds(chunk); + + + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + path = path->next; + } + + + } + + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + return; +} + + +/* + * NAME: cblk_retry_new_path + * + * FUNCTION: Determine if there is another path this can + * be retried with. There are two scenarios here: + * + * - virtual lun using both paths from same AFU. + * - physical lun with multiple paths (AIX). + * + * + * INPUTS: + * chunk - Chunk to which file I/O is being done. 
+ * cmd - Command for which we are doing I/O + * + * RETURNS: + * NONE + * + */ + +int cblk_retry_new_path(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd, int delay_needed_same_afu) +{ + cflash_cmd_err_t rc = CFLASH_CMD_FATAL_ERR; +#ifdef _KERNEL_MASTER_CONTXT + int cur_path; +#ifdef _AIX + int i; +#endif /* AIX */ +#endif /* !_KERNEL_MASTER_CONTXT */ + + + if ((chunk == NULL) || (cmd == NULL)) { + + CBLK_TRACE_LOG_FILE(5,"chunk or cmd is NULL"); + return rc; + } + +#ifdef _KERNEL_MASTER_CONTXT + + cur_path = chunk->cmd_info[cmd->index].path_index; + + + if (chunk->path[cur_path] == NULL) { + + CBLK_TRACE_LOG_FILE(5,"path is NULL, cur_path = %d, cmd_index = 0x%x",cur_path,cmd->index); + return rc; + + } + + if (chunk->path[cur_path]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(5,"afu is NULL"); + return rc; + + } + + CBLK_TRACE_LOG_FILE(9,"cur_path = %d, cmd_index = 0x%x",cur_path,cmd->index); + + if (chunk->path[cur_path]->num_ports > 1) { + + /* + * This AFU has multiple ports connected to this same + * path to this lun. So allow retry on the same host selected path + * (the AFU should try the other path FC port/path). + */ + + CBLK_TRACE_LOG_FILE(9,"multiple ports delay_needed_same_afu = %d",delay_needed_same_afu); + if (delay_needed_same_afu) { + rc = CFLASH_CMD_DLY_RETRY_ERR; + } else { + rc = CFLASH_CMD_RETRY_ERR; + } + + + +#ifdef _AIX + + + + } else if (chunk->num_paths > 1) { + + /* + * If we have more than one path, then + * allow retry down that path. + */ + + + + CBLK_TRACE_LOG_FILE(9,"multiple paths, num_paths = %d path_index = %d",chunk->num_paths,cur_path); + for (i=0;i< chunk->num_paths; i++) { + + if ((chunk->path[i]) && + (chunk->path[i]->flags & CFLSH_PATH_ACT) && + (i != cur_path)) { + + /* + * This is a valid path. So select it + * and specify a retry for this path. 
+ */ + + + CBLK_NOTIFY_LOG_THRESHOLD(3,chunk,(chunk->cmd_info[cmd->index].path_index),0x20c, + i,CFLSH_BLK_NOTIFY_SFW_ERR,cmd); + + + chunk->cmd_info[cmd->index].path_index = i; + + CBLK_TRACE_LOG_FILE(9,"Retry path_index = %d",i); + rc = CFLASH_CMD_RETRY_ERR; + + chunk->stats.num_path_fail_overs++; + + break; + + + } + + } + +#endif /* AIX */ + + + } + + + if ((rc == CFLASH_CMD_DLY_RETRY_ERR) || + (rc == CFLASH_CMD_RETRY_ERR)) { + + /* + * Update command possibly for new path or context + */ + + if (CBLK_UPDATE_PATH_ADAP_CMD(chunk,cmd,0)) { + + CBLK_TRACE_LOG_FILE(1,"CBLK_UPDATE_PATH_ADAP_CMD failed"); + + rc = CFLASH_CMD_FATAL_ERR; + } + + + + } else { + + + /* + * This routine is only called for situations where a + * retry might be valid. Thus at the very minimum do + * the retry, but for the same path. + */ + + rc = CFLASH_CMD_RETRY_ERR; + } + +#else + + /* + * For non-kernel MC assume the AFU has both + * FC ports attached to the same lun and + * thus do a retry. + */ + + rc = CFLASH_CMD_RETRY_ERR; + +#endif /* !_KERNEL_MASTER_CONTXT */ + + + return rc; +} + + + + +#ifdef BLOCK_FILEMODE_ENABLED +/* + * NAME: cblk_filemde_io + * + * FUNCTION: Issue I/O to file instead of a lun + * + * + * INPUTS: + * chunk - Chunk to which file I/O is being done. 
+ * cmd - Command for which we are doing I/O + * + * RETURNS: + * NONE + * + */ + +void cblk_filemode_io(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd) +{ + size_t lseek_rc; + int rc = 0; + + +#ifdef _AIX + uint32_t tmp_val; +#endif /* AIX */ + + + + if (cmd->cmdi == NULL) { + CBLK_TRACE_LOG_FILE(1,"cmdi is invalid for chunk->index = %d",chunk->index); + + } + + + CBLK_TRACE_LOG_FILE(5,"llseek to lba = 0x%llx, chunk->index = %d",cmd->cmdi->lba,chunk->index); + +#ifdef _AIX + lseek_rc = llseek(chunk->fd,((cmd->cmdi->lba) * CAPI_FLASH_BLOCK_SIZE ),SEEK_SET); +#else + lseek_rc = lseek(chunk->fd,((cmd->cmdi->lba) * CAPI_FLASH_BLOCK_SIZE ),SEEK_SET); +#endif + + if (lseek_rc == ((cmd->cmdi->lba) * CAPI_FLASH_BLOCK_SIZE )) { + + + if (cmd->cmdi->flags & CFLSH_MODE_READ) { + + rc = read(chunk->fd,cmd->cmdi->buf,CBLK_GET_CMD_DATA_LENGTH(chunk,cmd)); + + } else if (cmd->cmdi->flags & CFLSH_MODE_WRITE) { + + rc = write(chunk->fd,cmd->cmdi->buf,CBLK_GET_CMD_DATA_LENGTH(chunk,cmd)); + } + + if (rc) { + /* + * Convert file mode rc (number of bytes + * read/written) into cblk rc (number + * of blocks read/written) + */ + rc = rc/CAPI_FLASH_BLOCK_SIZE; + } + + } else { + CBLK_TRACE_LOG_FILE(1,"llseek failed for lba = 0x%llx,,errno = %d, chunk->index = %d", + cmd->cmdi->lba,errno,chunk->index); + rc = -1; + /* + * If we failed this I/O + * request. For now + * just an arbitrary error. + */ + + + CBLK_SET_ADAP_CMD_RSP_STATUS(chunk,cmd,FALSE); + } + + + if (rc == cmd->cmdi->nblocks) { + + /* + * Data was trasnferred, return good completion + */ + + CBLK_SET_ADAP_CMD_RSP_STATUS(chunk,cmd,TRUE); + rc = 0; + } else { + + /* + * If we failed this I/O + * request. For now + * just an arbitrary error. + */ + + + CBLK_SET_ADAP_CMD_RSP_STATUS(chunk,cmd,FALSE); + + } + + +#if !defined(__64BIT__) && defined(_AIX) + /* + * Compiler complains about + * recasting and assigning directly + * from cmd into p_hrrq_curr. So + * we use a two step process. 
+ */ + tmp_val = (uint32_t)cmd; + *(chunk->path[chunk->cur_path]->afu->p_hrrq_curr) = tmp_val | chunk->path[chunk->cur_path]->afu->toggle; +#else + *(chunk->path[chunk->cur_path]->afu->p_hrrq_curr) = (uint64_t) cmd | chunk->path[chunk->cur_path]->afu->toggle; +#endif + + + CBLK_TRACE_LOG_FILE(7,"*(chunk->path[chunk->cur_path].p_hrrq_curr) = 0x%llx, chunk->path[chunk->cur_path].toggle = 0x%llx, chunk->index = %d", + *(chunk->path[chunk->cur_path]->afu->p_hrrq_curr),chunk->path[chunk->cur_path]->afu->toggle, + chunk->index); + +} + +#endif /* BLOCK_FILEMODE_ENABLED */ + + +/* + * NAME: cblk_process_sense_data + * + * FUNCTION: This routine parses sense data + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. + * + * INPUTS: + * chunk - Chunk associated with this error + * ioasa - I/O Adapter status response + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * + */ +cflash_cmd_err_t cblk_process_sense_data(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd, struct request_sense_data *sense_data) +{ + cflash_cmd_err_t rc = CFLASH_CMD_IGNORE_ERR; + + + + if (sense_data == NULL) { + + cmd->cmdi->status = EIO; + return CFLASH_CMD_FATAL_ERR; + } + + CBLK_TRACE_LOG_FILE(5,"sense data: error code = 0x%x, sense_key = 0x%x, asc = 0x%x, ascq = 0x%x", + sense_data->err_code, sense_data->sense_key, + sense_data->add_sense_key, + sense_data->add_sense_qualifier); + + + + switch (sense_data->sense_key) { + + + case CFLSH_NO_SENSE: + /* + * Ignore error and treat as good completion + */ + rc = CFLASH_CMD_IGNORE_ERR; + + break; + case CFLSH_RECOVERED_ERROR: + + /* + * Ignore error and treat as good completion + * However log it. 
+ */ + rc = CFLASH_CMD_IGNORE_ERR; + + cblk_notify_mc_err(chunk,chunk->cur_path,0x201,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + break; + case CFLSH_NOT_READY: + + /* + * Retry command + */ + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_RETRY_ERR; + + break; + case CFLSH_MEDIUM_ERROR: + case CFLSH_HARDWARE_ERROR: + /* + * Fatal error do not retry. + */ + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_FATAL_ERR; + + cblk_notify_mc_err(chunk,chunk->cur_path,0x202,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + + break; + case CFLSH_ILLEGAL_REQUEST: + /* + * Fatal error do not retry. + */ + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_FATAL_ERR; + + cblk_notify_mc_err(chunk,chunk->cur_path,0x207,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + + break; + case CFLSH_UNIT_ATTENTION: + + + switch (sense_data->add_sense_key) { + + case 0x29: + /* + * Power on Reset or Device Reset. + * Retry command for now. + */ + + cmd->cmdi->status = EIO; + + + if (cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd,sense_data)) { + + /* + * Verification failed + */ + + rc = CFLASH_CMD_FATAL_ERR; + + } else { + + rc = CFLASH_CMD_RETRY_ERR; + } + break; + case 0x2A: + /* + * Device settings/capacity has changed + * Retry command for now. + */ + + + + cmd->cmdi->status = EIO; + + + if (cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd,sense_data)) { + + /* + * Verification failed + */ + + rc = CFLASH_CMD_FATAL_ERR; + + } else { + + rc = CFLASH_CMD_RETRY_ERR; + } + break; + case 0x3f: + + + if (sense_data->add_sense_qualifier == 0x0e) { + + /* + * Report Luns data has changed + * Retry command for now. 
+ */ + + + cmd->cmdi->status = EIO; + + if (cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd,sense_data)) { + + /* + * Verification failed + */ + + rc = CFLASH_CMD_FATAL_ERR; + + } else { + + rc = CFLASH_CMD_RETRY_ERR; + } + break; + + } + + /* Fall thru */ + default: + /* + * Fatal error + */ + + + cmd->cmdi->status = EIO; + + rc = CFLASH_CMD_FATAL_ERR; + + cblk_notify_mc_err(chunk,chunk->cur_path,0x203,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + + } + + break; + case CFLSH_DATA_PROTECT: + case CFLSH_BLANK_CHECK: + case CFLSH_VENDOR_UNIQUE: + case CFLSH_COPY_ABORTED: + case CFLSH_ABORTED_COMMAND: + case CFLSH_EQUAL_CMD: + case CFLSH_VOLUME_OVERFLOW: + case CFLSH_MISCOMPARE: + default: + + + /* + * Fatal error do not retry. + */ + + + cblk_notify_mc_err(chunk,chunk->cur_path,0x204,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + + rc = CFLASH_CMD_FATAL_ERR; + + cmd->cmdi->status = EIO; + + + CBLK_TRACE_LOG_FILE(1,"Fatal generic error sense data: sense_key = 0x%x, asc = 0x%x, ascq = 0x%x", + sense_data->sense_key,sense_data->add_sense_key, sense_data->add_sense_qualifier); + break; + + } + + + return rc; +} + + +#ifdef _COMMON_INTRPT_THREAD +/* + * NAME: cblk_intrpt_thread + * + * FUNCTION: This routine is invoked as a common + * interrupt handler thread for all threads + * for this chunk. + * + * + * Environment: This routine assumes the chunk mutex + * lock is held by the caller. 
+ * + * INPUTS: + * data - of type cflsh_async_thread_cmp_t + * + * RETURNS: + * + */ +void *cblk_intrpt_thread(void *data) +{ + void *ret_code = NULL; + int log_error; + cflsh_async_thread_cmp_t *async_data = data; + cflsh_chunk_t *chunk = NULL; + int pthread_rc = 0; + int tag; + size_t transfer_size; + cflsh_cmd_mgm_t *cmd = NULL; + cflsh_cmd_info_t *cmdi = NULL; + time_t timeout; + int path_reset_index[CFLSH_BLK_MAX_NUM_PATHS]; + int reset_context = FALSE; +#ifdef BLOCK_FILEMODE_ENABLED + int i; + volatile uint64_t *p_hrrq_curr; + uint64_t toggle; +#endif /* BLOCK_FILEMODE_ENABLED */ + + + + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,NULL); + + chunk = async_data->chunk; + + if (chunk == NULL) { + + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s cmd_index = %d, cmd is NULL", + async_data->chunk->dev_name); + + return (ret_code); + } + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"chunk filename = %s pthread_cond_wait failed rc = %d errno = %d", + async_data->chunk->dev_name,pthread_rc,errno); + + return (ret_code); + } + + CBLK_TRACE_LOG_FILE(5,"start of thread chunk->index = %d",chunk->index); + + while (TRUE) { + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"chunk filename = %s invalid chunk eye catcher 0x%x", + async_data->chunk->dev_name,chunk->eyec); + + CBLK_LIVE_DUMP_THRESHOLD(9,"0x20d"); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + + if ((chunk->num_active_cmds == 0) && + (chunk->flags & CFLSH_CHUNK_FAIL_IO)) { + + + /* + * If we have no active I/O + * and this chunk is in a failed + * state, then treat this as an + * exit for this thread. 
+ */ + + + CBLK_TRACE_LOG_FILE(5,"exiting thread: chunk->index = %d because CFLSH_CHUNK_FAIL_IO", + chunk->index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + break; + } + + if (!(chunk->thread_flags) && + (chunk->num_active_cmds == 0)) { + + /* + * Only wait if the thread_flags + * has not been set and there are no active commands + */ + pthread_rc = pthread_cond_wait(&(chunk->thread_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s, chunk->index = %d, pthread_cond_wait failed rc = %d errno = %d", + chunk->dev_name,chunk->index,pthread_rc,errno); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + } + + + CBLK_TRACE_LOG_FILE(9,"chunk index = %d thread_flags = %d num_active_cmds = 0x%x", + chunk->index,chunk->thread_flags,chunk->num_active_cmds); + + if (chunk->thread_flags & CFLSH_CHNK_EXIT_INTRPT) { + + + + CBLK_TRACE_LOG_FILE(5,"exiting thread: chunk->index = %d thread_flags = %d", + chunk->index,chunk->thread_flags); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + break; + } else if ((chunk->thread_flags & CFLSH_CHNK_POLL_INTRPT) || + (chunk->num_active_cmds)) { + +#ifdef BLOCK_FILEMODE_ENABLED + p_hrrq_curr = (uint64_t*)chunk->path[chunk->cur_path]->afu->p_hrrq_curr; + toggle = chunk->path[chunk->cur_path]->afu->toggle; + /* + * TODO: ?? The following for loop should be replaced + * with a loop only walking the active list + * looking for commands to issue filemode_io. + */ + for (i=0; i < chunk->num_cmds; i++) { + if ((chunk->cmd_info[i].in_use) && + (chunk->cmd_info[i].state == CFLSH_MGM_WAIT_CMP)) { + + /* + * cmd_info and cmd_start array entries + * with the same index correspond to the + * same command. 
+ */ + cmd = &chunk->cmd_start[i]; + + cblk_filemode_io(chunk,cmd); + CBLK_INC_RRQ_LOCK(chunk,chunk->cur_path); + } + + } /* for */ + + chunk->path[chunk->cur_path]->afu->p_hrrq_curr = p_hrrq_curr; + chunk->path[chunk->cur_path]->afu->toggle = toggle; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + + chunk->thread_flags &= ~CFLSH_CHNK_POLL_INTRPT; + + tag = -1; + + CFLASH_BLOCK_UNLOCK(chunk->lock); + CBLK_WAIT_FOR_IO_COMPLETE(async_data->chunk,&tag,&transfer_size,TRUE,0); + + CFLASH_BLOCK_LOCK(chunk->lock); + + + + + if ((chunk->num_active_cmds) && + (chunk->head_act) && + !(chunk->flags & CFLSH_CHNK_HALTED) ) { + + /* + * We need to check for dropped commands here if + * we are not in a halted state. + * For common threads there is no effective mechanism in + * CBLK_WAIT_FOR_IO_COMPLETE to detect commmands that time-out. + * So we will do that here. First find the oldest command, + * which will be at the head of the chunk's active queue. + */ + + + /* + * Increase time-out detect to 10 times + * the value we are using in the IOARCB, because + * the recovery process will reset this context + */ + + if (cflsh_blk.timeout_units != CFLSH_G_TO_SEC) { + + /* + * If the time-out units are not in seconds + * then only give the command only 1 second to complete + */ + timeout = time(NULL) - 1; + } else { + timeout = time(NULL) - (10 * cflsh_blk.timeout); + } + + if (chunk->head_act->cmd_time < timeout) { + + /* + * At least one command timed out. Let's + * fail all commands that timed out. The longest + * active command will be the head of the active + * queue. The shortest active command will + * the tail of the active queue. So we will + * walk from the oldest to the newest. When + * we find a commmand that has not been active + * long enough to have timed out, we will stop + * walking this list (since subsequent commands would + * have been active no longer than that command). 
+ */ + + CBLK_LIVE_DUMP_THRESHOLD(9,"0x202"); + + + bzero(&path_reset_index,sizeof(path_reset_index)); + reset_context = FALSE; + cmdi = chunk->head_act; + + log_error = TRUE; + + while (cmdi) { + + + if ((cmdi->in_use) && + (cmdi->state == CFLSH_MGM_WAIT_CMP) && + (cmdi->cmd_time < timeout)) { + + + cmd = &chunk->cmd_start[cmdi->index]; + + /* + * This commmand has timed out + */ + + + + if (log_error) { + + /* + * Only log first command to time-out. + * Don't log them all, since this could flood + * the error log. + */ + + cblk_notify_mc_err(chunk,cmdi->path_index,0x20b,0, + CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + + log_error = FALSE; + } + + + cmdi->status = ETIMEDOUT; + cmdi->transfer_size = 0; + + CBLK_TRACE_LOG_FILE(6,"cmd time-out lba = 0x%llx flags = 0x%x, chunk->index = %d", + cmd->cmdi->lba,cmd->cmdi->flags,chunk->index); + + + /* + * Fail command back. + */ +#ifdef REMOVE + cmd->cmdi->state = CFLSH_MGM_CMP; + + pthread_rc = pthread_cond_signal(&(cmd->cmdi->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signall failed rc = %d,errno = %d, chunk->index = %d", + pthread_rc,errno,chunk->index); + } + +#endif /* REMOVE */ + chunk->stats.num_fail_timeouts++; + + path_reset_index[cmdi->path_index] = TRUE; + + reset_context = TRUE; + + } else if (cmdi->cmd_time > timeout) { + + /* + * Since commands on the active queue are ordered, + * with the head being the oldest and the tail the newest, + * we do not need process the active queue further + * after we found the first command that is not considered + * timed out. + */ + + break; + + } + + + cmdi = cmdi->act_next; + + } /* while */ + + if (reset_context) { + + +#ifdef _KERNEL_MASTER_CONTXT + + int i; + + + + /* + * We found at least one valid time command. + * Thus we'll reset the context and then all commands + * will be retried. 
+ */ + + + for (i = 0; i < chunk->num_paths;i++) { + + if (path_reset_index[i]) { + + + CBLK_GET_INTRPT_STATUS(chunk,i); + CFLASH_BLOCK_UNLOCK(chunk->lock); + + cblk_reset_context_shared_afu(chunk->path[i]->afu); + + CFLASH_BLOCK_LOCK(chunk->lock); + } + + } + + + + + +#else + + + CBLK_GET_INTRPT_STATUS(chunk,chunk->cur_path); + + + /* + * Tear down the context and prevent it from being used. + * This will prevent AFU from DMAing into the user's + * data buffer. + */ + + + chunk->flags |= CFLSH_CHUNK_FAIL_IO; + + cblk_chunk_free_mc_device_resources(chunk); + + cblk_chunk_unmap(chunk,TRUE); + + close(chunk->fd); + + + /* + * Fail all other commands. We are allowing the commands + * that saw the time out to be failed with ETIMEDOUT. + * All other commands are failed here with EIO. + */ + + cblk_fail_all_cmds(chunk); + +#endif /* !_KERNEL_MASTER_CONTXT */ + + + } + + } + } + + + } + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + } /* while */ + + return (ret_code); +} + +#endif /* COMMON_INTRPT_THREAD */ + +/* + * NAME: cblk_async_recv_thread + * + * FUNCTION: This routine is invoked as a thread + * to wait for async I/O completions. + * + * NOTE: This thread under some error conditions + * can be canceled via pthread_cancel. + * By default it will be cancelable, but + * deferred type. Thus pthread_cancel on this + * thread will only cause the thread to be + * canceled at cancelation points. The + * invocation of pthread_testcancel is a cancelation + * point. These need to be placed in situations where + * this thread is not holding any resources--especially + * mutex locks, because otherwise those resources will not + * be freed. In the case of mutex locks, if the thread + * is canceled while it is holding a lock, that lock + * will remain locked until this process terminates. + * + * This routine is not changing the canceltype to + * PTHREAD_CANCEL_ASYNCHRONOUS, because that appears to be + * less safe. 
It can be problematic in cases where resources--especially + * mutex locks--are in use, thus those resources are never freed. + * + * During certain portions of the code, it will change its + * cancelstate from the default (PTHREAD_CANCEL_ENABLE) to + * PTHREAD_CANCEL_DISABLE and vice versa. This is need primarily + * for the CBLK_TRACE_LOG_FILE macro. It acquiree log lock and then + * calls functions (such as fprintf) that are considered by the OS + * as valid cancelation points. If we allowed a cancel while + * these trace macros are running, we could cancel this thread + * and never unlock the log lock. + * + * INPUTS: + * data - of type cflsh_async_thread_cmp_t + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * + */ +void *cblk_async_recv_thread(void *data) +{ + void *ret_code = NULL; + int rc = 0; + cflsh_async_thread_cmp_t *async_data = data; + size_t transfer_size; + cflsh_cmd_mgm_t *cmd = NULL; + cflsh_chunk_t *chunk = NULL; + int pthread_rc = 0; + + + + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,NULL); + +#ifdef _REMOVE + pthread_rc = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,NULL); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"cmd_index = %d, pthread_setcanceltype failed rc = %d, errno = %d", + async_data->cmd_index,pthread_rc,errno); + return (ret_code); + } +#endif /* _REMOVE */ + + + chunk = async_data->chunk; + + if (chunk == NULL) { + + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s cmd_index = %d, cmd is NULL", + async_data->chunk->dev_name,async_data->cmd_index); + + return (ret_code); + } + + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s chunk->index = %d cmd_index = %d",chunk->dev_name, + chunk->index, async_data->cmd_index); + + /* + * Create a cancellation point just before we + * try to take the chunk->lock. Thus if we + * are being canceled we would exit now + * before blocking on the lock. 
+ */ + + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,NULL); + pthread_testcancel(); + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,NULL); + + + CFLASH_BLOCK_LOCK(chunk->lock); + + if (CFLSH_EYECATCH_CHUNK(chunk)) { + /* + * Invalid chunk. Exit now. + */ + + cflsh_blk.num_bad_chunk_ids++; + CBLK_TRACE_LOG_FILE(1,"chunk filename = %s pthread_cond_wait failed rc = %d errno = %d", + async_data->chunk->dev_name,pthread_rc,errno); + + return (ret_code); + } + + + cmd = &(chunk->cmd_start[async_data->cmd_index]); + + if (cmd == NULL) { + + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s cmd_index = %d, cmd is NULL", + async_data->chunk->dev_name,async_data->cmd_index); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + /* + * Since we start this thread just before we attempt + * to issue the corresponding command to the AFU, + * we need to wait for a signal that it was successfully + * issued before proceeding. + * + * It should also be noted that if we are being + * canceled we would also be signaled too and + * thus wake up. The pthread_test cancel + * after we unlock after waiting here, + * would be where this thread would exit. + */ + + if (!(cmd->cmdi->flags & CFLSH_ASYNC_IO_SNT)) { + + /* + * Only wait if the CFLSH_ASYNC_IO_SNT flag + * has not been set. + */ + pthread_rc = pthread_cond_wait(&(cmd->cmdi->thread_event),&(chunk->lock.plock)); + + if (pthread_rc) { + + cmd->cmdi->flags |= CFLSH_ATHRD_EXIT; + CBLK_TRACE_LOG_FILE(5,"chunk filename = %s cmd_index = %d, pthread_cond_wait failed rc = %d errno = %d", + async_data->chunk->dev_name,async_data->cmd_index,pthread_rc,errno); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + } + + if (cmd->cmdi->state == CFLSH_MGM_ASY_CMP) { + + /* + * The originator of this command + * has been notified that this command + * completed, but wase unable + * to mark the command as free, since + * this thread is running. Mark + * the command as free now. 
+ */ + + CBLK_FREE_CMD(chunk,cmd); + + CBLK_TRACE_LOG_FILE(5,"cmd_index = %d in_use = %d, cmd = 0x%llx", + async_data->cmd_index, cmd->cmdi->in_use,(uint64_t)cmd); + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + if ((!cmd->cmdi->in_use) || + (cmd->cmdi->state == CFLSH_MGM_CMP)) { + /* + * If the command is no longer in use, + * then exit now. + */ + + cmd->cmdi->flags |= CFLSH_ATHRD_EXIT; + CBLK_TRACE_LOG_FILE(5,"command not in use cmd_index = %d", + async_data->cmd_index); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + return (ret_code); + } + + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + /* + * Create a cancelation point just before + * we start polling for completion, just in + * case we are being canceled. This needs + * to be after we unlocked to avoid never + * releasing that lock. + */ + + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,NULL); + pthread_testcancel(); + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,NULL); + + rc = CBLK_WAIT_FOR_IO_COMPLETE(chunk,&(async_data->cmd_index),&transfer_size,TRUE,0); + + + + CFLASH_BLOCK_LOCK(chunk->lock); + + /* + * TODO: ?? This is ugly that we are + * acquiring a lock to only decrement + * the number of active threads (i.e + * keep statistics. We may want to + * look at removing this in the future + */ + + chunk->stats.num_active_threads--; + + + cmd->cmdi->flags |= CFLSH_ATHRD_EXIT; + + if (cmd->cmdi->state == CFLSH_MGM_ASY_CMP) { + + /* + * The originator of this command + * has been notified that this command + * completed, but was unable + * to mark the command as free, since + * this thread is running. Mark + * the command as free now. 
+ */ + + CBLK_FREE_CMD(chunk,cmd); + + CBLK_TRACE_LOG_FILE(8,"cmd_index = %d in_use = %d, cmd = 0x%llx, chunk->index = %d", + async_data->cmd_index, cmd->cmdi->in_use,(uint64_t)cmd,chunk->index); + } + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CBLK_TRACE_LOG_FILE(5,"CBLK_WAIT_FOR_IO_COMPLETE returned rc = %d, cmd_index = %d in_use = %d, chunk->index = %d", + rc,async_data->cmd_index, cmd->cmdi->in_use,chunk->index); + + return (ret_code); +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_trace_log_data_ext + * + * FUNCTION: This is a function call (as opposed to inlined) version + * of trace_log_data_ext from trace_log.h. It uses + * the same setup (setup_log_file in trace_log.h) and defines. + * + * Print a message to trace log. This + * function is the same as trace_log_data, except + * this function requires the caller to maintain + * the static variables via the extended argument. In addition + * it gives the caller additional control over logging. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_trace_log_data_ext(trace_log_ext_arg_t *ext_arg, FILE *logfp,char *filename, char *function, + uint line_num,char *msg, ...) +{ + va_list ap; + struct timeb cur_time, log_time, delta_time; + uint print_log_number; + + if (ext_arg == NULL) { + + return; + } + + if (logfp == NULL) { + + return; + } + + + if (ext_arg->flags & TRACE_LOG_NO_USE_LOG_NUM) { + + if (ext_arg->log_number > 0) { + print_log_number = ext_arg->log_number - 1; + } else { + print_log_number = 0; + } + + } else { + print_log_number = ext_arg->log_number; + } + + ftime(&cur_time); + + if (!(ext_arg->flags & TRACE_LOG_START_VALID)) { + + /* + * If start time is not set, then + * set it now. 
+ */ + + ext_arg->start_time = cur_time; + + + ext_arg->flags |= TRACE_LOG_START_VALID; + + + log_time.time = 0; + log_time.millitm = 0; + + delta_time.time = 0; + delta_time.millitm = 0; + + + /* + * Print header + */ + fprintf(logfp,"---------------------------------------------------------------------------\n"); + fprintf(logfp,"Date for %s is %s at %s\n",__FILE__,__DATE__,__TIME__); + fprintf(logfp,"Index Sec msec delta dmsec Filename function, line ...\n"); + fprintf(logfp,"------- ----- ----- ----- ----- -------------------- ---------------------\n"); + + } else { + + /* + * Find time offset since starting time. + */ + + log_time.time = cur_time.time - ext_arg->start_time.time; + log_time.millitm = cur_time.millitm - ext_arg->start_time.millitm; + + delta_time.time = log_time.time - ext_arg->last_time.time; + delta_time.millitm = log_time.millitm - ext_arg->last_time.millitm; + } + + fprintf(logfp,"%7d %5d.%05d %5d.%05d %-25s %-35s line:%5d :", + print_log_number,(int)log_time.time,log_time.millitm,(int)delta_time.time,delta_time.millitm,filename, function, line_num); + /* + * Initialize ap to store arguments after msg + */ + + va_start(ap,msg); + vfprintf(logfp, msg, ap); + va_end(ap); + + fprintf(logfp,"\n"); + + fflush(logfp); + + if (!(ext_arg->flags & TRACE_LOG_NO_USE_LOG_NUM)) { + ext_arg->log_number++; + } + + ext_arg->last_time = log_time; + + return; + +} + + + +/* + * NAME: cblk_display_stats + * + * FUNCTION: This routine is called whenever an RRQ has been processed. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ + +void cblk_display_stats(cflsh_chunk_t *chunk, int verbosity) +{ + CBLK_TRACE_LOG_FILE(verbosity,"\nCHUNK STATISTICS ..."); +#ifdef BLOCK_FILEMODE_ENABLED + CBLK_TRACE_LOG_FILE(verbosity,"FILEMODE"); +#endif /* BLOCK_FILEMODE_ENABLED */ + +#ifdef CFLASH_LITTLE_ENDIAN_HOST + CBLK_TRACE_LOG_FILE(verbosity,"Little Endian"); +#else + CBLK_TRACE_LOG_FILE(verbosity,"Big Endian"); +#endif +#ifdef _MASTER_CONTXT + CBLK_TRACE_LOG_FILE(verbosity,"Master Context"); +#else + CBLK_TRACE_LOG_FILE(verbosity,"No Master Context"); +#endif + CBLK_TRACE_LOG_FILE(verbosity,"cblk_log_verbosity 0x%x",cblk_log_verbosity); +#if !defined(__64BIT__) && defined(_AIX) + CBLK_TRACE_LOG_FILE(verbosity,"32-bit app support "); +#else + CBLK_TRACE_LOG_FILE(verbosity,"64-bit app support "); + +#endif + CBLK_TRACE_LOG_FILE(verbosity,"flags 0x%x",cflsh_blk.flags); + CBLK_TRACE_LOG_FILE(verbosity,"lun_id 0x%llx",cflsh_blk.lun_id); + CBLK_TRACE_LOG_FILE(verbosity,"next_chunk_id 0x%llx",cflsh_blk.next_chunk_id); + CBLK_TRACE_LOG_FILE(verbosity,"num_active_chunks 0x%x",cflsh_blk.num_active_chunks); + CBLK_TRACE_LOG_FILE(verbosity,"num_max_active_chunks 0x%x",cflsh_blk.num_max_active_chunks); + CBLK_TRACE_LOG_FILE(verbosity,"num_bad_chunk_ids 0x%x",cflsh_blk.num_bad_chunk_ids); + CBLK_TRACE_LOG_FILE(verbosity,"chunk_id 0x%llx",chunk->index); + CBLK_TRACE_LOG_FILE(verbosity,"chunk_block size 0x%x",chunk->stats.block_size); + CBLK_TRACE_LOG_FILE(verbosity,"num_paths 0x%x",chunk->num_paths); + CBLK_TRACE_LOG_FILE(verbosity,"primary_path_id 0x%x",chunk->path[0]->path_id); + CBLK_TRACE_LOG_FILE(verbosity,"chunk_type 0x%x",chunk->path[chunk->cur_path]->type); + CBLK_TRACE_LOG_FILE(verbosity,"num_blocks 0x%x",chunk->num_blocks); + CBLK_TRACE_LOG_FILE(verbosity,"num_blocks_lun 0x%llx",chunk->num_blocks_lun); + CBLK_TRACE_LOG_FILE(verbosity,"max_transfer_size 0x%llx",chunk->stats.max_transfer_size); + CBLK_TRACE_LOG_FILE(verbosity,"num_cmds 0x%x",chunk->num_cmds); + 
CBLK_TRACE_LOG_FILE(verbosity,"num_active_cmds 0x%x",chunk->num_active_cmds); + CBLK_TRACE_LOG_FILE(verbosity,"num_reads 0x%llx",chunk->stats.num_reads); + CBLK_TRACE_LOG_FILE(verbosity,"num_writes 0x%llx",chunk->stats.num_writes); + CBLK_TRACE_LOG_FILE(verbosity,"num_areads 0x%llx",chunk->stats.num_areads); + CBLK_TRACE_LOG_FILE(verbosity,"num_awrites 0x%llx",chunk->stats.num_awrites); + CBLK_TRACE_LOG_FILE(verbosity,"num_act_reads 0x%x",chunk->stats.num_act_reads); + CBLK_TRACE_LOG_FILE(verbosity,"num_act_writes 0x%x",chunk->stats.num_act_writes); + CBLK_TRACE_LOG_FILE(verbosity,"num_act_areads 0x%x",chunk->stats.num_act_areads); + CBLK_TRACE_LOG_FILE(verbosity,"num_act_awrites 0x%x",chunk->stats.num_act_awrites); + CBLK_TRACE_LOG_FILE(verbosity,"max_num_act_reads 0x%x",chunk->stats.max_num_act_reads); + CBLK_TRACE_LOG_FILE(verbosity,"max_num_act_writes 0x%x",chunk->stats.max_num_act_writes); + CBLK_TRACE_LOG_FILE(verbosity,"max_num_act_areads 0x%x",chunk->stats.max_num_act_areads); + CBLK_TRACE_LOG_FILE(verbosity,"max_num_act_awrites 0x%x",chunk->stats.max_num_act_awrites); + CBLK_TRACE_LOG_FILE(verbosity,"num_blocks_read 0x%llx",chunk->stats.num_blocks_read); + CBLK_TRACE_LOG_FILE(verbosity,"num_blocks_written 0x%llx",chunk->stats.num_blocks_written); + CBLK_TRACE_LOG_FILE(verbosity,"num_aresult_no_cmplt 0x%llx",chunk->stats.num_aresult_no_cmplt); + CBLK_TRACE_LOG_FILE(verbosity,"num_errors 0x%llx",chunk->stats.num_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_retries 0x%llx",chunk->stats.num_retries); + CBLK_TRACE_LOG_FILE(verbosity,"num_timeouts 0x%llx",chunk->stats.num_timeouts); + CBLK_TRACE_LOG_FILE(verbosity,"num_fail_timeouts 0x%llx",chunk->stats.num_fail_timeouts); + CBLK_TRACE_LOG_FILE(verbosity,"num_reset_contexts 0x%llx",chunk->stats.num_reset_contexts); + CBLK_TRACE_LOG_FILE(verbosity,"num_reset_context_fails 0x%llx",chunk->stats.num_reset_contxt_fails); + CBLK_TRACE_LOG_FILE(verbosity,"num_no_cmds_free 0x%llx",chunk->stats.num_no_cmds_free); + 
CBLK_TRACE_LOG_FILE(verbosity,"num_no_cmd_room 0x%llx",chunk->stats.num_no_cmd_room); + CBLK_TRACE_LOG_FILE(verbosity,"num_no_cmds_free_fail 0x%llx",chunk->stats.num_no_cmds_free_fail); + CBLK_TRACE_LOG_FILE(verbosity,"num_cc_errors 0x%llx",chunk->stats.num_cc_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_fc_errors 0x%llx",chunk->stats.num_fc_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_port0_linkdowns 0x%llx",chunk->stats.num_port0_linkdowns); + CBLK_TRACE_LOG_FILE(verbosity,"num_port1_linkdowns 0x%llx",chunk->stats.num_port1_linkdowns); + CBLK_TRACE_LOG_FILE(verbosity,"num_port0_no_logins 0x%llx",chunk->stats.num_port0_no_logins); + CBLK_TRACE_LOG_FILE(verbosity,"num_port1_no_logins 0x%llx",chunk->stats.num_port1_no_logins); + CBLK_TRACE_LOG_FILE(verbosity,"num_port0_fc_errors 0x%llx",chunk->stats.num_port0_fc_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_port1_fc_errors 0x%llx",chunk->stats.num_port1_fc_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_afu_errors 0x%llx",chunk->stats.num_afu_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_false_reads 0x%llx",chunk->stats.num_capi_false_reads); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_adap_resets 0x%llx",chunk->stats.num_capi_adap_resets); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_adap_chck_err 0x%llx",chunk->stats.num_capi_adap_chck_err); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_read_fails 0x%llx",chunk->stats.num_capi_read_fails); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_data_st_errs 0x%llx",chunk->stats.num_capi_data_st_errs); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_afu_errors 0x%llx",chunk->stats.num_capi_afu_errors); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_afu_intrpts 0x%llx",chunk->stats.num_capi_afu_intrpts); + CBLK_TRACE_LOG_FILE(verbosity,"num_capi_unexp_afu_intrpts 0x%llx",chunk->stats.num_capi_unexp_afu_intrpts); + CBLK_TRACE_LOG_FILE(verbosity,"num_cache_hits 0x%llx",chunk->stats.num_cache_hits); + CBLK_TRACE_LOG_FILE(verbosity,"num_success_threads 0x%llx",chunk->stats.num_success_threads); + 
CBLK_TRACE_LOG_FILE(verbosity,"num_failed_threads 0x%llx",chunk->stats.num_failed_threads); + CBLK_TRACE_LOG_FILE(verbosity,"num_canc_threads 0x%llx",chunk->stats.num_canc_threads); + CBLK_TRACE_LOG_FILE(verbosity,"num_fail_canc_threads 0x%llx",chunk->stats.num_fail_canc_threads); + CBLK_TRACE_LOG_FILE(verbosity,"num_fail_detach_threads 0x%llx",chunk->stats.num_fail_detach_threads); + CBLK_TRACE_LOG_FILE(verbosity,"num_active_threads 0x%llx",chunk->stats.num_active_threads); + CBLK_TRACE_LOG_FILE(verbosity,"max_num_act_threads 0x%llx",chunk->stats.max_num_act_threads); + + return; +} + + +/* + * NAME: cblk_setup_dump_file + * + * FUNCTION: This routine dump data structures for + * the block library. + * + * + * + * RETURNS: 0 - Success, Otherwise error. + * + * + */ + +int cblk_setup_dump_file(void) +{ + int rc = 0; + char *env_user = getenv("USER"); + char *dump_filename = getenv("CFLSH_BLK_DUMP"); + char *log_pid = getenv("CFLSH_BLK_DUMP_PID"); + char filename[PATH_MAX]; + + + + + if (cblk_dumpfp) { + + /* + * If dump file pointer is setup, + * then do not set it up again. + */ + + return rc; + + } + + + if (dump_filename == NULL) + { + sprintf(filename, "/tmp/%s.cflash_block_dump", env_user); + dump_filename = filename; + } + + + if (log_pid) { + + /* + * Use different filename for each process + */ + + sprintf(filename,"%s.%d",dump_filename,getpid()); + + + } + + + + if ((cblk_dumpfp = fopen(dump_filename, "a")) == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Failed to open dump_filename file %s",dump_filename); + + cblk_dumpfp = NULL; + rc = -1; + } + + + return rc; +} + +/* + * NAME: cblk_dump_debug_data + * + * FUNCTION: This routine dump data structures for + * the block library. + * + * NOTE: This routine does not use any locking to serialize with the rest + * of the library, since it may be called to debug a range of + * library issues--including deadlock. Thus there is risk, + * that it could hit a segmentation fault. 
As result, it needs + * to call fflush periodically to ensure any data it's able to get + * is written out the dump file. In addition this code has taken + * some steps to order and check things to minimize the likelihood + * of it hitting a segmentation fault, but there is still no + * guarantee this can be avoid. + * + * RETURNS: None + * + * + */ + +void cblk_dump_debug_data(const char *reason,const char *reason_filename,const char *reason_function, + int reason_line_num, const char *reason_date) +{ + int i,j; + cflsh_chunk_t *chunk; + cflsh_afu_t *afu = NULL; + scsi_cdb_t *cdb = NULL; + cflsh_cmd_info_t *cmd_info = NULL; +#ifndef TIMELEN +#define TIMELEN 26 /* Linux does have a define for the minium size of the a timebuf */ + /* However linux man pages say it is 26 */ +#endif + char timebuf[TIMELEN+1]; + time_t cur_time; + int num_cmds_processed; + int num_cmds_mgm_completed; + int num_cmds_asy_mgm_completed; + int num_waiting_cmds; + + + if (cblk_dumpfp == NULL) { + + return; + } + + /* + * Print header + */ + fprintf(cblk_dumpfp,"---------------------------------------------------------------------------\n"); + fprintf(cblk_dumpfp,"Build date for %s is %s at %s\n",__FILE__,__DATE__,__TIME__); + + fflush(cblk_dumpfp); + + cur_time = time(NULL); + + fprintf(cblk_dumpfp,"Dump occurred at %s\n",ctime_r(&cur_time,timebuf)); + + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp,"PID = 0x%"PRIx64", dump sequence number = 0x%x\n", + (uint64_t)cflsh_blk.caller_pid, dump_sequence_num); + fprintf(cblk_dumpfp,"dump reason %s, filename = %s, function = %s, line_number = %d, date = %s\n", + reason,reason_filename,reason_function,reason_line_num,reason_date); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp,"---------------------------------------------------------------------------\n"); + + fflush(cblk_dumpfp); + + fetch_and_add(&(dump_sequence_num),+1); + + +#ifdef BLOCK_FILEMODE_ENABLED + fprintf(cblk_dumpfp,"FILEMODE\n"); +#endif /* BLOCK_FILEMODE_ENABLED */ + +#ifdef 
CFLASH_LITTLE_ENDIAN_HOST + fprintf(cblk_dumpfp,"Little Endian\n"); +#else + fprintf(cblk_dumpfp,"Big Endian\n"); +#endif +#if !defined(__64BIT__) && defined(_AIX) + fprintf(cblk_dumpfp,"32-bit app support\n"); +#else + fprintf(cblk_dumpfp,"64-bit app support\n"); + +#endif + + fprintf(cblk_dumpfp,"\n\n"); + + fprintf(cblk_dumpfp,"cblk_log_verbosity 0x%x\n\n",cblk_log_verbosity); + + fprintf(cblk_dumpfp,"cblk_dump_level 0x%x\n\n",cblk_dump_level); + + fprintf(cblk_dumpfp,"cblk_notify_log_level 0x%x\n\n",cblk_notify_log_level); + + fprintf(cblk_dumpfp,"cblk_log_lock (addr) %p\n\n",&cblk_log_lock); + + + + fprintf(cblk_dumpfp,"cflsh_blk = %p\n\n",&cflsh_blk); + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," global_lock.file = 0x%x\n",cflsh_blk.global_lock.file); + fprintf(cblk_dumpfp," global_lock.fname = %s\n",cflsh_blk.global_lock.filename); + fprintf(cblk_dumpfp," global_lock.line = %d\n",cflsh_blk.global_lock.line); + fprintf(cblk_dumpfp," global_lock.thread &= %p\n",&(cflsh_blk.global_lock.thread)); + fprintf(cblk_dumpfp," global_lock.thread = 0x%x\n",(uint32_t)(cflsh_blk.global_lock.thread)); + fprintf(cblk_dumpfp," flags = 0x%x\n",cflsh_blk.flags); + fprintf(cblk_dumpfp," timeout = 0x%x\n",cflsh_blk.timeout); + fprintf(cblk_dumpfp," num_active_chunks = 0x%x\n",cflsh_blk.num_active_chunks); + fprintf(cblk_dumpfp," num_bad_chunk_ids = 0x%x\n",cflsh_blk.num_bad_chunk_ids); + fprintf(cblk_dumpfp," caller_pid = 0x%"PRIx64"\n",(uint64_t)cflsh_blk.caller_pid); + fprintf(cblk_dumpfp," process_name = %s\n",cflsh_blk.process_name); + fprintf(cblk_dumpfp," thread_log_mask = 0x%x\n",cflsh_blk.thread_log_mask); + fprintf(cblk_dumpfp," ext_arg.flags = 0x%x\n",cflsh_blk.trace_ext.flags); + fprintf(cblk_dumpfp," ext_arg.log_number = 0x%x\n",cflsh_blk.trace_ext.log_number); + fprintf(cblk_dumpfp," head_afu = %p\n",cflsh_blk.head_afu); + fprintf(cblk_dumpfp," tail_afu = %p\n",cflsh_blk.tail_afu); + + fprintf(cblk_dumpfp," hash[] = %p\n\n",&cflsh_blk.hash[0]); + + + 
fprintf(cblk_dumpfp," eyec = 0x%x",cflsh_blk.eyec); + if (!CFLSH_EYECATCH_CBLK(&cflsh_blk)) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + + fflush(cblk_dumpfp); + + /* + * If global cflsh_blk is corrupted then return + * now. Don't attempt to traverse other + * data structures here, because this may + * result in a segmentation fault. + */ + return; + } + + fflush(cblk_dumpfp); + + for (i=0;ieyec); + + if (!CFLSH_EYECATCH_CHUNK(chunk)) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + fflush(cblk_dumpfp); + + break; + } + + fprintf(cblk_dumpfp," in_use = %d\n",chunk->in_use); + fprintf(cblk_dumpfp," flags = 0x%x\n",chunk->flags); + fprintf(cblk_dumpfp," index = 0x%x\n",chunk->index); + fprintf(cblk_dumpfp," dev_name = %s\n",chunk->dev_name); + fprintf(cblk_dumpfp," num_blocks = 0x%"PRIx64"\n",(uint64_t)chunk->num_blocks); + fprintf(cblk_dumpfp," num_blocks_lun = 0x%"PRIx64"\n",(uint64_t)chunk->num_blocks_lun); + fprintf(cblk_dumpfp," lock.file = 0x%x\n",chunk->lock.file); + fprintf(cblk_dumpfp," lock.fname = %s\n",chunk->lock.filename); + fprintf(cblk_dumpfp," lock.line = %d\n",chunk->lock.line); + fprintf(cblk_dumpfp," lock.thread & = %p\n",&(chunk->lock.thread)); + fprintf(cblk_dumpfp," lock.thread = 0x%x\n",(uint32_t)(chunk->lock.thread)); + fprintf(cblk_dumpfp," cache_buffer = %p\n",chunk->cache_buffer); + fprintf(cblk_dumpfp," num_cmds = 0x%x\n",chunk->num_cmds); + fprintf(cblk_dumpfp," num_active_cmds = 0x%x\n",chunk->num_active_cmds); + fprintf(cblk_dumpfp," cmd_start = %p\n",chunk->cmd_start); + fprintf(cblk_dumpfp," cmd_info (addr) = %p\n\n",chunk->cmd_info); + + + fflush(cblk_dumpfp); + + if (chunk->cmd_info) { + + /* + * It is possible this might be called via signal when the library + * is closing. Since we are not locking, there is no guarantee + * the rug can not be pulled out from under us. However we + * will try to take some steps to minimize this. 
First will + * save off the cmd_info address, since close will NULL it after + * it is free. Thus there is still a possibility that the data buffers + * have been reused by others, but we will try to minimize this some. + * We are not going to filter/check on CFLSH_CHNK_CLOSE, since it + * is possible we need to debug some issues at close time, and + * there is no guarantee this close check would prevent all + * potential issues here. + */ + + + cmd_info = chunk->cmd_info; + + num_cmds_processed = 0; + + num_waiting_cmds = 0; + + num_cmds_mgm_completed = 0; + + num_cmds_asy_mgm_completed = 0; + + for (j=0; j < chunk->num_cmds; j++) { + + if ((chunk->cmd_info) && + (cmd_info[j].in_use)) { + + + /* + * If chunk->cmd_info is NULL, then don't + * attempt to read these commands, since they + * have been freed and maybe allocated for others. + */ + + fprintf(cblk_dumpfp,"\n cmd_info[%d] = %p\n",j,&(cmd_info[j])); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," flags = 0x%x\n",cmd_info[j].flags); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," user_tag = 0x%x\n",cmd_info[j].user_tag); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," index = 0x%x\n",cmd_info[j].index); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," path_index = 0x%x\n",cmd_info[j].path_index); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," buf = %p\n",cmd_info[j].buf); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," lba = 0x%"PRIx64"\n", + (uint64_t)cmd_info[j].lba); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," nblocks = 0x%"PRIx64"\n", + (uint64_t)cmd_info[j].nblocks); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," status = 0x%"PRIx64"\n", + (uint64_t)cmd_info[j].status); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," in_use = 0x%x\n",cmd_info[j].in_use); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," state = 0x%x\n",cmd_info[j].state); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," retry_count = 0x%x\n",cmd_info[j].retry_count); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," transfer_size_byt = 0x%x\n", + 
cmd_info[j].transfer_size_bytes); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," transfer_size = 0x%"PRIx64"\n", + cmd_info[j].transfer_size); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," cmd = %p\n",&(chunk->cmd_start[j])); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," cmd->cmd_info = %p",chunk->cmd_start[j].cmdi); + if (chunk->cmd_start[j].cmdi == &(cmd_info[j])) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + fflush(cblk_dumpfp); + } + + fprintf(cblk_dumpfp," cmd->index = 0x%x\n",chunk->cmd_start[j].index); + cdb = CBLK_GET_CMD_CDB(chunk,&chunk->cmd_start[j]); + if (cdb) { + fprintf(cblk_dumpfp," cmd: op_code = 0x%x\n",cdb->scsi_op_code); + } + + fprintf(cblk_dumpfp," chunk = %p",cmd_info[j].chunk); + if (cmd_info[j].chunk == chunk) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + } + + fprintf(cblk_dumpfp," eyec = 0x%x",cmd_info[j].eyec); + + if ( !CFLSH_EYECATCH_CMDI(&cmd_info[j])) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + } + + + fflush(cblk_dumpfp); + + if (cmd_info[j].flags & CFLSH_PROC_CMD) { + num_cmds_processed++; + } + + if (cmd_info[j].state == CFLSH_MGM_CMP) { + num_cmds_mgm_completed++; + } else if (cmd_info[j].state == CFLSH_MGM_WAIT_CMP) { + num_waiting_cmds++; + } else if (cmd_info[j].state == CFLSH_MGM_ASY_CMP) { + num_cmds_asy_mgm_completed++; + } + + } + } /* for */ + + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," number of active commands processed = %d\n",num_cmds_processed); + + fprintf(cblk_dumpfp," number of active commands waiting = %d\n",num_waiting_cmds); + + fprintf(cblk_dumpfp," number of active commands completed = %d\n",num_cmds_mgm_completed); + + fprintf(cblk_dumpfp," number of active async commands completed = %d\n",num_cmds_asy_mgm_completed); + + + fprintf(cblk_dumpfp,"\n"); + } + + fprintf(cblk_dumpfp,"\n"); + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," stats (addr) = 
%p\n\n",&chunk->stats); + fflush(cblk_dumpfp); + fprintf(cblk_dumpfp," block_size = 0x%x\n",chunk->stats.block_size); + fprintf(cblk_dumpfp," max_transfer = 0x%"PRIx64"\n",chunk->stats.max_transfer_size); + fprintf(cblk_dumpfp," num_blocks_read = 0x%"PRIx64"\n",chunk->stats.num_blocks_read); + fprintf(cblk_dumpfp," num_blocks_writ = 0x%"PRIx64"\n",chunk->stats.num_blocks_written); + fprintf(cblk_dumpfp," no_cmd_room = 0x%"PRIx64"\n",chunk->stats.num_no_cmd_room); + fprintf(cblk_dumpfp," no_cmds_free = 0x%"PRIx64"\n",chunk->stats.num_no_cmds_free); + fprintf(cblk_dumpfp," no_cmds_free_fai= 0x%"PRIx64"\n",chunk->stats.num_no_cmds_free_fail); + fprintf(cblk_dumpfp," num_timeouts = 0x%"PRIx64"\n",chunk->stats.num_timeouts); + fprintf(cblk_dumpfp," num_fail_timeout= 0x%"PRIx64"\n",chunk->stats.num_fail_timeouts); + fprintf(cblk_dumpfp," num_cc_errors = 0x%"PRIx64"\n",chunk->stats.num_cc_errors); + fprintf(cblk_dumpfp," num_fc_errors = 0x%"PRIx64"\n",chunk->stats.num_fc_errors); + fprintf(cblk_dumpfp," num_afu_errors = 0x%"PRIx64"\n",chunk->stats.num_afu_errors); + fprintf(cblk_dumpfp," capi_data_st_err= 0x%"PRIx64"\n",chunk->stats.num_capi_data_st_errs); + fprintf(cblk_dumpfp," num_reset_contex= 0x%"PRIx64"\n",chunk->stats.num_reset_contexts); + fprintf(cblk_dumpfp," num_capi_adap_rs= 0x%"PRIx64"\n",chunk->stats.num_capi_adap_resets); + fprintf(cblk_dumpfp,"\n"); + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," blk_size_mult = 0x%x\n",chunk->blk_size_mult); + fprintf(cblk_dumpfp," thread_flags = 0x%x\n",chunk->thread_flags); + fprintf(cblk_dumpfp," head_free = %p\n",chunk->head_free); + fprintf(cblk_dumpfp," tail_free = %p\n",chunk->tail_free); + fprintf(cblk_dumpfp," head_act = %p\n",chunk->head_free); + fprintf(cblk_dumpfp," tail_act = %p\n",chunk->tail_free); + fprintf(cblk_dumpfp," num_paths = 0x%x\n",chunk->num_paths); + fprintf(cblk_dumpfp," cur_path = 0x%x\n",chunk->cur_path); + fprintf(cblk_dumpfp," path[] = %p\n\n",chunk->path); + + + fflush(cblk_dumpfp); + 
for (j=0; j < chunk->num_paths; j++) { + + fprintf(cblk_dumpfp,"\n path[%d] = %p\n",j,chunk->path[j]); + + + /* + * Do some basic tests to see if this path address appears + * to be valid. If not, then move to the next path. + */ + + if (chunk->path[j] == NULL) { + + fflush(cblk_dumpfp); + continue; + } + + if ( (ulong)(chunk->path[j]) & PATH_BAD_ADDR_MASK ) { + + fprintf(cblk_dumpfp," BAD PATH ADDR!!\n"); + fflush(cblk_dumpfp); + + continue; + + } + + fprintf(cblk_dumpfp," afu = %p\n",chunk->path[j]->afu); + fprintf(cblk_dumpfp," flags = 0x%x\n",chunk->path[j]->flags); + fprintf(cblk_dumpfp," path_index = 0x%x\n",chunk->path[j]->path_index); + fprintf(cblk_dumpfp," path_id = 0x%x\n",chunk->path[j]->path_id); + fprintf(cblk_dumpfp," path_id_mask = 0x%x\n",chunk->path[j]->path_id_mask); + fprintf(cblk_dumpfp," num_ports = 0x%x\n",chunk->path[j]->num_ports); + fprintf(cblk_dumpfp," chunk = %p",chunk->path[j]->chunk); + if (chunk->path[j]->chunk == chunk) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + } + fprintf(cblk_dumpfp," eyec = 0x%x",chunk->path[j]->eyec); + + if (!CFLSH_EYECATCH_PATH(chunk->path[j])) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + } + + + + fflush(cblk_dumpfp); + } + + fprintf(cblk_dumpfp,"\n"); + + chunk = chunk->next; + } + + } + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp,"\n afu list \n\n"); + + + fflush(cblk_dumpfp); + + afu = cflsh_blk.head_afu; + + + while (afu) { + + + fprintf(cblk_dumpfp,"\n afu = %p\n",afu); + + /* + * Do some basic tests to see if this AFU address appears + * to be valid. If not, then exit from this while loop, + * since we can not trust the next pointer . 
+ */ + + if ( (ulong)afu & AFU_BAD_ADDR_MASK ) { + + fprintf(cblk_dumpfp," BAD AFU ADDR!!\n"); + + break; + + } + + fprintf(cblk_dumpfp," eyec = 0x%x",afu->eyec); + + if (!CFLSH_EYECATCH_AFU(afu)) { + fprintf(cblk_dumpfp," (Valid)\n"); + } else { + fprintf(cblk_dumpfp," (Invalid !!)\n"); + fflush(cblk_dumpfp); + + break; + } + + + fprintf(cblk_dumpfp," flags = 0x%x\n",afu->flags); + fprintf(cblk_dumpfp," ref_count = 0x%x\n",afu->ref_count); + fprintf(cblk_dumpfp," lock.file = 0x%x\n",afu->lock.file); + fprintf(cblk_dumpfp," lock.fname = %s\n",afu->lock.filename); + fprintf(cblk_dumpfp," lock.line = %d\n",afu->lock.line); + fprintf(cblk_dumpfp," lock.thread = 0x%x\n",(uint32_t)(afu->lock.thread)); + fprintf(cblk_dumpfp," lock.thread & = %p\n",&(afu->lock.thread)); + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," contxt_id = 0x%"PRIx64"\n",afu->contxt_id); + fprintf(cblk_dumpfp," contxt_handle = 0x%x\n",afu->contxt_handle); + fprintf(cblk_dumpfp," type = 0x%x\n",afu->type); +#ifdef _AIX + fprintf(cblk_dumpfp," adap_devno = 0x%"PRIx64"\n",(uint64_t)afu->adap_devno); +#endif + fprintf(cblk_dumpfp," toggle = 0x%"PRIx64"\n",(uint64_t)afu->toggle); + fprintf(cblk_dumpfp," hrrq_start = %p\n",afu->p_hrrq_start); + fprintf(cblk_dumpfp," hrrq_curr = %p\n",afu->p_hrrq_curr); + fprintf(cblk_dumpfp," hrrq_end = %p\n",afu->p_hrrq_end); + fprintf(cblk_dumpfp," num_rrqs = 0x%x\n",afu->num_rrqs); + fprintf(cblk_dumpfp," num_issued_cmds = 0x%x\n",afu->num_issued_cmds); + fprintf(cblk_dumpfp," cmd_room = 0x%"PRIx64"\n",afu->cmd_room); + + fflush(cblk_dumpfp); + + fprintf(cblk_dumpfp," mmio = %p\n",afu->mmio); + fprintf(cblk_dumpfp," mmap_size = 0x%"PRIx64"\n",afu->mmap_size); + fprintf(cblk_dumpfp," master_name = %s\n",afu->master_name); + fprintf(cblk_dumpfp," head_path = %p\n",afu->head_path); + fprintf(cblk_dumpfp," tail_path = %p\n",afu->tail_path); + fprintf(cblk_dumpfp," head_complete = %p\n",afu->head_complete); + fprintf(cblk_dumpfp," tail_complete = 
%p\n",afu->tail_complete); + + afu = afu->next; + + fflush(cblk_dumpfp); + + } /* while */ + + + fflush(cblk_dumpfp); + + return; +} + + +/* + * NAME: cblk_signal_dump_handler + * + * FUNCTION: Sets up a signal handler to + * For signal to dump our internal data + * structures. + * + * RETURNS: + * + * 0 - Good completion, otherwise error + * + */ + +void cblk_signal_dump_handler(int signum, siginfo_t *siginfo, void *uctx) +{ + char reason[100]; + + if (signum == SIGSEGV) { + + CBLK_TRACE_LOG_FILE(1,"si_code = %d, si_addr = 0x%p", + siginfo->si_code,siginfo->si_addr); + + sprintf(reason,"SIGSEGV: si_code = %d si_addr = %p",siginfo->si_code,siginfo->si_addr); + + + } else { + + sprintf(reason,"Signal = %d",signum); + + } + + cblk_dump_debug_data(reason,__FILE__,__FUNCTION__,__LINE__,__DATE__); + + + + /* + * If we get here then + * issue default signal. + */ + + if (signum != SIGUSR1) { + + signal(signum,SIG_DFL); + kill(getpid(),signum); + } + + return; +} + +/* + * NAME: cblk_setup_sigsev_dump + * + * FUNCTION: Sets up a signal handler to + * For SIGSEGV to dump our internal data + * structures. + * + * RETURNS: + * + * 0 - Good completion, otherwise error + * + */ + +int cblk_setup_sigsev_dump(void) +{ + int rc = 0; + struct sigaction action, oaction; + + + + if (cblk_setup_dump_file()){ + + return -1; + + } + + + bzero((void *)&action,sizeof(action)); + + bzero((void *)&oaction,sizeof(oaction)); + + action.sa_sigaction = cblk_signal_dump_handler; + action.sa_flags = SA_SIGINFO; + + + if (sigaction(SIGSEGV, &action,&oaction)) { + + CBLK_TRACE_LOG_FILE(1,"Failed to set up SIGSEGV handler with errno = %d\n", + errno); + + return -1; + } + + + return rc; +} + + +/* + * NAME: cblk_setup_sigusr1_dump + * + * FUNCTION: Sets up a signal handler to + * For SIGSEGV to dump our internal data + * structures. 
+ * + * RETURNS: + * + * 0 - Good completion, otherwise error + * + */ + +int cblk_setup_sigusr1_dump(void) +{ + int rc = 0; + struct sigaction action, oaction; + + + + if (cblk_setup_dump_file()){ + + return -1; + + } + + + bzero((void *)&action,sizeof(action)); + + bzero((void *)&oaction,sizeof(oaction)); + + action.sa_sigaction = cblk_signal_dump_handler; + action.sa_flags = SA_SIGINFO; + + + if (sigaction(SIGUSR1, &action,&oaction)) { + + CBLK_TRACE_LOG_FILE(1,"Failed to set up SIGUSR handler with errno = %d\n", + errno); + + return -1; + } + + + return rc; +} diff --git a/src/block/cflash_block_internal.h b/src/block/cflash_block_internal.h new file mode 100644 index 00000000..df17fe64 --- /dev/null +++ b/src/block/cflash_block_internal.h @@ -0,0 +1,1530 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_internal.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _H_CFLASH_BLOCK_INT +#define _H_CFLASH_BLOCK_INT + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#include +#include /* For SYS_xxx definitions */ +#ifdef _LINUX_MTRACE +#include +#endif +#endif /* !_AIX && !_MACOSX */ +#ifdef SIM +#include "sim_pthread.h" +#else +#include +#endif +#include +#include +#ifndef _MACOSX +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef _OS_INTERNAL +#include +#else +#include +#endif + +#ifndef _AIX +typedef uint64_t dev64_t; +#endif /* !AIX */ + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + + +/************************************************************************/ +/* Ifdefs for different block library usage models */ +/************************************************************************/ + +#if defined(_KERNEL_MASTER_CONTXT) && defined(BLOCK_FILEMODE_ENABLED) +#error "Kernel MC is not supported in filemode" +#endif +//#define _SKIP_POLL_CALL 1 +//#define _SKIP_READ_CALL 1 +//#define _MASTER_CONTXT 1 +//#define _PERF_TEST 1 +#define _ERROR_INTR_MODE 1 /* Only rely on interrupts for errors. */ + /* Normal command completions are polled */ + /* from RRQ. */ + +#define _COMMON_INTRPT_THREAD 1 + + + + + + + +/************************************************************************/ +/* Legacy MMIO size */ +/************************************************************************/ +#define CFLASH_MMIO_CONTEXT_SIZE 0x10000 /* Size of each context's MMIO space */ + +#ifdef _MASTER_CONTXT +/* + * TODO: ?? Currently for master context we are still + * have the user map all processes space. When this + * changes we will move the values ifdefed out below. 
+ */ + +#define CAPI_FLASH_REG_SIZE 0x2000000 + +#ifdef _NOT_YET +/* + * If there is a master context, then we will only see + * our (one users) MMIO space. Since + * each context is 64K (0x10000), this leads to + * a size of 0x10000. + */ +#define CAPI_FLASH_REG_SIZE CFLASH_MMIO_CONTEXT_SIZE +#endif /* _NOT_YET */ + +#else +/* + * If there is no master context, then we will see + * the full MMIO space for all 512 contexts. Since + * each context is 64K (0x10000), this leads to + * a size of 0x2000000. + */ +#define CAPI_FLASH_REG_SIZE 0x2000000 +#endif + + + +/************************************************************************/ +/* Block library limits */ +/************************************************************************/ + +#define NUM_CMDS 64 + +#ifdef _COMMON_INTRPT_THREAD + +#define MAX_NUM_CMDS 8192 + +#else +/* + * For the case of !_COMMON_INTRPT_THREAD, + * the max number of commands indicates the number + * threads we can create in the case of I/O. + * So we need to bound this lower than _COMMON_INTRPT_THREAD + * since it only has one thread. 
+ */ +#define MAX_NUM_CMDS 1024 +#endif + +#define MAX_NUM_THREAD_LOGS 4095 /* Maximum number of thread logs allowed per */ + /* process */ + + +#define CFLSH_BLK_WAIT_CLOSE_RETRIES 500 /* Number of times to wait for active */ + /* commands to complete, when closing */ + +#define CFLSH_BLK_WAIT_CLOSE_DELAY 10000 /* Wait delay time in microseconds in */ + /* close when we are waiting for */ + /* active commands to complete */ +#define CFLSH_BLK_CHUNK_SET_UP 1 + +#define CAPI_FLASH_BLOCK_SIZE 4096 + +#define CFLASH_BLOCK_MAX_WAIT_ROOM_RETRIES 5000 /* Number of times to check for */ + /* command room before giving up */ +#define CFLASH_BLOCK_DELAY_ROOM 1000 /* Number of microseconds to delay */ + /* while waiting for command room */ + +#define CFLASH_BLOCK_MAX_WAIT_RRQ_RETRIES 10 /* Number of times to check for */ + /* RRQ before giving up */ +#define CFLASH_BLOCK_DELAY_RRQ 1000 /* Number of microseconds to delay */ + /* while waiting for RRQ */ + + +#define CFLASH_BLOCK_MAX_WAIT_RST_CTX_RETRIES 2500 /* Number of times to check for */ + /* reset context complete */ +#define CFLASH_BLOCK_DELAY_RST_CTX 10000 /* Number of microseconds to delay */ + /* while waiting for reset context */ + /* to complete. 
*/ + + + + + +/************************************************************************/ +/* Block library eye catchers */ +/************************************************************************/ +#define CFLSH_EYEC_INFO __EYEC4('c','h','I','N') /* chIN */ + +#define CFLSH_EYEC_AFU __EYEC4('c','A','F','U') /* cAFU */ + +#define CFLSH_EYEC_PATH __EYEC4('c','h','P','A') /* chPA */ + +#define CFLSH_EYEC_CHUNK __EYEC4('c','h','N','K') /* chNK */ + +#define CFLSH_EYEC_CBLK __EYEC4('c','b','L','K') /* cbLK */ + +#define CFLSH_EYECATCH_CMDI(cmdi) ((cmdi)->eyec != CFLSH_EYEC_INFO) + +#define CFLSH_EYECATCH_CHUNK(chunk) ((chunk)->eyec != CFLSH_EYEC_CHUNK) + +#define CFLSH_EYECATCH_PATH(path) ((path)->eyec != CFLSH_EYEC_PATH) + +#define CFLSH_EYECATCH_AFU(afu) ((afu)->eyec != CFLSH_EYEC_AFU) + +#define CFLSH_EYECATCH_CBLK(cblk) ((cblk)->eyec != CFLSH_EYEC_CBLK) + + + + +/************************************************************************/ +/* Miscellaneous */ +/************************************************************************/ +extern uint64_t cblk_lun_id; + +#define MIN(a,b) ((a)<(b) ? (a) : (b)) +#define MAX(a,b) ((a)>(b) ? (a) : (b)) + + +#ifndef _AIX +/* + * fetch_and_or and fetch_and_add are not available for linux user space. + * It appears that __sync_fetch_and_or, and __sync_fetch_and_add are doing the + * required functionality, respectively. 
+ */ + +#define fetch_and_or(ptr,val) (__sync_fetch_and_or(ptr,val)) + +#define fetch_and_add(ptr,val) (__sync_fetch_and_add(ptr,val)) + + +#endif /* !_AIX */ + + +/************************************************************************/ +/* Tracing/logging data structues and maroc */ +/************************************************************************/ +extern char *cblk_log_filename; +extern int cblk_log_verbosity; +extern FILE *cblk_logfp; +extern FILE *cblk_dumpfp; +extern int cblk_dump_level; +extern int cblk_notify_log_level; +extern int dump_sequence_num; + +extern pthread_mutex_t cblk_log_lock; + +extern uint32_t num_thread_logs; /* Number of thread log files */ + + +/* + * For thread trace tables, using pthread_t does not work + * work well since the values can be very large (64-bit) + * and there is not an obvious way to hash them into something + * more manageable. So for thread trace table we use + * gettid for non-AIX and pthread_getthreadid_np. pthread_getthreadid_np + * is a non-portible pthread interface to get thread sequence numbers, + * It is available in some BSD unixes, but not AIX nor Linux. + * However AIX does support a close derivative pthread_getunique_np, which + * can be used to implement it. For linux we use gettid via the syscall + * interface. + */ +#ifdef _AIX + +inline int pthread_getthreadid_np(void) +{ + int tid = 0; + pthread_t my_pthread = pthread_self(); + + if (pthread_getunique_np(&my_pthread,&tid)) { + + tid = 0; + } + + return tid; +} + + +#define CFLSH_BLK_GETTID (pthread_getthreadid_np()) +#else +#define CFLSH_BLK_GETTID (syscall(SYS_gettid)) +#endif /* ! AIX */ + +/* + * Trace/log the information to common file. If thread tracing + * is also on then log information to the hashed thread log file. + */ +#define CBLK_TRACE_LOG_FILE(verbosity,msg, ...) 
\ +do { \ + if ((cblk_log_filename != NULL) && \ + (verbosity <= cblk_log_verbosity)) { \ + pthread_mutex_lock(&cblk_log_lock); \ + if (cflsh_blk.flags & CFLSH_G_SYSLOG) { \ + char msg1[PATH_MAX]; \ + /* \ + * If we are using syslog \ + */ \ + sprintf(msg1,"%s,%s,%d,%s",__FILE__,__FUNCTION__,__LINE__,msg); \ + syslog(LOG_DEBUG,msg1,## __VA_ARGS__); \ + \ + } else { \ + /* \ + * If we are not using syslog, but tracing to files \ + */ \ + cblk_trace_log_data_ext(&(cflsh_blk.trace_ext), \ + cblk_logfp, \ + __FILE__,(char *)__FUNCTION__,__LINE__, \ + msg,## __VA_ARGS__); \ + if (num_thread_logs) { \ + int thread_index = CFLSH_BLK_GETTID & cflsh_blk.thread_log_mask; \ + \ + \ + if ((thread_index >=0) && \ + (thread_index < num_thread_logs)) { \ + \ + cflsh_blk.thread_logs[thread_index].ext_arg.log_number = cflsh_blk.trace_ext.log_number; \ + cblk_trace_log_data_ext(&(cflsh_blk.thread_logs[thread_index].ext_arg), \ + cflsh_blk.thread_logs[thread_index].logfp, \ + __FILE__,(char *)__FUNCTION__, \ + __LINE__,msg,## __VA_ARGS__); \ + \ + } \ + } \ + } \ + \ + pthread_mutex_unlock(&cblk_log_lock); \ + } \ +} while (0) + +typedef struct cflsh_thread_log_s { + trace_log_ext_arg_t ext_arg; + FILE *logfp; +} cflsh_thread_log_t; + + + +/************************************************************************/ +/* Live dump macro */ +/************************************************************************/ + +#define CBLK_LIVE_DUMP_THRESHOLD(level,msg) \ +do { \ + \ + if (level <= cblk_dump_level) { \ + cblk_dump_debug_data(msg,__FILE__,__FUNCTION__,__LINE__,__DATE__); \ + } \ + \ +} while (0) + +/************************************************************************/ +/* Notify/Log macro: This is only used when we want to control */ +/* amount of logging. 
*/ +/************************************************************************/ + +#define CBLK_NOTIFY_LOG_THRESHOLD(level,chunk,path_index,error_num,out_rc,reason, cmd) \ +do { \ + \ + if (level <= cblk_notify_log_level) { \ + cblk_notify_mc_err((chunk),(path_index),(error_num),(out_rc),(reason),(cmd)); \ + } \ + \ +} while (0) + + +/************************************************************************/ +/* OS specific LWSYNC macros */ +/************************************************************************/ + +#ifdef _AIX +#define CBLK_LWSYNC() asm volatile ("lwsync") +#elif _MACOSX +#define CBLK_LWSYNC() asm volatile ("mfence") +#else +#define CBLK_LWSYNC() __asm__ __volatile__ ("lwsync") +#endif + + +/************************************************************************/ +/* cflash block's internal cache. */ +/************************************************************************/ + + +#ifdef _NOT_YET +#define CFLASH_CACHE_SIZE 1024 /* Cache size in blocks/sectors */ +#else +#define CFLASH_CACHE_SIZE 0 +#endif + +extern size_t cblk_cache_size; /* Maximum cache size allowed */ + /* in blocks. */ + +/** + ** Store/display cache + **/ + +/* + * The cache organization is: + * NSET x SETSZ pages + * the cache is "block oriented" + * because all requests are of sectors. This means + * all addresses are valid. As a result we can implement a cache + * in which do not ignore any bits in the address (lba) + * + * Each cache entry takes more then 64 bits, 32 for the page + * and the rest for lru lists, tag and valid bits. + * The cache is write-thru. + */ + +#define CFLSH_BLK_L2NSET 2 /* Number of bits needed to */ + /* uniquely reference NSET items. */ +#define CFLSH_BLK_NSET (1 << CFLSH_BLK_L2NSET) /* Each Cache line "hit" has a */ + /* maximum of NSET choices stored */ + /* at any given time. The tag */ + /* from the macro CFLSH_BLK_GETTAG*/ + /* is used to determine which is */ + /* valid. 
*/ +#define CFLSH_BLK_L2SETSZ 17 /* Number of bits needed to */ + /* uniquely reference SETSZ items.*/ + /* NOTE: This define must never be*/ + /* smaller then the define */ + /* CFLSH_BLK_LARGEST_SIZE_TAG */ +#define CFLSH_BLK_SETSZ (1 <> (((uint64_t)lsize)+CFLSH_BLK_L2CACHEWSZ)) + +/* + * CFLSH_BLK_GETINX will compute cache line for a given address. The + * index is computed using the the lower M bits in the address, but not + * the last twelve bits, since we're assuming everything is word aligned + * (i.e. a multiple of 4). Thus M + N = 30. Where N is the number of + * bits used in CFLSH_BLK_GETTAG. + */ +#define CFLSH_BLK_GETINX(x,lsize) ((((uint64_t)x) >> CFLSH_BLK_L2CACHEWSZ) & ((1 << ((unsigned)lsize)) - 1)) + + + +#define CFLSH_BLK_FREEBITS (32 - 1 - 3*CFLSH_BLK_L2NSET) + +struct cflsh_cache_entry { + void *data; /* Pointer to data */ + uint valid : 1; + uint prev : CFLSH_BLK_L2NSET; + uint next : CFLSH_BLK_L2NSET; + uint lru : CFLSH_BLK_L2NSET; + uint free : CFLSH_BLK_FREEBITS; + uint64_t tag; /* old sizevalue CFLSH_BLK_L2TAGSZ */ +}; + + +#define lrulist entry[0].lru + +typedef struct cflsh_cache_line { + struct cflsh_cache_entry entry[CFLSH_BLK_NSET]; +} cflsh_cache_line_t; + + + +#define CFLSH_BLK_L2SSIZE 28 /* log base 2 of segment size */ + + +/*************************************************************************** + * + * cflsh_blk_lock + * + * Wrapper structure for mutex locks and accompanying macros + * + ***************************************************************************/ +typedef +struct cflsh_blk_lock_s { + pthread_mutex_t plock; + pthread_t thread; /* Thread id of lock holder */ + int count; /* Non-zero when someone holding this lock */ + char *filename;/* Filename string of lock taker */ + uint file; /* File number of lock taker */ + uint line; /* Line number of lock taker */ +} cflsh_blk_lock_t; + +/* + * Initialize the mutex lock fields + * + * cflsh_lock - cflsh_blk_lock_t structure + */ +#define 
CFLASH_BLOCK_LOCK_INIT(cflsh_lock) \ +do { \ + pthread_mutex_init(&((cflsh_lock).plock), NULL); \ + (cflsh_lock).count = 0; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + (cflsh_lock).thread = 0; \ +} while (0) + +#define CFLASH_BLOCK_LOCK_IFREE(cflsh_lock) \ +do { \ + pthread_mutex_destroy(&((cflsh_lock).plock)); \ + (cflsh_lock).count = 0; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + (cflsh_lock).thread = NULL; \ +} while (0) + +#define CFLASH_BLOCK_LOCK(cflsh_lock) \ +do { \ + \ + pthread_mutex_lock(&((cflsh_lock).plock)); \ + \ + if (cblk_log_verbosity >= 5) { \ + (cflsh_lock).thread = pthread_self(); \ + } \ + (cflsh_lock).file = CFLSH_BLK_FILENUM; \ + (cflsh_lock).filename = __FILE__; \ + (cflsh_lock).line = __LINE__; \ + \ + (cflsh_lock).count++; \ + \ +} while (0) + +#define CFLASH_BLOCK_UNLOCK(cflsh_lock) \ +do { \ + \ + (cflsh_lock).count--; \ + \ + (cflsh_lock).thread = 0; \ + (cflsh_lock).filename = NULL; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + \ + pthread_mutex_unlock(&((cflsh_lock).plock)); \ + \ +} while (0) + + +/* + * To minimize locking overhead, we only need the AFU + * lock if the AFU is being shared: either explicitly + * with the CFLSH_AFU_SHARED flag set, or implicitly when using + * MPIO (afu->ref_count > 1). Otherwise we rely on only + * the chunk lock to serialize. + * + * Even in the non-shared/non-MPIO case there will be a few + * (error) cases where we will need to explicitly get the AFU lock, + * such as when we are signaling or waiting on AFU events. For + * those situations we will explicitly use the LOCK/UNLOCK macros + * abovve + * + * NOTE: In general there is the possibility that the ref_count + * might change between the attempted lock and unlock. However this + * library never changes the afu->ref_count under afu->lock. Futhermore + * it does this only under the global lock when a device + * is opening/closing/failing. So this should not + * occur when we are doing I/O (i.e. 
CFLSH_AFU_SHARED + * is only set when the AFU is created: never or'ed in later). + */ + +/* Conditionally get AFU lock */ + +#define CFLASH_BLOCK_AFU_SHARE_LOCK(afu) \ +do { \ + if (((afu)->flags & CFLSH_AFU_SHARED) || \ + ((afu)->ref_count > 1)) { \ + CFLASH_BLOCK_LOCK((afu)->lock); \ + } \ +} while (0) + +/* Conditionally release AFU lock */ +#define CFLASH_BLOCK_AFU_SHARE_UNLOCK(afu) \ +do { \ + if (((afu)->flags & CFLSH_AFU_SHARED) || \ + ((afu)->ref_count > 1)) { \ + CFLASH_BLOCK_UNLOCK((afu)->lock); \ + } \ +} while (0) + + + + + +/*************************************************************************** + * + * cflsh_blk_rwlock + * + * Wrapper structure for mutex rwlocks and accompanying macros + * + ***************************************************************************/ +typedef +struct cflsh_blk_rwlock_s { + pthread_rwlock_t plock; + pthread_t thread; /* Thread id of lock holder */ + int count; /* Non-zero when someone holding this lock */ + char *filename;/* Filename string of lock taker */ + uint file; /* File number of lock taker */ + uint line; /* Line number of lock taker */ +} cflsh_blk_rwlock_t; + +#ifdef _USE_RW_LOCK +/* + * Initialize the mutex read/write lock fields + * + * cflsh_lock - cflsh_blk_rwlock_t structure + */ +#define CFLASH_BLOCK_RWLOCK_INIT(cflsh_lock) \ +do { \ + pthread_rwlock_init(&((cflsh_lock).plock), NULL); \ + (cflsh_lock).count = 0; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + (cflsh_lock).thread = 0; \ +} while (0) + +#define CFLASH_BLOCK_RWLOCK_IFREE(cflsh_lock) \ +do { \ + pthread_rwlock_destroy(&((cflsh_lock).plock)); \ + (cflsh_lock).count = 0; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + (cflsh_lock).thread = NULL; \ +} while (0) + +#define CFLASH_BLOCK_RD_RWLOCK(cflsh_lock) \ +do { \ + \ + pthread_rwlock_rdlock(&((cflsh_lock).plock)); \ + \ + if (cblk_log_verbosity >= 5) { \ + (cflsh_lock).thread = pthread_self(); \ + } \ + (cflsh_lock).file = CFLSH_BLK_FILENUM; \ + 
(cflsh_lock).filename = __FILE__; \ + (cflsh_lock).line = __LINE__; \ + \ + (cflsh_lock).count++; \ + \ +} while (0) + +#define CFLASH_BLOCK_WR_RWLOCK(cflsh_lock) \ +do { \ + \ + pthread_rwlock_wrlock(&((cflsh_lock).plock)); \ + \ + if (cblk_log_verbosity >= 5) { \ + (cflsh_lock).thread = pthread_self(); \ + } \ + (cflsh_lock).file = CFLSH_BLK_FILENUM; \ + (cflsh_lock).filename = __FILE__; \ + (cflsh_lock).line = __LINE__; \ + \ + (cflsh_lock).count++; \ + \ +} while (0) + +#define CFLASH_BLOCK_RWUNLOCK(cflsh_lock) \ +do { \ + \ + (cflsh_lock).count--; \ + \ + (cflsh_lock).thread = 0; \ + (cflsh_lock).filename = NULL; \ + (cflsh_lock).file = 0; \ + (cflsh_lock).line = 0; \ + \ + pthread_rwlock_unlock(&((cflsh_lock).plock)); \ + \ +} while (0) + +#else +/* + * Do not use read/write locks use mutex locks instead + */ +#define CFLASH_BLOCK_RWLOCK_INIT(cflsh_lock) CFLASH_BLOCK_LOCK_INIT(cflsh_lock) +#define CFLASH_BLOCK_RWLOCK_IFREE(cflsh_lock) CFLASH_BLOCK_LOCK_IFREE(cflsh_lock) +#define CFLASH_BLOCK_RD_RWLOCK(cflsh_lock) CFLASH_BLOCK_LOCK(cflsh_lock) +#define CFLASH_BLOCK_WR_RWLOCK(cflsh_lock) CFLASH_BLOCK_LOCK(cflsh_lock) +#define CFLASH_BLOCK_RWUNLOCK(cflsh_lock) CFLASH_BLOCK_UNLOCK(cflsh_lock) + +#endif + + + +/******************************************************************** + * Queueing Macros + * + * The CBLK_Q/DQ_NODE macros enqueue and dequeue nodes to the head + * or tail of a doubly-linked list. They assume that 'next' and 'prev' + * are the names of the queueing pointers. + * + *******************************************************************/ + +/* + * Give a node and the head and tail pointer for a list, enqueue + * the node at the head of the list. Assumes the list is + * doubly-linked and NULL-terminated at both ends, and that node + * is non-NULL. Casts to void allow commands of different data + * types than the list to be queued into the list. This is useful + * if one data type is a subset of another. 
+ */ +#define CBLK_Q_NODE_HEAD(head, tail, node,_node_prev,_node_next) \ +do { \ + \ + (node)->_node_prev = NULL; \ + (node)->_node_next = (head); \ + \ + if ((head) == NULL) { \ + \ + /* List is empty; 'node' is also the tail */ \ + (tail) = (node); \ + \ + } else { \ + \ + /* List isn't empty; old head must point to 'node' */ \ + (head)->_node_prev = (node); \ + } \ + \ + (head) = (node); \ + \ +} while (0) + +/* + * Give a node and the head and tail pointer for a list, enqueue + * the node at the tail of the list. Assumes the list is + * doubly-linked and NULL-terminated at both ends, and that node + * is non-NULL. Casts to void allow commands of different data + * types than the list to be queued into the list. This is useful + * if one data type is a subset of another. + */ +#define CBLK_Q_NODE_TAIL(head, tail, node,_node_prev,_node_next) \ +do { \ + \ + (node)->_node_prev = (tail); \ + (node)->_node_next = NULL; \ + \ + if ((tail) == NULL) { \ + \ + /* List is empty; 'node' is also the head */ \ + (head) = (node); \ + \ + } else { \ + \ + /* List isn't empty; old tail must point to 'node' */ \ + (tail)->_node_next = (node); \ + } \ + \ + (tail) = (node); \ + \ +} while (0) + +/* + * Given a node and the head and tail pointer for a list, dequeue + * the node from the list. Assumes the list is doubly-linked and + * NULL-terminated at both ends, and that node is non-NULL. + * + * Casts to void allow commands of different data types than the + * list to be dequeued into the list. This is useful if one data + * type is a subset of another. 
+ */ +#define CBLK_DQ_NODE(head, tail, node,_node_prev,_node_next) \ +do { \ + /* If node was head, advance the head to node's next */ \ + if ((head) == (node)) \ + { \ + (head) = ((node)->_node_next); \ + } \ + \ + /* If node was tail, retract the tail to node's prev */ \ + if ((tail) == (node)) \ + { \ + (tail) = ((node)->_node_prev); \ + } \ + \ + /* A follower's predecessor is now node's predecessor */ \ + if ((node)->_node_next) \ + { \ + (node)->_node_next->_node_prev = ((node)->_node_prev); \ + } \ + \ + /* A predecessor's follower is now node's follower */ \ + if ((node)->_node_prev) \ + { \ + (node)->_node_prev->_node_next = ((node)->_node_next); \ + } \ + \ + (node)->_node_next = NULL; \ + (node)->_node_prev = NULL; \ + \ +} while(0) + + + +/************************************************************************/ +/* Block library's poll related defines */ +/************************************************************************/ +#ifdef _ERROR_INTR_MODE + + /* + * In error interrupt mode, we only poll for + * errors and not command completions. Thus we + * do not wait for errors. If none are available, + * then it returns immediately. + */ +#define CAPI_POLL_IO_TIME_OUT 0 /* Time-out in milliseconds */ +#else + +#define CAPI_POLL_IO_TIME_OUT 1 /* Time-out in milliseconds */ + +#endif /* !_ERROR_INTR_MODE */ + +#ifndef _PERF_TEST + #ifdef _AIX + #define CFLASH_MAX_WAIT_LOOP_CNT 100000 + #else + #define CFLASH_MAX_WAIT_LOOP_CNT 500000 + #endif +#else +#define CFLASH_MAX_WAIT_LOOP_CNT 1 +#endif + +#ifdef _AIX +#define CFLASH_DELAY_NO_CMD_INTRPT 500/* Time in microseconds to delay */ +#else +#define CFLASH_DELAY_NO_CMD_INTRPT 100/* Time in microseconds to delay */ +#endif + /* between checking RRQ when */ + /* running without command */ + /* completion interrupts. 
*/ + + /* this value is set slightly */ + /* larger than 1 millisecond, */ + /* since are not delaying on the */ + /* first CFLASH_MIN_POLL_RETRIES */ + /* We wanted the total delay */ + /* to reach 50 seconds */ + + +#define CFLASH_MIN_POLL_RETRIES 100 /* Minimum number of poll retries*/ + /* before we start delaying. */ +#ifdef _AIX +#define CFLASH_MAX_POLL_RETRIES 20000 +#else +#define CFLASH_MAX_POLL_RETRIES 100000 +#endif + +#define CFLASH_MAX_POLL_FAIL_RETRIES 10 + +#define CFLASH_BLOCK_CMD_POLL_TIME 1000 /* Time in milliseconds to pass to poll */ + /* to wait for a free command */ + +#define CFLASH_BLOCK_MAX_CMD_WAIT_RETRIES 1000 /* Number of times to poll to wait */ + /* for free command */ + +#define CFLASH_BLOCK_FREE_CMD_WAIT_DELAY 100 /* Number of microseconds to wait */ + /* for a free command. */ + +#define CFLASH_BLOCK_ADAP_POLL_DELAY 100 /* Adapter poll delay in */ + /* microseconds. */ + +/* + * TODO?? Is there a cleaner way to do this? Ideally have the + * OS specific code like this in the OS specific source file. + * Maybe this means we need OS specific header files + * (cflash_block_linux.h, cflash_block_aix.h) for this. 
+ */ + +/************************************************************************/ +/* CFLASH_POLL_LIST_INIT -- Initialize poll list prior to poll call; */ +/************************************************************************/ +#ifdef _AIX + +#define CFLASH_DISK_POLL_INDX 1 +#define CFLASH_ADAP_POLL_INDX 0 +#define CFLASH_POLL_LIST_INIT(chunk,path,poll_list) \ +struct pollfd (poll_list)[2] = { { (path)->afu->poll_fd, POLLIN|POLLMSG|POLLPRI, 0},\ + { (chunk)->fd, POLLPRI, 0} }; + +#define CFLASH_CLR_POLL_REVENTS(chunk,path,poll_list) \ +do { \ + (poll_list)[CFLASH_ADAP_POLL_INDX].revents = 0; \ + (poll_list)[CFLASH_DISK_POLL_INDX].revents = 0; \ +} while (0) + +#define CFLASH_SET_POLLIN(chunk,path,poll_list) \ +do { \ + (poll_list)[CFLASH_ADAP_POLL_INDX].revents |= POLLIN; \ +} while (0) + + + +#else +#define CFLASH_DISK_POLL_INDX 0 +#define CFLASH_POLL_LIST_INIT(chunk,path,poll_list) \ +struct pollfd (poll_list)[1] = {{ (path)->afu->poll_fd, POLLIN, 0}}; + + +/* + * Linux does not require revents to be cleared + */ + +#define CFLASH_CLR_POLL_REVENTS(chunk,path,poll_list) + + +/* + * Linux does not require POLLIN to be set + */ + +#define CFLASH_SET_POLLIN(chunk,path,poll_list) + + +#endif + + +/************************************************************************/ +/* CFLASH_POLL -- Wrapper for poll */ +/************************************************************************/ +#ifdef _AIX +#define CFLASH_POLL(poll_list,time_out) (poll(&((poll_list)[0]),2,(time_out))) +#else +#define CFLASH_POLL(poll_list,time_out) (poll(&((poll_list)[0]),1,(time_out))) +#endif + + + + +/************************************************************************/ +/* cflash block's data structures */ +/************************************************************************/ +typedef struct cflsh_async_thread_cmp_s { + struct cflsh_chunk_s *chunk; /* Chunk associated with this */ + /* thread. */ + int cmd_index; /* Command index associated */ + /* associated with thread. 
 */

} cflsh_async_thread_cmp_t;


/*
 * Disposition codes returned by the per-AFU error processors
 * (see process_adap_err in cflsh_chunk_fcn_ptrs_t): tell the caller
 * whether a failed command should be failed, ignored, or retried.
 */
typedef
enum {
    CFLASH_CMD_FATAL_ERR     = -1,  /* Fatal command error. No recovery */
    CFLASH_CMD_IGNORE_ERR    = 0,   /* Ignore command error             */
    CFLASH_CMD_RETRY_ERR     = 1,   /* Retry command error recovery     */
    CFLASH_CMD_DLY_RETRY_ERR = 2,   /* Retry command with delay error   */
                                    /* recovery                         */
} cflash_cmd_err_t;




/************************************************************************/
/* Block library I/O related defines                                    */
/************************************************************************/
#define CFLASH_ASYNC_OP     0x01 /* Request is an async I/O operation */
#define CFLASH_READ_DIR_OP  0x02 /* Read direction operation          */
#define CFLASH_WRITE_DIR_OP 0x04 /* Write direction operation         */


#define CAPI_SCSI_IO_TIME_OUT 5     /* SCSI I/O time-out — presumably   */
                                    /* seconds; TODO confirm units      */

#define CAPI_SCSI_IO_RETRY_DELAY 5  /* Delay before retrying a SCSI I/O */
                                    /* — presumably seconds; confirm    */

#define CAPI_CMD_MAX_RETRIES 3 /* Maximum number of times to retry command */


/************************************************************************/
/* cflash block's command (IOARCB wrapper)                              */
/************************************************************************/

/*
 * NOTE: cflsh_cmd_mgm_t needs to have a size that is
 *       a multiple of 64. As a result the reserved2
 *       array needs to be adjusted appropriately to
 *       ensure this
 */
#define CFLASH_BLOCK_CMD_ALIGNMENT 64 /* Byte alignment required of */
                                      /* of commands. This must be  */
                                      /* a power of 2.              */
typedef struct cflsh_cmd_mgm_s {
    union {
	sisl_iocmd_t sisl_cmd;          /* SIS Lite AFU command and  */
				        /* response                  */
	char generic[64];               /* Pads the union to one     */
                                        /* 64-byte alignment unit    */
    };
    struct cflsh_cmd_info_s *cmdi;      /* Associated command info   */
    int index;                          /* index of command          */
    /* NOTE(review): both branches of this #if declare an identical  */
    /* reserved2[10]; the conditional appears to exist only as a     */
    /* hook for platform-specific pad sizing — confirm before        */
    /* collapsing it.                                                */
#if !defined(__64BIT__) && defined(_AIX)
    int reserved2[10];                  /* Reserved for future use   */
#else
    int reserved2[10];                  /* Reserved for future use   */
#endif
} cflsh_cmd_mgm_t;



/************************************************************************/
/* cflash block's command info structure.
There is a one-to-one */ +/* association between cmd_infos and command wrappers. */ +/************************************************************************/ +typedef struct cflsh_cmd_info_s { + + int flags; +#define CFLSH_ASYNC_IO 0x01 /* Async I/O */ +#define CFLSH_ASYNC_IO_SNT 0x02 /* Async I/O sent/issued */ +#define CFLSH_MODE_READ 0x04 /* Read request */ +#define CFLSH_MODE_WRITE 0x08 /* Write request */ +#define CFLSH_ATHRD_EXIT 0x10 /* Async interrupt thread for */ + /* command is exiting. */ +#define CFLSH_PROC_CMD 0x20 /* CBLK_PROCESS_CMD has */ + /* processed this command */ +#define CFLSH_CMD_INFO_UTAG 0x40 /* user_tag field is valid. */ +#define CFLSH_CMD_INFO_USTAT 0x80 /* User status field is valid */ + time_t cmd_time; /* Time this command was */ + /* created */ + int user_tag; /* User defined tag for this */ + /* command. */ + int index; /* index of command */ + int path_index; /* Path to issue command */ + cblk_arw_status_t *user_status;/* User specified status */ + void *buf; /* Buffer associate for this */ + /* I/O request */ + cflash_offset_t lba; /* Starting LBA for for this */ + /* I/O request */ + size_t nblocks; /* Size in blocks of this */ + /* I/O request */ + uint64_t status; /* Status of command */ + + int in_use:1; /* Indicates if the assciated */ + /* command is in use. */ + int state:4; /* State */ +#define CFLSH_MGM_PENDFREE 0x00 /* Not yet issued or free */ +#define CFLSH_MGM_WAIT_CMP 0x01 /* Waiting for completion */ +#define CFLSH_MGM_CMP 0x02 /* Command completed */ +#define CFLSH_MGM_ASY_CMP 0x03 /* Async command completion */ +#define CFLSH_MGM_HALTED 0x04 /* Command is halted due to */ + /* adapter error recovery */ + /* processing is mostly done */ + int retry_count: 4; /* Retry count */ + int transfer_size_bytes:1; /* When set indicates the */ + /* transfer size is bytes. 
*/ + /* otherwise it is in blocks */ + int rsvd:22; /* reserved */ + size_t transfer_size; /* the amount of data */ + /* transferred in bytes or */ + /* blocks. */ + pthread_t thread_id; /* Async thread id */ + pthread_cond_t thread_event; /* Thread event for this cmd */ + struct cflsh_async_thread_cmp_s async_data; /* Data passed to async*/ + /* thread */ + uint64_t seq_num; /* The sequence number we */ + /* internally assigned to */ + /* this command. */ + struct cflsh_cmd_info_s *free_next;/* Next free command */ + struct cflsh_cmd_info_s *free_prev;/* Prevous free command */ + struct cflsh_cmd_info_s *act_next;/* Next active command */ + struct cflsh_cmd_info_s *act_prev;/* Prevous active command */ + struct cflsh_cmd_info_s *complete_next;/* Next completed command */ + struct cflsh_cmd_info_s *complete_prev;/* Prevous completed command */ + struct cflsh_chunk_s *chunk;/* Chunk associated with */ + /* command. */ + eye_catch4b_t eyec; /* Eye catcher */ + +} cflsh_cmd_info_t; + + +typedef +enum { + CFLASH_BLK_CHUNK_NONE = 0x0, /* No command type */ + CFLASH_BLK_CHUNK_SIS_LITE = 0x1, /* Chunk for SIS Lite device*/ + CFLASH_BLK_CHUNK_SIS_SAS64 = 0x2, /* Future SISSAS64 chunk */ + /* type. */ +} cflsh_block_chunk_type_t; + + + +/************************************************************************/ +/* Function pointers to associated AFU implementation for a specific */ +/* chunk. 
*/ +/************************************************************************/ +typedef struct cflsh_chunk_fcn_ptrs { + int (*get_num_interrupts)(struct cflsh_chunk_s *chunk, int path_index); + uint64_t (*get_cmd_room)(struct cflsh_chunk_s *chunk, int path_index); + int (*adap_setup)(struct cflsh_chunk_s *chunk, int path_index); + uint64_t (*get_intrpt_status)(struct cflsh_chunk_s *chunk, int path_index); + void (*inc_rrq)(struct cflsh_chunk_s *chunk, int path_index); + uint32_t (*get_cmd_data_length)(struct cflsh_chunk_s *chunk, cflsh_cmd_mgm_t *cmd); + scsi_cdb_t *(*get_cmd_cdb)(struct cflsh_chunk_s *chunk, cflsh_cmd_mgm_t *cmd); + cflsh_cmd_mgm_t *(*get_cmd_rsp)(struct cflsh_chunk_s *chunk, int path_index); + int (*build_adap_cmd)(struct cflsh_chunk_s *chunk,int path_index,cflsh_cmd_mgm_t *cmd, + void *buf, size_t buf_len, int flags); + int (*update_adap_cmd)(struct cflsh_chunk_s *chunk,int path_index,cflsh_cmd_mgm_t *cmd, int flags); + int (*issue_adap_cmd)(struct cflsh_chunk_s *chunk, int path_index,cflsh_cmd_mgm_t *cmd); + int (*complete_status_adap_cmd)(struct cflsh_chunk_s *chunk,cflsh_cmd_mgm_t *cmd); + void (*init_adap_cmd)(struct cflsh_chunk_s *chunk,cflsh_cmd_mgm_t *cmd); + void (*init_adap_cmd_resp)(struct cflsh_chunk_s *chunk,cflsh_cmd_mgm_t *cmd); + void (*copy_adap_cmd_resp)(struct cflsh_chunk_s *chunk,cflsh_cmd_mgm_t *cmd,void *buffer, int buffer_size); + void (*set_adap_cmd_resp_status)(struct cflsh_chunk_s *chunk,cflsh_cmd_mgm_t *cmd, int success); + int (*process_adap_intrpt)(struct cflsh_chunk_s *chunk,int path_index,cflsh_cmd_mgm_t **cmd, int intrpt_num, + int *cmd_complete,size_t *transfer_size); + int (*process_adap_convert_intrpt)(struct cflsh_chunk_s *chunk,int path_index,cflsh_cmd_mgm_t **cmd, int intrpt_num, + int *cmd_complete,size_t *transfer_size); + cflash_cmd_err_t (*process_adap_err)(struct cflsh_chunk_s *chunk, int path_index,cflsh_cmd_mgm_t *cmd); + int (*reset_adap_contxt)(struct cflsh_chunk_s *chunk, int path_index); +} 
cflsh_chunk_fcn_ptrs_t; + + + +typedef struct cflsh_blk_master_s { + void *mc_handle; /* Master context handle */ + uint64_t num_blocks; /* Maximum size in blocks of */ + /* this chunk allocated by */ + /* master */ + uint64_t mc_page_size; /* Master page size in its */ + /* block allocation table. */ + + +} cflsh_blk_master_t; + + + +typedef +enum { + CFLASH_AFU_NOT_INUSE = 0x0, /* AFU is not in use by */ + /* other paths. */ + CFLASH_AFU_MPIO_INUSE = 0x1, /* AFU is in use by other */ + /* MPIO paths for this lun */ + CFLASH_AFU_SHARE_INUSE = 0x2, /* AFU is in use by another */ + /* lun only. */ +} cflsh_afu_in_use_t; + + + +/************************************************************************/ +/* cflsh_afu - The data structure for an AFU */ +/************************************************************************/ +typedef struct cflsh_afu_s { + struct cflsh_afu_s *prev; /* Previous path in list */ + struct cflsh_afu_s *next; /* Next path in list */ + int flags; /* Flags for this path */ +#define CFLSH_AFU_SHARED 0x1 /* This AFU can be shared */ +#define CFLSH_AFU_HALTED 0x2 /* AFU is in a halted state */ + int ref_count; /* Reference count for this */ + /* path */ + int poll_fd; /* File descriptor for poll or */ + /* select calls. */ + cflsh_blk_lock_t lock; /* Lock for this path */ + uint64_t contxt_id; /* Full Context ID provided */ + /* by master context, which */ + /* include additional generated*/ + /* counts in the upper word */ + int contxt_handle; /* The portion of the */ + /* contxt_id field that is */ + /* used to interact directly */ + /* with the AFU. */ + uint64_t toggle; /* Toggle bit for RRQ */ + cflsh_block_chunk_type_t type;/* CAPI block AFU type */ +#ifdef _AIX + dev64_t adap_devno; /* Devno of adapter. 
*/ +#endif /* _AIX */ + uint64_t *p_hrrq_start; /* Start of Host */ + /* Request/Response Queue */ + uint64_t *p_hrrq_end; /* End of Host */ + /* Request/Response Queue */ + volatile uint64_t *p_hrrq_curr;/* Current Host */ + /* Request/Response Queue Entry*/ + int num_rrqs; /* Number of RRQ elements */ + int32_t num_issued_cmds; /* Number of issued commands */ + void *mmio_mmap; /* MMIO address returned by */ + /* MMAP. The value returned */ + /* is the starting address for */ + /* all contexts on this */ + /* adapter. For multi_context */ + /* (multi-process), our */ + /* context's MMIO starting */ + /* address will be in offset */ + /* from this returned address. */ + time_t reset_time; /* Time this AFU was reset */ + volatile void *mmio; /* Start of this chunk's MMIO */ + /* space */ + size_t mmap_size; /* Size of MMIO mapped area */ + uint64_t cmd_room; /* Number of commands we can */ + /* issue to AFU now */ + + char master_name[PATH_MAX]; /* Device special filename */ + /* for master context */ + pthread_cond_t resume_event;/* Thread event to wait for */ + /* resume (after AFU reset) */ + cflsh_blk_master_t master; /* Master context data */ + struct cflsh_path_s *head_path; /* Head of list of paths */ + struct cflsh_path_s *tail_path; /* Tail of list of paths */ + cflsh_cmd_info_t *head_complete; /* Head of complete */ + /* complete. These are */ + /* that completed for a */ + /* different chunk than the */ + /* one being handled. */ + cflsh_cmd_info_t *tail_complete; /* Tail of complete */ + /* complete. These are */ + /* that completed for a */ + /* different chunk than the */ + /* one being handled. 
*/ + + eye_catch4b_t eyec; /* Eye catcher */ + + +} cflsh_afu_t; + +/************************************************************************/ +/* cflsh_path - The data structure for a chunk's path */ +/************************************************************************/ +typedef struct cflsh_path_s { + + struct cflsh_path_s *prev; /* Previous path in AFU list */ + struct cflsh_path_s *next; /* Next path in AFU list */ + cflsh_afu_t *afu; /* AFU associated with this */ + /* path */ + struct cflsh_chunk_s *chunk;/* Chunk associated for this */ + /* path. */ + int flags; /* Flags for this path */ +#define CFLSH_PATH_ACT 0x0001 /* Path is active/enabled */ +#define CFLSH_CHNK_SIGH 0x0002 /* MMIO signal handler is setup*/ +#define CFLSH_PATH_RST 0x0004 /* Unprocessed context reset */ +#define CFLSH_PATH_A_RST 0x0008 /* Unprocessed adap reset */ + + int path_index; /* Path to issue command */ + uint16_t path_id; /* Path id of selected path */ + uint32_t path_id_mask; /* paths to use to access this */ + /* LUN */ + uint16_t num_ports; /* Number of ports associated */ + /* this path. */ + + /* for this path. */ + uint64_t lun_id; /* Lun ID for this entity */ + + // TODO: ?? Does type belong in afu_t only? + cflsh_block_chunk_type_t type;/* CAPI block AFU type */ + cflsh_chunk_fcn_ptrs_t fcn_ptrs; /* Function pointers for */ + /* this chunk */ + + union { + struct { + uint32_t resrc_handle; /* Resource handle */ + } sisl; + }; + pthread_cond_t resume_event; /* Thread event to wait for */ + /* resume (after AFU reset) */ + // TODO: ?? Does sig jump belong in afu_t? 
+ struct sigaction old_action;/* Old action */ + struct sigaction old_alrm_action;/* Old alarm action */ + volatile void *upper_mmio_addr;/* Upper offset for MMIOs to*/ + /* be used to detect bad MMIO */ + jmp_buf jmp_mmio; /* Used to long jump around bad*/ + /* MMIO operations */ +#ifdef _REMOVE + jmp_buf jmp_read; /* Used to long jump around */ + /* read hangs */ +#endif /* REMOVE */ + eye_catch4b_t eyec; /* Eye catcher */ + +} cflsh_path_t; + +#define CFLSH_BLK_MAX_NUM_PATHS 16 + +/************************************************************************/ +/* Chunk hashing defines */ +/************************************************************************/ +#define CHUNK_BAD_ADDR_MASK 0xfff /* Chunk's should be allocated page aligned */ + /* this mask allows us to check for bad */ + /* chunk addresses */ + +#define MAX_NUM_CHUNKS_HASH 64 + +#define CHUNK_HASH_MASK 0x0000003f + + +/************************************************************************/ +/* Other structs defines */ +/************************************************************************/ + +#define PATH_BAD_ADDR_MASK 0xfff /* Path should be allocated page aligned */ + /* this mask allows us to check for bad */ + /* path addresses */ + +#define AFU_BAD_ADDR_MASK 0xfff /* AFU should be allocated page aligned */ + /* this mask allows us to check for bad */ + /* path addresses */ + +/************************************************************************/ +/* cflsh_chunk - The data structure for a chunk: a virtual or physical */ +/* lun. */ +/************************************************************************/ +typedef struct cflsh_chunk_s { + struct cflsh_chunk_s *prev; /* Previous chunk in list */ + struct cflsh_chunk_s *next; /* Next chunk in list */ + uint8_t in_use; /* This chunk is in use */ + int flags; /* Flags for this chunk */ +#define CFLSH_CHNK_VLUN 0x0001 /* This is a virtual lun */ +#define CFLSH_CHNK_SHARED 0x0002 /* This can share AFUs with */ + /* other chunks. 
*/ +#define CFLSH_CHNK_CLOSE 0x0004 /* chunk is closing */ +#define CFLSH_CHUNK_FAIL_IO 0x0010 /* Chunk is in failed state */ +#define CFLSH_CHNK_RDY 0x0020 /* chunk is online/ready */ +#define CFLSH_CHNK_RD_AC 0x0040 /* chunk has read access */ +#define CFLSH_CHNK_WR_AC 0x0080 /* chunk has write access */ +#define CFLSH_CHNK_NO_BG_TD 0x0100 /* Chunk is not allowed to use */ + /* background threads */ +#define CFLSH_CHNK_NO_RESRV 0x0200 /* Chunk is not using reserves */ +#define CFLSH_CHNK_HALTED 0x0400 /* Chunk is in a halted state */ +#define CFLSH_CHNK_MPIO_FO 0x0800 /* Chunk is using MPIO fail */ + /* over. */ +#define CFLSH_CHNK_VLUN_SCRUB 0x1000 /* Chunk vlun is scrubbed */ + /* on any size change */ + /* and close. */ + chunk_id_t index; /* Chunk index number */ + int fd; /* File descriptor */ + char dev_name[PATH_MAX]; /* Device special filename */ + uint32_t cache_size; /* Size of cache */ + uint32_t l2setsz; /* This determines how much */ + /* of the cache we can use. */ + /* A value of 11 indicates */ + /* 2048 (2K)cache lines, a */ + /* value 17 indicates (128K) */ + /* cache lines. */ + + uint64_t vlun_max_num_blocks;/* For virtual luns this */ + /* indicates how many blocks */ + /* the master allocated for us */ + /* which may be more than */ + /* initially requested. */ + /* this chunk. */ + uint64_t num_blocks; /* Maximum size in blocks of */ + /* this chunk. 
*/ + + uint64_t start_lba; /* Physical LBA at which this */ + /* chunk starts */ + cflsh_blk_lock_t lock; /* Lock for this chunk */ + + uint32_t num_active_cmds; /* Number of active commands */ + int num_cmds; /* Number of commands in */ + /* in command queue */ + cflsh_cmd_mgm_t *cmd_start; /* Start of command queue */ + cflsh_cmd_mgm_t *cmd_end; /* End of command queue */ + cflsh_cmd_mgm_t *cmd_curr; /* Current command */ + cflsh_cmd_info_t *cmd_info; /* Command info structure */ + + cflsh_cache_line_t *cache; /* cache for this chunk */ + void *cache_buffer; /* Cached data buffer managed */ + /* by the cache data struct */ + + chunk_stats_t stats; + + uint32_t blk_size_mult; /* The multiple needed to */ + /* convert from the device's */ + /* block size to 4K. */ + pthread_t thread_id; /* Async thread id */ + uint16_t thread_flags; /* Flags passed to thread */ +#define CFLSH_CHNK_POLL_INTRPT 0x0001 /* Poll for interrupts */ +#define CFLSH_CHNK_EXIT_INTRPT 0x0002 /* Thread should exit */ + pthread_cond_t thread_event; /* Thread event for this chunk*/ + pthread_cond_t cmd_cmplt_event;/* Thread event indicating */ + /* commands have completed. */ + struct cflsh_async_thread_cmp_s intrpt_data;/* Data passed */ + /* interrupt thread handler. */ + cflsh_cmd_info_t *head_free; /* Head of free command */ + /* info. */ + cflsh_cmd_info_t *tail_free; /* Tail of free command */ + /* info. */ + cflsh_cmd_info_t *head_act; /* Head of active command */ + /* info. */ + cflsh_cmd_info_t *tail_act; /* Tail of active command */ + /* info. 
*/ + int num_paths; /* Number of paths */ + int cur_path; /* Current path being used */ + uint64_t num_blocks_lun; /* Maximum size in blocks of */ + /* this lpysical lun */ + cflsh_path_t *path[CFLSH_BLK_MAX_NUM_PATHS]; /* Adapter paths for this chunk */ + eye_catch4b_t eyec; /* Eye catcher */ + +} cflsh_chunk_t; + + + +/************************************************************************/ +/* cflsh_block - Global library data structure */ +/************************************************************************/ + +typedef struct cflsh_block_s { +#ifdef _USE_RW_LOCK + cflsh_blk_rwlock_t global_lock; +#else + cflsh_blk_lock_t global_lock; +#endif + int flags; /* Global flags for this chunk */ +#define CFLSH_G_LUN_ID_VAL 0x0002 /* Lun ID field is valid */ +#define CFLSH_G_SYSLOG 0x0008 /* Use syslog for all tracing */ + int next_chunk_id; /* Chunk id of next allocated */ + /* chunk. */ + pid_t caller_pid; /* Process ID of caller of */ + /* this library. */ + + uint8_t timeout_units; /* The units used for time-outs*/ +#define CFLSH_G_TO_SEC 0x0 /* Time out is expressed in */ + /* seconds. */ +#define CFLSH_G_TO_MSEC 0x1 /* Time out is expressed in */ + /* milliseconds. */ +#define CFLSH_G_TO_USEC 0x2 /* Time out is expressed in */ + /* microseconds. */ + int timeout; /* Time out for IOARCBs using */ + /* this library. */ + int num_active_chunks; /* Number of active chunks */ + int num_max_active_chunks; /* Maximum number of active */ + /* chunks seen at a time. */ + int num_bad_chunk_ids; /* Number of times we see a */ + /* a bad chunk id. */ + cflsh_afu_t *head_afu; /* Head of list of AFUs */ + cflsh_afu_t *tail_afu; /* Tail of list of AFUs */ + + cflsh_chunk_t *hash[MAX_NUM_CHUNKS_HASH]; + + + uint64_t next_chunk_starting_lba; /* This is the starting LBA */ + /* available for the next chunk*/ + /* NOTE: The setup of a chunk's*/ + /* LBA will be done in the MC, */ + /* when code and functionality */ + /* is implemented. 
For now we */ + /* are using a simplistic and */ + /* flawed approach of assigning*/ + /* phyiscal LBAs to chunks. */ + /* This is approach is prone to*/ + /* fragmentation issues, but */ + /* allows simple virtual lun */ + /* environments */ + int port_select_mask; /* Port selection mask to use */ + /* in non-MC mode. */ + + char *process_name; /* Name of process using this */ + /* library if known. */ + uint64_t lun_id; /* Lun ID */ + + trace_log_ext_arg_t trace_ext; /* Extended argument for trace */ + uint32_t thread_log_mask; /* Mask used to hash thread */ + /* logs into specific files. */ + cflsh_thread_log_t *thread_logs; /* Array of log files per thread*/ + +#ifdef _SKIP_READ_CALL + int adap_poll_delay;/* Adapter poll delay time in */ + /* microseconds */ +#endif /* _SKIP_READ_CALL */ + eye_catch4b_t eyec; /* Eye catcher */ + +} cflsh_block_t; + +extern cflsh_block_t cflsh_blk; + +#define CFLASH_WAIT_FREE_CMD 1 /* Wait for a free command */ + +#define CFLASH_ISSUE_RETRY 1 /* Issue retry for a command */ + + +/* Compile time check suitable for use in a function */ +#define CFLASH_COMPILE_ASSERT(test) \ +do { \ + struct __Fo0 { char v[(test) ? 
1 : -1]; } ; \ +} while (0) + +/************************************************************************/ +/* Interrupt numbers */ +/************************************************************************/ + +typedef +enum { + CFLSH_BLK_INTRPT_CMD_CMPLT = 1, /* Command complete interrupt */ + CFLSH_BLK_INTRPT_STATUS = 2, /* Status interrupt */ + + +} cflash_block_intrpt_numbers_t; + + +/************************************************************************/ +/* Notify reason codes */ +/************************************************************************/ + +typedef +enum { + CFLSH_BLK_NOTIFY_TIMEOUT = 1, /* Command time out */ + CFLSH_BLK_NOTIFY_AFU_FREEZE = 2, /* AFU freeze/UE */ + CFLSH_BLK_NOTIFY_AFU_ERROR = 3, /* AFU Error */ + CFLSH_BLK_NOTIFY_AFU_RESET = 4, /* AFU is being reset */ + CFLSH_BLK_NOTIFY_SCSI_CC_ERR = 5, /* Serious SCSI check */ + /* condition error */ + CFLSH_BLK_NOTIFY_DISK_ERR = 6, /* Serious disk error */ + CFLSH_BLK_NOTIFY_ADAP_ERR = 7, /* Serious adapter error */ + CFLSH_BLK_NOTIFY_SFW_ERR = 8 /* Serious software error*/ + + +} cflash_block_notify_reason_t; + +#endif /* _H_CFLASH_BLOCK_INT */ diff --git a/src/block/cflash_block_kern_mc.c b/src/block/cflash_block_kern_mc.c new file mode 100644 index 00000000..4b6c4eb7 --- /dev/null +++ b/src/block/cflash_block_kern_mc.c @@ -0,0 +1,4923 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_kern_mc.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/* ---------------------------------------------------------------------------- + * + * This file contains the linux specific code for the block library. + * For other OSes, this file should not be linked in and instead replaced + * with the analogous OS specific file. + * + * ---------------------------------------------------------------------------- + */ + + +#define CFLSH_BLK_FILENUM 0x0500 +#include "cflash_block_internal.h" +#include "cflash_block_inline.h" +#include "cflash_block_protos.h" +#ifdef _AIX +#include +#include +#include +#include "cflash_block_aix.h" + +typedef struct dk_capi_attach dk_capi_attach_t; +typedef struct dk_capi_detach dk_capi_detach_t; +typedef struct dk_capi_udirect dk_capi_udirect_t; +typedef struct dk_capi_uvirtual dk_capi_uvirtual_t; +typedef struct dk_capi_resize dk_capi_resize_t; +typedef struct dk_capi_release dk_capi_release_t; +typedef struct dk_capi_exceptions dk_capi_exceptions_t; +typedef struct dk_capi_log dk_capi_log_t; +typedef struct dk_capi_verify dk_capi_verify_t; +typedef struct dk_capi_recover_context dk_capi_recover_context_t; +#else +#include +#include + +#include +#ifndef DK_CAPI_ATTACH +/* + * Create common set of defines/types across + * OSes to improve code readibility. 
+ */ +#define DK_CAPI_ATTACH DK_CXLFLASH_ATTACH +#define DK_CAPI_DETACH DK_CXLFLASH_DETACH +#define DK_CAPI_USER_DIRECT DK_CXLFLASH_USER_DIRECT +#define DK_CAPI_USER_VIRTUAL DK_CXLFLASH_USER_VIRTUAL +#define DK_CAPI_VLUN_RESIZE DK_CXLFLASH_VLUN_RESIZE +#define DK_CAPI_VLUN_CLONE DK_CXLFLASH_VLUN_CLONE +#define DK_CAPI_RELEASE DK_CXLFLASH_RELEASE +#define DK_CAPI_RECOVER_CTX DK_CXLFLASH_RECOVER_AFU +#define DK_CAPI_VERIFY DK_CXLFLASH_VERIFY + +typedef struct dk_cxlflash_attach dk_capi_attach_t; +typedef struct dk_cxlflash_detach dk_capi_detach_t; +typedef struct dk_cxlflash_udirect dk_capi_udirect_t; +typedef struct dk_cxlflash_uvirtual dk_capi_uvirtual_t; +typedef struct dk_cxlflash_resize dk_capi_resize_t; +typedef struct dk_cxlflash_clone dk_capi_clone_t; +typedef struct dk_cxlflash_release dk_capi_release_t; +typedef struct dk_cxlflash_exceptions dk_capi_exceptions_t; +typedef struct dk_cxlflash_verify dk_capi_verify_t; +typedef struct dk_cxlflash_recover_afu dk_capi_recover_context_t; + +#define return_flags hdr.return_flags + +#define DK_RF_REATTACHED DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET + +#endif /* DK_CAPI_ATTACH */ + +#endif /* _AIX */ + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_init_mc_interface + * + * FUNCTION: Initialize master context (MC) interfaces for this process. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_init_mc_interface(void) +{ + char *lun = getenv("CFLSH_BLK_LUN_ID"); + + + + if (lun) { + cblk_lun_id = strtoul(lun,NULL,16); + } + + + + + return; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_cleanup_mc_interface + * + * FUNCTION: Initialize master context (MC) interfaces for this process. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_cleanup_mc_interface(void) +{ + + + + + return; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_get_os_chunk_type + * + * FUNCTION: Get OS specific chunk types + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +cflsh_block_chunk_type_t cblk_get_os_chunk_type(const char *path, int arch_type) +{ + cflsh_block_chunk_type_t chunk_type; + +#ifdef _AIX + if (arch_type) { + + /* + * If architecture type is set, then + * evaluate it. + */ + + if (arch_type != DK_ARCH_SISLITE) { + + return CFLASH_BLK_CHUNK_NONE; + } + + } +#endif + + /* + * For now we only support one chunk type: + * the SIS lite type. + */ + + chunk_type = CFLASH_BLK_CHUNK_SIS_LITE; + + return chunk_type; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_find_parent_dev + * + * FUNCTION: Find parent string of lun. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ + +#ifdef _AIX +char *cblk_find_parent_dev(char *device_name) +{ + + return NULL; +} +#else + +char *cblk_find_parent_dev(char *device_name) +{ + char *parent_name = NULL; + char *child_part = NULL; + const char *device_mode = NULL; + char *subsystem_name = NULL; + char *devname = NULL; + + + struct udev *udev_lib; + struct udev_device *device, *parent; + + + udev_lib = udev_new(); + + if (udev_lib == NULL) { + CBLK_TRACE_LOG_FILE(1,"udev_new failed with errno = %d",errno); + + + return parent_name; + + } + + + + /* + * Extract filename with absolute path removed + */ + + devname = rindex(device_name,'/'); + + if (devname == NULL) { + + devname = device_name; + } else { + + devname++; + + if (devname == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"invalid device name = %s",devname); + + + return parent_name; + } + + + } + + CBLK_TRACE_LOG_FILE(9,"device name = %s",devname); + + if (!(strncmp(devname,"sd",2))) { + + subsystem_name = "block"; + + } else if (!(strncmp(devname,"sg",2))) { + + subsystem_name = "scsi_generic"; + } else { + + CBLK_TRACE_LOG_FILE(1,"invalid device name = %s",devname); + + + return parent_name; + } + + + CBLK_TRACE_LOG_FILE(9,"subsystem name = %s",subsystem_name); + + device = udev_device_new_from_subsystem_sysname(udev_lib,subsystem_name,devname); + + if (device == NULL) { + + CBLK_TRACE_LOG_FILE(1,"udev_device_new_from_subsystem_sysname failed with errno = %d",errno); + + return parent_name; + + } + + device_mode = udev_device_get_sysattr_value(device,"device/mode"); + + + if (device_mode == NULL) { + + CBLK_TRACE_LOG_FILE(1,"no mode for this device "); + + + return NULL; + + } else { + + + if (strcmp(device_mode,"superpipe")) { + + CBLK_TRACE_LOG_FILE(1,"Device is not in superpipe mode = %s",device_mode); + + return NULL; + 
} + } + + parent = udev_device_get_parent(device); + + if (parent == NULL) { + + CBLK_TRACE_LOG_FILE(1,"udev_device_get_parent failed with errno = %d",errno); + + return parent_name; + + } + + parent_name = (char *)udev_device_get_devpath(parent); + + + CBLK_TRACE_LOG_FILE(9,"parent name = %s",parent_name); + + /* + * This parent name string will actually have sysfs directories + * associated with the connection information of the child. We + * need to get the base name of parent device that is not associated with + * child device. Find child portion of parent name string + * and insert null terminator there to remove it. + */ + + child_part = strstr(parent_name,"/host"); + + + if (child_part) { + + + child_part[0] = '\0'; + } + + return parent_name; +} + +#endif /* !AIX */ +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_attach_path + * + * FUNCTION: Attaches the current process to a chunk + * for a specific adapter path + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * 0 - Success + * nonzero - Failure + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_attach_path(cflsh_chunk_t *chunk, int path_index,int mode, + dev64_t adap_devno, + int *cleanup_depth, int assign_path, + cflsh_afu_in_use_t in_use) +{ + + int rc = 0; + dk_capi_attach_t disk_attach; +#ifdef _MASTER_CONTXT + uint32_t block_size = 0; + +#endif /* _MASTER_CONTXT */ + + + + bzero(&disk_attach,sizeof(disk_attach)); + +#ifndef BLOCK_FILEMODE_ENABLED + +#ifdef _AIX + + if ((assign_path) && (!in_use)) { + + disk_attach.flags |= DK_AF_ASSIGN_AFU; + + //TODO: ?? This can lead to muliple paths with the same adap_devno. Is that a problem? 
+ + } else { + disk_attach.devno = chunk->path[path_index]->afu->adap_devno; + } + disk_attach.num_exceptions = CFLASH_BLOCK_EXCEP_QDEPTH; + + + disk_attach.num_interrupts = CBLK_GET_NUM_INTERRUPTS(chunk,path_index); + + if (in_use) { + disk_attach.flags |= DK_AF_REUSE_CTX; + disk_attach.ctx_token = chunk->path[path_index]->afu->contxt_id; + CBLK_TRACE_LOG_FILE(6,"Reuse contxt_id = 0x%llx",chunk->path[path_index]->afu->contxt_id); + } + +#else + + // TODO:?? What do we do for Linux for two paths on same AFU? + + + disk_attach.num_interrupts = CBLK_GET_NUM_INTERRUPTS(chunk,path_index); + +#ifdef DK_CXLFLASH_ATTACH + disk_attach.hdr.flags = mode & O_ACCMODE; +#else + disk_attach.flags = mode & O_ACCMODE; +#endif + + if (in_use) { + disk_attach.context_id = chunk->path[path_index]->afu->contxt_id; +#ifdef DK_CXLFLASH_ATTACH + disk_attach.hdr.flags |= DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#else + disk_attach.flags |= DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + CBLK_TRACE_LOG_FILE(6,"Reuse contxt_id = 0x%x",chunk->path[path_index]->afu->contxt_id); + } + +#endif /* !_AIX */ + + + // TODO:?? Is this needed disk_attach.flags = CXL_START_WORK_NUM_IRQS; + rc = ioctl(chunk->fd,DK_CAPI_ATTACH,&disk_attach); + + if (rc) { + + + + CBLK_TRACE_LOG_FILE(1,"Unable to attach errno = %d, return_flags = 0x%llx", + errno,disk_attach.return_flags); + + + + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + cblk_release_path(chunk,(chunk->path[path_index])); + + chunk->path[path_index] = NULL; + return -1; + + } + + + +#ifdef _MASTER_CONTXT + + block_size = disk_attach.block_size; + + + CBLK_TRACE_LOG_FILE(5,"block_size = %d, flags = 0x%llx", + block_size, disk_attach.return_flags); + + + +#ifndef _AIX + + /* + * For AIX we determine block size from IOCINFO, + * Thus we should just verify here for all paths + * that the block size returned is consistent. 
+ */ + + if (path_index == 0) { + + + /* + * Only save block size on first path. + */ + + + if (block_size) { + chunk->blk_size_mult = CAPI_FLASH_BLOCK_SIZE/block_size; + } else { + chunk->blk_size_mult = 8; + } + + } else { +#endif + + /* + * Verify block size is the same for this path + */ + + + if (block_size) { + + if (chunk->blk_size_mult != CAPI_FLASH_BLOCK_SIZE/block_size) { + + return -1; + } + + } else { + + if (chunk->blk_size_mult != 8) { + + cblk_release_path(chunk,(chunk->path[path_index])); + + chunk->path[path_index] = NULL; + return -1; + } + } + +#ifndef _AIX + } +#endif + + + + +#endif /* _MASTER_CONTXT */ + + + if (in_use) { +#ifndef _AIX + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + chunk->stats.max_transfer_size = disk_attach.max_xfer; + } else { + + /* + * Virtual luns only support a transfer size of 1 sector + */ + + chunk->stats.max_transfer_size = 1; + } + + chunk->num_blocks_lun = disk_attach.last_lba + 1; + + CBLK_TRACE_LOG_FILE(5,"last_lba = 0x%llx",chunk->num_blocks_lun); +#endif + return rc; + } +#ifdef _AIX + chunk->path[path_index]->afu->adap_devno = disk_attach.devno; +#endif /* AIX */ + + chunk->path[path_index]->afu->poll_fd = disk_attach.adap_fd; + + CBLK_TRACE_LOG_FILE(5,"chunk->fd = %d,adapter fd = %d, poll_fd = %d", + chunk->fd,disk_attach.adap_fd, + chunk->path[path_index]->afu->poll_fd); + + +#else + + chunk->path[path_index]->afu->poll_fd = chunk->fd; + disk_attach.num_interrupts = 0; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + +#ifndef _MASTER_CONTXT + + + + chunk->path[path_index]->afu->mmap_size = CAPI_FLASH_REG_SIZE; + + +#endif /* !_MASTER_CONTXT */ + +#ifdef _AIX + chunk->path[path_index]->afu->contxt_id = disk_attach.ctx_token; + chunk->path[path_index]->afu->contxt_handle = 0xffffffff & disk_attach.ctx_token; +#else + chunk->path[path_index]->afu->contxt_id = disk_attach.context_id; + chunk->path[path_index]->afu->contxt_handle = 0xffffffff & disk_attach.context_id; +#endif /* !AIX */ + + + + + 
CBLK_TRACE_LOG_FILE(6,"contxt_id = 0x%llx",chunk->path[path_index]->afu->contxt_id); + + +#ifdef _AIX + + + chunk->path[path_index]->afu->mmio_mmap = disk_attach.mmio_start; + + chunk->path[path_index]->afu->mmio = chunk->path[chunk->cur_path]->afu->mmio_mmap; + + chunk->path[path_index]->afu->mmap_size = disk_attach.mmio_size; +#else + + + *cleanup_depth = 35; + + chunk->path[path_index]->afu->mmap_size = disk_attach.mmio_size; + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + chunk->stats.max_transfer_size = disk_attach.max_xfer; + } else { + /* + * Virtual luns only support a transfer size of 1 sector + */ + + chunk->stats.max_transfer_size = 1; + } + + chunk->num_blocks_lun = disk_attach.last_lba + 1; + + CBLK_TRACE_LOG_FILE(5,"last_lba = 0x%llx",chunk->num_blocks_lun); + + +#endif + + + return rc; +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_attach_process_map_path + * + * FUNCTION: Attaches the current process to a chunk and + * maps the MMIO space for a specific adapter path + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * 0 - Success + * nonzero - Failure + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_attach_process_map_path(cflsh_chunk_t *chunk, int path_index,int mode, + dev64_t adap_devno, int arch_type, + int *cleanup_depth, int assign_path) +{ + + int rc = 0; + cflsh_block_chunk_type_t chunk_type; + cflsh_path_t *path = NULL; + cflsh_afu_in_use_t in_use = CFLASH_AFU_NOT_INUSE; + int share = FALSE; + + + if (chunk == NULL) { + + return (-1); + } + + + /* + * TODO:?? Can we consolidate this code segment below + * with the routine cblk_chunk_reuse_attach_mpio. + * There seems to be considerable overlap and maybe + * this could lead to simplified code. 
+ */ + + if (chunk->path[path_index] == NULL) { + + /* + * For the case of forking a process, the + * child's path structures will already exist, + * and we need to preserve some of the data. + * So only get a path if one is not allocated. + */ + + chunk_type = cblk_get_chunk_type(chunk->dev_name,arch_type); + + + + if (chunk->flags & CFLSH_CHNK_SHARED) { + + share = TRUE; + } + + path = cblk_get_path(chunk,adap_devno,chunk_type,chunk->num_cmds,&in_use,share); + + + if (path == NULL) { + + return (-1); + } + + if (path->afu == NULL) { + + return (-1); + } + + chunk->path[path_index] = path; + + path->path_index = path_index; + + if (in_use == CFLASH_AFU_MPIO_INUSE) { + + /* + * If this AFU is already attached for this lun, + * via another path, then do not do another attach: + * Just reuse the AFU. + */ + + chunk->path[path_index]->flags |= CFLSH_PATH_ACT; + + } else if (in_use == CFLASH_AFU_SHARE_INUSE) { + + /* + * If this AFU is already attached for a different + * lun, but not this lun, then we need to do another + * attach using the REATTACH flag if possible. 
+ */ + + rc = cblk_chunk_attach_path(chunk,path_index,mode,adap_devno,cleanup_depth,assign_path,in_use); + + if (!rc) { + + chunk->path[path_index]->flags |= CFLSH_PATH_ACT; + } + + return rc; + } + + + } + + + rc = cblk_chunk_attach_path(chunk,path_index,mode,adap_devno,cleanup_depth,assign_path,in_use); + + if (rc) { + + return rc; + } + +#ifndef _AIX + + + chunk->path[path_index]->afu->mmio_mmap = mmap(NULL,chunk->path[path_index]->afu->mmap_size,PROT_READ|PROT_WRITE, MAP_SHARED, + chunk->path[path_index]->afu->poll_fd,0); + + if (chunk->path[path_index]->afu->mmio_mmap == MAP_FAILED) { + CBLK_TRACE_LOG_FILE(1,"mmap of mmio space failed errno = %d, mmio_size = 0x%llx", + errno,(uint64_t)chunk->path[path_index]->afu->mmap_size); + + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + cblk_release_path(chunk,(chunk->path[path_index])); + chunk->path[path_index] = NULL; + return -1; + } + + chunk->path[path_index]->afu->mmio = chunk->path[path_index]->afu->mmio_mmap; + + + if (fcntl(chunk->path[path_index]->afu->poll_fd,F_SETFL,O_NONBLOCK) == -1) { + + /* + * Ignore error for now + */ + + CBLK_TRACE_LOG_FILE(1,"fcntl failed with errno = %d",errno); + + } + +#endif + + *cleanup_depth = 40; + + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio_mmap); + + /* + * Set up response queue + */ + + if (CBLK_ADAP_SETUP(chunk,path_index)) { + + + cblk_release_path(chunk,(chunk->path[path_index])); + + chunk->path[path_index] = NULL; + return -1; + } + + if (!rc) { + + chunk->path[path_index]->flags |= CFLSH_PATH_ACT; + } + return rc; +} + +#ifdef _AIX +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_reuse_attach_mpio + * + * FUNCTION: Reuses an attach thru the same AFU + * for MPIO. We can not do a reatach + * for the same disk and process. 
If this is a different + * disk sharing the same AFU, then a reattach is needed. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_reuse_attach_mpio(cflsh_chunk_t *chunk,int mode, dev64_t adap_devno,int arch_type, + int matching_path_index,int path_index, int *cleanup_depth) +{ + int rc = 0; + cflsh_block_chunk_type_t chunk_type; + cflsh_afu_in_use_t in_use; + cflsh_path_t *path = NULL; + int share = FALSE; + + + + chunk_type = cblk_get_chunk_type(chunk->dev_name,arch_type); + + + if (chunk->flags & CFLSH_CHNK_SHARED) { + + share = TRUE; + } + + path = cblk_get_path(chunk,adap_devno,chunk_type,chunk->num_cmds,&in_use,share); + + if (path == NULL) { + + return (-1); + } + + if (path->afu == NULL) { + + return (-1); + } + + chunk->path[path_index] = path; + + path->path_index = path_index; + + if (in_use == CFLASH_AFU_MPIO_INUSE) { + + /* + * If this AFU is already attached for this lun, + * via another path, then do not do another attach: + * Just reuse the AFU. + */ + + chunk->path[path_index]->flags |= CFLSH_PATH_ACT; + + } else if (in_use == CFLASH_AFU_SHARE_INUSE) { + + + /* + * If this AFU is already attached for a different + * lun, but not this lun, then we need to do another + * attach using the REATTACH flag. 
+ */ + + + rc = cblk_chunk_attach_path(chunk,path_index,mode,adap_devno,cleanup_depth,FALSE,in_use); + + if (!rc) { + + chunk->path[path_index]->flags |= CFLSH_PATH_ACT; + } + + + } else { + + if (share) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid path_index Assigned path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + cblk_release_path(chunk,(chunk->path[path_index])); + chunk->path[path_index] = NULL; + + return (-1); + } + } + + return rc; +} + +#endif /* AIX */ + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_attach_process + * + * FUNCTION: Attaches the current process to a chunk and + * maps the MMIO space. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_attach_process_map (cflsh_chunk_t *chunk, int mode, int *cleanup_depth) +{ + int path_index = 0; + dev64_t adap_devno = 0; +#ifdef _AIX + struct devinfo iocinfo; + int i,j; /* general counter */ + int first_reserve_path = -1; + int first_good_path = -1; + int second_good_path = -1; + + struct cflash_paths { + struct dk_capi_paths path; + struct dk_capi_path_info paths[CFLSH_BLK_MAX_NUM_PATHS-1]; + } disk_paths; + + struct dk_capi_path_info *path_info = NULL; + uint32_t block_size = 0; + dev64_t prim_path_devno; + int prim_path_id = -1; + int previous_path_found; + cflsh_block_chunk_type_t chunk_type; + +#endif /* _AIX */ + + int rc = 0; + int assign_afu = FALSE; + + + if (chunk == NULL) { + + return (-1); + } + +#ifdef _AIX + bzero(&iocinfo,sizeof(iocinfo)); + + bzero(&disk_paths,sizeof(disk_paths)); + disk_paths.path.path_count = CFLSH_BLK_MAX_NUM_PATHS; + +#endif /* AIX */ + + + + // Set cur_path to path 0 in the chunk; + + chunk->cur_path = 0; + + +#ifndef BLOCK_FILEMODE_ENABLED + +#ifdef _AIX + + rc = 
ioctl(chunk->fd,IOCINFO,&iocinfo); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"Iocinfo failed errno = %d",errno); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + return -1; + + } + + if (iocinfo.devtype != DD_SCDISK) { + + CBLK_TRACE_LOG_FILE(1,"Invalid devtype = 0x%x",iocinfo.devtype); + + } + + + if (!((iocinfo.flags & DF_IVAL) && + (iocinfo.un.scdk64.flags & DF_CFLASH))) { + + CBLK_TRACE_LOG_FILE(1,"Not a CAPI flash disk"); + return -1; + + } + + block_size = iocinfo.un.scdk64.blksize; + + + CBLK_TRACE_LOG_FILE(5,"block_size = %d",block_size); + + if (block_size) { + chunk->blk_size_mult = CAPI_FLASH_BLOCK_SIZE/block_size; + } else { + chunk->blk_size_mult = 8; + block_size = 512; + } + + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + chunk->stats.max_transfer_size = iocinfo.un.scdk64.lo_max_request; + + if (iocinfo.flags & DF_LGDSK) { + + chunk->stats.max_transfer_size |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + + } + + /* + * Convert max transfer size from bytes to blocks (sectors) + */ + + chunk->stats.max_transfer_size /= block_size; + + } else { + /* + * Virtual luns only support a transfer size of 1 sector + */ + + chunk->stats.max_transfer_size = 1; + } + + + chunk->num_blocks_lun = iocinfo.un.scdk64.lo_numblks; + + + if (iocinfo.flags & DF_LGDSK) { + + chunk->num_blocks_lun |= (uint64_t)(iocinfo.un.scdk64.hi_numblks << 32); + + } + + // TODO: ?? 
Remove this, since we plan to only support 4K luns + + chunk->num_blocks_lun = chunk->num_blocks_lun/chunk->blk_size_mult; + + + CBLK_TRACE_LOG_FILE(5,"last_lba = 0x%llx",chunk->num_blocks_lun); + + + + + rc = ioctl(chunk->fd,DK_CAPI_QUERY_PATHS,&disk_paths); + + if (rc) { + + + CBLK_TRACE_LOG_FILE(1,"Query paths errno = %d, return_flags = 0x%llx", + errno,disk_paths.path.return_flags); + + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + return -1; + + } + + if (disk_paths.path.returned_path_count == 0) { + + CBLK_TRACE_LOG_FILE(1,"Unable to Query Path errno = %d",errno); + + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + return -1; + } + + + if (disk_paths.path.returned_path_count > CFLSH_BLK_MAX_NUM_PATHS) { + + + CBLK_TRACE_LOG_FILE(5,"Returned more paths that we provided issued = %d returned = %d", + CFLSH_BLK_MAX_NUM_PATHS,disk_paths.path.returned_path_count); + + + /* + * This call indicated they were more paths available then + * what we provided. Attempt to continue with the returned + * proper subset of paths. + */ + + disk_paths.path.returned_path_count = CFLSH_BLK_MAX_NUM_PATHS; + + } + + + CBLK_TRACE_LOG_FILE(5,"number of paths found = %d",disk_paths.path.returned_path_count); + + + /* + * find first reserved path + */ + + path_info = disk_paths.path.path_info; + + for (i = 0;i -1 ) { + + if (chunk->flags & CFLSH_CHNK_NO_RESRV) { + + /* + * We have detected a path with reservations, but + * we opened the disk with no reservations. This should + * not happen. So fail. 
+ */ + + CBLK_TRACE_LOG_FILE(1,"path_info %d has a reservation, but there should not be any reserves",i); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + + return -1; + + } + + + if (chunk->flags & CFLSH_CHNK_MPIO_FO) { + + /* + * We have detected a path with reservations, but + * we opened the disk with MPIO.So fail. + */ + + CBLK_TRACE_LOG_FILE(1,"path_info %d has a reservation, but using MPIO",i); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + + return -1; + + } + + + + /* + * Use the first path reservations + */ + adap_devno = path_info[first_reserve_path].devno; + + prim_path_id = path_info[first_reserve_path].path_id; + + CBLK_TRACE_LOG_FILE(6,"First reserve path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + + } else { + + + if ((chunk->flags & CFLSH_CHNK_MPIO_FO) && + !(chunk->flags & CFLSH_CHNK_VLUN)) { + + + /* + * If we are using MPIO for physical luns, then attach and map all + * paths. First let the disk driver assign the primary path (with + * the hope it will load balance these assignments) which will have + * path_index of 0. Then attach to the remaining paths. + */ + + if (cblk_chunk_attach_process_map_path(chunk,0,mode,0,0,cleanup_depth,TRUE)) { + + return -1; + } + + + + + prim_path_devno = chunk->path[path_index]->afu->adap_devno; + + + + /* + * Find primary AFU selected for us. It + * may be associated with multiple paths. + * Thus we need to find all of them for this AFU + */ + + for (i = 0;i 0) { + + /* + * If path_index is greater than 0, then we + * have already chosen the primary path for this AFU. + * However if this AFU has multiple ports (paths), then + * we would like to use those as the non-primary paths for + * MPIO. 
+ */ + + if (chunk->path[path_index]) { + + CBLK_TRACE_LOG_FILE(1,"Invalid path_index Assigned path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + continue; + } + + + if (cblk_chunk_reuse_attach_mpio(chunk,mode,path_info[i].devno,path_info[i].architecture, + 0,path_index,cleanup_depth)) { + + + continue; + } + + } else { + + /* + * Trace primary path_id for debug purposes + */ + + CBLK_TRACE_LOG_FILE(5,"Tentative Primary path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + } + + + /* + * Since we had the driver assign this path, we need to determine + * its chunk type. + */ + + chunk_type = cblk_get_chunk_type(chunk->dev_name,path_info[i].architecture); + + if (cblk_update_path_type(chunk,chunk->path[path_index],chunk_type)) { + + return -1; + } + + chunk->path[path_index]->path_id = path_info[i].path_id; + + chunk->path[path_index]->path_id_mask = 1 << path_info[i].path_id; + + + /* + * Only one port is associated with this path + */ + + chunk->path[path_index]->num_ports = 1; + + path_index++; + + + + CBLK_TRACE_LOG_FILE(9,"Assigned path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + + } + } + + if (path_index == 0) { + + CBLK_TRACE_LOG_FILE(1,"Could not find select path, num_paths = %d", + disk_paths.path.returned_path_count); + return -1; + } + + for (i = 0;ipath[j]->afu->adap_devno == path_info[i].devno) { + + + + if (cblk_chunk_reuse_attach_mpio(chunk,mode,path_info[i].devno,path_info[i].architecture, + j,path_index,cleanup_depth)) { + + + break; + } + + + + previous_path_found = TRUE; + + break; + } + } + + if (!previous_path_found) { + + + if (cblk_chunk_attach_process_map_path(chunk,path_index,mode,path_info[i].devno, + path_info[i].architecture, + cleanup_depth,FALSE)) { + + break; + } + + chunk->path[path_index]->afu->adap_devno = path_info[i].devno; + + } + + chunk->path[path_index]->path_id = path_info[i].path_id; + + 
chunk->path[path_index]->path_id_mask = 1 << path_info[i].path_id; + + + /* + * Only one port is associated with this path + */ + + chunk->path[path_index]->num_ports = 1; + + CBLK_TRACE_LOG_FILE(9,"Attached path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + path_index++; + + + } + } + + + CBLK_TRACE_LOG_FILE(5,"rc = %d num_paths = %d", + rc, chunk->num_paths); + return rc; + + } else { + + /* + * We are not doing MPIO so let driver choose the path. + */ + + assign_afu = TRUE; + + + if (chunk->flags & CFLSH_CHNK_SHARED) { + + /* + * If we are also sharing contexts, then pick the first non-failed + * path. The subsequent code will use this non-failed path as + * an opportunity to share the context. If sharing is possible the assign_afu + * option will be overrdden in favor fo sharing. Otherwise if no sharing is + * possible the disk driver will assign the path. + * + * There is no guarantee that this disk has its paths ordered the same + * as another disk on this AFU. Thus the opportunity to share a context + * will only occur if we happen to pick the same path. For now this + * seems acceptable. Perhaps in the future we may find reasons + * to increase this likelihood. + */ + + for (i = 0;i -1) { + adap_devno = path_info[first_good_path].devno; + } + + } + +#ifdef _REMOVE + if (first_good_path > -1) { + + chunk->path[path_index]->afu->adap_devno = path_info[first_good_path].devno; + + chunk->path[path_index]->path_id_mask = 1 << path_info[first_good_path].path_id; + + if (chunk->flags & CFLSH_CHNK_VLUN) { + /* + * If we are using virtual luns, then see if + * there is another path thru this same adapter/AFU + * for this device. If so we can enable the AFU + * to use both. 
+ */ + + for (i = 0;i path[path_index]->afu->adap_devno) && + (i != first_good_path)) { + + second_good_path = i; + break; + } + } + + if (second_good_path > -1) { + + /* + * If we found a second valid path on this same adapter, + * then add this path to the path id mask. + */ + + chunk->path[path_index]->path_id_mask |= 1 << path_info[second_good_path].path_id; + } + + } + + } else { + + + CBLK_TRACE_LOG_FILE(1,"No good paths returned"); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + + return -1; + + } + chunk->path[path_index]->path_id = path_info[first_good_path].path_id; + +#endif /* REMOVE */ + } + + } + + + + +#endif /* !_AIX */ + + + + + +#endif /* BLOCK_FILEMODE_ENABLED */ + + if (cblk_chunk_attach_process_map_path(chunk,path_index,mode,adap_devno,0,cleanup_depth,assign_afu)) { + + return -1; + } + +#ifdef _AIX + if (assign_afu) { + + + /* + * Find primary path selected for us. + */ + + prim_path_devno = chunk->path[path_index]->afu->adap_devno; + + if ((prim_path_id > -1) && + !(chunk->flags & CFLSH_CHNK_VLUN)) { + + /* + * We already know the path id for this + * path. Thus if this a physical lun, then + * save off the relevant information now. + */ + + chunk->path[path_index]->path_id = prim_path_id; + + chunk->path[path_index]->path_id_mask |= 1 << prim_path_id; + + chunk->path[path_index]->num_ports++; + + if (chunk->path[path_index]->num_ports == 0) { + + + chunk_type = cblk_get_chunk_type(chunk->dev_name,path_info[i].architecture); + + if (cblk_update_path_type(chunk,chunk->path[path_index],chunk_type)) { + + return -1; + } + } + + CBLK_TRACE_LOG_FILE(9,"Assigned path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + } else { + + /* + * Find the associated path_id that we were assigned. + * In actuality we can only narrow the search to the + * adapter/AFU. 
For multiple paths from the same adapter/AFU + * we have no scheme yet to determine which of the multiple path_ids + * thru that adapter/AFU is correct. Thus we will just pick one + * now. When we do the USER_DIRECT we will ask the disk driver + * to assign a path from this adapter/AFU and then we'll adjust the + * the path_ids at that time (in case the one we choose now is + * not the one assigned). + */ + for (i = 0;ipath[path_index]->path_id = path_info[i].path_id; + + chunk->path[path_index]->path_id_mask |= 1 << path_info[i].path_id; + + + if (chunk->path[path_index]->num_ports == 0) { + + + chunk_type = cblk_get_chunk_type(chunk->dev_name,path_info[i].architecture); + + if (cblk_update_path_type(chunk,chunk->path[path_index],chunk_type)) { + + return -1; + } + } + + /* + * For virtual luns determine how many ports + * on this AFU are associated with this path. + */ + chunk->path[path_index]->num_ports++; + + CBLK_TRACE_LOG_FILE(9,"Assigned path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + + if (chunk->flags & CFLSH_CHNK_VLUN) { + + /* + * Get all paths to get the full path_id mask set. + * The last path found will be the path_id set. 
+ */ + + continue; + } else { + + /* + * For physical mode only use the first found path_id + */ + + break; + } + + } + } + } + } else { + + + chunk->path[path_index]->path_id_mask = 1 << path_info[first_reserve_path].path_id; + + chunk->path[path_index]->path_id = path_info[first_reserve_path].path_id; + + } +#else + chunk->path[path_index]->num_ports++; + +#endif /* AIX */ + + + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_detach_path + * + * FUNCTION: Detaches the current process for the current adapter path + * + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_detach_path (cflsh_chunk_t *chunk, int path_index,int force) +{ + + + dk_capi_detach_t disk_detach; + int rc; + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_DETACH NULL path"); + return; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_DETACH NULL afu"); + return; + } + + + if (!(chunk->path[path_index]->flags & CFLSH_PATH_ACT)) { + + /* + * Path is not active. Just return. + */ + + return; + } + + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then fail + * from this routine + */ + + CBLK_TRACE_LOG_FILE(5,"afu halted, failing detach path afu->flags = 0x%x", + chunk->path[path_index]->afu->flags); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + return; + + + + } + + + /* + * Since we are reusing contexts, it is fine to detach + * even if this is not last path to uses this AFU. 
+ */ + + + bzero(&disk_detach,sizeof(disk_detach)); + +#ifdef _AIX + disk_detach.devno = chunk->path[path_index]->afu->adap_devno; +#endif /* AIX */ + +#ifdef _AIX + disk_detach.ctx_token = chunk->path[path_index]->afu->contxt_id; +#else + disk_detach.context_id = chunk->path[path_index]->afu->contxt_id; +#endif /* !AIX */ + + rc = ioctl(chunk->fd,DK_CAPI_DETACH,&disk_detach); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_DETACH e failed with rc = %d, errno = %d, return_flags = 0x%llx", + rc,errno,disk_detach.return_flags); + + } + + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + + chunk->path[path_index]->flags &= ~CFLSH_PATH_ACT; + return; + +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_detach + * + * FUNCTION: Detaches the current process. + * + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_detach (cflsh_chunk_t *chunk,int force) +{ + int i; + + for (i=0; i< chunk->num_paths;i++) { + + cblk_chunk_detach_path(chunk,i,force); + } + + return; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_umap_path + * + * FUNCTION: Unmaps the MMIO space for an adapter path + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_unmap_path (cflsh_chunk_t *chunk,int path_index,int force) +{ + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"NULL path"); + return; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"NULL afu"); + return; + } + + + /* + * TODO:?? 
Logic is needed to ensure this does not pull the + * rug out from other chunks using this AFU. + */ + + if (!(chunk->path[path_index]->flags & CFLSH_PATH_ACT)) { + + /* + * Path is not active. Just return. + */ + + return; + } + + + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if ((chunk->path[path_index]->afu->ref_count == 1) || + (force)) { + + + /* + * Only unmap on the last entity to use this afu unless + * force is set. + */ + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then fail + * from this routine + */ + + + CBLK_TRACE_LOG_FILE(5,"afu halted, failing unmap path afu->flags = 0x%x", + chunk->path[path_index]->afu->flags); + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + return; + + } + + +#ifndef _AIX + if (chunk->path[path_index]->afu->mmap_size == 0) { + + /* + * Nothing to unmap. + */ + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + return; + } + + + + if (munmap(chunk->path[path_index]->afu->mmio_mmap,chunk->path[path_index]->afu->mmap_size)) { + + + + /* + * Don't return here on error. Continue + * to close + */ + CBLK_TRACE_LOG_FILE(2,"munmap failed with errno = %d", + errno); + } + +#endif + + + chunk->path[path_index]->afu->mmio = 0; + chunk->path[path_index]->afu->mmio_mmap = 0; + chunk->path[path_index]->afu->mmap_size = 0; + + } + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_umap + * + * FUNCTION: Unmaps the MMIO space. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_unmap (cflsh_chunk_t *chunk, int force) +{ + + int i; + + for (i=0; i< chunk->num_paths;i++) { + + cblk_chunk_unmap_path(chunk,i, force); + } + +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_get_mc_phys_disk_resource_path + * + * FUNCTION: Get master context (MC) resources for phyiscal + * disk for a specific path, which + * include device information to allow + * the device to be accessed for read/writes. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_get_mc_phys_disk_resources_path(cflsh_chunk_t *chunk, + int path_index) +{ + int rc = 0; + dk_capi_udirect_t disk_physical; +#ifdef _AIX + int i; + uint32_t path_id_msk; +#endif + + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null path passed, path_index = %d",path_index); + return -1; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null afu path passed, path_index = %d",path_index); + return -1; + } + + bzero(&disk_physical,sizeof(disk_physical)); + +#ifdef _AIX + + /* + * For AIX physical need to do this ioctl per path + */ + + if ((path_index == 0) && + (!(chunk->flags & CFLSH_CHNK_NO_RESRV))){ + + + /* + * If this is the primary path and we are not + * using resevations (If we are using reservations, + * then we should have already chosen the path id + * for the primary path), then let + * the driver assign the optimum path. 
+ */ + + disk_physical.flags = DK_UDF_ASSIGN_PATH; + } else { + disk_physical.path_id_mask = chunk->path[path_index]->path_id_mask; + } + + + disk_physical.devno = chunk->path[path_index]->afu->adap_devno; + + disk_physical.ctx_token = chunk->path[path_index]->afu->contxt_id; +#else + disk_physical.context_id = chunk->path[path_index]->afu->contxt_id; +#endif /* !AIX */ + + + + + + rc = ioctl(chunk->fd,DK_CAPI_USER_DIRECT,&disk_physical); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_USER_DIRECT failed with errno = %d, return_flags = 0x%llx", + errno,disk_physical.return_flags); + + return -1; + } + + chunk->path[path_index]->sisl.resrc_handle = 0xffffffff & disk_physical.rsrc_handle; + +#ifdef _AIX + if ((disk_physical.flags & DK_UDF_ASSIGN_PATH) && + (path_index == 0)) { + + /* + * If we asked the driver to assign the path, + * then save it off now. This should only be done + * for path_index of 0 (primary path). + */ + /* + * See if this path_id mask is being used for a different path + * to this same AFU. If so then swap them now. + * Thus the primary path will have this + * path_id_mask only. 
+ */ + + for (i = 1; i < chunk->num_paths; i++) { + + if ((chunk->path[i]->afu->adap_devno == chunk->path[path_index]->afu->adap_devno) && + (disk_physical.path_id_mask == chunk->path[i]->path_id_mask)) { + + /* + * Exchange path_id_mask and path_id + */ + + chunk->path[i]->path_id_mask = chunk->path[path_index]->path_id_mask; + + chunk->path[i]->path_id = chunk->path[path_index]->path_id; + + break; + } + } + + chunk->path[path_index]->path_id_mask = disk_physical.path_id_mask; + + /* + * Get corresponding path_id from this + * path_id_mask + */ + + i = 0; + path_id_msk = chunk->path[path_index]->path_id_mask; + while (path_id_msk > 1) { + + path_id_msk = path_id_msk >> 1; + i++; + } + + chunk->path[path_index]->path_id = i; + + CBLK_TRACE_LOG_FILE(5,"Primary path_id = %d and path_index = %d", + chunk->path[path_index]->path_id, path_index); + + chunk->stats.primary_path_id = chunk->path[path_index]->path_id; + + } +#endif /* !AIX */ + + + + CBLK_TRACE_LOG_FILE(6,"USER_DIRECT ioctl success rsrc handle = 0x%x for path_index = %d", + chunk->path[path_index]->sisl.resrc_handle,path_index); + + return rc; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_get_mc_device_resources + * + * FUNCTION: Get master context (MC) resources, which + * include device information to allow + * the device to be accessed for read/writes. + * + * + * NOTES: This routine assumes the caller has the chunk lock. 
+ * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_get_mc_device_resources(cflsh_chunk_t *chunk, + int *cleanup_depth) +{ + int rc = 0; + dk_capi_uvirtual_t disk_virtual; + int i; + + + if (chunk == NULL) { + + return (-1); + } + + if (chunk->path[chunk->cur_path] == NULL) { + + + return -1; + } + + if (chunk->path[chunk->cur_path]->afu == NULL) { + + + return -1; + } + + + + +#ifndef _MASTER_CONTXT + + /* + * We can not be locked when we issue + * commands, since they will do a lock. + * Thus we would deadlock here. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + if (cblk_get_lun_id(chunk)) { + + CFLASH_BLOCK_LOCK(chunk->lock); + CBLK_TRACE_LOG_FILE(5,"cblk_get_lun_id failed errno = %d", + errno); + + return -1; + } + + if (cblk_get_lun_capacity(chunk)) { + + CFLASH_BLOCK_LOCK(chunk->lock); + CBLK_TRACE_LOG_FILE(5,"cblk_get_lun_capacity failed errno = %d", + errno); + + return -1; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + +#else + + + if (chunk->flags & CFLSH_CHNK_VLUN) { + + /* + * Get a virtual lun of size 0 for the specified AFU and context. + */ + + bzero(&disk_virtual,sizeof(disk_virtual)); + +#ifdef _AIX + disk_virtual.devno = chunk->path[chunk->cur_path]->afu->adap_devno; + disk_virtual.path_id_mask = chunk->path[chunk->cur_path]->path_id_mask; + disk_virtual.ctx_token = chunk->path[chunk->cur_path]->afu->contxt_id; + + // TODO:? Need to set scrub on virtual lun, when ioctl defines it. 
+#else + disk_virtual.context_id = chunk->path[chunk->cur_path]->afu->contxt_id; + + if (chunk->flags & CFLSH_CHNK_VLUN_SCRUB) { + + disk_virtual.hdr.flags |= DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME; + } +#endif /* !AIX */ + +#ifdef _AIX + disk_virtual.vlun_size = 0; +#else + disk_virtual.lun_size = 0; +#endif /* !_AIX */ + + rc = ioctl(chunk->fd,DK_CAPI_USER_VIRTUAL,&disk_virtual); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_USER_VIRTUAL failed with errno = %d, return_flags = 0x%llx", + errno,disk_virtual.return_flags); + + cblk_chunk_free_mc_device_resources(chunk); + + return -1; + } + + chunk->path[chunk->cur_path]->sisl.resrc_handle = 0xffffffff & disk_virtual.rsrc_handle; + +#ifndef _AIX +#ifdef DK_CXLFLASH_ALL_PORTS_ACTIVE + + if (disk_virtual.return_flags & DK_CXLFLASH_ALL_PORTS_ACTIVE) { + + /* + * Both ports on this AFU are in use + */ + + chunk->path[chunk->cur_path]->num_ports++; + + } + CBLK_TRACE_LOG_FILE(6,"USER_VIRTUAL num_ports = 0x%x",chunk->path[chunk->cur_path]->num_ports); +#endif + +#endif + + CBLK_TRACE_LOG_FILE(6,"USER_VIRTUAL ioctl success rsrc handle = 0x%x", + chunk->path[chunk->cur_path]->sisl.resrc_handle); + + } else { + + /* + * Get a physical lun for all requested specified AFU and context. + * + * It should be noted that at attach time, we determined the maximum + * number of paths allowed for this. So now we just ensure that each + * has a valid attachment. + */ + + for (i=0;i < chunk->num_paths; i++) { + + if (cblk_chunk_get_mc_phys_disk_resources_path(chunk,i)) { + + + cblk_chunk_free_mc_device_resources(chunk); + + return -1; + } + } + + chunk->num_blocks = chunk->num_blocks_lun; + CBLK_TRACE_LOG_FILE(5,"last_lba = 0x%llx",chunk->num_blocks_lun); + } + + + +#endif /* Master context */ + + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_set_mc_size + * + * FUNCTION: Request master context to provide the + * the specified storage for this chunk. 
+ * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * This code assumes if the caller passes -1 for the + * master context case, then it will return whatever + * space is available. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 0: Good completion + 0 non-zero: Error + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_set_mc_size(cflsh_chunk_t *chunk, size_t nblocks) +{ + int rc = 0; + dk_capi_resize_t disk_resize; +#ifdef _MASTER_CONTXT + + + + bzero(&disk_resize,sizeof(disk_resize)); + + if (nblocks != -1) { + + /* + * Caller is requesting a specific amount of space + */ + + if (nblocks < chunk->vlun_max_num_blocks) { + + /* + * If the amount of space requested is is still within the current + * space allocated by the MC from the last size request, then just + * return the size requested by the caller. + */ + + CBLK_TRACE_LOG_FILE(5,"blocks already exist so just use them"); + chunk->num_blocks = nblocks; + return 0; + } + + + + } + + + if (chunk->path[chunk->cur_path] == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"NULL path"); + return -1; + } + + +#ifdef _AIX + disk_resize.devno = chunk->path[chunk->cur_path]->afu->adap_devno; + disk_resize.ctx_token = chunk->path[chunk->cur_path]->afu->contxt_id; + disk_resize.vlun_size = nblocks; +#else + disk_resize.context_id = chunk->path[chunk->cur_path]->afu->contxt_id; + disk_resize.req_size = nblocks; +#endif /* !AIX */ + + + disk_resize.rsrc_handle = chunk->path[chunk->cur_path]->sisl.resrc_handle; + + + + + rc = ioctl(chunk->fd,DK_CAPI_VLUN_RESIZE,&disk_resize); + + + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_VLUN_RESIZE failed with rc = %d, errno = %d, size = 0x%llx, return_flags = 0x%llx", + rc, errno,(uint64_t)nblocks,disk_resize.return_flags); + + if (errno == 0) { + + errno = ENOMEM; + } + return -1; + } + + + + 
CBLK_TRACE_LOG_FILE(5,"DK_CAPI_VLUN_RESIZE succeed with size = 0x%llx and actual_size = 0x%llx", + (uint64_t)nblocks, disk_resize.last_lba); + + if ((nblocks != -1) && + ((disk_resize.last_lba + 1) < nblocks)) { + + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_VLUN_RESIZE returned smaller actual size = 0x%llx then requested = 0x%llx", + disk_resize.last_lba,(uint64_t)nblocks); + + errno = ENOMEM; + + return -1; + } + + + /* + * Save off the actual amount of space the MC allocated, which may be more than + * what the user requested. + */ + + chunk->vlun_max_num_blocks = disk_resize.last_lba + 1; + + if (nblocks == -1) { + + nblocks = chunk->vlun_max_num_blocks; + } +#else + + + /* + * This is temporary code for + * early development to allow virtual + * luns. Eventually the MC will provision + * this. For now the block layer will use + * a very simplistic and flawed approach + * that leads to inefficient memory usage + * and fragmentation. However it is hoped + * this flawed approach is sufficient until + * the MC can provide the real functionality. + * When the MC does add this functionality, + * this code can be removed if needed. + */ + + + if ((nblocks + cflsh_blk.next_chunk_starting_lba) > chunk->num_blocks_lun) { + + + CBLK_TRACE_LOG_FILE(1,"set_size failed with EINVAL, nblocks = 0x%llx, next_lba = 0x%llx num_blocks_lun = 0x%llx", + (uint64_t)nblocks,(uint64_t)cflsh_blk.next_chunk_starting_lba,(uint64_t)chunk->num_blocks_lun); + errno = EINVAL; + return -1; + } + + + if (chunk->num_blocks) { + + + /* + * If chunk->num_blocks is non-zero then this + * is a resize. + */ + + if (cflsh_blk.next_chunk_starting_lba == + (chunk->start_lba + chunk->num_blocks)) { + + + /* + * If chunk->num_blocks is non-zero then this + * is a resize. If this is the last chunk on this physical disk, + * then set the next_chunk_start_lba to our chunk's + * starting LBA. For this case we do not need + * to update our start_lba since it is correct. 
+ */ + cflsh_blk.next_chunk_starting_lba = chunk->start_lba; + + } else { + + /* + * The current implementation is very inefficient + * and has fragmentation issues. In this case + * it will move the chunk past the other chunks + * on this physical lun. All previous data will be + * lossed + */ + chunk->start_lba = cflsh_blk.next_chunk_starting_lba; + } + } else { + + /* + * This is the first allocation of blocks + * for this chunk. + */ + + chunk->start_lba = cflsh_blk.next_chunk_starting_lba; + } + + + cflsh_blk.next_chunk_starting_lba += nblocks; + + /* + * TODO: End of virtual lun hack + */ + +#endif /* Master context */ + + + chunk->num_blocks = nblocks; + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_mc_clone + * + * FUNCTION: Requests master context to clone + * an existing AFU + context to this context + * on the same AFU. This is needed whenever + * a process has forked to reenable access + * to the chunks from the parent process in the child + * process. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * This routine is not valid for MPIO physical luns. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 0: Good completion + 0 non-zero: Error + * + * ---------------------------------------------------------------------------- + */ +int cblk_mc_clone(cflsh_chunk_t *chunk,int mode, int flags) +{ +#ifdef _AIX +#define DK_CAPI_VLUN_CLONE 0x8B + errno = EINVAL; + return -1; +#else + int rc = 0; +#ifdef _MASTER_CONTXT +#ifdef _NOT_YET + uint64_t chunk_flags; +#endif + int cleanup_depth; + dk_capi_clone_t disk_clone; + dk_capi_detach_t disk_detach; + res_hndl_t old_resrc_handle; + void *old_mmio_mmap; + size_t old_mmap_size; + int old_adap_fd; + uint64_t old_contxt_id; + + + /* + * Cloning only works for virtual luns. In addition, + * virtual luns can not use shared contexts. 
+ */ + + if (!(chunk->flags & CFLSH_CHNK_VLUN)) { + + /* + * Clone is only supported on virtual luns + */ + + + CBLK_TRACE_LOG_FILE(1,"physical lun attempted to clone flags = 0x%x",chunk->flags); + return -1; + + } + + if (chunk->path[chunk->cur_path] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null path passed, cur_path = %d",chunk->cur_path); + return -1; + } + + if (chunk->path[chunk->cur_path]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Null afu passed, cur_path = %d",chunk->cur_path); + return -1; + } + + + bzero(&disk_clone,sizeof(disk_clone)); + bzero(&disk_detach,sizeof(disk_detach)); + + + + /* + * It should be noted the chunk is not a fully functional chunk + * from this process' perspective after a fork. It has enough information that should allow + * us to clone it into a new chunk using the same chunk id and chunk structure. + * So first save off relevant information about the old chunk before unregistering + * it. + */ + + + + old_resrc_handle = chunk->path[chunk->cur_path]->sisl.resrc_handle; + old_mmio_mmap = chunk->path[chunk->cur_path]->afu->mmio_mmap; + old_mmap_size = chunk->path[chunk->cur_path]->afu->mmap_size; + old_contxt_id = chunk->path[chunk->cur_path]->afu->contxt_id; + old_adap_fd = chunk->path[chunk->cur_path]->afu->poll_fd; + + /* + * If we have a dedicated thread per chunk + * for interrupts, then stop it now. + */ + + cblk_open_cleanup_wait_thread(chunk); + + cleanup_depth = 30; + + + if (cblk_chunk_attach_process_map(chunk,mode,&cleanup_depth)) { + + CBLK_TRACE_LOG_FILE(1,"Unable to attach, errno = %d",errno); + + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + free(chunk); + + return -1; + + } + + + rc = munmap(old_mmio_mmap,old_mmap_size); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"munmap failed with rc = %d errno = %d", + rc,errno); + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + + return -1; + } + + +#ifdef _COMMON_INTRPT_THREAD + + /* + * If we are using a common interrupt thread per chunk, + * then restart it now. 
+ */ + + if (cblk_start_common_intrpt_thread(chunk)) { + + + CBLK_TRACE_LOG_FILE(1,"cblk_start_common_intrpt thread failed with errno= %d", + errno); + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + + return -1; + } + + cleanup_depth = 45; + +#endif /* _COMMON_INTRPT_THREAD */ + + cleanup_depth = 50; + +#ifdef _NOT_YET + switch (mode & O_ACCMODE) { + + case O_RDONLY: + chunk_flags = MC_RDONLY; + break; + case O_WRONLY: + chunk_flags = MC_WRONLY; + break; + case O_RDWR: + chunk_flags = MC_RDWR; + break; + default: + chunk_flags = MC_RDONLY; + } + + + CBLK_TRACE_LOG_FILE(5,"mc_clone chunk_flags 0x%x", + chunk_flags); +#endif /* _NOT_YET */ + + +#ifndef _AIX +#ifdef DK_CXLFLASH_CLONE + disk_clone.hdr.flags = mode & O_ACCMODE; +#else + disk_clone.flags = mode & O_ACCMODE; +#endif + disk_clone.context_id_src = old_contxt_id; + disk_clone.context_id_dst = chunk->path[chunk->cur_path]->afu->contxt_id; + disk_clone.adap_fd_src = old_adap_fd; + + +#endif + + rc = ioctl(chunk->fd,DK_CAPI_VLUN_CLONE,&disk_clone); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_CLONE ioctl failed with rc = %d, errno = %d", + rc, errno); + + if (errno == 0) { + + errno = EINVAL; + } + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + return -1; + + } + + /* + * We reuse the original resource handle after an mc_clone + */ + + chunk->path[chunk->cur_path]->sisl.resrc_handle = old_resrc_handle; + + + if (rc) { + + /* + * If any of the above operations fail then + * exit out this code. + */ + + + CBLK_TRACE_LOG_FILE(1,"close failed with rc = %d errno = %d", + rc,errno); + + } + + + +#else + + /* + * This is the case when there is no Master Context + */ + +#ifdef _COMMON_INTRPT_THREAD + + /* + * If we are using a common interrupt thread per chunk, + * and we are not using master context, then the fork will not + * forked our interrupt thread. So we need to start it now. 
+ */ + + if (cblk_start_common_intrpt_thread(chunk)) { + + + CBLK_TRACE_LOG_FILE(1,"cblk_start_common_intrpt thread failed with errno= %d", + errno); + + + return -1; + } + +#else + + rc = EINVAL; +#endif /* _COMMON_INTRPT_THREAD */ + +#endif /* _MASTER_CONTXT */ + return rc; + +#endif /* ! AIX */ +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_free_mc_device_resources_path + * + * FUNCTION: Free master context (MC) resources. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_free_mc_device_resources_path(cflsh_chunk_t *chunk, int path_index) +{ + dk_capi_release_t disk_release; +#ifdef _MASTER_CONTXT + int rc = 0; +#endif /* _MASTER_CONTXT */ + + + + if (chunk == NULL) { + + return; + } + + + if (chunk->path[path_index] == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"NULL path"); + return; + } + + if (!(chunk->path[path_index]->flags & CFLSH_PATH_ACT)) { + + /* + * Path is not active. Just return. + */ + + return; + } + + bzero(&disk_release,sizeof(disk_release)); + + +#ifdef _MASTER_CONTXT + + /* + * Free resources for this lun. 
+ */ + + if (chunk->path[path_index]->afu->contxt_id == 0) { + /* + * There is nothing to do here, exit + */ + + return; + + } + + + + +#ifdef _AIX + disk_release.devno = chunk->path[path_index]->afu->adap_devno; + disk_release.ctx_token = chunk->path[path_index]->afu->contxt_id; +#else + disk_release.context_id = chunk->path[path_index]->afu->contxt_id; +#endif /* !AIX */ + + + disk_release.rsrc_handle = chunk->path[path_index]->sisl.resrc_handle; + + + + rc = ioctl(chunk->fd,DK_CAPI_RELEASE,&disk_release); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_RELEASE e failed with rc = %d, errno = %d, return_flags = 0x%llx", + rc, errno,disk_release.return_flags); + return; + } + + + chunk->path[path_index]->afu->master.mc_handle = 0; + +#endif /* _MASTER_CONTXT */ + + return; +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_free_mc_device_resources + * + * FUNCTION: Free master context (MC) resources. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_free_mc_device_resources(cflsh_chunk_t *chunk) +{ + int i; + + + for (i = 0; i < chunk->num_paths; i++) { + + cblk_chunk_free_mc_device_resources_path(chunk,i); + } + + + return; +} + +#ifndef _AIX +/* + * NAME: cblk_process_nonafu_intrpt_cxl_events + * + * FUNCTION: This routine process non-AFU interrupt CAPI + * events. 
+ * + * INPUTS: + * chunk - Chunk associated with this error + * ioasa - I/O Adapter status response + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * + */ +cflash_cmd_err_t cblk_process_nonafu_intrpt_cxl_events(cflsh_chunk_t *chunk,int path_index, + struct cxl_event *cxl_event) +{ + int rc = CFLASH_CMD_FATAL_ERR; + uint64_t intrpt_status; + + + errno = EIO; + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path == NULL"); + return -1; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu == NULL"); + return -1; + } + + switch (cxl_event->header.type) { + case CXL_EVENT_RESERVED: + chunk->stats.num_capi_reserved_errs++; + CBLK_TRACE_LOG_FILE(1,"CXL_EVENT_RESERVED = size = 0x%x", + cxl_event->header.size); + break; + case CXL_EVENT_DATA_STORAGE: + chunk->stats.num_capi_data_st_errs++; + CBLK_TRACE_LOG_FILE(1,"CAPI_EVENT_DATA_STOARAGE addr = 0x%llx, dsisr = 0x%llx", + cxl_event->fault.addr,cxl_event->fault.dsisr); + CBLK_TRACE_LOG_FILE(6,"contxt_id = 0x%llx",chunk->path[path_index]->afu->contxt_id); + CBLK_TRACE_LOG_FILE(6,"mmio_map = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio_mmap); + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio); + CBLK_TRACE_LOG_FILE(6,"mmap_size = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmap_size); + CBLK_TRACE_LOG_FILE(6,"hrrq_start = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_start); + CBLK_TRACE_LOG_FILE(6,"hrrq_end = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_end); + CBLK_TRACE_LOG_FILE(6,"cmd_start = 0x%llx",(uint64_t)chunk->cmd_start); + CBLK_TRACE_LOG_FILE(6,"cmd_end = 0x%llx",(uint64_t)chunk->cmd_end); + + intrpt_status = CBLK_GET_INTRPT_STATUS(chunk,path_index); + CBLK_TRACE_LOG_FILE(6,"intrpt_status = 0x%llx",intrpt_status); + + CBLK_TRACE_LOG_FILE(6,"num_active_cmds = 0x%x\n",chunk->num_active_cmds); + + + + if 
(!(cblk_chk_cmd_bad_page(chunk,cxl_event->fault.addr))) { + + /* + * If a command matching this bad address is not found, then log a general + * error here. + */ + cblk_notify_mc_err(chunk,path_index,0x508,cxl_event->fault.addr, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + + CBLK_TRACE_LOG_FILE(1,"Bad page addr = 0x%llx not found",cxl_event->fault.addr); + } + + break; + case CXL_EVENT_AFU_ERROR: + chunk->stats.num_capi_afu_errors++; + CBLK_TRACE_LOG_FILE(1,"CXL_EVENT_AFU_ERROR error = 0x%llx, flags = 0x%x", + cxl_event->afu_error.error,cxl_event->afu_error.flags); + + cblk_notify_mc_err(chunk,path_index,0x500,cxl_event->afu_error.error,CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + break; + + + case CXL_EVENT_AFU_INTERRUPT: + /* + * We should not see this, since the caller + * should have parsed these out. + */ + + /* Fall thru */ + default: + CBLK_TRACE_LOG_FILE(1,"Unknown CAPI EVENT type = %d, process_element = 0x%x", + cxl_event->header.type, cxl_event->header.process_element); + + + cblk_notify_mc_err(chunk,path_index,0x501,cxl_event->header.type, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + } /* switch */ + + return rc; +} + +#endif /* !_AIX */ + +#ifdef _AIX +/* + * NAME: cblk_read_os_specific_intrpt_event + * + * FUNCTION: Reads an OS specific event for this interrupt + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ +int cblk_read_os_specific_intrpt_event(cflsh_chunk_t *chunk, int path_index,cflsh_cmd_mgm_t **cmd,int *cmd_complete, + size_t *transfer_size, struct pollfd poll_list[]) +{ + int rc = 0; + int eeh_event = 0; +#ifndef BLOCK_FILEMODE_ENABLED + dk_capi_exceptions_t dk_exceptions; + int i; + int original_primary_path; + int found_new_path = FALSE; +#endif + + +#ifdef BLOCK_FILEMODE_ENABLED + + chunk->stats.num_capi_afu_intrpts++; + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd,(int)CFLSH_BLK_INTRPT_STATUS,cmd_complete,transfer_size); + +#else + + + if (cmd == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd is null"); + + + return -1; + } + + if (cmd_complete == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd_complete is null"); + + + return -1; + } + + if (transfer_size == NULL) { + + CBLK_TRACE_LOG_FILE(1,"transfer_size is null"); + + + return -1; + } + + CBLK_TRACE_LOG_FILE(7,"poll_list[CFLASH_ADAP_POLL_INDX].revents = 0x%x, poll_list[CFLASH_DISK_POLL_INDX].revents = 0x%x", + poll_list[CFLASH_ADAP_POLL_INDX].revents,poll_list[CFLASH_DISK_POLL_INDX].revents); + + if (poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLPRI) { + + /* + * Adapter exception has occurred. 
+ */ + + if ((*cmd) && ((*cmd)->cmdi)) { + + CBLK_TRACE_LOG_FILE(6," cmd = 0x%llx lba = 0x%llx flags = 0x%x, cmd->cmdi->buf = 0x%llx", + *cmd,(*cmd)->cmdi->lba,(*cmd)->cmdi->flags,(*cmd)->cmdi->buf); + + } + + do { + + + + chunk->stats.num_capi_data_st_errs++; + + bzero(&dk_exceptions, sizeof dk_exceptions); + dk_exceptions.devno = chunk->path[path_index]->afu->adap_devno; + dk_exceptions.ctx_token = chunk->path[path_index]->afu->contxt_id; + dk_exceptions.rsrc_handle = chunk->path[path_index]->sisl.resrc_handle; + dk_exceptions.flags = DK_QEF_ADAPTER; + + rc = ioctl(chunk->fd,DK_CAPI_QUERY_EXCEPTIONS,&dk_exceptions); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_QUERY_EXCEPTIONS failed with rc = %d, errno = %d, return_flags = 0x%llx\n", + rc,errno,dk_exceptions.return_flags); + + CBLK_NOTIFY_LOG_THRESHOLD(9,chunk,path_index,0x50a,errno,CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + + break; + } + + + CBLK_TRACE_LOG_FILE(5,"Adapter exceptions = 0x%llx adap_except_type = 0x%llx adap_except_time = 0x%llx", + dk_exceptions.exceptions,dk_exceptions.adap_except_type,dk_exceptions.adap_except_time); + + CBLK_TRACE_LOG_FILE(5,"Adapter adap_except_data = 0x%llx adap_except_count = 0x%llx", + dk_exceptions.adap_except_data,dk_exceptions.adap_except_count); + + switch (dk_exceptions.adap_except_type) { + case DK_AET_BAD_PF: + + /* + * Bad page fault + */ + + CBLK_TRACE_LOG_FILE(6,"contxt_id = 0x%llx",chunk->path[path_index]->afu->contxt_id); + CBLK_TRACE_LOG_FILE(6,"mmio_map = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio_mmap); + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio); + CBLK_TRACE_LOG_FILE(6,"mmap_size = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmap_size); + CBLK_TRACE_LOG_FILE(6,"hrrq_start = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_start); + CBLK_TRACE_LOG_FILE(6,"hrrq_end = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_end); + CBLK_TRACE_LOG_FILE(6,"cmd_start = 
0x%llx",(uint64_t)chunk->cmd_start); + CBLK_TRACE_LOG_FILE(6,"cmd_end = 0x%llx",(uint64_t)chunk->cmd_end); + + if (!(cblk_chk_cmd_bad_page(chunk,dk_exceptions.adap_except_data))) { + + /* + * If a command matching this bad aaddress is not found, then log a general + * error here. + */ + cblk_notify_mc_err(chunk,path_index,0x505,dk_exceptions.adap_except_data, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + } + break; + + case DK_AET_EEH_EVENT: + /* + * EEH has occurred. process EEH after all exceptions have been queride + */ + + CBLK_TRACE_LOG_FILE(6,"EEH exception reported, contxt_id = 0x%llx",chunk->path[path_index]->afu->contxt_id); + eeh_event++; + break; + + case DK_AET_AFU_ERROR: + + /* + * AFU erro detected, ignore return code + */ + + CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,NULL,(int)CFLSH_BLK_INTRPT_STATUS,NULL,NULL); + + break; + + default: + CBLK_TRACE_LOG_FILE(1,"Unknown Adapter exceptions = 0x%llx adap_except_type = 0x%llx adap_except_time = 0x%llx", + dk_exceptions.exceptions,dk_exceptions.adap_except_type,dk_exceptions.adap_except_time); + + cblk_notify_mc_err(chunk,path_index,0x506,dk_exceptions.adap_except_type, CFLSH_BLK_NOTIFY_ADAP_ERR,NULL); + + } + } while (dk_exceptions.adap_except_count); + + + if (eeh_event) { + + cblk_check_os_adap_err(chunk,path_index); + + } + } + + if (poll_list[CFLASH_DISK_POLL_INDX].revents & POLLPRI) { + + /* + * Disk exception has occurred + */ + + + bzero(&dk_exceptions, sizeof dk_exceptions); + dk_exceptions.devno = chunk->path[path_index]->afu->adap_devno; + dk_exceptions.ctx_token = chunk->path[path_index]->afu->contxt_id; + dk_exceptions.rsrc_handle = chunk->path[path_index]->sisl.resrc_handle; + + + rc = ioctl(chunk->fd,DK_CAPI_QUERY_EXCEPTIONS,&dk_exceptions); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_QUERY_EXCEPTIONS failed with rc = %d, errno = %d, return_flags = 0x%llx\n", + rc,errno,dk_exceptions.return_flags); + + + cblk_notify_mc_err(chunk,path_index,0x502,errno, CFLSH_BLK_NOTIFY_DISK_ERR,NULL); + + + } + + 
CBLK_TRACE_LOG_FILE(5,"Disk exceptions = 0x%llx,adap_except_type = 0x%llx,adap_except_time = 0x%llx", + dk_exceptions.exceptions,dk_exceptions.adap_except_type,dk_exceptions.adap_except_time); + + CBLK_TRACE_LOG_FILE(5,"Disk adap_except_data = 0x%llx adap_except_count = 0x%llx", + dk_exceptions.adap_except_data,dk_exceptions.adap_except_count); + + if (dk_exceptions.exceptions & DK_CE_VLUN_TRUNCATED) { + /* + * This Virtual LUN is now smaller. + * Save off the new size. + */ + + CBLK_TRACE_LOG_FILE(1,"Lun has shrunk: return_flags = 0x%llx, new size = 0x%llx\n", + dk_exceptions.return_flags,dk_exceptions.last_lba); + + + chunk->vlun_max_num_blocks = dk_exceptions.last_lba; + + chunk->num_blocks = dk_exceptions.last_lba; + + + cblk_notify_mc_err(chunk,path_index,0x507,dk_exceptions.last_lba,CFLSH_BLK_NOTIFY_DISK_ERR,NULL); + + + + } else if (dk_exceptions.exceptions & DK_CE_PATH_LOST) { + + /* + * Someone entity has done a verify on this disk and that + * operation detected a path was lost. This exception only + * tells us a path was lost, but not which path it was. Even + * if we are not running MPIO, it is possible this notification + * is a path used by another process that was lost. So we need + * verify we have have a valid path. + */ + + if (cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_DISK_ERR,NULL,NULL)) { + + /* + * Verification failed on primary path. Look for another good path + * if it exists. + */ + + original_primary_path = chunk->cur_path; + + for (i = 0; i < chunk->num_paths; i++) { + + + if (i == original_primary_path) { + + continue; + } + + /* + * Change primary path to this one and see if it is functional + */ + + chunk->cur_path = i; + + if (!(cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_DISK_ERR,NULL,NULL))) { + + /* + * We found a good path, + */ + found_new_path = TRUE; + break; + } + + } /* for */ + + + if (!found_new_path) { + + /* + * We did not find a valid path for this + * device. 
+ */ + + cblk_notify_mc_err(chunk,path_index,0x509,chunk->num_paths,CFLSH_BLK_NOTIFY_DISK_ERR,NULL); + + rc = EIO; + + chunk->flags |= CFLSH_CHUNK_FAIL_IO; + + /* + * Issue reset context to fail any active I/O. + */ + + + cblk_chunk_free_mc_device_resources(chunk); + + cblk_chunk_unmap(chunk,TRUE); + cblk_chunk_detach(chunk,TRUE); + close(chunk->fd); + + cblk_fail_all_cmds(chunk); + + } + } + + + + + } else { + + if ((dk_exceptions.exceptions & DK_CE_VERIFY_IN_PROGRESS) && + !(dk_exceptions.exceptions & DK_CE_VERIFY_SUCCEEDED)) { + + // TODO: ?? Should we just do verify and block until completes? + + cblk_halt_all_cmds(chunk, path_index, FALSE); + + + + } else if (dk_exceptions.exceptions & DK_CE_VERIFY_SUCCEEDED) { + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + cblk_reset_context_shared_afu(chunk->path[path_index]->afu); + CFLASH_BLOCK_LOCK(chunk->lock); + + + } + + + } + + + + if ((dk_exceptions.exceptions & DK_CE_SIZE_CHANGE) && + !(chunk->flags & CFLSH_CHUNK_FAIL_IO)) { + + if (cblk_verify_mc_lun(chunk,CFLSH_BLK_NOTIFY_DISK_ERR,NULL,NULL)) { + + /* + * Verification failed + */ + + rc = EIO; + + chunk->flags |= CFLSH_CHUNK_FAIL_IO; + + /* + * Issue reset context to fail any active I/O. 
+ */ + + + cblk_chunk_free_mc_device_resources(chunk); + + cblk_chunk_unmap(chunk,TRUE); + cblk_chunk_detach(chunk,TRUE); + close(chunk->fd); + + cblk_fail_all_cmds(chunk); + } + } + } + + + if (poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLMSG) { + + /* + * Adapter error interrupt + */ + + chunk->stats.num_capi_afu_intrpts++; + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd,CFLSH_BLK_INTRPT_STATUS,cmd_complete,transfer_size); + } + + if (poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLIN) { + + /* + * Adapter interrupt for command completion has occurred + */ + + chunk->stats.num_capi_afu_intrpts++; + + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd,CFLSH_BLK_INTRPT_CMD_CMPLT,cmd_complete,transfer_size); + } + +#ifdef _ERROR_INTR_MODE + + /* + * In Error Interrupt Mode, we should never get POLLIN to indicate command + * complete. The code invoking poll, expects it to time-out and then just check the + * RRQ via the call to CBLK_PROCESS_ADAP_CONVERT_INTRPT. Thus in error interrupt + * mode, it is possible error events may get posted and this routine + * will be invoked instead of CBLK_PROCESS_ADAP_CONVERT_INTRPT. Since we + * also want to handle command completions in that case, lets check for them + * now to ensure they are processed. + */ + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd,CFLSH_BLK_INTRPT_CMD_CMPLT,cmd_complete, + transfer_size); +#endif /* !_ERROR_INTR_MODE */ + +#endif /* !BLOCK_FILEMODE_ENABLED */ + + /* + * TODO: ?? Currently we are just returning the last rc seen, + * Is this the corect choice. + */ + + + return rc; +} + + +#else +/* + * NAME: cblk_read_os_specific_intrpt_event + * + * FUNCTION: Reads an OS specific event for this interrupt + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ +int cblk_read_os_specific_intrpt_event(cflsh_chunk_t *chunk, int path_index,cflsh_cmd_mgm_t **cmd,int *cmd_complete, + size_t *transfer_size, struct pollfd *poll_list) +{ + int rc = 0; + int read_bytes = 0; + int process_bytes = 0; + uint8_t read_buf[CAPI_FLASH_BLOCK_SIZE]; + struct cxl_event *cxl_event = (struct cxl_event *)read_buf; + + +#ifndef BLOCK_FILEMODE_ENABLED + + + read_bytes = read(chunk->path[path_index]->afu->poll_fd,cxl_event,CAPI_FLASH_BLOCK_SIZE); + +#else + /* + * For file mode fake an AFU interrupt + */ + + cxl_event->header.type = CXL_EVENT_AFU_INTERRUPT; + + read_bytes = sizeof(struct cxl_event); + + cxl_event->header.size = read_bytes; + + cxl_event->irq.irq = SISL_MSI_RRQ_UPDATED; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + + if (read_bytes < 0) { + + if (*cmd) { + CBLK_TRACE_LOG_FILE(5,"read event failed, with rc = %d errno = %d, cmd = 0x%llx, cmd_index = %d, lba = 0x%llx", + read_bytes, errno,(uint64_t)*cmd, (*cmd)->index, (*cmd)->cmdi->lba); + } else { + CBLK_TRACE_LOG_FILE(7,"read event failed, with rc = %d errno = %d, cmd = 0x%llx", + read_bytes, errno,(uint64_t)*cmd); + + } + +#ifdef _SKIP_POLL_CALL + + /* + * If we are not using the poll call, + * then since we are not blocking on the + * read, we need to delay here before + * re-reading again. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + usleep(CAPI_POLL_IO_TIME_OUT * 1000); + + CFLASH_BLOCK_LOCK(chunk->lock); +#else + + + if ((read_bytes == -1) && (errno == EAGAIN)) { + +#ifdef _ERROR_INTR_MODE + /* + * When we poll with no time-out, we sometimes + * see read fail with EAGAIN. Do not consider + * this an error, but instead check the RRQ for + * completions. 
+ */ + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd, + CFLSH_BLK_INTRPT_CMD_CMPLT, + cmd_complete, transfer_size); + return rc; + +#else + /* + * Increment statistics + */ + + chunk->stats.num_capi_false_reads++; +#endif /* !_ERROR_INTR_MODE */ + } + +#endif /* _SKIP_POLL_CALL */ + + if ((read_bytes == -1) && (errno == EIO)) { + + /* + * This most likely indicates the adapter + * is being reset. + */ + + + chunk->stats.num_capi_adap_resets++; + + cblk_check_os_adap_err(chunk,path_index); + } + + return (-1); + } + + + + if (read_bytes > CAPI_FLASH_BLOCK_SIZE) { + + /* + * If the number of read bytes exceeded the + * size of the buffer we supplied then truncate + * read_bytes to our buffer size. + */ + + + if (*cmd) { + CBLK_TRACE_LOG_FILE(1,"read event returned too large buffer size = %d errno = %d, cmd = 0x%llx, cmd_index = %d, lba = 0x%llx", + read_bytes, errno,(uint64_t)cmd, (*cmd)->index, (*cmd)->cmdi->lba); + } else { + CBLK_TRACE_LOG_FILE(1,"read event returned too large buffer size = %d errno = %d, cmd = 0x%llx", + read_bytes, errno,(uint64_t)*cmd); + + } + read_bytes = CAPI_FLASH_BLOCK_SIZE; + } + + while (read_bytes > process_bytes) { + + /* + * The returned read data will have + * cxl event types. Unfortunately they + * are not using the common struct cxl_event + * structure for all in terms of size. Thus + * we need to read the header (common + * for all) and from the header's size + * field determine the size of the read + * entry. 
+ */ + + + + CBLK_TRACE_LOG_FILE(7,"cxl_event type = %d, size = %d", + cxl_event->header.type,cxl_event->header.size); + + + if (cxl_event->header.size == 0) { + + + CBLK_TRACE_LOG_FILE(1,"cxl_event type = %d, invalid size = %d", + cxl_event->header.type,cxl_event->header.size); + + errno = 5; + + return (-1); + } + + + process_bytes += cxl_event->header.size; + + + if (cxl_event->header.type == CXL_EVENT_AFU_INTERRUPT) { + + + chunk->stats.num_capi_afu_intrpts++; + + rc = CBLK_PROCESS_ADAP_INTRPT(chunk,cmd,(int)cxl_event->irq.irq,cmd_complete,transfer_size); + } else { + + + rc = cblk_process_nonafu_intrpt_cxl_events(chunk,path_index,cxl_event); + } + + cxl_event = (struct cxl_event *)(((char*)cxl_event) + cxl_event->header.size); + + } + +#ifdef _ERROR_INTR_MODE + + /* + * In Error Interrupt Mode, we should never get events to indicate command + * complete. The code invoking poll, expects it to time-out and then just check the + * RRQ via the call to CBLK_PROCESS_ADAP_CONVERT_INTRPT. Thus in error interrupt + * mode, it is possible error events may get posted and this routine + * will be invoked instead of CBLK_PROCESS_ADAP_CONVERT_INTRPT. Since we + * also want to handle command completions in that case, lets check for them + * now to ensure they are processed. + */ + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd,CFLSH_BLK_INTRPT_CMD_CMPLT,cmd_complete, + transfer_size); +#endif /* !_ERROR_INTR_MODE */ + + + /* + * TODO: ?? Currently we are just returning the last rc seen, + * Is this the corect choice. + */ + + + return rc; + +} +#endif /* !_AIX */ + + +/* + * NAME: cblk_check_os_adap_err_failure_cleanup + * + * FUNCTION: If when doing the check os adapter error recovery + * we suffer a fatal, then clean up by failing all I/O + * to all associated paths of this AFU. + * This routine assumes the caller is holding only the global lock. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ *
+ * RETURNS:
+ *     None
+ *
+ *
+ */
+void cblk_check_os_adap_err_failure_cleanup(cflsh_chunk_t *chunk, cflsh_afu_t *afu)
+{
+
+    cflsh_path_t *path;
+    int pthread_rc = 0;
+    cflsh_chunk_t *tmp_chunk;
+    int path_index;
+
+
+
+
+    /* Clear the AFU halted state and wake any threads waiting to resume. */
+
+    afu->flags &= ~CFLSH_AFU_HALTED;
+
+
+    pthread_rc = pthread_cond_broadcast(&(afu->resume_event));
+
+    if (pthread_rc) {
+
+        CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for AFU resume_event rc = %d,errno = %d",
+                            pthread_rc,errno);
+    }
+
+    path = afu->head_path;
+
+    CFLASH_BLOCK_UNLOCK(afu->lock);
+
+
+
+    chunk->flags |= CFLSH_CHUNK_FAIL_IO;
+
+
+
+
+    cblk_chunk_free_mc_device_resources(chunk);
+
+    cblk_chunk_unmap(chunk,TRUE);
+    cblk_chunk_detach(chunk,TRUE);
+    close(chunk->fd);
+
+
+    /*
+     * For each chunk associated with this path:
+     *
+     *   - Put the chunk in a failed state
+     *   - fail all active I/O
+     *   - Wakeup any threads waiting to issue
+     *     commands for this. Since the chunk is
+     *     in a failed state, these should all fail
+     *     immediately.
+     */
+
+    while (path) {
+
+
+        tmp_chunk = path->chunk;
+
+        if (tmp_chunk == NULL) {
+            CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index);
+
+            /*
+             * Advance to the next path before continuing; otherwise
+             * this loop spins forever on the same NULL entry.
+             */
+            path = path->next;
+            continue;
+        }
+
+        CFLASH_BLOCK_LOCK(tmp_chunk->lock);
+
+        path_index = path->path_index;
+
+
+        tmp_chunk->flags |= CFLSH_CHUNK_FAIL_IO;
+
+        cblk_fail_all_cmds(tmp_chunk);
+
+        pthread_rc = pthread_cond_broadcast(&(tmp_chunk->path[path_index]->resume_event));
+
+        if (pthread_rc) {
+
+            CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for resume_event rc = %d,errno = %d",
+                                pthread_rc,errno);
+        }
+
+        CFLASH_BLOCK_UNLOCK(tmp_chunk->lock);
+
+        path = path->next;
+    }
+
+    return;
+}
+
+
+/*
+ * NAME: cblk_check_os_adap_err
+ *
+ * FUNCTION: Inform adapter driver that it needs to check if this
+ *           is a fatal error that requires a reset.
+ *           This routine assumes the caller is holding chunk->lock.
+ *
+ *
+ *
+ * INPUTS:
+ *          chunk - Chunk the cmd is associated.
+ * + * RETURNS: + * None + * + * + */ +void cblk_check_os_adap_err(cflsh_chunk_t *chunk, int path_index) +{ + int rc = 0; + cflsh_afu_t *afu; + cflsh_path_t *path; + int tmp_path_index; + cflsh_chunk_t *tmp_chunk; + dk_capi_recover_context_t disk_recover; + int pthread_rc = 0; + void *old_mmio; + size_t old_mmio_size; + + + if (CBLK_INVALID_CHUNK_PATH_AFU(chunk,path_index,__FUNCTION__)) { + + return; + } + + afu = chunk->path[path_index]->afu; + + + if (afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"AFU is null"); + } + + + /* + * Set the halted flag for this chunk, since + * we're about to unlock. + */ + + chunk->flags |= CFLSH_CHNK_HALTED; + + chunk->stats.num_capi_adap_chck_err++; + + /* + * Since we are going to grab the global lock as + * well as some other locks, we need to release this chunk + * lock, because the lock ordering scheme followed by this + * library requires Global lock first, then chunk lock. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + + /* + * Grab the global lock here, since we could + * have multiple chunk paths sharing this AFU. + */ + + CFLASH_BLOCK_WR_RWLOCK(cflsh_blk.global_lock); + + + + CFLASH_BLOCK_LOCK(afu->lock); + + if (chunk->path[path_index]->flags & CFLSH_PATH_A_RST) { + + /* + * It is possible in the case of a shared AFU among different chunk paths, + * that multiple ones may detect this adapter error and decide to issue + * the Recover Adapter ioctl. We need to ensure that is only done once + * for this case. Thus when teh Recover Adapter ioctl is done, we will set + * the each of the associated path flags with CFLSH_PATH_A_RST. This allows + * them to detect this case and avoid issuing the ioctl. The thread that did do + * the ioctl, will have updated all AFU fields and halted/resumes all I/Os to + * this AFU. Thus these subsequent chunks, should not need to do anything except + * possibly wake up block requests. 
+ */ + + CBLK_TRACE_LOG_FILE(5,"afu recovery done via another path to this shared AFU"); + + chunk->flags &= ~CFLSH_CHNK_HALTED; + + chunk->path[path_index]->flags &= ~CFLSH_PATH_A_RST; + + CFLASH_BLOCK_UNLOCK(afu->lock); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + CFLASH_BLOCK_LOCK(chunk->lock); + + pthread_rc = pthread_cond_broadcast(&(chunk->path[chunk->cur_path]->resume_event)); + + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for resume_event rc = %d,errno = %d", + pthread_rc,errno); + } + + + return; + } + + + /* + * If we got here, then there was not other thread that + * processed this recover adapter ioctl. So let's issue + * it along with the associated work needed. + */ + + + afu->flags |= CFLSH_AFU_HALTED; + + path = afu->head_path; + + /* + * Since we are going to be grabbing each chunk lock for chunk's + * that have paths that use this AFU, we need to release the AFU + * lock. This is because lock order convention in this library + * is that chunk lock must be grabbed before AFU lock. + * We should be safe iterating over the paths in this AFU, + * since we still have the global lock. + */ + + CFLASH_BLOCK_UNLOCK(afu->lock); + + + + /* + * Mark all chunks that have paths associated with this AFU + * in the halted state to stop any new I/O from being attempted + * until we complete the Recover Adapter ioctl. + */ + + while (path) { + + + tmp_chunk = path->chunk; + + if (tmp_chunk == NULL) { + CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index); + continue; + } + + CFLASH_BLOCK_LOCK(tmp_chunk->lock); + + tmp_chunk->flags |= CFLSH_CHNK_HALTED; + CFLASH_BLOCK_UNLOCK(tmp_chunk->lock); + + path = path->next; + } + + + /* + * Now that all chunks have been "halted" that have + * paths associated with this AFU, let's grab the + * AFU lock again for this ioctl, since will be first + * reading values for the AFU, and the potential after the + * ioctl updating those values. 
+ */ + + CFLASH_BLOCK_LOCK(afu->lock); + + + bzero(&disk_recover,sizeof(disk_recover)); + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER initiated, for chunk->index = %d, chunk->dev_name = %s, path_index = %d,chunk->flags = 0x%x", + chunk->index,chunk->dev_name,path_index,chunk->flags); + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER reattached new old afu->contxt_id = 0x%llx, old_adap_fd = %d", + chunk->path[path_index]->afu->contxt_id,chunk->path[path_index]->afu->poll_fd); + + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER old mmio = 0x%llx, old mmio_size = 0x%llx", + chunk->path[path_index]->afu->mmio,chunk->path[path_index]->afu->mmap_size); + + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER num_active_cmds = %d",chunk->num_active_cmds); + + + +#ifdef _AIX + disk_recover.devno = chunk->path[path_index]->afu->adap_devno; + + disk_recover.ctx_token = chunk->path[path_index]->afu->contxt_id; +#else + + disk_recover.context_id = chunk->path[path_index]->afu->contxt_id; +#endif /* _AIX */ + + + + rc = ioctl(chunk->fd,DK_CAPI_RECOVER_CTX,&disk_recover); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DK_CAPI_RECOVER failed with rc = %d, errno = %d, return_flags = 0x%llx\n", + rc,errno,disk_recover.return_flags); + + + + + /* + * If this ioctl failed, then we have no recourse other + * than to fail everything. So mark all + * associated chunks in failed + * state and fail all their I/O. + * + * NOTE: cblk_check_os_adap_err_failure_cleanup + * unlocks the afu->lock. + */ + + cblk_notify_mc_err(chunk,path_index,0x503,errno, CFLSH_BLK_NOTIFY_ADAP_ERR,NULL); + + CBLK_LIVE_DUMP_THRESHOLD(1,"0x503"); + + + cblk_check_os_adap_err_failure_cleanup(chunk,afu); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + return; + + } else { + + + + if (disk_recover.return_flags & DK_RF_REATTACHED) { + + /* + * The ioctl succeeded and an AFU reset has been done. 
+ * We need to process the updated information from this + */ + + path = afu->head_path; + + CFLASH_BLOCK_UNLOCK(afu->lock); + + + /* + * For each chunk associated with this path: + * + * - Mark the chunk's path to indicate adapter + * reset occurred. + * - Halt all active I/O + */ + + while (path) { + + + tmp_chunk = path->chunk; + + if (tmp_chunk == NULL) { + CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index); + continue; + } + + CFLASH_BLOCK_LOCK(tmp_chunk->lock); + + tmp_path_index = path->path_index; + cblk_halt_all_cmds(tmp_chunk,tmp_path_index, FALSE); + + path->flags |= CFLSH_PATH_A_RST; + + CFLASH_BLOCK_UNLOCK(tmp_chunk->lock); + + path = path->next; + } + + + CFLASH_BLOCK_LOCK(afu->lock); + + /* + * Extract new AFU information + */ + +#ifdef _AIX + chunk->path[path_index]->afu->contxt_id = disk_recover.new_ctx_token; + chunk->path[path_index]->afu->contxt_handle = 0xffffffff & disk_recover.new_ctx_token; + chunk->path[path_index]->afu->poll_fd = disk_recover.new_adap_fd; + + + chunk->path[path_index]->afu->mmio_mmap = disk_recover.mmio_start; + + chunk->path[path_index]->afu->mmio = chunk->path[path_index]->afu->mmio_mmap; + + chunk->path[path_index]->afu->mmap_size = disk_recover.mmio_size; + + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER reattached new afu->contxt_id = 0x%llx, new_adap_fd = %d", + chunk->path[path_index]->afu->contxt_id,chunk->path[path_index]->afu->poll_fd); + + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER mmio = 0x%llx, mmio_size = 0x%llx", + chunk->path[path_index]->afu->mmio,chunk->path[path_index]->afu->mmap_size); + + + + + +#else + + old_mmio = chunk->path[path_index]->afu->mmio_mmap; + + old_mmio_size = chunk->path[path_index]->afu->mmap_size; + + chunk->path[path_index]->afu->contxt_id = disk_recover.context_id; + chunk->path[path_index]->afu->contxt_handle = 0xffffffff & disk_recover.context_id; + chunk->path[path_index]->afu->poll_fd = disk_recover.adap_fd; + + + chunk->path[path_index]->afu->mmap_size = 
disk_recover.mmio_size; + + + + + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER reattached new afu->contxt_id = 0x%llx, new_adap_fd = %d", + chunk->path[path_index]->afu->contxt_id,chunk->path[path_index]->afu->poll_fd); + + + /* + * We do not need to unmap the old MMIO space, since the recover adapter ioctl did this + * for us. So we just need to map the new MMIO space here. + */ + + chunk->path[path_index]->afu->mmio_mmap = mmap(NULL,chunk->path[path_index]->afu->mmap_size,PROT_READ|PROT_WRITE, MAP_SHARED, + chunk->path[path_index]->afu->poll_fd,0); + + + if (chunk->path[path_index]->afu->mmio_mmap == MAP_FAILED) { + + CBLK_TRACE_LOG_FILE(1,"mmap of mmio space failed for dev_name = %s, errno = %d, mmio_size = 0x%llx", + chunk->dev_name,errno,(uint64_t)chunk->path[path_index]->afu->mmap_size); + + /* + * Setup failed, mark chunk in failed + * state and fail all I/O. + * + * NOTE: cblk_check_os_adap_err_failure_cleanup + * unlocks the afu->lock. + */ + + + cblk_check_os_adap_err_failure_cleanup(chunk,afu); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + return; + + } + + chunk->path[path_index]->afu->mmio = chunk->path[path_index]->afu->mmio_mmap; + + + if (fcntl(chunk->path[path_index]->afu->poll_fd,F_SETFL,O_NONBLOCK) == -1) { + + /* + * Ignore error for now + */ + + CBLK_TRACE_LOG_FILE(1,"fcntl failed with errno = %d",errno); + + } + + + + if (munmap(old_mmio,old_mmio_size)) { + + + + /* + * Don't return here on error. Continue + * to close + */ + CBLK_TRACE_LOG_FILE(2,"munmap failed with errno = %d", + errno); + } + + CBLK_TRACE_LOG_FILE(5,"DK_CAPI_RECOVER mmio = 0x%llx, mmio_size = 0x%llx", + chunk->path[path_index]->afu->mmio,chunk->path[path_index]->afu->mmap_size); + + + + +#endif /* !AIX */ + + /* + * Clear AFU halted state, so that ADAP_SETUP + * can proceed. 
+ */ + + + afu->flags &= ~CFLSH_AFU_HALTED; + + + CFLASH_BLOCK_UNLOCK(afu->lock); + + + + if (CBLK_ADAP_SETUP(chunk,path_index)) { + + + /* + * Setup failed, mark chunk in failed + * state and fail all I/O. + * + * NOTE: cblk_check_os_adap_err_failure_cleanup + * unlocks the afu->lock. + */ + + CBLK_TRACE_LOG_FILE(1,"ADAP_SETUP fails, for chunk->index = %d, chunk->dev_name = %s, path_index = %d,chunk->flags = 0x%x", + chunk->index,chunk->dev_name,path_index,chunk->flags); + + cblk_notify_mc_err(chunk,path_index,0x504,errno, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + CBLK_LIVE_DUMP_THRESHOLD(1,"0x504"); + + cblk_check_os_adap_err_failure_cleanup(chunk,afu); + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + return; + } + + + /* + * Since, we are re-using the same chunk, make sure + * to reset some fields. + */ + + chunk->cmd_curr = chunk->cmd_start; + + CFLASH_BLOCK_LOCK(afu->lock); + + afu->num_issued_cmds = 0; + + chunk->path[path_index]->afu->p_hrrq_curr = chunk->path[path_index]->afu->p_hrrq_start; + + chunk->path[path_index]->afu->toggle = 1; + + + bzero((void *)chunk->path[path_index]->afu->p_hrrq_start , + (sizeof(*(chunk->path[path_index]->afu->p_hrrq_start)) * chunk->path[path_index]->afu->num_rrqs)); + + + } + + + } + + /* + * Ensure the CFLSH_AFU_HALTED is cleared + */ + + afu->flags &= ~CFLSH_AFU_HALTED; + + pthread_rc = pthread_cond_broadcast(&(afu->resume_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_signal failed for AFU resume_event rc = %d,errno = %d", + pthread_rc,errno); + } + + path = afu->head_path; + + CFLASH_BLOCK_UNLOCK(afu->lock); + + + while (path) { + + + tmp_chunk = path->chunk; + + if (tmp_chunk == NULL) { + CBLK_TRACE_LOG_FILE(1,"chunk is null for path_index of %d",path->path_index); + continue; + } + + CFLASH_BLOCK_LOCK(tmp_chunk->lock); + + tmp_path_index = path->path_index; + + + /* + * NOTE: cblk_resume_all_halted_cmds clears CFLSH_CHNK_HALTED + * and only resumes I/O that was halted by cblk_halt_all_cmds + */ + 
+ cblk_resume_all_halted_cmds(tmp_chunk, FALSE, tmp_path_index, FALSE); + + if (!(chunk->flags & CFLSH_CHNK_NO_BG_TD)) { + + /* + * It is possible that the common interrupt thread + * has given up on the current set of commands and is + * waiting for a signal to proceed. So signal to check + * for I/O completions. + */ + + chunk->thread_flags |= CFLSH_CHNK_POLL_INTRPT; + + pthread_rc = pthread_cond_signal(&(chunk->thread_event)); + + if (pthread_rc) { + + CBLK_TRACE_LOG_FILE(1,"pthread_cond_signall failed rc = %d,errno = %d", + pthread_rc,errno); + + /* + * Ignore error and continue. + */ + } + } + + + CFLASH_BLOCK_UNLOCK(tmp_chunk->lock); + + path = path->next; + } + + + CFLASH_BLOCK_RWUNLOCK(cflsh_blk.global_lock); + + CFLASH_BLOCK_LOCK(chunk->lock); + + /* + * Since we issued the reset and fully processed it, + * clear our reset flag. + */ + + chunk->path[path_index]->flags &= ~CFLSH_PATH_A_RST; + return; +} + +#ifdef _AIX +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_get_program_name + * + * FUNCTION: Finds the name of the process associated with our PID. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: NULL string - No process name found. + * string - Process name found. + * + * ---------------------------------------------------------------------------- + */ + +char *cblk_get_program_name(pid_t pid) +{ + +#define MAX_FETCH_PROCS 100 + + char *process_name = NULL; + pid_t process_index = 0; + int process_list_size; + int i; + int num_process; + + + +#if defined(__64BIT__) + + /* + * 64-bit application + */ + + /* + * NOTE: AIX does not have a mechanism to get a process + * name via getproc (the 32-bit version of the + * getprocs64 call) for 32-bit applications. 
This + * is due to the reduced size of the 32-bit procinfo + */ + + struct procentry64 *process_list; + + + + process_list_size = sizeof(*process_list) * MAX_FETCH_PROCS; + + + process_list = malloc(process_list_size); + + if (process_list == NULL) { + + CBLK_TRACE_LOG_FILE(1,"Failed to allocate process list of size = %d,with errno = %d", + process_list_size,errno); + + return NULL; + } + + + do { + + + bzero(process_list,process_list_size); + + num_process = getprocs64(process_list,sizeof(*process_list), + NULL,0,&process_index,MAX_FETCH_PROCS); + + + if (num_process == 0) { + + + CBLK_TRACE_LOG_FILE(5,"No processes returned from getprocs64. last index = %d", + process_index); + break; + } + + for (i=0;i < num_process; i++) { + + if (pid == (pid_t)process_list[i].pi_pid) { + + /* + * We found the matching process. + * Now let's extract the process' + * name. + */ + + process_name = strdup(process_list[i].pi_comm); + break; + + } + } + + if (process_name) { + + /* + * If we found the process name, then + * break out this loop. + */ + + break; + } + + if (num_process < MAX_FETCH_PROCS) { + + + /* + * There are no more process eleents + * to fetch. + */ + + CBLK_TRACE_LOG_FILE(5,"No more processes left to fetch. last index = %d", + process_index); + break; + } + + + } while (num_process); + + + +#endif /* 64-bit */ + + CBLK_TRACE_LOG_FILE(5,"Our process name = %s",process_name); + + + return process_name; +} + +#endif /* _AIX */ + + +/* + * NAME: cblk_notify_mc_err + * + * FUNCTION: Inform Master Context (MC) of this error. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ * + * RETURNS: + * None + * + * + */ +void cblk_notify_mc_err(cflsh_chunk_t *chunk, int path_index,int error_num, + uint64_t out_rc, + cflash_block_notify_reason_t reason,cflsh_cmd_mgm_t *cmd) +{ +#ifdef _MASTER_CONTXT +#ifdef _AIX + int rc = 0; + dk_capi_log_t disk_log; + scsi_cdb_t *cdb = NULL; + int process_name_len; + struct cflash_block_log log_data; + + + + + if (chunk->path[path_index] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path == NULL"); + return; + } + + if (chunk->path[path_index]->afu == NULL) { + + CBLK_TRACE_LOG_FILE(1,"afu == NULL"); + return; + } + + + + bzero(&log_data,sizeof(log_data)); + + /* + * Build detail data for logging + */ + + log_data.app_indicator = CFLSH_BLK_LIB; + + log_data.errnum = error_num; + + log_data.rc = out_rc; + + log_data.afu_type = chunk->path[path_index]->afu->type; + + + /* + * Fill in global library values + */ + + + + log_data.cflsh_blk_flags = cflsh_blk.flags; + + log_data.num_active_chunks = cflsh_blk.num_active_chunks; + log_data.num_max_active_chunks = cflsh_blk.num_max_active_chunks; + + + + /* + * Fill in chunk general values + */ + + log_data.chunk_flags = chunk->flags; + + log_data.num_active_cmds = chunk->num_active_cmds; + + log_data.num_cmds = chunk->num_cmds; + + log_data.num_paths = chunk->num_paths; + + + /* + * Fill in path general values + */ + + log_data.path_flags = chunk->path[path_index]->flags; + log_data.path_index = path_index; + + + /* + * Fill in AFU general values + */ + + log_data.afu_flags = chunk->path[path_index]->afu->flags; + + log_data.num_rrqs = chunk->path[path_index]->afu->num_rrqs; + + + /* + * Fill in statistics + */ + + + log_data.num_act_reads = chunk->stats.num_act_reads; + + log_data.num_act_writes = chunk->stats.num_act_writes; + + log_data.num_act_areads = chunk->stats.num_act_areads; + + log_data.num_act_awrites = chunk->stats.num_act_awrites; + + log_data.max_num_act_writes = chunk->stats.max_num_act_writes; + + log_data.max_num_act_reads = chunk->stats.max_num_act_reads; 
+ + log_data.max_num_act_awrites = chunk->stats.max_num_act_awrites; + + log_data.max_num_act_areads = chunk->stats.max_num_act_areads; + + log_data.num_cc_errors = chunk->stats.num_cc_errors; + + log_data.num_afu_errors = chunk->stats.num_afu_errors; + + log_data.num_fc_errors = chunk->stats.num_fc_errors; + + log_data.num_errors = chunk->stats.num_errors; + + log_data.num_reset_contexts = chunk->stats.num_reset_contexts; + + log_data.num_reset_contxt_fails = chunk->stats.num_reset_contxt_fails; + + log_data.num_path_fail_overs = chunk->stats.num_path_fail_overs; + + log_data.block_size = chunk->stats.block_size; + + log_data.primary_path_id = chunk->path[0]->path_id; + + log_data.num_no_cmd_room = chunk->stats.num_no_cmd_room; + + log_data.num_no_cmds_free = chunk->stats.num_no_cmds_free; + + log_data.num_no_cmds_free_fail = chunk->stats.num_no_cmds_free_fail; + + log_data.num_fail_timeouts = chunk->stats.num_fail_timeouts; + + log_data.num_capi_adap_chck_err = chunk->stats.num_capi_adap_chck_err; + + log_data.num_capi_adap_resets = chunk->stats.num_capi_adap_resets; + + log_data.num_capi_data_st_errs = chunk->stats.num_capi_data_st_errs; + + log_data.num_capi_afu_errors = chunk->stats.num_capi_afu_errors; + + log_data.mmio = (uint64_t)chunk->path[path_index]->afu->mmio; + + log_data.hrrq_start = (uint64_t)chunk->path[path_index]->afu->p_hrrq_start; + + log_data.cmd_start = (uint64_t)chunk->cmd_start; + + if (cmd) { + + + cdb = CBLK_GET_CMD_CDB(chunk,cmd); + + if (cdb) { + bcopy(cdb,&log_data.failed_cdb,sizeof(*cdb)); + } + + + if (cmd->cmdi) { + + log_data.data_ea = (uint64_t)cmd->cmdi->buf; + + } + + log_data.data_len = (uint64_t)cmd->cmdi->nblocks; + + log_data.lba = (uint64_t)cmd->cmdi->lba; + + CBLK_COPY_ADAP_CMD_RESP(chunk,cmd,log_data.data,CFLASH_BLOCK_LOG_DATA_LEN); + + + } + + + bzero(&disk_log,sizeof(disk_log)); + + /* + * Log all errors are temporary. Only + * treat the adapter/disk error as software errors. 
+ * All others are treated as hardware errors + */ + + + disk_log.flags = DK_LF_TEMP; + if ((reason == CFLSH_BLK_NOTIFY_DISK_ERR) || + (reason == CFLSH_BLK_NOTIFY_ADAP_ERR) || + (reason == CFLSH_BLK_NOTIFY_SFW_ERR)) { + + disk_log.flags |= DK_LF_SW_ERR; + } else { + + + disk_log.flags |= DK_LF_HW_ERR; + } + + disk_log.path_id = chunk->path[path_index]->path_id; + disk_log.devno = chunk->path[path_index]->afu->adap_devno; + + disk_log.ctx_token = chunk->path[path_index]->afu->contxt_id; + disk_log.rsrc_handle = chunk->path[path_index]->sisl.resrc_handle; + + + if (cflsh_blk.process_name == NULL) { + cflsh_blk.process_name = cblk_get_program_name(cflsh_blk.caller_pid); + } + + + process_name_len = MIN(strlen(cflsh_blk.process_name),DK_LOG_ASCII_SENSE_LEN); + + strncpy(disk_log.ascii_sense_data,cflsh_blk.process_name,process_name_len); + + + disk_log.reason = reason; + + bcopy(&log_data,disk_log.sense_data,MIN(DK_LOG_SENSE_LEN,sizeof(log_data))); + + + CBLK_TRACE_LOG_FILE(5,"Issuing DK_CAPI_LOG_EVENT chunk->dev_name = %s, reason = %d, process_name = %s", + chunk->dev_name,reason,cflsh_blk.process_name); + + + rc = ioctl(chunk->fd,DK_CAPI_LOG_EVENT,&disk_log); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"DISK_CAPI_LOG_EVENT failed for chunk->dev_name = %s,with rc = %d, errno = %d, return_flags = 0x%llx\n", + chunk->dev_name,rc,errno,disk_log.return_flags); + + } + + +#else + + CBLK_TRACE_LOG_FILE(1,"LOG_EVENT reason %d error_num = 0x%x,for chunk->dev_name = %s, chunk index = %d\n", + reason,error_num,chunk->dev_name,chunk->index); + +#endif /* !AIX */ + + +#endif /* _MASTER_CONTXT */ + + return; +} + +/* + * NAME: cblk_verify_mc_lun + * + * FUNCTION: Request MC to verify lun. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ *
+ * RETURNS:
+ *     0 - Good completion
+ *    -1 - Error
+ *
+ *
+ */
+/*
+ * NOTE(review): neither 'reason' nor 'cmd' is referenced anywhere in this
+ * function body; confirm whether other platforms need them before removing.
+ */
+int cblk_verify_mc_lun(cflsh_chunk_t *chunk, cflash_block_notify_reason_t reason,
+                       cflsh_cmd_mgm_t *cmd,
+                       struct request_sense_data *sense_data)
+{
+#ifdef _MASTER_CONTXT
+
+    int rc = 0;
+    dk_capi_verify_t disk_verify;
+
+
+    if (chunk->path[chunk->cur_path] == NULL) {
+
+        CBLK_TRACE_LOG_FILE(1,"path == NULL");
+        return -1;
+    }
+
+    /* Start from a zeroed ioctl argument block. */
+    bzero(&disk_verify,sizeof(disk_verify));
+
+#ifdef _AIX
+    disk_verify.path_id = chunk->path[chunk->cur_path]->path_id;
+
+#endif
+
+    if (sense_data) {
+
+        /*
+         * Pass the sense data that triggered this verify down to the
+         * driver as a hint (field names differ between AIX and Linux).
+         */
+#ifdef _AIX
+        bcopy(sense_data,disk_verify.sense_data,
+              MIN(sizeof(*sense_data),DK_VERIFY_SENSE_LEN));
+
+
+        disk_verify.hint = DK_HINT_SENSE;
+#else
+        bcopy(sense_data,disk_verify.sense_data,
+              MIN(sizeof(*sense_data),DK_CXLFLASH_VERIFY_SENSE_LEN));
+
+
+        disk_verify.hint = DK_CXLFLASH_VERIFY_HINT_SENSE;
+        disk_verify.context_id = chunk->path[chunk->cur_path]->afu->contxt_id;
+        disk_verify.rsrc_handle = chunk->path[chunk->cur_path]->sisl.resrc_handle;
+
+#endif /* ! AIX */
+
+    }
+
+
+    CBLK_TRACE_LOG_FILE(5,"Issuing DK_CAPI_VERIFY for chunk index = %d\n",
+                        chunk->index);
+
+    rc = ioctl(chunk->fd,DK_CAPI_VERIFY,&disk_verify);
+
+    if (rc) {
+
+        CBLK_TRACE_LOG_FILE(1,"DK_CAPI_VERIFY failed with rc = %d, errno = %d, return_flags = 0x%llx\n",
+                            rc,errno,disk_verify.return_flags);
+
+
+        /*
+         * If verify failed with DK_RF_PATH_LOST, and we are
+         * running MPIO, then this will also be detected when we
+         * query exceptions for the disk. At that time we'll check
+         * for a valid path and change our primary path to that one.
+         */
+
+
+        chunk->flags |= CFLSH_CHUNK_FAIL_IO;
+
+
+        /*
+         * If the verify of the disk failed, then fail all commands back,
+         * but first ensure the AFU drops them by tearing down this
+         * chunk's resources, mapping, and context.
+         */
+
+        cblk_chunk_free_mc_device_resources(chunk);
+
+        cblk_chunk_unmap(chunk,TRUE);
+
+        cblk_chunk_detach(chunk,TRUE);
+
+        close(chunk->fd);
+
+
+        cblk_fail_all_cmds(chunk);
+
+
+        return -1;
+
+
+    } else {
+
+        /* Verify succeeded: refresh the lun capacity from the ioctl result. */
+        chunk->num_blocks_lun = disk_verify.last_lba + 1;
+
+        CBLK_TRACE_LOG_FILE(5,"Last block returned = 0x%llx\n",
+                            chunk->num_blocks_lun);
+
+        if (!(chunk->flags & CFLSH_CHNK_VLUN)) {
+
+            /*
+             * If this chunk represents a physical lun, then update
+             * its number of valid blocks.
+             */
+
+            chunk->num_blocks = chunk->num_blocks_lun;
+
+        }
+    }
+
+#endif /* _MASTER_CONTXT */
+
+    return 0;
+}
+
diff --git a/src/block/cflash_block_linux.c b/src/block/cflash_block_linux.c
new file mode 100644
index 00000000..e69a054d
--- /dev/null
+++ b/src/block/cflash_block_linux.c
@@ -0,0 +1,1995 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/block/cflash_block_linux.c $ */
+/* */
+/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2014,2015 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+/* ----------------------------------------------------------------------------
+ *
+ * This file contains the linux specific code for the block library.
+ * For other OSes, this file should not be linked in and instead replaced
+ * with the analogous OS specific file.
+ *
+ * ----------------------------------------------------------------------------
+ */
+
+
+#define CFLSH_BLK_FILENUM 0x0400
+#include "cflash_block_internal.h"
+#include "cflash_block_inline.h"
+#include "cflash_block_protos.h"
+/*
+ * NOTE(review): the three #include directives below are missing their header
+ * names — apparently angle-bracketed text was stripped when this patch was
+ * extracted. Restore the original <...> headers before applying.
+ */
+#include
+#ifdef _MASTER_CONTXT
+#include
+#endif /* _MASTER_CONTXT */
+
+#ifdef _USE_LIB_AFU
+#include
+#endif /* _USE_LIB_AFU */
+#ifdef _USE_LIB_AFU
+struct afu *p_afu = NULL;
+#endif /* _USE_LIB_AFU */
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_init_mc_interface
+ *
+ * FUNCTION: Initialize master context (MC) interfaces for this process.
+ *
+ *
+ *
+ * CALLED BY:
+ *
+ *
+ * INTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * EXTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * RETURNS:
+ *
+ * ----------------------------------------------------------------------------
+ */
+void cblk_init_mc_interface(void)
+{
+    /* Allow the default lun id to be overridden via environment (hex string). */
+    char *lun = getenv("CFLSH_BLK_LUN_ID");
+
+
+
+    if (lun) {
+        cblk_lun_id = strtoul(lun,NULL,16);
+    }
+
+
+#ifdef _MASTER_CONTXT
+
+    mc_init();
+
+#endif /* _MASTER_CONTXT */
+
+
+    return;
+}
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_cleanup_mc_interface
+ *
+ * FUNCTION: Terminate master context (MC) interfaces for this process.
+ *
+ *
+ *
+ * CALLED BY:
+ *
+ *
+ * INTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * EXTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * RETURNS:
+ *
+ * ----------------------------------------------------------------------------
+ */
+void cblk_cleanup_mc_interface(void)
+{
+
+
+
+#ifdef _MASTER_CONTXT
+
+    mc_term();
+
+#endif /* _MASTER_CONTXT */
+
+
+
+    return;
+}
+
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_get_os_chunk_type
+ *
+ * FUNCTION: Get OS specific chunk types
+ *
+ *
+ *
+ * CALLED BY:
+ *
+ *
+ * INTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * EXTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * RETURNS:
+ *
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * NOTE(review): 'path' and 'arch_type' are currently ignored — every caller
+ * gets the SIS lite chunk type on Linux.
+ */
+cflsh_block_chunk_type_t cblk_get_os_chunk_type(const char *path, int arch_type)
+{
+    cflsh_block_chunk_type_t chunk_type;
+
+
+
+    /*
+     * For now we only support one chunk type:
+     * the SIS lite type.
+     */
+
+    chunk_type = CFLASH_BLK_CHUNK_SIS_LITE;
+
+    return chunk_type;
+}
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_find_parent_dev
+ *
+ * FUNCTION: Find parent string of lun.
+ *
+ *
+ *
+ * CALLED BY:
+ *
+ *
+ * INTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * EXTERNAL PROCEDURES CALLED:
+ *
+ *
+ *
+ * RETURNS:
+ *
+ * ----------------------------------------------------------------------------
+ */
+/* Linux stub: parent-device lookup is not implemented, so always NULL. */
+char *cblk_find_parent_dev(char *device_name)
+{
+
+    return NULL;
+}
+
+/* ----------------------------------------------------------------------------
+ *
+ * NAME: cblk_chunk_attach_process_map
+ *
+ * FUNCTION: Attaches the current process to a chunk and
+ *           maps the MMIO space.
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_attach_process_map (cflsh_chunk_t *chunk, int mode, int *cleanup_depth) +{ +#ifndef _USE_LIB_AFU + struct cxl_ioctl_start_work start_work; +#ifndef _MASTER_CONTXT +#ifdef _NOT_YET + uint64_t port_lun_table_start; +#endif /* _NOT_YET */ +#endif +#endif + cflsh_path_t *path = NULL; + cflsh_afu_in_use_t in_use; + int share = FALSE; + +#ifdef _MASTER_CONTXT + int end; +#endif /* _MASTER_CONTXT */ + int rc = 0; + cflsh_block_chunk_type_t chunk_type; + + + if (chunk == NULL) { + + return (-1); + } + + + if (chunk->path[chunk->cur_path] == NULL) { + + /* + * For the case of forking a process, the + * child's path structures will already exist, + * and we need to preserve some of the data. + * So only get a path if one is not allocated. + */ + + chunk_type = cblk_get_chunk_type(chunk->dev_name,0); + + + + /* + * For non-kernel MC, only use one path. + */ + + chunk->cur_path = 0; + + if (chunk->flags & CFLSH_CHNK_SHARED) { + + share = TRUE; + } + + + path = cblk_get_path(chunk,0,chunk_type,chunk->num_cmds,&in_use, share); + + + if (path == NULL) { + + return (-1); + } + + chunk->path[chunk->cur_path] = path; + + path->path_index = chunk->cur_path; + + } + + /* + * For GA1, the poll filedescriptor + * and the one used for open/ioctls are + * the same. + */ + chunk->path[chunk->cur_path]->afu->poll_fd = chunk->fd; + +#ifdef _USE_LIB_AFU + afu_start(p_afu); + + // set up RRQ + // these funcs have syncs in them. offset is in 4-byte words. + // assume problem space starts with the "Host transport MMIO regs" + // in SISLite p 7. 
+ afu_mmio_write_dw(p_afu, 10, (uint64_t)chunk->path[chunk->cur_path]->afu->p_hrrq_start); // START_EA + afu_mmio_write_dw(p_afu, 12, (uint64_t)chunk->path[chunk->cur_path]->afu->p_hrrq_end); // END_EA + + +#else + + bzero(&start_work,sizeof(start_work)); + + start_work.flags = CXL_START_WORK_NUM_IRQS; + start_work.num_interrupts = 4; + + +#ifndef BLOCK_FILEMODE_ENABLED + rc = ioctl(chunk->fd,CXL_IOCTL_START_WORK,&start_work); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"Unable to attach errno = %d",errno); + perror("cblk_open: Unable to attach"); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + + } + rc = ioctl(chunk->fd,CXL_IOCTL_GET_PROCESS_ELEMENT,&chunk->path[chunk->cur_path]->afu->contxt_handle); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"Unable to attach errno = %d",errno); + perror("cblk_open: Unable to attach"); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + + } +#else + start_work.num_interrupts = 0; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + + CBLK_TRACE_LOG_FILE(6,"contxt_handle = 0x%x",chunk->path[chunk->cur_path]->afu->contxt_handle); + +#ifdef _MASTER_CONTXT + end = strlen(chunk->dev_name); + if (chunk->dev_name[end-1] != 'm') { + + /* + * If this is not the master special file then only + * use the MMIO space for one non-master user. + */ + + chunk->path[chunk->cur_path]->afu->mmap_size = CFLASH_MMIO_CONTEXT_SIZE; + } else { + /* + * If this is the master special file then + * request the adapter's full MMIO address space. 
+ */ + + chunk->path[chunk->cur_path]->afu->mmap_size = CAPI_FLASH_REG_SIZE; + } + +#else + + chunk->path[chunk->cur_path]->afu->mmap_size = CAPI_FLASH_REG_SIZE; +#endif /* !_MASTER_CONTXT */ + + chunk->path[chunk->cur_path]->afu->mmio_mmap = mmap(NULL,chunk->path[chunk->cur_path]->afu->mmap_size,PROT_READ|PROT_WRITE, MAP_SHARED, + chunk->fd,0); + + if (chunk->path[chunk->cur_path]->afu->mmio_mmap == MAP_FAILED) { + CBLK_TRACE_LOG_FILE(1,"mmap of mmio space failed errno = %d",errno); + perror ("blk_open: mmap of mmio space failed"); + + /* + * Cleanup depth is set correctly on entry to this routine + * So it does not need to be adjusted for this failure + */ + + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + } + + *cleanup_depth = 40; + + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[chunk->cur_path]->afu->mmio_mmap); +#ifdef _MASTER_CONTXT + + + /* + * If we have a separate master context, then we will not be opening the master context + * special file. Thus the MMIO space returned is just for this process. + */ + + /* + * TODO: ?? Currently we are using the master special file with master context and this + * requires us to treat the MMIO space the same as if the code was compiled without + * _MASTER_CONTEXT. At some future time, we need to disable this and only allow + * the non-master file. + */ + + end = strlen(chunk->dev_name); + if (chunk->dev_name[end-1] != 'm') { + + /* + * If this is not the master special file then the + * returned mmap MMIO space is just this user's MMIO space. + */ + + chunk->path[chunk->cur_path]->afu->mmio = chunk->path[chunk->cur_path]->afu->mmio_mmap; + } else { + /* + * If this is the master special file then the + * returned mmap MMIO space is everyone's MMIO space. + * Thus we need to find our MMIO space in this by using + * the process_element. 
+ */ + + chunk->path[chunk->cur_path]->afu->mmio = chunk->path[chunk->cur_path]->afu->mmio_mmap + CFLASH_MMIO_CONTEXT_SIZE * chunk->path[chunk->cur_path]->afu->contxt_handle; + + } + +#else + + + /* + * If we are using the master context special file, + * then each call to mmap for the same CAPI adapter should return the beginning + * of that adapter's MMIO space, we need to get to the MMIO space for this seecific + * process (context), which is achieved by multiplying the returned process_element + * number by the size of each context space. That gives the offset from the + * returned chunk->path[chunk->cur_path].mmio_mapp address. + */ + chunk->path[chunk->cur_path]->afu->mmio = chunk->path[chunk->cur_path]->afu->mmio_mmap + CFLASH_MMIO_CONTEXT_SIZE * chunk->path[chunk->cur_path]->afu->contxt_handle; + +#endif /* !_MASTER_CONTXT */ + /* + * Set up response queue + */ + + if (CBLK_ADAP_SETUP(chunk,chunk->cur_path)) { + + + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + } + +#ifndef _MASTER_CONTXT +#ifndef BLOCK_FILEMODE_ENABLED + +#ifdef _NOT_YET + /* + * Set up lun table for each FC port 0 + */ + + port_lun_table_start = CAPI_AFU_GLOBAL_OFFSET + offsetof(struct sisl_global_map,fc_port_0[0]); + + + + if (CBLK_SETUP_BAD_MMIO_SIGNAL(chunk,chunk->cur_path,port_lun_table_start+8)) { + + /* + * If we get here then the MMIO below + * failed indicating the adapter either + * is being reset or encountered a UE. 
+ */ + + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + } + + CBLK_TRACE_LOG_FILE(6,"chunk->path[chunk->cur_path].mmio = 0x%llx, port_lun_table_start = 0x%llx",chunk->path[chunk->cur_path]->afu->mmio,port_lun_table_start); + + + out_mmio64 (chunk->path[chunk->cur_path]->afu->mmio + port_lun_table_start, cblk_lun_id); + + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,chunk->cur_path); + + + + /* + * Set up lun table for each FC port 1 + */ + + port_lun_table_start = CAPI_AFU_GLOBAL_OFFSET + offsetof(struct sisl_global_map,fc_port_1[0]); + + + + if (CBLK_SETUP_BAD_MMIO_SIGNAL(chunk,chunk->cur_path,port_lun_table_start+8)) { + + /* + * If we get here then the MMIO below + * failed indicating the adapter either + * is being reset or encountered a UE. + */ + + cblk_release_path(chunk,(chunk->path[chunk->cur_path])); + + chunk->path[chunk->cur_path] = NULL; + return -1; + } + + CBLK_TRACE_LOG_FILE(6,"chunk->path[chunk->cur_path].mmio = 0x%llx, port_lun_table_start = 0x%llx",chunk->path[chunk->cur_path]->afu->mmio,port_lun_table_start); + + + out_mmio64 (chunk->path[chunk->cur_path]->afu->mmio + port_lun_table_start, cblk_lun_id); + + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,chunk->cur_path); + +#endif /* _NOT_YET */ + +#endif /* !BLOCK_FILEMODE_ENABLED */ +#endif /* !_MASTER_CONTXT */ + +#endif /* _USE_LIB_AFU */ + + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_detach + * + * FUNCTION: Detaches the current process. + * maps the MMIO space. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_detach (cflsh_chunk_t *chunk,int force) +{ + + + return; + +} + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_umap + * + * FUNCTION: Attaches the current process to a chunk and + * maps the MMIO space. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_unmap (cflsh_chunk_t *chunk,int force) +{ + +#ifdef _USE_LIB_AFU + + afu_unmap(p_afu); +#else + + if (chunk->path[chunk->cur_path] == NULL) { + + /* + * Nothing to unmap. + */ + + return; + + } + + + if (chunk->path[chunk->cur_path]->afu->mmap_size == 0) { + + /* + * Nothing to unmap. + */ + + return; + } + + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[chunk->cur_path]->afu); + + if ((chunk->path[chunk->cur_path]->afu->ref_count == 1) || + (force)) { + + + /* + * Only unmap on the last entity to use this afu. unless + * force is set. + */ + + if (munmap(chunk->path[chunk->cur_path]->afu->mmio_mmap,chunk->path[chunk->cur_path]->afu->mmap_size)) { + + + + /* + * Don't return here on error. 
Continue + * to close + */ + CBLK_TRACE_LOG_FILE(2,"munmap failed with errno = %d", + errno); + } + + +#endif /* !_USE_LIB_AFU */ + + chunk->path[chunk->cur_path]->afu->mmio = 0; + chunk->path[chunk->cur_path]->afu->mmio_mmap = 0; + chunk->path[chunk->cur_path]->afu->mmap_size = 0; + + } + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[chunk->cur_path]->afu); +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_get_mc_device_resources + * + * FUNCTION: Get master context (MC) resources, which + * include device information to allow + * the device to be accessed for read/writes. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_get_mc_device_resources(cflsh_chunk_t *chunk, + int *cleanup_depth) +{ + int rc = 0; +#ifdef _MASTER_CONTXT + char *env_block_size = getenv("CFLSH_BLK_SIZE"); + uint32_t block_size = 0; + mc_stat_t mstat; +#ifndef BLOCK_FILEMODE_ENABLED + int end; +#endif /* !BLOCK_FILEMODE_ENABLED */ + +#endif /* _MASTER_CONTXT */ + + if (chunk == NULL) { + + return (-1); + } + + if (chunk->path[chunk->cur_path] == NULL) { + + return (-1); + } + + +#ifndef _MASTER_CONTXT + + /* + * We can not be locked when we issue + * commands, since they will do a lock. + * Thus we would deadlock here. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + if (cblk_get_lun_id(chunk)) { + + CFLASH_BLOCK_LOCK(chunk->lock); + CBLK_TRACE_LOG_FILE(5,"cblk_get_lun_id failed errno = %d", + errno); + + return -1; + } + + if (cblk_get_lun_capacity(chunk)) { + + CFLASH_BLOCK_LOCK(chunk->lock); + CBLK_TRACE_LOG_FILE(5,"cblk_get_lun_capacity failed errno = %d", + errno); + + return -1; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + +#else + + /* + * TODO: ?? 
We probably need to eventually fail here + * if a special file with a suffix of 'm' is being + * used since this indicate we (the user) are opening + * the master special file. + */ + + + +#ifndef BLOCK_FILEMODE_ENABLED + + end = strlen(chunk->dev_name); + + if (end < 1) { + + CFLASH_BLOCK_LOCK(chunk->lock); + CBLK_TRACE_LOG_FILE(5,"adapter name two short %s",chunk->dev_name); + + return -1; + } + + if (chunk->dev_name[end-1] != 'm') { + + sprintf(chunk->path[chunk->cur_path]->afu->master_name,"%s",chunk->dev_name); + + + if (chunk->dev_name[end-1] == 's') { + chunk->path[chunk->cur_path]->afu->master_name[end-1] = '\0'; + } + strcat(chunk->path[chunk->cur_path]->afu->master_name,"m"); + } else { + +#endif /* BLOCK_FILEMODE_ENABLED */ + + sprintf(chunk->path[chunk->cur_path]->afu->master_name,"%s",chunk->dev_name); + +#ifndef BLOCK_FILEMODE_ENABLED + } + +#endif /* BLOCK_FILEMODE_ENABLED */ + + /* + * Get a client handle for the specified AFU and context. + */ + + rc = mc_register(chunk->path[chunk->cur_path]->afu->master_name,chunk->path[chunk->cur_path]->afu->contxt_handle,chunk->path[chunk->cur_path]->afu->mmio, + &(chunk->path[chunk->cur_path]->afu->master.mc_handle)); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_register failed with rc = %d, errno = %d for path = %s", + rc, errno,chunk->path[chunk->cur_path]->afu->master_name); + return -1; + } + + + /* + * Get a virtual lun for the specified AFU and context (via + * the client handle). + */ + + rc = mc_open(chunk->path[chunk->cur_path]->afu->master.mc_handle,MC_RDWR, + &(chunk->path[chunk->cur_path]->sisl.resrc_handle)); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_open failed with rc = %d, errno = %d", + rc, errno); + + + /* + * Free resources for the old context and AFU. 
+ */ + rc = mc_unregister(chunk->path[chunk->cur_path]->afu->master.mc_handle); + + if (rc) { + + CBLK_TRACE_LOG_FILE(5,"mc_unregister failed with rc = %d, errno = %d", + rc, errno); + } + + return -1; + } + + if (env_block_size) { + + /* + * If environment for block size is set, then + * use it. + */ + + block_size = atoi(env_block_size); + + if (block_size == CAPI_FLASH_BLOCK_SIZE) { + /* + * If the device is reporting back 4K block size, + */ + chunk->blk_size_mult = 1; + } else { + /* + * If the device is reporting back an non-4K block size, + * then determine appropriate multiple + */ + + + if (block_size) { + chunk->blk_size_mult = CAPI_FLASH_BLOCK_SIZE/block_size; + } else { + chunk->blk_size_mult = 8; + } + } + } else { + + /* + * Issue mc_stat call to get the device's block + * size and the chunk/page size used by the MC + * for that device. + */ + + bzero(&mstat,sizeof(mstat)); + + rc = mc_stat(chunk->path[chunk->cur_path]->afu->master.mc_handle, + chunk->path[chunk->cur_path]->sisl.resrc_handle,&mstat); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_stat failed with rc = %d, errno = %d", + rc, errno); + + cblk_chunk_free_mc_device_resources(chunk); + + return -1; + } + + block_size = mstat.blk_len; + + + CBLK_TRACE_LOG_FILE(5,"mc_stat succeeded with block_size = %d, MC chunk/page size = %d, size= 0x%llx, flags = 0x%llx", + block_size, mstat.nmask,mstat.size,mstat.flags); + if (block_size) { + chunk->blk_size_mult = CAPI_FLASH_BLOCK_SIZE/block_size; + } else { + chunk->blk_size_mult = 8; + } + + /* + * The chunk/page size is determined by mstat.nmask + * and using it as a bit shifter. 
+ */ + + chunk->path[chunk->cur_path]->afu->master.mc_page_size = 1 << mstat.nmask; + + if (chunk->path[chunk->cur_path]->afu->master.mc_page_size == 0) { + + CBLK_TRACE_LOG_FILE(5,"mc_stat returned an invalid MC chunk/page size = %d", + mstat.nmask); + + cblk_chunk_free_mc_device_resources(chunk); + + return -1; + } + } + +#endif /* Master context */ + + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_set_mc_size + * + * FUNCTION: Request master context to provide the + * the specified storage for this chunk. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * This code assumes if the caller passes -1 for the + * master context case, then it will return whatever + * space is available. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 0: Good completion + 0 non-zero: Error + * + * ---------------------------------------------------------------------------- + */ +int cblk_chunk_set_mc_size(cflsh_chunk_t *chunk, size_t nblocks) +{ + int rc = 0; +#ifdef _MASTER_CONTXT + uint64_t size = 0; + uint64_t actual_size = 0; + + + if (chunk->path[chunk->cur_path] == NULL) { + + + CBLK_TRACE_LOG_FILE(5,"chunk->path[chunk->cur_path] = NULL"); + + errno = EINVAL; + return -1; + } + + + if (chunk->path[chunk->cur_path]->afu->master.mc_page_size == 0) { + + + CBLK_TRACE_LOG_FILE(5,"mc_page_size = 0"); + + errno = EINVAL; + return -1; + } + + + if (nblocks != -1) { + + /* + * Caller is requesting a specific amount of space + */ + + if ((nblocks < chunk->path[chunk->cur_path]->afu->master.num_blocks) && + ((chunk->path[chunk->cur_path]->afu->master.num_blocks * chunk->blk_size_mult) >= chunk->path[chunk->cur_path]->afu->master.mc_page_size) && + (nblocks > (chunk->path[chunk->cur_path]->afu->master.num_blocks - (chunk->path[chunk->cur_path]->afu->master.mc_page_size/chunk->blk_size_mult)))) { + + /* + 
* If the amount of space requested is is still within the current + * current MC page/chunk size, then just use the new value without + * calling mc_size again (since it is already allocated). There are three + * possibilities here: they are increasing space, descreasing space, or + * requesting the same space (this last option is probably not very likely) , but + * all are still within the same mc_page_size. + * + * NOTE: mc_page_size is in units of numnber of the device's block size + * (ie. 512, or 4K) and num_blocks is in units of 4K. chunk->blk_size_mult + * is the multiple to go from the device's block size + * to 4K (i.e. 1, 8). + * + */ + + CBLK_TRACE_LOG_FILE(5,"blocks already exist so just use them"); + chunk->num_blocks = nblocks; + return 0; + } + + + /* + * if we get here then we need to request the MC for this + * specific size. + */ + + + if ((nblocks * chunk->blk_size_mult) % chunk->path[chunk->cur_path]->afu->master.mc_page_size) { + + /* + * The size is not divisible by chunk->path[chunk->cur_path]->master.mc_page_size (i.e. + * it is not on a mc_page_size boundary. So round up + * up to the next mc_page_size boundary. + * + * NOTE: mc_page_size is minimum + * granularity of the MC in terms of the devices' block size), + */ + + + size = (nblocks * (chunk->blk_size_mult))/chunk->path[chunk->cur_path]->afu->master.mc_page_size + 1; + } else { + + /* + * This size is on a mc_page_size boundary. + */ + + size = (nblocks * (chunk->blk_size_mult))/chunk->path[chunk->cur_path]->afu->master.mc_page_size; + } + } else { + + /* + * caller is request all remaining space on this device, + * for this case we just pass the -1 thru. 
+ */ + + size = -1; + } + + rc = mc_size(chunk->path[chunk->cur_path]->afu->master.mc_handle, + chunk->path[chunk->cur_path]->sisl.resrc_handle, + size,&actual_size); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_size failed with rc = %d, errno = %d, size = 0x%llx", + rc, errno,size); + + if (errno == 0) { + + errno = ENOMEM; + } + return -1; + } + + + + CBLK_TRACE_LOG_FILE(5,"mc_size succeed with size = 0x%llx and actual_size = 0x%llx", + size, actual_size); + + if ((size != -1) && + (actual_size < size)) { + + + CBLK_TRACE_LOG_FILE(1,"mc_size returned smaller actual size = 0x%llx then requested = 0x%llx", + actual_size,size); + + errno = ENOMEM; + + return -1; + } + + + /* + * Save off the actual amount of space the MC allocated, which may be more than + * what the user requested. Since mc_page_size is in unit of the device's block + * size (chunk->blk_size_mult is the multiple to go from the device's block + * size to 4K) and and our num_blocks is in units of 4K block sizes we need convert this + * size into 4K blocks. + */ + + chunk->path[chunk->cur_path]->afu->master.num_blocks = (actual_size * chunk->path[chunk->cur_path]->afu->master.mc_page_size)/chunk->blk_size_mult; + + if (size == -1) { + + nblocks = chunk->path[chunk->cur_path]->afu->master.num_blocks; + } +#else + + + /* + * TODO: ?? This is temporary code to for + * early development to allow virtual + * luns. Eventually the MC will provision + * this. For now the block layer will use + * a very simplistic and flawed approach + * that leads to inefficient memory usage + * and fragmentation. However it is hoped + * this flawed approach is sufficient until + * the MC can provide the real functionality. + * When the MC does add this functionality, + * this code can be removed. 
+ */ + + + if ((nblocks + cflsh_blk.next_chunk_starting_lba) > chunk->num_blocks_lun) { + + + CBLK_TRACE_LOG_FILE(1,"set_size failed with EINVAL, nblocks = 0x%llx, next_lba = 0x%llx num_blocks_lun = 0x%llx", + (uint64_t)nblocks,(uint64_t)cflsh_blk.next_chunk_starting_lba,(uint64_t)chunk->num_blocks_lun); + errno = EINVAL; + return -1; + } + + + if (chunk->num_blocks) { + + + /* + * If chunk->num_blocks is non-zero then this + * is a resize. + */ + + if (cflsh_blk.next_chunk_starting_lba == + (chunk->start_lba + chunk->num_blocks)) { + + + /* + * If chunk->num_blocks is non-zero then this + * is a resize. If this is the last chunk on this physical disk, + * then set the next_chunk_start_lba to our chunk's + * starting LBA. For this case we do not need + * to update our start_lba since it is correct. + */ + cflsh_blk.next_chunk_starting_lba = chunk->start_lba; + + } else { + + /* + * The current implementation is very inefficient + * and has fragmentation issues. In this case + * it will move the chunk past the other chunks + * on this physical lun. All previous data will be + * lossed + */ + chunk->start_lba = cflsh_blk.next_chunk_starting_lba; + } + } else { + + /* + * This is the first allocation of blocks + * for this chunk. + */ + + chunk->start_lba = cflsh_blk.next_chunk_starting_lba; + } + + + cflsh_blk.next_chunk_starting_lba += nblocks; + + /* + * TODO: End of virtual lun hack + */ + +#endif /* Master context */ + + + chunk->num_blocks = nblocks; + return rc; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_mc_clone + * + * FUNCTION: Requests master context to clone + * an existing AFU + context to this context + * on the same AFU. This is needed whenever + * a process has forked to reenable access + * to the chunks from the parent process in the child + * process. + * + * + * NOTES: This routine assumes the caller has the chunk lock. 
+ * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 0: Good completion + 0 non-zero: Error + * + * ---------------------------------------------------------------------------- + */ +int cblk_mc_clone(cflsh_chunk_t *chunk,int mode, int flags) +{ + int rc = 0; +#ifdef _MASTER_CONTXT + int open_flags; + uint64_t chunk_flags; + int cleanup_depth; + mc_hndl_t old_mc_handle; + res_hndl_t old_resrc_handle; + int old_fd; + void *old_mmio_mmap; + size_t old_mmap_size; + + + if (chunk->path[chunk->cur_path] == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path == NULL"); + return -1; + } + + /* + * It should be noted the chunk is not a fully functional chunk + * from this process' perspective after a fork. It has enough information that should allow + * us to clone it into a new chunk using the same chunk id and chunk structure. + * So first save off relevant information about the old chunk before unregistering + * it. + */ + + old_mc_handle = chunk->path[chunk->cur_path]->afu->master.mc_handle; + old_resrc_handle = chunk->path[chunk->cur_path]->sisl.resrc_handle; + old_fd = chunk->fd; + old_mmio_mmap = chunk->path[chunk->cur_path]->afu->mmio_mmap; + old_mmap_size = chunk->path[chunk->cur_path]->afu->mmap_size; + + /* + * If we have a dedicated thread per chunk + * for interrupts, then stop it now. 
+ */ + + cblk_open_cleanup_wait_thread(chunk); + + open_flags = O_RDWR | O_NONBLOCK; /* ??TODO Try without O_CLOEXEC */ + + chunk->fd = open(chunk->dev_name,open_flags); + if (chunk->fd < 0) { + + CBLK_TRACE_LOG_FILE(1,"Unable to open device errno = %d",errno); + perror("cblk_open: Unable to open device"); + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + free(chunk); + + + return -1; + } + + cleanup_depth = 30; + + if (cblk_chunk_attach_process_map(chunk,mode,&cleanup_depth)) { + + CBLK_TRACE_LOG_FILE(1,"Unable to attach, errno = %d",errno); + perror("cblk_open: Unable to open device"); + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + free(chunk); + + return -1; + + } + + cleanup_depth = 40; + +#ifdef _COMMON_INTRPT_THREAD + + /* + * If we are using a common interrupt thread per chunk, + * then restart it now. + */ + + if (cblk_start_common_intrpt_thread(chunk)) { + + + CBLK_TRACE_LOG_FILE(1,"cblk_start_common_intrpt thread failed with errno= %d", + errno); + + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + + return -1; + } + + cleanup_depth = 45; + +#endif /* _COMMON_INTRPT_THREAD */ + + /* + * Get a client handle for the specified AFU and context. 
+ */ + + rc = mc_register(chunk->path[chunk->cur_path]->afu->master_name,chunk->path[chunk->cur_path]->afu->contxt_handle,chunk->path[chunk->cur_path]->afu->mmio_mmap, + &(chunk->path[chunk->cur_path]->afu->master.mc_handle)); + + if (rc) { + + + CBLK_TRACE_LOG_FILE(1,"mc_register for clone failed with rc = %d, errno = %d for path = %s", + rc,errno,chunk->path[chunk->cur_path]->afu->master_name); + + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + return -1; + } + + cleanup_depth = 50; + + + switch (mode & O_ACCMODE) { + + case O_RDONLY: + chunk_flags = MC_RDONLY; + break; + case O_WRONLY: + chunk_flags = MC_WRONLY; + break; + case O_RDWR: + chunk_flags = MC_RDWR; + break; + default: + chunk_flags = MC_RDONLY; + } + + + CBLK_TRACE_LOG_FILE(5,"mc_clone chunk_flags 0x%x", + chunk_flags); + + + + rc = mc_clone(chunk->path[chunk->cur_path]->afu->master.mc_handle,old_mc_handle,chunk_flags); + + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_clone failed with rc = %d, errno = %d", + rc, errno); + + if (errno == 0) { + + errno = EINVAL; + } + cblk_chunk_open_cleanup(chunk,cleanup_depth); + + return -1; + + } + + /* + * We reuse the original resource handle after an mc_clone + */ + + chunk->path[chunk->cur_path]->sisl.resrc_handle = old_resrc_handle; + + + + + /* + * Free resources for the old context and AFU. + */ + rc = mc_unregister(old_mc_handle); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_unregister after clone failed with rc = %d, errno = %d", + rc, errno); + + } + + rc = munmap(old_mmio_mmap,old_mmap_size); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"munmap failed with rc = %d errno = %d", + rc,errno); + + } + + cleanup_depth = 20; + rc = close(old_fd); + + + if (rc) { + + /* + * If any of the above operations fail then + * exit out this code. 
+ */ + + + CBLK_TRACE_LOG_FILE(1,"close failed with rc = %d errno = %d", + rc,errno); + + } + + +#else + + /* + * This is the case when there is no Master Context + */ + +#ifdef _COMMON_INTRPT_THREAD + + /* + * If we are using a common interrupt thread per chunk, + * and we are not using master context, then the fork will not + * forked our interrupt thread. So we need to start it now. + */ + + if (cblk_start_common_intrpt_thread(chunk)) { + + + CBLK_TRACE_LOG_FILE(1,"cblk_start_common_intrpt thread failed with errno= %d", + errno); + + + return -1; + } + +#endif /* _COMMON_INTRPT_THREAD */ + +#endif /* _MASTER_CONTXT */ + return rc; + +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: cblk_chunk_free_mc_device_resources + * + * FUNCTION: Free master context (MC) resources. + * + * + * NOTES: This routine assumes the caller has the chunk lock. + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void cblk_chunk_free_mc_device_resources(cflsh_chunk_t *chunk) +{ +#ifdef _MASTER_CONTXT + int rc = 0; +#endif /* _MASTER_CONTXT */ + + + if (chunk == NULL) { + + return; + } + + if (chunk->path[chunk->cur_path] == NULL) { + /* + * There is nothing to do here, exit + */ + + return; + + } + +#ifdef _MASTER_CONTXT + + /* + * Free resources for this virtual lun. + */ + + if (chunk->path[chunk->cur_path]->afu->master.mc_handle == 0) { + /* + * There is nothing to do here, exit + */ + + return; + + } + + rc = mc_close(chunk->path[chunk->cur_path]->afu->master.mc_handle, + chunk->path[chunk->cur_path]->sisl.resrc_handle); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_close failed with rc = %d, errno = %d", + rc, errno); + return; + } + + /* + * Free resources for this context and AFU. 
+ */ + rc = mc_unregister(chunk->path[chunk->cur_path]->afu->master.mc_handle); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_unregister failed with rc = %d, errno = %d", + rc, errno); + return; + } + + chunk->path[chunk->cur_path]->afu->master.mc_handle = 0; + +#endif /* _MASTER_CONTXT */ + + + + return; +} + +/* + * NAME: cblk_process_nonafu_intrpt_cxl_events + * + * FUNCTION: This routine process non-AFU interrupt CAPI + * events. + * + * INPUTS: + * chunk - Chunk associated with this error + * ioasa - I/O Adapter status response + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * + */ +cflash_cmd_err_t cblk_process_nonafu_intrpt_cxl_events(cflsh_chunk_t *chunk,int path_index,struct cxl_event *cxl_event) +{ + int rc = CFLASH_CMD_FATAL_ERR; + uint64_t intrpt_status; + + /* + * TODO: ?? More work is needed here. + */ + + + errno = EIO; + + if (chunk == NULL) { + + return rc; + } + + if (chunk->path[path_index] == NULL) { + /* + * There is nothing to do here, exit + */ + + return rc; + + } + + switch (cxl_event->header.type) { + case CXL_EVENT_RESERVED: + chunk->stats.num_capi_reserved_errs++; + CBLK_TRACE_LOG_FILE(1,"CXL_EVENT_RESERVED = size = 0x%x", + cxl_event->header.size); + break; + case CXL_EVENT_DATA_STORAGE: + chunk->stats.num_capi_data_st_errs++; + CBLK_TRACE_LOG_FILE(1,"CAPI_EVENT_DATA_STOARAGE addr = 0x%llx, dsisr = 0x%llx", + cxl_event->fault.addr,cxl_event->fault.dsisr); + CBLK_TRACE_LOG_FILE(6,"contxt_handle = 0x%x",chunk->path[path_index]->afu->contxt_handle); + CBLK_TRACE_LOG_FILE(6,"mmio_map = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio_mmap); + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio); + CBLK_TRACE_LOG_FILE(6,"mmap_size = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmap_size); + CBLK_TRACE_LOG_FILE(6,"hrrq_start = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_start); + CBLK_TRACE_LOG_FILE(6,"hrrq_end = 
0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_end); + CBLK_TRACE_LOG_FILE(6,"cmd_start = 0x%llx",(uint64_t)chunk->cmd_start); + CBLK_TRACE_LOG_FILE(6,"cmd_end = 0x%llx",(uint64_t)chunk->cmd_end); + + intrpt_status = CBLK_GET_INTRPT_STATUS(chunk,path_index); + CBLK_TRACE_LOG_FILE(6,"intrpt_status = 0x%llx",intrpt_status); + + CBLK_TRACE_LOG_FILE(6,"num_active_cmds = 0x%x\n",chunk->num_active_cmds); + + + + + break; + case CXL_EVENT_AFU_ERROR: + chunk->stats.num_capi_afu_errors++; + CBLK_TRACE_LOG_FILE(1,"CXL_EVENT_AFU_ERROR error = 0x%llx, flags = 0x%x", + cxl_event->afu_error.error,cxl_event->afu_error.flags); + + cblk_notify_mc_err(chunk,path_index,0x400,cxl_event->afu_error.error,CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + break; + + + case CXL_EVENT_AFU_INTERRUPT: + /* + * We should not see this, since the caller + * should have parsed these out. + */ + + /* Fall thru */ + default: + CBLK_TRACE_LOG_FILE(1,"Unknown CAPI EVENT type = %d, process_element = 0x%x", + cxl_event->header.type, cxl_event->header.process_element); + + + cblk_notify_mc_err(chunk,path_index,0x401,cxl_event->header.type, CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + } /* switch */ + + return rc; +} + +/* + * NAME: cblk_read_os_specific_intrpt_event + * + * FUNCTION: Reads an OS specific event for this interrupt + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * cmd - Cmd this routine will wait for completion. + * + * RETURNS: + * 0 - Good completion, otherwise error. 
+ * + * + */ +int cblk_read_os_specific_intrpt_event(cflsh_chunk_t *chunk, int path_index,cflsh_cmd_mgm_t **cmd,int *cmd_complete, + size_t *transfer_size,struct pollfd poll_list[]) +{ + int rc = 0; + int read_bytes = 0; + int process_bytes = 0; + uint8_t read_buf[CAPI_FLASH_BLOCK_SIZE]; + struct cxl_event *cxl_event = (struct cxl_event *)read_buf; + + +#ifndef BLOCK_FILEMODE_ENABLED + + + read_bytes = read(chunk->path[path_index]->afu->poll_fd,cxl_event,CAPI_FLASH_BLOCK_SIZE); + +#else + /* + * For file mode fake an AFU interrupt + */ + + cxl_event->header.type = CXL_EVENT_AFU_INTERRUPT; + + read_bytes = sizeof(struct cxl_event); + + cxl_event->header.size = read_bytes; + + cxl_event->irq.irq = SISL_MSI_RRQ_UPDATED; + +#endif /* BLOCK_FILEMODE_ENABLED */ + + + if (read_bytes < 0) { + + if (*cmd) { + CBLK_TRACE_LOG_FILE(5,"read event failed, with rc = %d errno = %d, cmd = 0x%llx, cmd_index = %d, lba = 0x%llx", + read_bytes, errno,(uint64_t)*cmd, (*cmd)->index, (*cmd)->cmdi->lba); + } else { + CBLK_TRACE_LOG_FILE(7,"read event failed, with rc = %d errno = %d, cmd = 0x%llx", + read_bytes, errno,(uint64_t)*cmd); + + } + +#ifdef _SKIP_POLL_CALL + + /* + * If we are not using the poll call, + * then since we are not blocking on the + * read, we need to delay here before + * re-reading again. + */ + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + usleep(CAPI_POLL_IO_TIME_OUT * 1000); + + CFLASH_BLOCK_LOCK(chunk->lock); +#else + + + if ((read_bytes == -1) && (errno == EAGAIN)) { + +#ifdef _ERROR_INTR_MODE + /* + * When we poll with no time-out, we sometimes + * see read fail with EAGAIN. Do not consider + * this an error, but instead check the RRQ for + * completions. 
+ */ + + rc = CBLK_PROCESS_ADAP_CONVERT_INTRPT(chunk,cmd, + CFLSH_BLK_INTRPT_CMD_CMPLT, + cmd_complete, transfer_size); + return rc; + +#else + /* + * Increment statistics + */ + + chunk->stats.num_capi_false_reads++; +#endif /* !_ERROR_INTR_MODE */ + } + +#endif /* _SKIP_POLL_CALL */ + + if ((read_bytes == -1) && (errno == EIO)) { + + /* + * This most likely indicates the adapter + * is being reset. + */ + + + chunk->stats.num_capi_adap_resets++; + + cblk_notify_mc_err(chunk,path_index,0x402,0,CFLSH_BLK_NOTIFY_AFU_RESET,NULL); + } + + return (-1); + } + + + + if (read_bytes > CAPI_FLASH_BLOCK_SIZE) { + + /* + * If the number of read bytes exceeded the + * size of the buffer we supplied then truncate + * read_bytes to our buffer size. + */ + + + if (*cmd) { + CBLK_TRACE_LOG_FILE(1,"read event returned too large buffer size = %d errno = %d, cmd = 0x%llx, cmd_index = %d, lba = 0x%llx", + read_bytes, errno,(uint64_t)cmd, (*cmd)->index, (*cmd)->cmdi->lba); + } else { + CBLK_TRACE_LOG_FILE(1,"read event returned too large buffer size = %d errno = %d, cmd = 0x%llx", + read_bytes, errno,(uint64_t)*cmd); + + } + read_bytes = CAPI_FLASH_BLOCK_SIZE; + } + + while (read_bytes > process_bytes) { + + /* + * The returned read data will have + * cxl event types. Unfortunately they + * are not using the common struct cxl_event + * structure for all in terms of size. Thus + * we need to read the header (common + * for all) and from the header's size + * field determine the size of the read + * entry. 
+ */ + + + + CBLK_TRACE_LOG_FILE(7,"cxl_event type = %d, size = %d", + cxl_event->header.type,cxl_event->header.size); + + + if (cxl_event->header.size == 0) { + + + CBLK_TRACE_LOG_FILE(1,"cxl_event type = %d, invalid size = %d", + cxl_event->header.type,cxl_event->header.size); + + errno = 5; + + return (-1); + } + + + process_bytes += cxl_event->header.size; + + + if (cxl_event->header.type == CXL_EVENT_AFU_INTERRUPT) { + + + chunk->stats.num_capi_afu_intrpts++; + + rc = CBLK_PROCESS_ADAP_INTRPT(chunk,cmd,(int)cxl_event->irq.irq,cmd_complete,transfer_size); + } else { + + + rc = cblk_process_nonafu_intrpt_cxl_events(chunk,path_index,cxl_event); + } + + cxl_event = (struct cxl_event *)(((char*)cxl_event) + cxl_event->header.size); + + } + + /* + * TODO: ?? Currently we are just returning the last rc seen, + * Is this the corect choice. + */ + + + return rc; + +} + + +/* + * NAME: cblk_check_os_adap_err + * + * FUNCTION: Inform adapter driver that it needs to check if this + * is a fatal error that requires a reset. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. + * + * RETURNS: + * None + * + * + */ +void cblk_check_os_adap_err(cflsh_chunk_t *chunk,int path_index) +{ + int rc = 0; + + + + chunk->stats.num_capi_adap_chck_err++; + /* + * TODO:?? This ioctl has been removed for now. + */ + //rc = ioctl(chunk->fd,CXL_IOCTL_CHECK_ERROR,NULL); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"IOCL_CHECK_ERROR failed with rc = %d, errno = %d\n", + rc,errno); + + + } + + cblk_notify_mc_err(chunk,path_index,0x403,0,CFLSH_BLK_NOTIFY_AFU_FREEZE,NULL); + + return; +} + +/* + * NAME: cblk_notify_mc_err + * + * FUNCTION: Inform Master Context (MC) of this error. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ * + * RETURNS: + * None + * + * + */ +void cblk_notify_mc_err(cflsh_chunk_t *chunk, int path_index,int error_num, uint64_t out_rc, + cflash_block_notify_reason_t reason, + cflsh_cmd_mgm_t *cmd) +{ +#ifdef _MASTER_CONTXT + int rc = 0; + mc_notify_t notify; + + if (chunk == NULL) { + + return; + } + + if (chunk->path[path_index] == NULL) { + /* + * There is nothing to do here, exit + */ + + return; + + } + + bzero(¬ify,sizeof(notify)); + + + switch (reason) { + + case CFLSH_BLK_NOTIFY_TIMEOUT: + notify.event = MC_NOTIFY_CMD_TIMEOUT; + notify.cmd_timeout.res_hndl = chunk->path[path_index]->sisl.resrc_handle; + break; + case CFLSH_BLK_NOTIFY_AFU_FREEZE: + notify.event = MC_NOTIFY_AFU_EEH; + break; + case CFLSH_BLK_NOTIFY_AFU_ERROR: + notify.event = MC_NOTIFY_AFU_ERR; + break; + case CFLSH_BLK_NOTIFY_AFU_RESET: + notify.event = MC_NOTIFY_AFU_RST; + break; + case CFLSH_BLK_NOTIFY_SCSI_CC_ERR: + notify.event = MC_NOTIFY_SCSI_SENSE; + notify.scsi_sense.res_hndl = chunk->path[path_index]->sisl.resrc_handle; + if (cmd) { + + CBLK_COPY_ADAP_CMD_RESP(chunk,cmd,notify.scsi_sense.data, + MIN(sizeof(notify.scsi_sense.data),SISL_SENSE_DATA_LEN)); + } + break; + default: + CBLK_TRACE_LOG_FILE(1,"Unknown reason %d\n", + reason); + return; + } + + rc = mc_notify(chunk->path[path_index]->afu->master.mc_handle,¬ify); + + if (rc) { + + CBLK_TRACE_LOG_FILE(1,"mc_notify failed with rc = %d, errno = %d\n", + rc,errno); + + } + +#endif /* _MASTER_CONTXT */ + + return; +} + + + +/* + * NAME: cblk_verify_mc_lun + * + * FUNCTION: Request MC to verify lun. + * + * + * + * INPUTS: + * chunk - Chunk the cmd is associated. 
+ * + * RETURNS: + * None + * + * + */ +int cblk_verify_mc_lun(cflsh_chunk_t *chunk, cflash_block_notify_reason_t reason, + cflsh_cmd_mgm_t *cmd, + struct request_sense_data *sense_data) +{ + + /* + * For user space MC, this is just a notification + */ + + + cblk_notify_mc_err(chunk,chunk->cur_path,0x404,0,CFLSH_BLK_NOTIFY_SCSI_CC_ERR,cmd); + return 0; +} diff --git a/src/block/cflash_block_protos.h b/src/block/cflash_block_protos.h new file mode 100644 index 00000000..7d114a7f --- /dev/null +++ b/src/block/cflash_block_protos.h @@ -0,0 +1,115 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_protos.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _H_CFLASH_BLOCK_PROTO +#define _H_CFLASH_BLOCK_PROTO +/************************************************************************/ +/* Function prototypes */ +/************************************************************************/ +#ifdef _COMMON_INTRPT_THREAD +int cblk_start_common_intrpt_thread(cflsh_chunk_t *chunk); +void *cblk_intrpt_thread(void *data); +#endif +void *cblk_async_recv_thread(void *data); +#ifdef _REMOVE +void cblk_wait_for_read_alarm(int signum, siginfo_t *siginfo, void *uctx); +#endif /* _REMOVE */ +void cblk_chunk_sigsev_handler (int signum, siginfo_t *siginfo, void *uctx); +void cblk_prepare_fork (void); +void cblk_parent_post_fork (void); +void cblk_child_post_fork (void); +void cblk_chunk_init_cache (cflsh_chunk_t *chunk, size_t chunk_size); +void cblk_chunk_free_cache (cflsh_chunk_t *chunk); +void cblk_chunk_flush_cache (cflsh_chunk_t *chunk); +void cblk_init_mc_interface(void); +void cblk_cleanup_mc_interface(void); +char *cblk_find_parent_dev(char *device_name); +int cblk_chunk_attach_process_map (cflsh_chunk_t *chunk, int mode, int *cleanup_depth); +int cblk_chunk_get_mc_device_resources(cflsh_chunk_t *chunk, int *cleanup_depth); +void cblk_chunk_free_mc_device_resources(cflsh_chunk_t *chunk); +void cblk_chunk_unmap (cflsh_chunk_t *chunk,int force); +void cblk_chunk_detach (cflsh_chunk_t *chunk,int force); +void cblk_setup_trace_files(int new_process); +int cblk_valid_endianess(void); + +cflsh_path_t *cblk_get_path(cflsh_chunk_t *chunk, dev64_t adap_devno,cflsh_block_chunk_type_t type,int num_cmds, + cflsh_afu_in_use_t *in_use, int share); +int cblk_update_path_type(cflsh_chunk_t *chunk, cflsh_path_t *path, cflsh_block_chunk_type_t type); +void cblk_release_path(cflsh_chunk_t *chunk, cflsh_path_t *path); +cflsh_block_chunk_type_t cblk_get_chunk_type(const char *path, int arch_type); +int cblk_set_fcn_ptrs(cflsh_path_t *path); +chunk_id_t cblk_get_chunk(int flags,int max_num_cmds); +int 
cblk_get_buf_cmd(cflsh_chunk_t *chunk,void **buf, size_t buf_len, + cflsh_cmd_mgm_t **cmd); +int cblk_get_lun_id(cflsh_chunk_t *chunk); +int cblk_get_lun_capacity(cflsh_chunk_t *chunk); +void cblk_open_cleanup_wait_thread(cflsh_chunk_t *chunk); +void cblk_chunk_open_cleanup(cflsh_chunk_t *chunk, int cleanup_depth); +int cblk_listio_arg_verify(chunk_id_t chunk_id, + cblk_io_t *issue_io_list[],int issue_items, + cblk_io_t *pending_io_list[], int pending_items, + cblk_io_t *wait_io_list[],int wait_items, + cblk_io_t *completion_io_list[],int *completion_items, + uint64_t timeout,int flags); +int cblk_chk_cmd_bad_page(cflsh_chunk_t *chunk, uint64_t bad_page_addr); +void cblk_fail_all_cmds(cflsh_chunk_t *chunk); +void cblk_halt_all_cmds(cflsh_chunk_t *chunk, int path_index, int all_paths); +void cblk_resume_all_halted_cmds(cflsh_chunk_t *chunk, int increment_retries, + int path_index, int all_paths); +void cblk_reset_context_shared_afu(cflsh_afu_t *afu); +int cblk_retry_new_path(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd, int delay_needed_same_afu); +void cblk_trace_log_data_ext(trace_log_ext_arg_t *ext_arg, FILE *logfp,char *filename, char *function, + uint line_num,char *msg, ...); + +void cblk_display_stats(cflsh_chunk_t *chunk, int verbosity); + +int cblk_setup_dump_file(void); + +void cblk_dump_debug_data(const char *reason, const char *reason_filename,const char *reason_function, + int, const char *reason_date); +int cblk_setup_sigusr1_dump(void); +int cblk_setup_sigsev_dump(void); + +#ifdef BLOCK_FILEMODE_ENABLED +void cblk_filemode_io(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd); +#endif /* BLOCK_FILEMODE_ENABLED */ +cflash_cmd_err_t cblk_process_sense_data(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd,struct request_sense_data *sense_data); +cflsh_block_chunk_type_t cblk_get_os_chunk_type(const char *path, int arch_type); +int cblk_read_os_specific_intrpt_event(cflsh_chunk_t *chunk, int path_index,cflsh_cmd_mgm_t **cmd,int *cmd_complete, + size_t *transfer_size, 
struct pollfd poll_list[]); +int cblk_chunk_set_mc_size(cflsh_chunk_t *chunk, size_t nblocks); +int cblk_mc_clone(cflsh_chunk_t *chunk, int mode,int flags); +void cblk_check_os_adap_err(cflsh_chunk_t *chunk, int path_index); +void cblk_notify_mc_err(cflsh_chunk_t *chunk, int path_index,int error_num, + uint64_t out_rc,cflash_block_notify_reason_t reason, + cflsh_cmd_mgm_t *cmd); +int cblk_verify_mc_lun(cflsh_chunk_t *chunk, cflash_block_notify_reason_t reason, + cflsh_cmd_mgm_t *cmd, + struct request_sense_data *sense_data); + +/* cflash_block_sisl.c protos */ +int cblk_init_sisl_fcn_ptrs(cflsh_path_t *path); + +#endif /* _H_CFLASH_BLOCK_PROTO */ diff --git a/src/block/cflash_block_sisl.c b/src/block/cflash_block_sisl.c new file mode 100644 index 00000000..6ffd5017 --- /dev/null +++ b/src/block/cflash_block_sisl.c @@ -0,0 +1,2093 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/cflash_block_sisl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#define CFLSH_BLK_FILENUM 0x0600 +#include "cflash_block_internal.h" +#include "cflash_block_inline.h" + +#define CFLSH_BLK_SISL_NUM_INTERRUPTS 4 + +/* + * NAME: CBLK_IN_MMIO_REG + * + * FUNCTION: This routine reads from the AFU registers. 32-bits + * applications are limited to 4 byte reads of MMIO + * space at one time. Where 64-bit apps read 8 bytes + * of MMIO space at one time. This routine will + * attempts to hide the difference between 32-bit and + * 64-bit apps from its callers (thus avoiding + * ifdefs throughout the code). + * + * NOTE: The get_msw parameter is only used for 32-bit applications. + * For 64-bit applications we always get 8 bytes of data. + * + * + * RETURNS: None + * + * + */ + +static inline __u64 CBLK_IN_MMIO_REG(volatile __u64 *addr, int get_msw) +{ + + __u64 rc; + +#if !defined(__64BIT__) && defined(_AIX) + __u32 rc32; + + rc = in_mmio32 (addr); + + // Get upper word. + + + if (get_msw) { + rc32 = in_mmio32 (addr+4); + + rc |= (uint64_t)rc32 << 32; + } +#else + rc = in_mmio64 (addr); +#endif + return rc; +} + +/* + * NAME: CBLK_OUT_MMIO_REG + * + * FUNCTION: This routine writes to the AFU registers.32-bits + * applications are limited to 4 byte reads of MMIO + * space at one time. Where 64-bit apps read 8 bytes + * of MMIO space at one time. This routine will + * attempts to hide the difference between 32-bit and + * 64-bit apps from its callers (thus avoiding + * ifdefs throughout the code). This routine assumes + * that there is never an upper word that needs to be + * written by a 32-bit app. + * + * + * + * RETURNS: None + * + * + */ + +static inline void CBLK_OUT_MMIO_REG(volatile __u64 *addr, __u64 val) +{ +#ifndef BLOCK_FILEMODE_ENABLED +#if !defined(__64BIT__) && defined(_AIX) + + /* + * NOTE: We are assuming for 32-bit apps that we + * never need to write more than 4 bytes. + * In the case of the endian control register, + * masking off the lsw is sufficient. 
+ */ + + out_mmio32 (addr, (__u32) (val & 0xffffffff)); +#else + out_mmio64 (addr, val); +#endif +#endif /* !BLOCK_FILEMODE_ENABLED */ +} + +/* + * NAME: cblk_get_sisl_num_interrupts + * + * FUNCTION: This routine returns the number of interrupts for this + * AFU architecture. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The number of interrupts supported per context. + * + * + */ + +int cblk_get_sisl_num_interrupts(cflsh_chunk_t *chunk, int path_index) +{ + + + return CFLSH_BLK_SISL_NUM_INTERRUPTS; +} + + +/* + * NAME: cblk_get_sisl_cmd_room + * + * FUNCTION: This routine is called whenever one needs to issue + * an IOARCB to see if there is room for another command + * to be accepted by the AFU from this context. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The number of commands that can currently be issued to the AFU + * for this context. + * + * + */ + +uint64_t cblk_get_sisl_cmd_room(cflsh_chunk_t *chunk, int path_index) +{ + uint64_t cmd_room = 0; + + +#ifdef BLOCK_FILEMODE_ENABLED + + chunk->path[path_index]->afu->cmd_room = 1; + + cmd_room = chunk->path[path_index]->afu->cmd_room; +#else + + + if (chunk->path[path_index]->afu->cmd_room) { + + cmd_room = chunk->path[path_index]->afu->cmd_room; + chunk->path[path_index]->afu->cmd_room--; + } else { + /* + * Read the command room from the adaptere + */ + + chunk->path[path_index]->afu->cmd_room = CBLK_IN_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_CMD_ROOM_OFFSET,FALSE); + + CBLK_TRACE_LOG_FILE(9,"command room mmio is 0x%llx",chunk->path[path_index]->afu->cmd_room); + + cmd_room = chunk->path[path_index]->afu->cmd_room; + + if (chunk->path[path_index]->afu->cmd_room) { + + chunk->path[path_index]->afu->cmd_room--; + } + } + +#endif + if (cmd_room == 0) { + CBLK_TRACE_LOG_FILE(6,"No command room"); + chunk->stats.num_no_cmd_room++; + } + + return cmd_room; +} + +/* + * NAME: cblk_get_sisl_intrpt_status + * + * 
FUNCTION: This routine is called whenever one needs get + * the interrupt status register. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: Contents of interrupt status registers + * + * + */ + +uint64_t cblk_get_sisl_intrpt_status(cflsh_chunk_t *chunk, int path_index) +{ + uint64_t intrpt_status = 0; + + /* + * TODO: ?? Can we consolidate this routine with cblk_process_sisl_error_intrpt + * used below for processing adapter interrupts. + */ + +#ifndef BLOCK_FILEMODE_ENABLED + + + /* + * Read the command room from the adaptere + */ + + intrpt_status = CBLK_IN_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_INTR_STATUS_OFFSET,TRUE); + + CBLK_TRACE_LOG_FILE(9,"interrupt_status is 0x%llx",intrpt_status); + + +#endif + + return intrpt_status; +} + +/* + * NAME: cblk_process_sisl_error_intrpt + * + * FUNCTION: This routine processes SISlite adapter + * error/miscellaneous interrupts + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command. + * + * RETURNS: + * + * + */ +void cblk_process_sisl_error_intrpt(cflsh_chunk_t *chunk,int path_index, + cflsh_cmd_mgm_t **cmd) +{ + uint64_t reg; + uint64_t reg_unmasked; + + + reg = CBLK_IN_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_INTR_STATUS_OFFSET,TRUE); + + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + chunk->stats.num_capi_afu_intrpts++; + + CBLK_TRACE_LOG_FILE(1,"Unexpected interrupt = 0x%llx, reg_mask = 0x%llx, chunk->index = %d", + reg,reg_unmasked,chunk->index); + + if (reg_unmasked) { + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_INTR_CLEAR_OFFSET, reg_unmasked); + + } + + return; +} + + +/* + * NAME: cblk_inc_sisl_rrq + * + * FUNCTION: This routine is called whenever an RRQ has been processed. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ + +void cblk_inc_sisl_rrq(cflsh_chunk_t *chunk, int path_index) +{ + + + chunk->path[path_index]->afu->p_hrrq_curr++; + + + + if (chunk->path[path_index]->afu->p_hrrq_curr > chunk->path[path_index]->afu->p_hrrq_end) + { + + chunk->path[path_index]->afu->p_hrrq_curr = chunk->path[path_index]->afu->p_hrrq_start; + + chunk->path[path_index]->afu->toggle ^= SISL_RESP_HANDLE_T_BIT; + + } + + + + return; +} + +/* + * NAME: cblk_sisl_adap_setup + * + * FUNCTION: This routine is called to set up the adapter to + * recognize our command pool. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: The number of commands that can currently be issued to the AFU + * for this context. + * + * + */ + +int cblk_sisl_adap_setup(cflsh_chunk_t *chunk, int path_index) +{ + int rc = 0; +#ifdef DEBUG + uint64_t intrpt_status = 0; +#endif + + if (CBLK_SETUP_BAD_MMIO_SIGNAL(chunk,path_index,MAX(CAPI_RRQ0_START_EA_OFFSET,CAPI_CTX_CTRL_OFFSET))) { + + /* + * If we get here then the MMIO below + * failed indicating the adapter either + * is being reset or encountered a UE. + */ + + return -1; + } + + if (chunk->path[path_index]->afu->mmio == NULL) { + + CBLK_TRACE_LOG_FILE(1,"mmio is not valid for chunk index = %d, and path_index = %d", + chunk->index,path_index); + + return -1; + } + +#ifdef DEBUG + intrpt_status = cblk_get_sisl_intrpt_status(chunk,path_index); + + CBLK_TRACE_LOG_FILE(5,"Interrupt status before set up of adapter = 0x%llx", + intrpt_status); +#endif /* DEBUG */ + +#ifdef _AIX + + /* + * AIX uses Big Endian format. So set the + * endian control register now. 
+ */ + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_ENDIAN_CTRL_OFFSET, (uint64_t)0x8000000000000080LL); + +#endif /* AIX */ + + + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_RRQ0_START_EA_OFFSET, (uint64_t)chunk->path[path_index]->afu->p_hrrq_start); + + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_RRQ0_END_EA_OFFSET, (uint64_t)chunk->path[path_index]->afu->p_hrrq_end); + + + /* + * Set up interrupts for when the interrupt status register + * is updated to use the SISL_MSI_SYNC_ERROR IRQ. + */ + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_CTX_CTRL_OFFSET,(uint64_t)SISL_MSI_SYNC_ERROR); + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_INTR_MASK_OFFSET,(uint64_t)SISL_ISTATUS_MASK); + + + +#ifdef DEBUG + intrpt_status = cblk_get_sisl_intrpt_status(chunk,path_index); + + CBLK_TRACE_LOG_FILE(5,"Interrupt status after set up of adapter = 0x%llx", + intrpt_status); +#endif /* DEBUG */ + + + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); + + + return rc; +} + + +/* + * NAME: cblk_get_sisl_cmd_data_length + * + * FUNCTION: Returns the data length associated with a command + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ +uint32_t cblk_get_sisl_cmd_data_length(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd) +{ + sisl_ioarcb_t *ioarcb; + + + ioarcb = &(cmd->sisl_cmd.rcb); + + return ioarcb->data_len; +} + +/* + * NAME: cblk_get_sisl_cmd_cdb + * + * FUNCTION: Returns the offset of the CDB in the command. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: + * + * + */ +scsi_cdb_t * cblk_get_sisl_cmd_cdb(cflsh_chunk_t *chunk, cflsh_cmd_mgm_t *cmd) +{ + sisl_ioarcb_t *ioarcb; + + + ioarcb = &(cmd->sisl_cmd.rcb); + + return (scsi_cdb_t *)ioarcb->cdb; +} + +/* + * NAME: cblk_get_sisl_cmd_rsp + * + * FUNCTION: Returns the offset of the command this response is for. 
+ * + * + * NOTE; This routine assumes the caller is holding path->lock. + * + * RETURNS: + * + * + */ +cflsh_cmd_mgm_t *cblk_get_sisl_cmd_rsp(cflsh_chunk_t *chunk,int path_index) +{ +#ifdef _AIX + uint32_t tmp_val; +#endif /* AIX */ + cflsh_cmd_mgm_t *cmd = NULL; + +#if !defined(__64BIT__) && defined(_AIX) + + if ((*(chunk->path[path_index]->afu->p_hrrq_curr)) & 0xffffffff00000000LL) { + + /* + * This is not valid for 32-bit application. + */ + + CBLK_TRACE_LOG_FILE(1,"Invalid cmd pointer received by AFU = 0x%llx p_hrrq_curr = 0x%llx, chunk->index = %d", + (uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + cblk_notify_mc_err(chunk,path_index,0x604,(*(chunk->path[path_index]->afu->p_hrrq_curr)), + CFLSH_BLK_NOTIFY_AFU_ERROR,NULL); + + CBLK_LIVE_DUMP_THRESHOLD(1,"0x604"); + + /* + * clear upper word only. + */ + + *(chunk->path[path_index]->afu->p_hrrq_curr) &= *(chunk->path[path_index]->afu->p_hrrq_curr) & 0x00000000ffffffffLL; + + + } + + tmp_val = ((uint32_t)(*(chunk->path[path_index]->afu->p_hrrq_curr)) & 0xffffffff) & (~SISL_RESP_HANDLE_T_BIT); + cmd = (cflsh_cmd_mgm_t *)tmp_val; +#else + cmd = (cflsh_cmd_mgm_t *)((*(chunk->path[path_index]->afu->p_hrrq_curr)) & (~SISL_RESP_HANDLE_T_BIT)); +#endif + + + return cmd; +} + + + +/* + * NAME: cblk_build_sisl_cmd + * + * FUNCTION: Builds a SIS Lite adapter specific command/request. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. 
+ * + * RETURNS: None + * + * + */ +int cblk_build_sisl_cmd(cflsh_chunk_t *chunk,int path_index, + cflsh_cmd_mgm_t *cmd, + void *buf, size_t buf_len, + int flags) +{ + + int rc = 0; + sisl_ioarcb_t *ioarcb; + + + + + + ioarcb = &(cmd->sisl_cmd.rcb); + + + // TODO: Add mask and maybe macro to get context id + + ioarcb->ctx_id = chunk->path[path_index]->afu->contxt_handle & 0xffff; + +#ifdef _MASTER_CONTXT + + ioarcb->res_hndl = chunk->path[path_index]->sisl.resrc_handle; +#else + ioarcb->lun_id = chunk->path[path_index]->lun_id; + + /* + * Use port selection mask chosen and library + * initialization time. + */ + ioarcb->port_sel = cflsh_blk.port_select_mask; +#endif /* _MASTER_CONTXT */ + +#ifndef _SKIP_READ_CALL +#ifdef _ERROR_INTR_MODE + + /* + * In error interrupt mode, we only get interrupts + * for general errors: AFU, EEH etc. We do not get + * interrupts for command completions. + */ + ioarcb->msi = 0; +#else + ioarcb->msi = SISL_MSI_RRQ_UPDATED; +#endif /* !_ERROR_INTR_MODE */ + +#else + + /* + * Do not send interrupts to host on completion. + */ + + ioarcb->msi = 0; +#endif + + if (flags & CFLASH_READ_DIR_OP) { + + +#ifdef _MASTER_CONTXT + + + ioarcb->req_flags = SISL_REQ_FLAGS_RES_HNDL | SISL_REQ_FLAGS_HOST_READ; + + +#else + + /* + * TODO: This needs to change to resource handle + * lun id when we have this information. + * For now just use PORT and LUN ID. 
Since + * these are both zero, + * + + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + + */ + +#endif /* _MASTER_CONTXT */ + + } else if (flags & CFLASH_WRITE_DIR_OP) { + +#ifdef _MASTER_CONTXT + + ioarcb->req_flags = SISL_REQ_FLAGS_RES_HNDL | SISL_REQ_FLAGS_HOST_WRITE; + +#else + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_WRITE; +#endif /* _MASTER_CONTXT */ + + } + + switch (cflsh_blk.timeout_units) { + case CFLSH_G_TO_MSEC: + ioarcb->req_flags |= SISL_REQ_FLAGS_TIMEOUT_MSECS; + break; + case CFLSH_G_TO_USEC: + ioarcb->req_flags |= SISL_REQ_FLAGS_TIMEOUT_USECS; + break; + default: + ioarcb->req_flags |= SISL_REQ_FLAGS_TIMEOUT_SECS; + } + + ioarcb->timeout = cflsh_blk.timeout; + + + + + ioarcb->data_ea = (ulong)buf; + + ioarcb->data_len = buf_len; + + return rc; +} + + +/* + * NAME: cblk_update_path_sisl_cmd + * + * FUNCTION: Updates an already built IOARCB for a new path. + * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ +int cblk_update_path_sisl_cmd(cflsh_chunk_t *chunk,int path_index, + cflsh_cmd_mgm_t *cmd, int flags) +{ + + int rc = 0; + sisl_ioarcb_t *ioarcb; + + + ioarcb = &(cmd->sisl_cmd.rcb); + + + //TODO:?? Future: bzero(ioarcb,sizeof(*ioarcb)); + + // TODO: Add mask and maybe macro to get context id + + ioarcb->ctx_id = chunk->path[path_index]->afu->contxt_handle & 0xffff; + +#ifdef _MASTER_CONTXT + + ioarcb->res_hndl = chunk->path[path_index]->sisl.resrc_handle; +#else + ioarcb->lun_id = chunk->path[path_index]->lun_id; + + /* + * Use port selection mask chosen and library + * initialization time. + */ + ioarcb->port_sel = cflsh_blk.port_select_mask; +#endif /* _MASTER_CONTXT */ + + + // TODO:?? This code does not handle fail over from non-SISLITE AFUs + + return rc; +} +/* + * NAME: cblk_issue_sisl_cmd + * + * FUNCTION: Issues a command to the adapter specific command/request + * to the adapter. The first implementation will issue IOARCBs. 
+ * + * + * NOTE; This routine assumes the caller is holding chunk->lock. + * + * RETURNS: None + * + * + */ + +int cblk_issue_sisl_cmd(cflsh_chunk_t *chunk, int path_index,cflsh_cmd_mgm_t *cmd) +{ + int rc = 0; + int wait_room_retry = 0; + int wait_rrq_retry = 0; + sisl_ioarcb_t *ioarcb; + int pthread_rc; + + + ioarcb = &(cmd->sisl_cmd.rcb); +#ifdef _REMOVE + if (cblk_log_verbosity >= 9) { + fprintf(stderr,"Hex dump of ioarcb\n"); + hexdump(ioarcb,sizeof(*ioarcb),NULL); + } + +#endif /* _REMOVE */ + + +#ifdef _FOR_DEBUG + if (CBLK_SETUP_BAD_MMIO_SIGNAL(chunk,path_index,CAPI_IOARRIN_OFFSET+0x20)) { + + /* + * We must have failed the MMIO done below and long + * jump here. + */ + + return -1; + } + +#endif /* _FOR_DEBUG */ + +#ifdef _USE_LIB_AFU + afu_mmio_write_dw(p_afu, 8, (uint64_t)ioarcb); +#else + + while ((CBLK_GET_CMD_ROOM(chunk,path_index) == 0) && + (wait_room_retry < CFLASH_BLOCK_MAX_WAIT_ROOM_RETRIES)) { + + /* + * Wait a limited amount of time for the room on + * the AFU. Since we are waiting for the AFU + * to fetch some more commands, it is thought + * we can wait a little while here. It should also + * be noted we are not unlocking anything in this wait. + * Since the AFU is not waiting for us to process a command, + * this (not unlocking) may be alright. However it does mean + * other threads are being held off. If they are also trying + * to issue requests, then they would see this same issue. If + * these other threads are trying to process completions, then + * those will be delayed (perhaps unnecessarily). + */ + + CBLK_TRACE_LOG_FILE(5,"waiting for command room"); + usleep(CFLASH_BLOCK_DELAY_ROOM); + + if (chunk->flags & CFLSH_CHUNK_FAIL_IO) { + + errno = EIO; + + return -1; + } + + wait_room_retry++; + } + + + if (wait_room_retry >= CFLASH_BLOCK_MAX_WAIT_ROOM_RETRIES) { + + + + /* + * We do not have any room to send this + * command. Fail this operation now. 
+ */ + +#ifdef _FOR_DEBUG + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); +#endif /* _FOR_DEBUG */ + errno = EBUSY; + + cblk_notify_mc_err(chunk,path_index,0x607,wait_room_retry,CFLSH_BLK_NOTIFY_ADAP_ERR,NULL); + return -1; + } + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * If path is in a halted state then wait for it to + * resume. Since we are waiting for the AFU resume + * event, that afu-lock will be released, but our chunk + * lock will not be released. So do that now. + * + * Since we are going to wait for a + * signal from other threads for + * this AFU recovery to complete, we need + * release our conditional access of the AFU + * lock and get an explicit access of the + * afu lock. + */ + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + CFLASH_BLOCK_LOCK(chunk->path[path_index]->afu->lock); + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + + /* + * We are still halted after explicitly + * acquiring the AFU lock. + */ + + pthread_rc = pthread_cond_wait(&(chunk->path[path_index]->afu->resume_event), + &(chunk->path[path_index]->afu->lock.plock)); + + if (pthread_rc) { + + + CBLK_TRACE_LOG_FILE(5,"pthread_cond_wait failed for resume_event rc = %d errno = %d", + pthread_rc,errno); + + errno = EIO; + + CFLASH_BLOCK_UNLOCK(chunk->path[path_index]->afu->lock); + CFLASH_BLOCK_LOCK(chunk->lock); + return -1; + } + + } + + /* + * Chunk lock must be acquired first to prevent deadlock. + * Also get our conditional AFU lock back to match the state + * above. + */ + + CFLASH_BLOCK_UNLOCK(chunk->path[path_index]->afu->lock); + CFLASH_BLOCK_LOCK(chunk->lock); + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + + if (chunk->path[path_index]->afu->flags & CFLSH_AFU_HALTED) { + + /* + * Give up if the AFU is still halted. 
+ */ + + + CBLK_TRACE_LOG_FILE(5,"afu halted again afu->flag = 0x%x", + chunk->path[path_index]->afu->flags); + + errno = EIO; + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + return -1; + + } + + } + + while ((chunk->path[path_index]->afu->num_issued_cmds >= chunk->path[path_index]->afu->num_rrqs) && + (wait_rrq_retry < CFLASH_BLOCK_MAX_WAIT_RRQ_RETRIES)) { + + /* + * Do not issue more commands to this AFU then there are RRQ for command completions + */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + + /* + * Since we are waiting for commands to be processed, + * we need to unlock the chunk lock to allow this on + * other threads. + */ + + + CFLASH_BLOCK_UNLOCK(chunk->lock); + + usleep(CFLASH_BLOCK_DELAY_RRQ); + + if (chunk->flags & CFLSH_CHUNK_FAIL_IO) { + + errno = EIO; + + return -1; + } + + CFLASH_BLOCK_LOCK(chunk->lock); + + CFLASH_BLOCK_AFU_SHARE_LOCK(chunk->path[path_index]->afu); + + wait_rrq_retry++; + + } + + /* + * TODO:?? We are not rechecking command room here. Could + * we inadvertently attempt to issue a command with no + * command room? + */ + + + if (wait_rrq_retry >= CFLASH_BLOCK_MAX_WAIT_RRQ_RETRIES) { + + + + /* + * We do not have any room to send this + * command. Fail this operation now. + */ + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); + +#ifdef _FOR_DEBUG + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); +#endif /* _FOR_DEBUG */ + errno = EBUSY; + + return -1; + } + + chunk->path[path_index]->afu->num_issued_cmds++; + + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_IOARRIN_OFFSET, (uint64_t)ioarcb); + +#endif /* !_USE_LIB_AFU */ + + + CFLASH_BLOCK_AFU_SHARE_UNLOCK(chunk->path[path_index]->afu); +#ifdef _FOR_DEBUG + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); +#endif /* _FOR_DEBUG */ + + + return rc; + +} + +/* + * NAME: cblk_init_sisl_cmd + * + * FUNCTION: This routine initializes the command + * area for a command retry. 
+ * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command that completed + * + * RETURNS: + * 0 - Good completoin + * Otherwise error. + * + */ +void cblk_init_sisl_cmd(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd) +{ + sisl_ioarcb_t *ioarcb; + + ioarcb = &(cmd->sisl_cmd.rcb); + + + bzero(ioarcb,sizeof(*ioarcb)); + + return; +} + +/* + * NAME: cblk_init_sisl_cmd_rsp + * + * FUNCTION: This routine initializes the command + * response area for a command retry. + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command that completed + * + * RETURNS: + * 0 - Good completoin + * Otherwise error. + * + */ +void cblk_init_sisl_cmd_rsp(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd) +{ + sisl_ioasa_t *ioasa; + + ioasa = &(cmd->sisl_cmd.sa); + + bzero(ioasa,sizeof(*ioasa)); + + return; +} + +/* + * NAME: cblk_copy_sisl_cmd_rsp + * + * FUNCTION: This routine copies the response area + * for this command to specified buffer, + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command that completed + * + * RETURNS: + * 0 - Good completoin + * Otherwise error. + * + */ +void cblk_copy_sisl_cmd_rsp(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd, void *buffer, int buffer_size) +{ + sisl_ioasa_t *ioasa; + + ioasa = &(cmd->sisl_cmd.sa); + + bcopy(ioasa,buffer,MIN(sizeof(*ioasa),buffer_size)); + + return; +} + +/* + * NAME: cblk_set_sisl_cmd_rsp_status + * + * FUNCTION: This routine sets the + * response area for a command to either success + * or failure based on the flag. + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command that completed + * + * RETURNS: + * 0 - Good completoin + * Otherwise error. 
+ * + */ +void cblk_set_sisl_cmd_rsp_status(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd, int success) +{ + sisl_ioasa_t *ioasa; + + ioasa = &(cmd->sisl_cmd.sa); + + if (success) { + /* + * caller wants to emulate good completion + */ + + ioasa->ioasc = SISL_IOASC_GOOD_COMPLETION; + } else { + + /* + * caller wants to emulate command failure + */ + ioasa->ioasc = 0xFF; + } + + return; +} + + +/* + * NAME: cblk_complete_status_sisl_cmd + * + * FUNCTION: This routine indicates if there is an error + * on the command that completed. + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command that completed + * + * RETURNS: + * 0 - Good completoin + * Otherwise error. + * + */ +int cblk_complete_status_sisl_cmd(cflsh_chunk_t *chunk,cflsh_cmd_mgm_t *cmd) +{ + int rc = 0; + sisl_ioarcb_t *ioarcb; + sisl_ioasa_t *ioasa = NULL; + + ioarcb = &(cmd->sisl_cmd.rcb); + ioasa = &(cmd->sisl_cmd.sa); + + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmdi is null for cmd->index",cmd->index); + + errno = EINVAL; + return -1; + + + } + + if (ioasa->ioasc != SISL_IOASC_GOOD_COMPLETION) { + + /* + * Command completed with an error + */ + rc = -1; + } else { + + + /* + * For good completion set transfer_size + * to full data transfer. + */ + + if (cmd->cmdi->transfer_size_bytes) { + + /* + * The transfer size is in bytes + */ + cmd->cmdi->transfer_size = ioarcb->data_len; + } else { + + + /* + * The transfer size is in blocks + */ + cmd->cmdi->transfer_size = cmd->cmdi->nblocks; + } + + } + + return rc; +} + + + +/* + * NAME: cblk_process_sisl_cmd_intrpt + * + * FUNCTION: This routine processes SISlite completion + * interrupts + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command. 
+ * + * RETURNS: + * + * + */ +int cblk_process_sisl_cmd_intrpt(cflsh_chunk_t *chunk,int path_index,cflsh_cmd_mgm_t **cmd,int *cmd_complete,size_t *transfer_size) +{ + int rc = 0; + cflsh_cmd_mgm_t *p_cmd = NULL; + + + if (cmd == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd is null"); + + + return -1; + } + + if (cmd_complete == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmd_complete is null"); + + + return -1; + } + + if (transfer_size == NULL) { + + CBLK_TRACE_LOG_FILE(1,"transfer_size is null"); + + + return -1; + } + + + if (CBLK_INVALID_CHUNK_PATH_AFU(chunk,path_index,__FUNCTION__)) { + + CBLK_LIVE_DUMP_THRESHOLD(5,"0x600"); + + return -1; + + } + + if (chunk->path[path_index]->afu->p_hrrq_curr == NULL) { + + CBLK_TRACE_LOG_FILE(1,"p_hrrq_curr is null, chunk = %p",chunk); + + CBLK_LIVE_DUMP_THRESHOLD(5,"0x601"); + return -1; + } + + + + CBLK_TRACE_LOG_FILE(7,"*(chunk->path[path_index]->afu->p_hrrq_curr) = 0x%llx, chunk->path[path_index]->afu->toggle = 0x%llx, p_hrrq_curr = 0x%llx, chunk->index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),(uint64_t)chunk->path[path_index]->afu->toggle,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + + + while (((*(chunk->path[path_index]->afu->p_hrrq_curr)) & (SISL_RESP_HANDLE_T_BIT)) == chunk->path[path_index]->afu->toggle) { + + /* + * Process all RRQs that have been posted via this interrupt + */ + + p_cmd = CBLK_GET_CMD_RSP(chunk,path_index); + + CBLK_TRACE_LOG_FILE(8,"*(chunk->path[path_index].p_hrrq_curr) = 0x%llx, chunk->path[path_index].toggle = 0x%llx, p_hrrq_curr = 0x%llx, chunk->index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),(uint64_t)chunk->path[path_index]->afu->toggle,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + + + + /* + * Increment the RRQ pointer + * and possibly adjust the toggle + * bit. 
+ */ + + CBLK_INC_RRQ(chunk,path_index); + + + if (p_cmd) { + + + if (p_cmd->cmdi == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid p_cmd pointer received by AFU = 0x%llx p_hrrq_curr = 0x%llx, chunk->index = %d", + (uint64_t)p_cmd,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + continue; + } + + if (p_cmd->cmdi->chunk == NULL) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid Null chunk, cmd = 0x%llx p_hrrq_curr = 0x%llx, chunk->index = %d", + (uint64_t)p_cmd,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + continue; + } + + if (CFLSH_EYECATCH_CHUNK(p_cmd->cmdi->chunk)) { + + + + CBLK_TRACE_LOG_FILE(1,"Invalid eyecatcher= 0x%x in pchunnk = %p chunk = %p p_hrrq_curr = 0x%llx, chunk->index = %d", + p_cmd->cmdi->chunk->eyec,(p_cmd->cmdi->chunk),chunk, + (uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + continue; + } + + + + if ((p_cmd < p_cmd->cmdi->chunk->cmd_start) || + (p_cmd > p_cmd->cmdi->chunk->cmd_end)) { + + + + CBLK_TRACE_LOG_FILE(1,"Invalid p_cmd pointer received by AFU = 0x%llx p_hrrq_curr = 0x%llx, chunk->index = %d", + (uint64_t)p_cmd,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + if (*cmd) { + + CBLK_TRACE_LOG_FILE(1,"Invalid p_cmd occurred while waiting for cmd = 0x%llx flags = 0x%x lba = 0x%llx, chunk->index = %d", + (uint64_t)*cmd,(*cmd)->cmdi->flags,(*cmd)->cmdi->lba,chunk->index); + } + + + CBLK_TRACE_LOG_FILE(7,"*(chunk->path[path_index].p_hrrq_curr) = 0x%llx, chunk->path[path_index].toggle = %d, p_hrrq_curr = 0x%llx, chunk->index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),chunk->path[path_index]->afu->toggle,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + + continue; + } + + + + + if (p_cmd->cmdi->chunk == chunk) { + + + + + p_cmd->cmdi->state = CFLSH_MGM_CMP; + + + rc = CBLK_PROCESS_CMD(chunk,path_index,p_cmd); + + if ((*cmd == NULL) && + (!(*cmd_complete))) { + + /* + * The caller is waiting for the next + * command. 
So set cmd to this + * command (p_cmd) that just completed. + */ + *cmd = p_cmd; + + } + + } else { + + /* + * Since this command info is for a different chunk than the one being + * processed (and the one for which were currently have a lock), put this + * command on that AFUs pending complete queue. The chunk associated with + * this command will walk this list and finish completion on it at that + * time. + */ + + + CBLK_TRACE_LOG_FILE(9,"command for other chunk *(chunk->path[path_index].p_hrrq_curr) = 0x%llx, chunk->path[path_index].toggle = %d, p_hrrq_curr = 0x%llx, chunk->path[path_index].index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),chunk->path[path_index]->afu->toggle,(uint64_t)chunk->path[path_index]->afu->p_hrrq_curr,chunk->index); + + + if ((chunk->path[path_index] == NULL) || + (chunk->path[path_index]->afu == NULL)) { + + + CBLK_TRACE_LOG_FILE(1,"Invalid path or afu pointer seen from chunk->index = %d, with path_index = %d", + chunk->index,path_index); + continue; + } + + + CBLK_Q_NODE_TAIL(chunk->path[path_index]->afu->head_complete,chunk->path[path_index]->afu->tail_complete, + (p_cmd->cmdi),complete_prev,complete_next); + + + } + + } + + if ((p_cmd == *cmd) || + ((*cmd) && + ((*cmd)->cmdi->state == CFLSH_MGM_CMP) && + (!(*cmd_complete)))) { + + /* + * Either our command completed on this thread. + * or it completed on another thread. Let's process it. + */ + + + if ((*cmd) && + (rc != CFLASH_CMD_RETRY_ERR) && + (rc != CFLASH_CMD_DLY_RETRY_ERR)) { + + /* + * Since we found our command completed and + * we are not retrying it, lets + * set the flag so we can avoid polling for any + * more interrupts. However we need to process + * all responses posted to the RRQ for this + * interrupt before exiting. 
+ */ +#ifndef _COMMON_INTRPT_THREAD + + CBLK_COMPLETE_CMD(chunk,*cmd,transfer_size); +#else + + if (chunk->flags & CFLSH_CHNK_NO_BG_TD) { + CBLK_COMPLETE_CMD(chunk,*cmd,transfer_size); + } + +#endif + *cmd_complete = TRUE; + + } + + } + + + CBLK_TRACE_LOG_FILE(7,"*(chunk->path[path_index].p_hrrq_curr) = 0x%llx, chunk->path[path_index].toggle = 0x%llx, chunk->index = %d", + *(chunk->path[path_index]->afu->p_hrrq_curr),chunk->path[path_index]->afu->toggle,chunk->index); + } /* Inner while loop on RRQ */ + + + + + return (rc); +} + + + + + + +/* + * NAME: cblk_process_sisl_adap_intrpt + * + * FUNCTION: This routine processes SISlite adapter + * interrupts + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command. + * + * RETURNS: + * + * + */ +int cblk_process_sisl_adap_intrpt(cflsh_chunk_t *chunk, + int path_index, + cflsh_cmd_mgm_t **cmd, + int intrpt_num,int *cmd_complete, + size_t *transfer_size) +{ + int rc = 0; + + switch (intrpt_num) { + case SISL_MSI_RRQ_UPDATED: + /* + * Command completion interrupt + */ + + rc = cblk_process_sisl_cmd_intrpt(chunk,path_index,cmd,cmd_complete, + transfer_size); + break; + case SISL_MSI_SYNC_ERROR: + + /* + * Error interrupt + */ + cblk_process_sisl_error_intrpt(chunk,path_index,cmd); + break; + default: + + rc = -1; + CBLK_TRACE_LOG_FILE(1,"Unknown interupt number = %d",intrpt_num); + + + } + + + return rc; +} + + + +/* + * NAME: cblk_process_sisl_adap_convert_intrpt + * + * FUNCTION: This routine processes SISlite adapter + * interrupts by first converting from the + * generic library interrupt number to the AFU + * specific number. + * + * INPUTS: + * chunk - Chunk associated with this error + * cmd - command. 
+ * + * RETURNS: + * + * + */ +int cblk_process_sisl_adap_convert_intrpt(cflsh_chunk_t *chunk, + int path_index, + cflsh_cmd_mgm_t **cmd, + int intrpt_num,int *cmd_complete, + size_t *transfer_size) +{ + int rc = 0; + + switch (intrpt_num) { + case CFLSH_BLK_INTRPT_CMD_CMPLT: + /* + * Command completion interrupt + */ + + rc = cblk_process_sisl_cmd_intrpt(chunk,path_index,cmd,cmd_complete, + transfer_size); + break; + case CFLSH_BLK_INTRPT_STATUS: + + /* + * Error interrupt + */ + cblk_process_sisl_error_intrpt(chunk,path_index,cmd); + break; + default: + + rc = -1; + CBLK_TRACE_LOG_FILE(1,"Unknown interupt number = %d",intrpt_num); + + + } + + + return rc; +} +/* + * NAME: cblk_process_sisl_cmd_err + * + * FUNCTION: This routine parses the iosa errors + * + * INPUTS: + * chunk - Chunk associated with this error + * ioasa - I/O Adapter status response + * + * RETURNS: + * -1 - Fatal error + * 0 - Ignore error (consider good completion) + * 1 - Retry recommended + * 2 - Retry with delay recommended. 
+ * + */ +cflash_cmd_err_t cblk_process_sisl_cmd_err(cflsh_chunk_t *chunk,int path_index,cflsh_cmd_mgm_t *cmd) +{ + cflash_cmd_err_t rc = CFLASH_CMD_IGNORE_ERR; + cflash_cmd_err_t rc2; + sisl_ioarcb_t *ioarcb; + sisl_ioasa_t *ioasa; + + + + if (cmd == NULL) { + + return CFLASH_CMD_FATAL_ERR; + } + + if (cmd->cmdi == NULL) { + + CBLK_TRACE_LOG_FILE(1,"cmdi is null for cmd->index",cmd->index); + + errno = EINVAL; + return CFLASH_CMD_FATAL_ERR; + + + } + + ioarcb = &(cmd->sisl_cmd.rcb); + ioasa = &(cmd->sisl_cmd.sa); + +#ifdef _REMOVE + if (cblk_log_verbosity >= 9) { + fprintf(stderr,"Hex dump of ioasa\n"); + hexdump(ioasa,sizeof(*ioasa),NULL); + } + +#endif /* _REMOVE */ + + + CBLK_TRACE_LOG_FILE(5,"cmd error ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, port = 0x%x, path_index = %d", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags,ioasa->port,path_index); + + + if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { + + CBLK_TRACE_LOG_FILE(5,"cmd underrun ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, port = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags,ioasa->port); + /* + * We encountered a data underrun. Set + * transfer_size accordingly. 
+ */ + + + if (ioarcb->data_len >= ioasa->resid) { + + if (cmd->cmdi->transfer_size_bytes) { + + /* + * The transfer size is in bytes + */ + cmd->cmdi->transfer_size = ioarcb->data_len - ioasa->resid; + } else { + + + /* + * The transfer size is in blocks + */ + cmd->cmdi->transfer_size = (ioarcb->data_len - ioasa->resid)/CAPI_FLASH_BLOCK_SIZE; + } + } else { + cmd->cmdi->transfer_size = 0; + } + + } + + if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) { + + CBLK_TRACE_LOG_FILE(5,"cmd overrun ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, port = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags,ioasa->port); + + + cmd->cmdi->transfer_size = 0; + } + + + CBLK_TRACE_LOG_FILE(7,"cmd failed ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, scsi_status = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags, ioasa->rc.scsi_rc); + + CBLK_TRACE_LOG_FILE(7,"cmd failed port = 0x%x, afu_extra = 0x%x, scsi_entra = 0x%x, fc_extra = 0x%x", + ioasa->port,ioasa->afu_extra,ioasa->scsi_extra,ioasa->fc_extra); + + + + if (ioasa->rc.scsi_rc) { + + + + /* + * We have a SCSI status + */ + + if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) { + + CBLK_TRACE_LOG_FILE(5,"sense data: error code = 0x%x, sense_key = 0x%x, asc = 0x%x, ascq = 0x%x", + ioasa->sense_data[0],ioasa->sense_data[2],ioasa->sense_data[12],ioasa->sense_data[13]); + + chunk->stats.num_cc_errors++; + + rc2 = cblk_process_sense_data(chunk,cmd,(struct request_sense_data *)ioasa->sense_data); + + + if (rc == CFLASH_CMD_IGNORE_ERR) { + + /* + * If we have not indicated an error, then use the + * return code from the sense data processing. 
+ */ + + rc = rc2; + } + + + } else if (ioasa->rc.scsi_rc) { + + + /* + * We have a SCSI status, but no sense data + */ + + + CBLK_TRACE_LOG_FILE(1,"cmd failed ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, scsi_status = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags, ioasa->rc.scsi_rc); + + cmd->cmdi->transfer_size = 0; + chunk->stats.num_errors++; + + switch (ioasa->rc.scsi_rc) { + case SCSI_CHECK_CONDITION: + + /* + * This mostly likely indicates a misbehaving device, that is + * reporting a check condition, but is returning no sense data + */ + + + rc = CFLASH_CMD_RETRY_ERR; + cmd->cmdi->status = EIO; + + break; + case SCSI_BUSY_STATUS: + case SCSI_QUEUE_FULL: + + /* + * Retry with delay + */ + + cmd->cmdi->status = EBUSY; + rc = CFLASH_CMD_DLY_RETRY_ERR; + + break; + case SCSI_RESERVATION_CONFLICT: + cmd->cmdi->status = EBUSY; + rc = CFLASH_CMD_FATAL_ERR; + + cblk_notify_mc_err(chunk,path_index,0x605,0,CFLSH_BLK_NOTIFY_DISK_ERR,cmd); + break; + + default: + rc = CFLASH_CMD_FATAL_ERR; + cmd->cmdi->status = EIO; + + cblk_notify_mc_err(chunk,path_index,0x606,0,CFLSH_BLK_NOTIFY_DISK_ERR,cmd); + } + + } + + + } + + + + /* + * We encountered an error. For now return + * EIO for all errors. 
+ */ + + + if (ioasa->rc.fc_rc) { + + /* + * We have an FC status + */ + + + + CBLK_TRACE_LOG_FILE(1,"cmd failed ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, fc_extra = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags, ioasa->fc_extra); + + + switch (ioasa->rc.fc_rc) { + + case SISL_FC_RC_LINKDOWN: + chunk->stats.num_fc_errors++; + if (ioasa->port == 0) { + chunk->stats.num_port0_linkdowns++; + } else { + chunk->stats.num_port1_linkdowns++; + } + chunk->stats.num_errors++; + + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x608,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = cblk_retry_new_path(chunk,cmd,FALSE); + cmd->cmdi->status = ENETDOWN; + cmd->cmdi->transfer_size = 0; + break; + case SISL_FC_RC_NOLOGI: + chunk->stats.num_fc_errors++; + if (ioasa->port == 0) { + chunk->stats.num_port0_no_logins++; + } else { + chunk->stats.num_port1_no_logins++; + } + chunk->stats.num_errors++; + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x609,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = cblk_retry_new_path(chunk,cmd,FALSE); + cmd->cmdi->status = ENETDOWN; + cmd->cmdi->transfer_size = 0; + + break; + + case SISL_FC_RC_ABORTPEND: + + chunk->stats.num_errors++; + rc = CFLASH_CMD_RETRY_ERR; + cmd->cmdi->status = ETIMEDOUT; + cmd->cmdi->transfer_size = 0; + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x60a,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + if (ioasa->port == 0) { + chunk->stats.num_port0_fc_errors++; + } else { + chunk->stats.num_port1_fc_errors++; + } + break; + case SISL_FC_RC_RESID: + /* + * This indicates an FCP resid underrun + */ + + if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) { + /* + * If the SISL_RC_FLAGS_OVERRUN flag was set, + * then we will handle this error else where. + * If not then we must handle it here. + * This is probably an AFU bug. We will + * attempt a retry to see if that resolves it. 
+ */ + + chunk->stats.num_errors++; + rc = CFLASH_CMD_RETRY_ERR; + cmd->cmdi->status = EIO; + cmd->cmdi->transfer_size = 0; + if (ioasa->port == 0) { + chunk->stats.num_port0_fc_errors++; + } else { + chunk->stats.num_port1_fc_errors++; + } + + } + break; + case SISL_FC_RC_RESIDERR: // Resid mismatch between adapter and device + case SISL_FC_RC_TGTABORT: + case SISL_FC_RC_ABORTOK: + case SISL_FC_RC_ABORTFAIL: + + chunk->stats.num_errors++; + rc = CFLASH_CMD_RETRY_ERR; + cmd->cmdi->status = EIO; + cmd->cmdi->transfer_size = 0; + if (ioasa->port == 0) { + chunk->stats.num_port0_fc_errors++; + } else { + chunk->stats.num_port1_fc_errors++; + } + break; + + case SISL_FC_RC_WRABORTPEND: + case SISL_FC_RC_NOEXP: + case SISL_FC_RC_INUSE: + + + chunk->stats.num_fc_errors++; + chunk->stats.num_errors++; + if (ioasa->port == 0) { + chunk->stats.num_port0_fc_errors++; + } else { + chunk->stats.num_port1_fc_errors++; + } + rc = CFLASH_CMD_FATAL_ERR; + cmd->cmdi->status = EIO; + cmd->cmdi->transfer_size = 0; + break; + + + } + } + + if (ioasa->rc.afu_rc) { + + + /* + * We have a AFU error + */ + + CBLK_TRACE_LOG_FILE(6,"afu error ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, afu error = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags, ioasa->rc.afu_rc); + + CBLK_TRACE_LOG_FILE(6,"contxt_handle = 0x%x",chunk->path[path_index]->afu->contxt_handle); + CBLK_TRACE_LOG_FILE(6,"mmio_map = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio_mmap); + CBLK_TRACE_LOG_FILE(6,"mmio = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmio); + CBLK_TRACE_LOG_FILE(6,"mmap_size = 0x%llx",(uint64_t)chunk->path[path_index]->afu->mmap_size); + CBLK_TRACE_LOG_FILE(6,"hrrq_start = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_start); + CBLK_TRACE_LOG_FILE(6,"hrrq_end = 0x%llx",(uint64_t)chunk->path[path_index]->afu->p_hrrq_end); + CBLK_TRACE_LOG_FILE(6,"cmd_start = 0x%llx",(uint64_t)chunk->cmd_start); + CBLK_TRACE_LOG_FILE(6,"cmd_end = 
0x%llx",(uint64_t)chunk->cmd_end); + + CBLK_TRACE_LOG_FILE(6," cmd = 0x%llx lba = 0x%llx flags = 0x%x, cmd->cmdi->buf = 0x%llx", + cmd,cmd->cmdi->lba,cmd->cmdi->flags,cmd->cmdi->buf); + + + chunk->stats.num_afu_errors++; + + cmd->cmdi->transfer_size = 0; + + + switch (ioasa->rc.afu_rc) { + case SISL_AFU_RC_RHT_INVALID: + case SISL_AFU_RC_RHT_OUT_OF_BOUNDS: + case SISL_AFU_RC_LXT_OUT_OF_BOUNDS: + /* + * This most likely indicates a code bug + * in this code. + */ + + CBLK_TRACE_LOG_FILE(1,"afu error ctx_id = 0x%x, ioasc = 0x%x, resid = 0x%x, flags = 0x%x, afu error = 0x%x", + cmd->sisl_cmd.rcb.ctx_id,ioasa->ioasc,ioasa->resid,ioasa->rc.flags, ioasa->rc.afu_rc); + rc = CFLASH_CMD_FATAL_ERR; + cmd->cmdi->status = EIO; + break; + case SISL_AFU_RC_RHT_UNALIGNED: + case SISL_AFU_RC_LXT_UNALIGNED: + /* + * These should never happen + */ + + cblk_notify_mc_err(chunk,path_index,0x600,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = CFLASH_CMD_FATAL_ERR; + cmd->cmdi->status = EIO; + break; + + case SISL_AFU_RC_NO_CHANNELS: + + /* + * Retry with delay + */ + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x60c,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + cmd->cmdi->status = ENETDOWN; + rc = cblk_retry_new_path(chunk,cmd, TRUE); + + break; + + case SISL_AFU_RC_RHT_DMA_ERR: + case SISL_AFU_RC_LXT_DMA_ERR: + case SISL_AFU_RC_DATA_DMA_ERR: + switch (ioasa->afu_extra) { + case SISL_AFU_DMA_ERR_PAGE_IN: + + /* + * Retry + */ + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x60d,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_RETRY_ERR; + break; + + case SISL_AFU_DMA_ERR_INVALID_EA: + default: + + cblk_notify_mc_err(chunk,path_index,0x601,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = CFLASH_CMD_FATAL_ERR; + cmd->cmdi->status = EIO; + } + break; + case SISL_AFU_RC_OUT_OF_DATA_BUFS: + /* + * Retry + */ + + CBLK_NOTIFY_LOG_THRESHOLD(5,chunk,path_index,0x60e,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_RETRY_ERR; + break; + case 
SISL_AFU_RC_CAP_VIOLATION: + /* + * Retry, assume EEH recovery completes before retry. + */ + + cmd->cmdi->status = EIO; + rc = CFLASH_CMD_RETRY_ERR; + cblk_notify_mc_err(chunk,path_index,0x602,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + break; + case SISL_AFU_RC_TIMED_OUT_PRE_FC: + case SISL_AFU_RC_TIMED_OUT: + cmd->cmdi->status = ETIMEDOUT; + CBLK_NOTIFY_LOG_THRESHOLD(3,chunk,path_index,0x60b,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = CFLASH_CMD_RETRY_ERR; + break; + default: + + cmd->cmdi->status = EIO; + cblk_notify_mc_err(chunk,path_index,0x603,0,CFLSH_BLK_NOTIFY_AFU_ERROR,cmd); + rc = cblk_retry_new_path(chunk,cmd,FALSE); + } + + } + + + if (cmd->cmdi->status) { + + errno = cmd->cmdi->status; + } + + + return rc; +} + +/* + * NAME: cblk_reset_context_sisl + * + * FUNCTION: This will reset the adapter context so that + * any active commands will never be returned to the host. + * The AFU is not reset and new requests can be issued. + * This routine assumes the caller has the afu->lock. + * + * NOTE: AFU does not properly support this yet. So it is not currently + * used. + * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * 0 - Good completion + * + * + */ +int cblk_reset_context_sisl(cflsh_chunk_t *chunk, int path_index) +{ + int rc = 0; + int wait_reset_context_retry = 0; + +#ifdef _FOR_DEBUG + if (CBLK_SETUP_BAD_MMIO_SIGNAL(chunk,path_index,CAPI_IOARRIN_OFFSET+0x20)) { + + /* + * We must have failed the MMIO done below and long + * jump here. + */ + + return -1; + } + +#endif /* _FOR_DEBUG */ + + /* + * Writing 1 to the IOARRIN, will cause all active commands + * to ultimately be dropped by the AFU. Then the AFU can + * be issued commands again. 
+ */ + + CBLK_OUT_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_IOARRIN_OFFSET, (uint64_t)1); + + while ((CBLK_IN_MMIO_REG(chunk->path[path_index]->afu->mmio + CAPI_IOARRIN_OFFSET,FALSE)) && + (wait_reset_context_retry < CFLASH_BLOCK_MAX_WAIT_RST_CTX_RETRIES)) { + + /* + * Wait a limited amount of time for the reset + * context to complete. We are notified of this when + * a read of IOARRIN returns 0. + */ + + CBLK_TRACE_LOG_FILE(5,"waiting for context reset to complete"); + usleep(CFLASH_BLOCK_DELAY_RST_CTX); + wait_reset_context_retry++; + + } + +#ifdef _FOR_DEBUG + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); +#endif /* _FOR_DEBUG */ + + + if (wait_reset_context_retry >= CFLASH_BLOCK_MAX_WAIT_RST_CTX_RETRIES) { + + + + /* + * Reset context failed. Fail this operation now. + */ + + CBLK_TRACE_LOG_FILE(1,"Reset context timed out, chunk->index = %d, num_active_cmds = %d", + chunk->index,chunk->num_active_cmds); +#ifdef _FOR_DEBUG + CBLK_CLEANUP_BAD_MMIO_SIGNAL(chunk,path_index); +#endif /* _FOR_DEBUG */ + errno = ETIMEDOUT; + + return -1; + } + + + /* + * Now we need to clean up the RRQ for all commands that + * may have posted completion. In this case we will discard + * all of their completion status, with the + * caller of this routine doing the retries. + */ + + while (((*(chunk->path[path_index]->afu->p_hrrq_curr)) & (SISL_RESP_HANDLE_T_BIT)) == chunk->path[path_index]->afu->toggle) { + + + /* + * Increment the RRQ pointer + * and possibly adjust the toggle + * bit. + */ + + CBLK_INC_RRQ(chunk,path_index); + + + + } /* while */ + + + // TODO: ?? Need to handle clean up of commmands on this AFU but other chunks + + return rc; +} + + + +/* + * NAME: cblk_init_sisl_fcn_ptrs + * + * FUNCTION: This routine initializes the function + * pointers for a SIS Lite chunk. 
+ * + * INPUTS: + * chunk - Chunk associated with this error + * + * RETURNS: + * 0 - Good completion + * + * + */ +int cblk_init_sisl_fcn_ptrs(cflsh_path_t *path) +{ + + if (path == NULL) { + + CBLK_TRACE_LOG_FILE(1,"path = NULL"); + return -1; + } + + path->fcn_ptrs.get_num_interrupts = cblk_get_sisl_num_interrupts; + path->fcn_ptrs.get_cmd_room = cblk_get_sisl_cmd_room; + path->fcn_ptrs.adap_setup = cblk_sisl_adap_setup; + path->fcn_ptrs.get_intrpt_status = cblk_get_sisl_intrpt_status; + path->fcn_ptrs.inc_rrq = cblk_inc_sisl_rrq; + path->fcn_ptrs.get_cmd_data_length = cblk_get_sisl_cmd_data_length; + path->fcn_ptrs.get_cmd_cdb = cblk_get_sisl_cmd_cdb; + path->fcn_ptrs.get_cmd_rsp = cblk_get_sisl_cmd_rsp; + path->fcn_ptrs.build_adap_cmd = cblk_build_sisl_cmd; + path->fcn_ptrs.update_adap_cmd = cblk_update_path_sisl_cmd; + path->fcn_ptrs.issue_adap_cmd = cblk_issue_sisl_cmd; + path->fcn_ptrs.process_adap_err = cblk_process_sisl_cmd_err; + path->fcn_ptrs.process_adap_intrpt = cblk_process_sisl_adap_intrpt; + path->fcn_ptrs.process_adap_convert_intrpt = cblk_process_sisl_adap_convert_intrpt; + path->fcn_ptrs.complete_status_adap_cmd = cblk_complete_status_sisl_cmd; + path->fcn_ptrs.init_adap_cmd = cblk_init_sisl_cmd; + path->fcn_ptrs.init_adap_cmd_resp = cblk_init_sisl_cmd_rsp; + path->fcn_ptrs.copy_adap_cmd_resp = cblk_copy_sisl_cmd_rsp; + path->fcn_ptrs.set_adap_cmd_resp_status = cblk_set_sisl_cmd_rsp_status; + path->fcn_ptrs.reset_adap_contxt = cblk_reset_context_sisl; + + return 0; +} diff --git a/src/block/exportfile b/src/block/exportfile new file mode 100644 index 00000000..44e7c2f2 --- /dev/null +++ b/src/block/exportfile @@ -0,0 +1,49 @@ +/* +* %Z%%M% %I% %W% %G% %U% +*/ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/block/exportfile $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/* +* shared library export symbols +* data +* text +*/ + +cblk_init +cblk_term +cblk_open +cblk_close +cblk_get_lun_size +cblk_get_size +cblk_set_size +cblk_get_stats +cblk_read +cblk_write +cblk_aread +cblk_awrite +cblk_aresult +cblk_listio +cblk_clone_after_fork diff --git a/src/block/libcflash_exportmap b/src/block/libcflash_exportmap new file mode 100644 index 00000000..2f27d81c --- /dev/null +++ b/src/block/libcflash_exportmap @@ -0,0 +1,51 @@ +/* +* %Z%%M% %I% %W% %G% %U% +*/ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/libcflash_exportmap $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +/* +* shared library export symbols +* data +* text +*/ +{ +global: cblk_init; cblk_term; + cblk_open; + cblk_close; + cblk_get_lun_size; + cblk_get_size; + cblk_set_size; + cblk_get_stats; + cblk_read; + cblk_write; + cblk_aread; + cblk_awrite; + cblk_aresult; + cblk_listio; + cblk_clone_after_fork; +local: *; +}; diff --git a/src/block/libcflsh_block.exp b/src/block/libcflsh_block.exp new file mode 100644 index 00000000..8928a824 --- /dev/null +++ b/src/block/libcflsh_block.exp @@ -0,0 +1,44 @@ +* %Z%%M% %I% %W% %G% %U% +* IBM_PROLOG_BEGIN_TAG */ +* This is an automatically generated prolog. */ +* */ +* $Source: src/block/libcflsh_block.exp $ */ +* */ +* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +* */ +* Contributors Listed Below - COPYRIGHT 2014,2015 */ +* [+] International Business Machines Corp. */ +* */ +* */ +* Licensed under the Apache License, Version 2.0 (the "License"); */ +* you may not use this file except in compliance with the License. */ +* You may obtain a copy of the License at */ +* */ +* http://www.apache.org/licenses/LICENSE-2.0 */ +* */ +* Unless required by applicable law or agreed to in writing, software */ +* distributed under the License is distributed on an "AS IS" BASIS, */ +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +* implied. See the License for the specific language governing */ +* permissions and limitations under the License. 
*/ +* */ +* IBM_PROLOG_END_TAG */ +* +* shared library export symbols +* data +* text +cblk_init +cblk_term +cblk_open +cblk_close +cblk_get_lun_size +cblk_get_size +cblk_set_size +cblk_get_stats +cblk_read +cblk_write +cblk_aread +cblk_awrite +cblk_aresult +cblk_listio +cblk_clone_after_fork diff --git a/src/block/makefile b/src/block/makefile new file mode 100644 index 00000000..a0b3a5cc --- /dev/null +++ b/src/block/makefile @@ -0,0 +1,89 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/block/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../.. + +CFLAGS += + +#enable file mode by default, if the env var is not set. 
+#to disable file mode, set BLOCK_FILEMODE_ENABLED=0 in surelock-sw/customrc +#or export BLOCK_FILEMODE_ENABLED=0 prior to building +ifndef BLOCK_FILEMODE_ENABLED + BLOCK_FILEMODE_ENABLED=1 +endif + +#if BLOCK_FILEMODE is enabled, then tell the block code as much +#pass down as a #define to the underlying code +ifdef BLOCK_FILEMODE_ENABLED +ifeq ($(BLOCK_FILEMODE_ENABLED),1) + CUSTOMFLAGS += -DBLOCK_FILEMODE_ENABLED +endif +endif + +#if BLOCK_MC is enabled, then tell the block code as much +#pass down as a #define to the underlying code +ifdef BLOCK_MC_ENABLED +ifeq ($(BLOCK_MC_ENABLED),1) + CUSTOMFLAGS += -D_MASTER_CONTXT +endif +endif + +MODULE = cflsh_block + +ifeq ($(BLOCK_KERNEL_MC_ENABLED),1) + +CUSTOMFLAGS += -D_KERNEL_MASTER_CONTXT + +OBJS = cflash_scsi_user.o cflash_tools_user.o cflash_block.o cflash_block_int.o cflash_block_kern_mc.o cflash_block_sisl.o + +OBJS64 = cflash_scsi_user.64o cflash_tools_user.64o cflash_block.64o cflash_block_int.64o cflash_block_kern_mc.64o cflash_block_sisl.64o + +else + +OBJS = cflash_scsi_user.o cflash_tools_user.o cflash_block.o cflash_block_int.o cflash_block_linux.o cflash_block_sisl.o + +OBJS64 = cflash_scsi_user.64o cflash_tools_user.64o cflash_block.64o cflash_block_int.64o cflash_block_linux.64o cflash_block_sisl.64o + +endif + +UNAME=$(shell uname) +ifeq ($(UNAME),AIX) +MODLIBS = -lpthreads +MODULE_LINKLIBS = ${MODLIBS} +EXPFLAGS = -bE:exportfile +else +ifeq ($(BLOCK_KERNEL_MC_ENABLED),1) +MODLIBS = -lpthread -ludev +else +MODLIBS = -lpthread +endif +MODULE_LINKLIBS = ${MODLIBS} -Wl,--version-script=libcflash_exportmap +endif + +LINKLIBS = -l${MODULE} +LIBPATHS = -L${ROOTPATH}/img + +SUBDIRS = test.d + +include ${ROOTPATH}/config.mk diff --git a/src/block/test/blk_api_tst.c b/src/block/test/blk_api_tst.c new file mode 100755 index 00000000..ddc17a25 --- /dev/null +++ b/src/block/test/blk_api_tst.c @@ -0,0 +1,2472 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/block/test/blk_api_tst.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "blk_tst.h" +#include +#include + +#ifndef _AIX +#include +#endif + + +char test_filename[256]; +pthread_t blk_thread[MAX_NUM_THREADS]; + + +pthread_mutex_t completion_lock; + +extern int num_opens; +extern uint32_t thread_count; +extern uint64_t block_number; +extern int num_loops; +extern int thread_flag; +extern int num_threads; +extern uint64_t max_xfer;; +extern int virt_lun_flags; +extern int share_cntxt_flags; +extern int test_max_cntx; +extern char* env_filemode; +extern char* env_max_xfer; +extern char* env_num_cntx; +extern char* env_num_list; +extern int io_bufcnt; +extern int num_listio; + +char def_capidev[50] = "/dev/cxl/afu0.0s"; + +char *dev_paths[MAX_LUNS]; + +char *env_blk_verbosity = NULL; + +char *env_num_list = NULL; + + +char *parents [MAX_LUNS] ; + +int blk_verbosity = 0; +int num_devs = 0; +int filemode = 0; + +blk_thread_data_t blk_data; + +char *paths; +char tpaths[512]; + +void initialize_blk_tests() +{ + int rc =0; + rc = cblk_init(NULL,0); + if (rc){ + fprintf(stderr, "cblk_term: failed . 
errno =%d\n", errno); + return; + } + io_bufcnt = 4; + +} + +void teminate_blk_tests() +{ + int rc =0; + rc = cblk_term(NULL,0); + if (rc){ + fprintf(stderr, "cblk_term: failed . errno =%d\n", errno); + return; + } +} + +int blk_fvt_setup(int size) + +{ + + char *env_user = getenv("USER"); + int fd; + + char *p; + + int i,ret; + + paths = getenv("FVT_DEV"); + + env_num_cntx = getenv("MAX_CNTX"); + if (env_num_cntx && (atoi(env_num_cntx) > 0)) + test_max_cntx = atoi(env_num_cntx); + + env_blk_verbosity = getenv("FVT_BLK_VERBOSITY"); + + env_num_list = getenv ("FVT_NUM_LISTIO"); + + /* user specifying num list io */ + if (env_num_list) { + num_listio = atoi(env_num_list); + if ((num_listio >= 500) || + (!num_listio) ) { + /* Use default if 0 or greater than 500 */ + num_listio = 500; + } + } + + if (env_blk_verbosity) + blk_verbosity = atoi(env_blk_verbosity); + + DEBUG_1 ("blk_verbosity = %s \n",env_blk_verbosity); + DEBUG_1 ("num_listio = %d \n",num_listio); + + DEBUG_1("env_filemode = %s\n",env_filemode); + DEBUG_1("env_max_xfer = %s\n",env_max_xfer); + + if (env_max_xfer) { + max_xfer = atoi(env_max_xfer); + DEBUG_1("max_xfer_size = 0x%lx\n",max_xfer); + } + if (paths == NULL) { + if ((env_filemode) && (filemode=atoi(env_filemode) == 1)) { + sprintf(test_filename, "/tmp/%s.capitestfile", env_user); + + fd = creat(test_filename, 0777); + if (fd != -1) { + paths = &test_filename[0]; + ret = ftruncate (fd, FILESZ); + if (ret){ + fprintf(stderr,"blk_fvt_setup: couldn't increase filesize\n"); + fprintf (stderr,"\nftruncate: rc = %d, err = %d\n", ret, errno); + close (fd); + } + } else { + fprintf(stderr,"blk_fvt_setup: couldn't create test file\n"); + fprintf(stderr,"blk_fvt_setup: fd = %d, errno = %d\n", fd, errno); + return (-1); + } + } else { + fprintf(stderr,"Environment FVT_DEV is not set\n"); + return (-1); + } + } + + /* strcpy instead */ + strcpy (&tpaths[0], paths); + + + DEBUG_2("\nsaving paths = %s to tpaths = %s\n",paths,tpaths); + + p = 
strtok(&tpaths[0], "," ); + for (i=0; p != NULL; i++ ) { + if (!filemode) { + parents[i] = find_parent(p); + DEBUG_2("\nparent of %s p = %s\n", p, parents[i]); + } + dev_paths[i] = p; + DEBUG_2("\npath %d : %s\n", i, p); + p = strtok((char *)NULL,","); + + } + num_devs = i; + DEBUG_1("blk_fvt_setup: num_devs = %d\n",num_devs); + + bzero (&blk_data, sizeof(blk_data)); + DEBUG_1("blk_fvt_setup: path = %s\n", dev_paths[0]); + DEBUG_1("blk_fvt_setup: allocating blocks %d\n", size); + ret = blk_fvt_alloc_bufs(size); + DEBUG_1("blk_fvt_alloc_bufs: ret = 0x%x\n", ret); + + DEBUG_2("blk_fvt_setupi_exit: dev1 = %s, dev2 = %s\n", + dev_paths[0], dev_paths[1]); + return (ret); + +} +/* check parents of all devs, and see if the context is sharable */ +int validate_share_context() +{ + int n,i=0; + int rc = 0; + char *p; + n = num_devs; + p = parents[i++]; + DEBUG_1("\nvalidate_share_context: num_devs = %d ", n); + for (; i < n ; i++) { + DEBUG_2("\nvalidate_share_context: %s\n and %s\n ", p, parents[i]); + rc = strcmp (p, parents[i]); + if (rc != 0) + break; + } + + DEBUG_1("\nvalidate_share_context: ret = 0x%x\n", rc); + return(rc); + +} + +void blk_open_tst_inv_path (const char* path, int *id, int max_reqs, int *er_no, int opn_cnt, int flags, int mode) +{ + chunk_id_t j = NULL_CHUNK_ID; + chunk_ext_arg_t ext = 0; + errno = 0; + + j = cblk_open (path, max_reqs, mode, ext, flags); + + *id = j; + *er_no = errno; +} + +void blk_open_tst (int *id, int max_reqs, int *er_no, int opn_cnt, int flags, int mode) +{ + + // chunk_id_t handle[MAX_OPENS+15]; + + int i = 0; + chunk_id_t j = NULL_CHUNK_ID; + chunk_ext_arg_t ext = 0; + errno = 0; + + DEBUG_2("blk_open_tst : open %s %d times\n",dev_paths[0],opn_cnt); + + if (opn_cnt > 0) { + for (i = 0; i!= opn_cnt; i++) { + DEBUG_1("Opening %s\n",dev_paths[0]); + j = cblk_open (dev_paths[0], max_reqs, mode, ext, flags); + if (j != NULL_CHUNK_ID) { + // handle[i] = j; + chunks[i] = j; + num_opens += 1; + DEBUG_2("blk_open_tst: OPENED %d, 
chunk_id=%d\n", (num_opens), j); + } else { + *id = j; + *er_no = errno; + DEBUG_2("blk_open_tst: Failed: open i = %d, errno = %d\n", + i, errno); + return; + } + } + *id = j; + } + DEBUG_2("blk_open_tst: id = %d, num_open = %d\n", j, num_opens); +} + +void blk_close_tst(int id, int *rc, int *err, int close_flag) +{ + + errno = 0; + + *rc = cblk_close (id, close_flag); + *err = errno; + // it should be done in cleanup + if ( !(*rc) && !(*err)) + num_opens --; + DEBUG_3("blk_close_tst: id = %d, erno = %d, rc = %d\n", id, errno, *rc); +} + +void blk_open_tst_cleanup () +{ + + int i = 0; + int rc = 0; + + errno = 0; + + DEBUG_1("blk_open_tst_cleanup: closing num_opens = %d\n", num_opens); + for (i=0; num_opens |= 0; i++) { + if (chunks[i] != NULL_CHUNK_ID) { + rc = cblk_close (chunks[i],0); + if (!rc) { + DEBUG_1("blk_open_tst_cleanup: closed %d\n", num_opens); + chunks[i] = NULL_CHUNK_ID; + num_opens--; + } else { + DEBUG_3("\nblk_open_tst_cleanup: Close FAILED chunk id = %d , rc = %d, errno %d\n", + chunks[i], rc, errno); + break; + } + } + } + + DEBUG_1("blk_open_tst_cleanup: ***Num Chunk CLOSED %d\n\n",i); + + // At the end of test executions, to free up allocated + // test buffers. 
+ + if (blk_fvt_data_buf != NULL) + free(blk_fvt_data_buf); + if (blk_fvt_comp_data_buf != NULL) + free(blk_fvt_comp_data_buf); + + +} + + +void blk_fvt_get_set_lun_size(chunk_id_t id, + size_t *size, + int sz_flags, + int get_set_size_flag, + int *ret, + int *err) +{ + size_t c_size; + int rc; + errno =0; + + if (!get_set_size_flag) { + /* get physical lun size */ + rc = cblk_get_lun_size(id, &c_size, sz_flags); + *err = errno; + *size = c_size; + *ret = rc; + DEBUG_3("get_lun_size: sz = %d, ret = %d, err = %d\n", + (int)c_size, rc, errno); + } else if (get_set_size_flag == 1) { + /* get chunk size */ + rc= cblk_get_size(id, &c_size, sz_flags); + *err = errno; + *ret = rc; + *size = c_size; + DEBUG_3("get_size: sz = %d, ret = %d, err = %d\n", + (int)c_size, rc, errno); + } else if (get_set_size_flag == 2) { + /* set chunk size */ + c_size = *size; + rc= cblk_set_size(id, c_size, sz_flags); + *err = errno; + *ret = rc; + DEBUG_3("set_size: sz = %d, ret = %d, err = %d\n", + (int)c_size, rc, errno); + } + +} + +int blk_fvt_alloc_large_xfer_io_bufs(size_t size) +{ + + int ret = -1; + + + if (blk_fvt_data_buf != NULL) + free(blk_fvt_data_buf); + if (blk_fvt_comp_data_buf != NULL) + free(blk_fvt_comp_data_buf); + + + + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&blk_fvt_data_buf,4096,BLK_FVT_BUFSIZE*size)) { + + perror("posix_memalign failed for data buffer"); + return (ret); + } + bzero(blk_fvt_data_buf,BLK_FVT_BUFSIZE*size); + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&blk_fvt_comp_data_buf,4096,BLK_FVT_BUFSIZE*size)) { + perror("posix_memalign failed for comp data buffer"); + free(blk_fvt_data_buf); + return (ret); + + } + + return(0); +} + + + +int blk_fvt_alloc_bufs(int size) +{ + + int ret = -1; + int i,x; + char* p; + + /* + * Align data buffer on page boundary. 
+ */ + if ( posix_memalign((void *)&blk_fvt_data_buf,4096,BLK_FVT_BUFSIZE*size)) { + + perror("posix_memalign failed for data buffer"); + return (ret); + } + + bzero(blk_fvt_data_buf,BLK_FVT_BUFSIZE*size); + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&blk_fvt_comp_data_buf,4096,BLK_FVT_BUFSIZE*size)) { + perror("posix_memalign failed for comp data buffer"); + free(blk_fvt_data_buf); + return (ret); + + } + bzero(blk_fvt_comp_data_buf,BLK_FVT_BUFSIZE*size); + + p = blk_fvt_comp_data_buf; + for (i=1; i 0) { + if (blk_verbosity) { + DEBUG_0("Async read data completed ...\n"); + if (blk_verbosity == 9) { + DEBUG_0("Returned data is ...\n"); + hexdump(blk_fvt_comp_data_buf,100,NULL); + } + } + } else if (rc == 0) { + DEBUG_3("cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + usleep(1000); + continue; + } else { + DEBUG_3("cblk_aresult completed for for tag = 0x%d, rc = %d, errno = %d\n",tag,rc,errno); + } + + break; + + } /* while */ + } + break; + + case FV_WRITE: + rc = cblk_write(id,blk_fvt_comp_data_buf+align,lba_no,nblocks,0); + DEBUG_4("cblk_write complete at lba = 0x%lx, size = %lx, rc = %d, errno = %d\n",lba_no,nblocks,rc,errno); + if (rc <= 0) { + DEBUG_3("cblk_write failed at lba = 0x%lx, rc = %d, errno = %d\n",lba_no,rc,errno); + } + break; + case FV_AWRITE: + if ((open_flag & FV_NO_INRPT) && + (io_flags & CBLK_ARW_USER_STATUS_FLAG)) { + rc = cblk_awrite(id, blk_fvt_comp_data_buf+align, lba_no, nblocks, &tag, &awrt_status, io_flags); + } else { + rc = cblk_awrite(id, blk_fvt_comp_data_buf+align, lba_no, nblocks, + &tag, NULL, io_flags); + } + + if (rc < 0) { + DEBUG_3("cblk_awrite error lba = 0x%lx, rc = %d, errno = %d\n", lba_no, rc, errno); + *ret = rc; + *err = errno; + return; + } else { + while (TRUE) { + + rc = cblk_aresult(id,&tag, &ar_status,arflag); + if (rc > 0) { + if (blk_verbosity) { + DEBUG_0("Async write data completed ...\n"); + if (blk_verbosity == 9) { + 
DEBUG_0("Returned data is ...\n"); + hexdump(blk_fvt_comp_data_buf,100,NULL); + } + } + } else if (rc == 0) { + DEBUG_3("cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + usleep(1000); + continue; + } else { + DEBUG_3("cblk_aresult completed for for tag = 0x%d, rc = %d, errno = %d\n",tag,rc,errno); + } + + break; + + } /* while */ + } + break; + + default: + break; + } + *ret = rc; + *err = errno; + + DEBUG_2("blk_fvt_io: ret = %d, errno = %d\n", rc, errno); + +} + +void +blk_fvt_intrp_io_tst(chunk_id_t id, + int testflag, + int open_flag, + int *ret, + int *err) +{ + int rc = 0; + int err_no = 0; + errno = 0; + int tag = 0; + uint64_t lba_no = 1; + int nblocks = 1; + cblk_arw_status_t arw_status; + int fd; + int arflag = 0; + int io_flags = 0; + + bzero(&arw_status,sizeof(arw_status)); + + /* fill compare buffer with pattern */ + fd = open("/dev/urandom", O_RDONLY); + read(fd, blk_fvt_comp_data_buf, BLK_FVT_BUFSIZE); + close(fd); + + // testflag 1 - NO_INTRP set, null status, io_flags ARW_USER set + // testflag 2 - NO_INTRP _not set, status, io_flags ARW_USER not set + // testflag 3 - NO_INTRP _not set, status, io_flags ARW_USER set + // testflag 4 - NO_INTRP set, status, io_flags ARW_USER set + // testflag 5 - NO_INTRP set, status, ARW_USER | ARW_WAIT set + // testflag 6 - NO_INTRP set, status, ARW_USER | ARW_WAIT| ARW_USER_TAG set + + switch(testflag) { + case 1: + io_flags |= CBLK_ARW_USER_STATUS_FLAG; + break; + case 2: + io_flags |= 0; + break; + case 3: + io_flags |= CBLK_ARW_USER_STATUS_FLAG; + break; + case 4: + io_flags |= CBLK_ARW_USER_STATUS_FLAG; + break; + case 5: + io_flags |= CBLK_ARW_USER_STATUS_FLAG|CBLK_ARW_WAIT_CMD_FLAGS; + break; + case 6: + io_flags |= CBLK_ARW_USER_STATUS_FLAG|CBLK_ARW_WAIT_CMD_FLAGS|CBLK_ARW_USER_TAG_FLAG; + break; + + default: + break; + + } + + rc = cblk_awrite(id, blk_fvt_comp_data_buf, lba_no, nblocks, + &tag,((testflag == 1) ? 
NULL : (&arw_status)), io_flags); + + if (rc < 0) { + DEBUG_3("cblk_awrite error lba = 0x%lx, rc = %d, errno = %d\n", lba_no, rc, errno); + *ret = rc; + *err = errno; + return; + } + + check_astatus(id, &tag, arflag, open_flag, io_flags, &arw_status, &rc, &err_no); + + DEBUG_2("blk_fvt_io: ret = %d, errno = %d\n", rc, err_no); + *ret = rc; + *err = errno; + return; + +} + +void +check_astatus(chunk_id_t id, int *tag, int arflag, int open_flag, int io_flags, cblk_arw_status_t *arw_status, int *rc, int *err) +{ + + uint64_t ar_status = 0; + int ret = 0; + + + while (TRUE) { + if ((open_flag & ~FV_NO_INRPT) && + (io_flags & CBLK_ARW_USER_STATUS_FLAG)) { + switch (arw_status->status) { + case CBLK_ARW_STATUS_SUCCESS: + ret = arw_status->blocks_transferred; + break; + case CBLK_ARW_STATUS_PENDING: + ret = 0; + break; + default: + ret = -1; + errno = arw_status->fail_errno; + } + + } else { + ret = cblk_aresult(id, tag, &ar_status, arflag); + } + if (ret > 0) { + + DEBUG_0("Success\n"); + } else if (ret == 0) { + DEBUG_0("Cmd pending !\n"); + usleep(300); + continue; + } else { + DEBUG_2("Cmd completed ret = %d, errno = %d\n",ret,errno); + } + + break; + + } /* while */ + + *rc = ret; + *err = errno; + +} + + +void blk_fvt_dump_stats(chunk_stats_t *stats) +{ + fprintf(stderr,"chunk_statistics:\n"); + fprintf(stderr,"**********************************\n"); + fprintf(stderr,"max_transfer_size: 0x%lx\n", stats->max_transfer_size); + fprintf(stderr,"num_reads: 0x%lx\n", stats->num_reads); + fprintf(stderr,"num_writes: 0x%lx\n", stats->num_writes); + fprintf(stderr,"num_areads: 0x%lx\n", stats->num_areads); + fprintf(stderr,"num_awrites: 0x%lx\n", stats->num_awrites); + fprintf(stderr,"num_act_reads: 0x%x\n", stats->num_act_reads); + fprintf(stderr,"num_act_writes: 0x%x\n", stats->num_act_writes); + fprintf(stderr,"num_act_areads: 0x%x\n", stats->num_act_areads); + fprintf(stderr,"num_act_awrites: 0x%x\n", stats->num_act_awrites); + fprintf(stderr,"max_num_act_writes: 
0x%x\n", stats->max_num_act_writes); + fprintf(stderr,"max_num_act_reads: 0x%x\n", stats->max_num_act_reads); + fprintf(stderr,"max_num_act_awrites: 0x%x\n", stats->max_num_act_awrites); + fprintf(stderr,"max_num_act_areads: 0x%x\n", stats->max_num_act_areads); + fprintf(stderr,"num_blocks_read: 0x%lx\n", stats->num_blocks_read); + fprintf(stderr,"num_blocks_written: 0x%lx\n", stats->num_blocks_written); + fprintf(stderr,"num_errors: 0x%lx\n", stats->num_errors); + fprintf(stderr,"num_aresult_no_cmplt: 0x%lx\n", stats->num_aresult_no_cmplt); + fprintf(stderr,"num_retries: 0x%lx\n", stats->num_retries); + fprintf(stderr,"num_timeouts: 0x%lx\n", stats->num_timeouts); + fprintf(stderr,"num_no_cmds_free: 0x%lx\n", stats->num_no_cmds_free); + fprintf(stderr,"num_no_cmd_room: 0x%lx\n", stats->num_no_cmd_room); + fprintf(stderr,"num_no_cmds_free_fail: 0x%lx\n", stats->num_no_cmds_free_fail); + fprintf(stderr,"num_fc_errors: 0x%lx\n", stats->num_fc_errors); + fprintf(stderr,"num_port0_linkdowns: 0x%lx\n", stats->num_port0_linkdowns); + fprintf(stderr,"num_port1_linkdowns: 0x%lx\n", stats->num_port1_linkdowns); + fprintf(stderr,"num_port0_no_logins: 0x%lx\n", stats->num_port0_no_logins); + fprintf(stderr,"num_port1_no_logins: 0x%lx\n", stats->num_port1_no_logins); + fprintf(stderr,"num_port0_fc_errors: 0x%lx\n", stats->num_port0_fc_errors); + fprintf(stderr,"num_port1_fc_errors: 0x%lx\n", stats->num_port1_fc_errors); + fprintf(stderr,"num_cc_errors: 0x%lx\n", stats->num_cc_errors); + fprintf(stderr,"num_afu_errors: 0x%lx\n", stats->num_afu_errors); + fprintf(stderr,"num_capi_false_reads: 0x%lx\n", stats->num_capi_false_reads); + fprintf(stderr,"num_capi_adap_resets: 0x%lx\n", stats->num_capi_adap_resets); + fprintf(stderr,"num_capi_afu_errors: 0x%lx\n", stats->num_capi_afu_errors); + fprintf(stderr,"num_capi_afu_intrpts: 0x%lx\n", stats->num_capi_afu_intrpts); + fprintf(stderr,"num_capi_unexp_afu_intrpts: 0x%lx\n", stats->num_capi_unexp_afu_intrpts); + 
fprintf(stderr,"num_active_threads: 0x%lx\n", stats->num_active_threads); + fprintf(stderr,"max_num_act_threads: 0x%lx\n", stats->max_num_act_threads); + fprintf(stderr,"num_cache_hits: 0x%lx\n", stats->num_cache_hits); + fprintf(stderr,"**********************************\n"); + + +} + +void blk_get_statistics (chunk_id_t id, int flags, int *ret, int *err) +{ + + int rc; + chunk_stats_t stats; + + rc = cblk_get_stats (id, &stats, flags); + *ret = rc; + *err = errno; + + if (rc) { + DEBUG_2("blk_get_statistics: failed ret = %d, errno = %d\n", rc, errno); + } else { + if (blk_verbosity == 9) { + fprintf(stderr,"cblk_get_stats completed ...\n"); + blk_fvt_dump_stats (&stats); + hexdump(&stats,sizeof(stats),NULL); + } + } +} + + + +void *blk_io_loop(void *data) +{ + int rc = 0; + int tag; + int i; + blk_thread_data_t *blk_data = data; + uint32_t local_thread_count = 0; + void *data_buf = NULL; + void *comp_data_buf = NULL; + uint64_t blk_number; + uint64_t ar_status; + int cmd_type; + int fd; + int arflag = 0; + int x, num_luns; + + + + pthread_mutex_lock(&completion_lock); + local_thread_count = thread_count++; + + /* + * Each thread is using a different + * block number range. + */ + + + blk_number = block_number + (num_loops * thread_count); + + num_luns = num_devs ; + + pthread_mutex_unlock(&completion_lock); + + + /* + * Align data buffer on page boundary. 
+ */ + if ( posix_memalign((void *)&data_buf,4096,BLK_FVT_BUFSIZE)) { + + perror("posix_memalign failed for data buffer"); + + blk_data->status.ret = -1; + blk_data->status.errcode = errno; + pthread_exit(&(blk_data->status)); + } + + + errno = 0; + if (local_thread_count % 2) { + cmd_type = FV_RW_AWAR; + } else { + cmd_type = FV_RW_COMP; + } + for (x =0; xstatus.ret = -1; + blk_data->status.errcode = errno; + pthread_exit(&(blk_data->status)); + + } + + fd = open ("/dev/urandom", O_RDONLY); + read (fd, comp_data_buf, BLK_FVT_BUFSIZE); + close (fd); + + rc = cblk_write(blk_data->chunk_id[x],comp_data_buf,blk_number,1,0); + + if (rc != 1) { + blk_data->status.ret = rc; + blk_data->status.errcode = errno; + free(comp_data_buf); + free(data_buf); + pthread_mutex_lock(&completion_lock); + DEBUG_2("Write failed rc = %d, errno = %d\n",rc, errno); + pthread_mutex_unlock(&completion_lock); + pthread_exit(&(blk_data->status)); + } + rc = cblk_read(blk_data->chunk_id[x],data_buf,blk_number,1,0); + + if (rc != 1) { + + blk_data->status.ret = rc; + blk_data->status.errcode = errno; + free(comp_data_buf); + free(data_buf); + pthread_mutex_lock(&completion_lock); + DEBUG_2("Read failed rc = %d, errno = %d\n",rc, errno); + pthread_mutex_unlock(&completion_lock); + pthread_exit(&(blk_data->status)); + } + + rc = memcmp(data_buf,comp_data_buf,BLK_FVT_BUFSIZE); + + if (rc) { + blk_data->status.ret = rc; + blk_data->status.errcode = errno; + if(blk_verbosity == 9) { + pthread_mutex_lock(&completion_lock); + fprintf(stderr,"Data compare failure\n"); + /* + fprintf(stderr,"Written data:\n"); + dumppage(data_buf,BLK_FVT_BUFSIZE); + fprintf(stderr,"**********************************************************\n\n"); + fprintf(stderr,"read data:\n"); + dumppage(comp_data_buf,BLK_FVT_BUFSIZE); + fprintf(stderr,"**********************************************************\n\n"); + */ + pthread_mutex_unlock(&completion_lock); + } + + rc = cblk_read(blk_data->chunk_id[x],data_buf,blk_number,1,0); 
+ if (rc == 1) { + if(blk_verbosity == 9) { + pthread_mutex_lock(&completion_lock); + fprintf(stderr,"Dump of re-read\n"); + + dumppage(data_buf,BLK_FVT_BUFSIZE); + pthread_mutex_unlock(&completion_lock); + } + } + + } + free(comp_data_buf); + break; + case FV_RW_AWAR: + + /* + * Perform write then read comparision test + */ + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&comp_data_buf,4096,BLK_FVT_BUFSIZE)) { + perror("posix_memalign failed for data buffer"); + perror("posix_memalign failed for data buffer"); + blk_data->status.ret = 0; + blk_data->status.errcode = errno; + pthread_exit(&(blk_data->status)); + } + + fd = open ("/dev/urandom", O_RDONLY); + read (fd, comp_data_buf, BLK_FVT_BUFSIZE); + close (fd); + + rc = cblk_awrite(blk_data->chunk_id[x],comp_data_buf,blk_number,1,&tag,NULL,0); + + if (rc < 0) { + blk_data->status.ret = rc; + blk_data->status.errcode = errno; + free(comp_data_buf); + free(data_buf); + pthread_mutex_lock(&completion_lock); + DEBUG_2("Awrite failed rc = %d, errno = %d\n",rc, errno); + pthread_mutex_unlock(&completion_lock); + pthread_exit(&(blk_data->status)); + } + arflag = CBLK_ARESULT_BLOCKING; + while (TRUE) { + rc = cblk_aresult(blk_data->chunk_id[x],&tag, &ar_status,arflag); + if (rc > 0) { + pthread_mutex_lock(&completion_lock); + DEBUG_2("wrc=%d, tag = %x\n",rc, tag); + DEBUG_0("Async write data completed ...\n"); + pthread_mutex_unlock(&completion_lock); + } else if (rc == 0) { + pthread_mutex_lock(&completion_lock); + DEBUG_2("Waiting for command to complete wrc=%d, tag = %x\n",rc, tag); + pthread_mutex_unlock(&completion_lock); + usleep(1000); + continue; + } else { + pthread_mutex_lock(&completion_lock); + DEBUG_3("cblk_aresult completed (failed write) for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + pthread_mutex_unlock(&completion_lock); + } + break; + } /* while */ + + + rc = cblk_aread(blk_data->chunk_id[x],data_buf,blk_number,1,&tag,NULL,0); + + if (rc < 0) { + 
blk_data->status.ret = rc; + blk_data->status.errcode = errno; + free(comp_data_buf); + free(data_buf); + pthread_mutex_lock(&completion_lock); + DEBUG_2("Aread failed rc = %d, errno = %d\n",rc, errno); + pthread_mutex_unlock(&completion_lock); + pthread_exit(&(blk_data->status)); + } + + arflag = CBLK_ARESULT_BLOCKING; + while (TRUE) { + + rc = cblk_aresult(blk_data->chunk_id[x],&tag, &ar_status,arflag); + if (rc > 0) { + pthread_mutex_lock(&completion_lock); + DEBUG_2("rc=%d, tag = %x\n",rc, tag); + DEBUG_0("Async read data completed ...\n"); + pthread_mutex_unlock(&completion_lock); + } else if (rc == 0) { + pthread_mutex_lock(&completion_lock); + DEBUG_2("rc=%d, tag = %x\n",rc, tag); + DEBUG_3("Waiting for command to complete for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + pthread_mutex_unlock(&completion_lock); + usleep(1000); + continue; + } else { + pthread_mutex_lock(&completion_lock); + DEBUG_3("cblk_aresult completed (failed read) for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + pthread_mutex_unlock(&completion_lock); + } + + break; + + } /* while */ + + pthread_mutex_lock(&completion_lock); + DEBUG_2("Read ******** RC = %d tag %x\n",rc, tag); + + DEBUG_1("Read completed with rc = %d\n",rc); + pthread_mutex_unlock(&completion_lock); + + rc = memcmp(data_buf,comp_data_buf,BLK_FVT_BUFSIZE); + + if (rc) { + blk_data->status.ret = rc; + blk_data->status.errcode = errno; + if (blk_verbosity==9) { + pthread_mutex_lock(&completion_lock); + fprintf(stderr,"Data compare failure\n"); + /* + fprintf(stderr,"Written data:\n"); + dumppage(data_buf,BLK_FVT_BUFSIZE); + fprintf(stderr,"**********************************************************\n\n"); + fprintf(stderr,"read data:\n"); + dumppage(comp_data_buf,BLK_FVT_BUFSIZE); + fprintf(stderr,"**********************************************************\n\n"); + */ + pthread_mutex_unlock(&completion_lock); + } + rc = cblk_read(blk_data->chunk_id[x],data_buf,blk_number,1,0); + + if (rc == 1) { + if 
(blk_verbosity==9) { + DEBUG_0("Dump re-read OK\n"); + /* + dumppage(data_buf,BLK_FVT_BUFSIZE); + */ + } + } + + } else if (!thread_flag) { + DEBUG_0("Memcmp succeeded\n"); + } + free(comp_data_buf); + break; + default: + + fprintf(stderr,"Invalid cmd_type = %d\n",cmd_type); + i = num_loops; + } /* switch */ + + + blk_number++; + + pthread_mutex_lock(&completion_lock); + DEBUG_3("Dev = %d. Loop count = %d of %d\n",x, i,num_loops); + pthread_mutex_unlock(&completion_lock); + } /* for num_loops */ + } /* for num_luns */ + + free(data_buf); + + pthread_exit(&(blk_data->status)); +} + +void blk_thread_tst(int *ret, int *err) +{ + + + int rc; /* Return code */ + int ret_code=0; + int i,x; + void *status; + blk_thread_status_t thread_stat; + chunk_ext_arg_t ext = 0; + int flags = 0; + + + for (x=0; x < num_devs; x++) { + blk_data.size = 64; + if (virt_lun_flags) + flags = CBLK_OPN_VIRT_LUN; + if (share_cntxt_flags) + flags |= CBLK_OPN_SHARE_CTXT; + blk_data.chunk_id[x] = cblk_open(dev_paths[x],64,O_RDWR,ext,flags); + + if (blk_data.chunk_id[x] == NULL_CHUNK_ID) { + + DEBUG_2("Open of %s failed with errno = %d\n",dev_paths[x],errno); + *ret = -1; + *err = errno; + return; + } + + num_opens ++; + /* Skip for physical luns */ + if (virt_lun_flags ) { + + /* + * On the first pass thru this loop + * for virtual lun, open the virtual lun + * and set its size. Subsequent passes. + * skip this step. 
+ */ + + + rc = cblk_set_size(blk_data.chunk_id[x],1024,0); + + if (rc) { + perror("cblk_set_size failed\n"); + *ret = -1; + *err = errno; + for (x=0; x < num_opens; x++) { + cblk_close(blk_data.chunk_id[x], 0); + } + return; + } + + } + + } + if (num_threads >= 1) { + + /* + * Create all threads here + */ + + for (i=0; i< num_threads; i++) { + + /* + rc = pthread_create(&blk_thread[i].thread,NULL,blk_io_loop,(void *)&blk_data); + */ + + rc = pthread_create(&blk_thread[i],NULL,blk_io_loop,(void *)&blk_data); + if (rc) { + + DEBUG_3("pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + *ret = -1; + *err = errno; + } + } + + + /* + * Wait for all threads to complete + */ + + + errno = 0; + + for (i=0; i< num_threads; i++) { + + rc = pthread_join(blk_thread[i],&status); + + thread_stat = *(blk_thread_status_t *)status; + if(thread_stat.ret || thread_stat.errcode) { + *ret = thread_stat.ret; + *err = thread_stat.errcode; + DEBUG_3("Thread %d returned fail ret %x, errno = %d\n",i, + thread_stat.ret, + thread_stat.errcode); + } + } + + } + + // fix close + + DEBUG_1("Calling cblk_close num open =%d...\n",num_opens); + for (x=0; num_opens!=0; x++) { + ret_code = cblk_close(blk_data.chunk_id[x],0); + if (ret_code) { + DEBUG_2("Close of %s failed with errno = %d\n",dev_paths[x],errno); + *ret = ret_code; + *err = errno; + } + num_opens --; + } + +} + + + +void blocking_io_tst (chunk_id_t id, int *ret, int *err) +{ + int rc = -1; + errno = 0; + int cmdtag[1000]; + /* + int artag[512]; + */ + int rtag = -1; + uint64_t ar_status = 0; + uint64_t lba; + size_t nblocks=1; + int fd, i; + int arflg = 0; + int t = 1; + + lba = 1; + for (i=0; i < 1000; i++,lba++) { + /* fill compare buffer with pattern */ + fd = open ("/dev/urandom", O_RDONLY); + read (fd, blk_fvt_comp_data_buf, BLK_FVT_BUFSIZE); + close (fd); + rc = cblk_awrite(id,blk_fvt_comp_data_buf,lba,nblocks,&cmdtag[i],NULL,arflg); + DEBUG_3("\n***** cblk_awrite rc = 0x%d, tag = %d, lba =0x%lx\n",rc, 
cmdtag[i], lba); + if (!rc) { + DEBUG_1("Async write returned tag = %d\n", cmdtag[i]); + } else if (rc < 0) { + DEBUG_3("awrite failed for lba = 0x%lx, rc = %d, errno = %d\n",lba,rc,errno); + } + } + + /* + arflg = CBLK_ARESULT_NEXT_TAG|CBLK_ARESULT_BLOCKING; + */ + arflg = CBLK_ARESULT_NEXT_TAG; + + while (TRUE) { + rc = cblk_aresult(id,&rtag, &ar_status,arflg); + + if (rc > 0) { + DEBUG_2("aresult rc = %d, tag = %d\n",rc,rtag); + t++; + if (t>1000) + break; + } + if (rc == 0) { + printf("Z"); + usleep(1000); + continue; + } else if (rc < 0) { + DEBUG_1("aresult error = %d\n",errno); + break; + } + } + + + *ret = rc; + *err = errno; + + return; +} + + +void io_perf_tst (chunk_id_t id, int *ret, int *err) +{ + int rc = -1; + errno = 0; + int cmdtag[20000]; + int rtag = -1; + uint64_t ar_status = 0; + uint64_t lba; + size_t nblocks=1; + int fd; + int i=0; + int arflg = 0; + int ret_code = 0; + int t ; + int x; + int y = 0; + char *env_num_cmds = getenv("FVT_NUM_CMDS"); + char *env_num_loop = getenv("FVT_NUM_LOOPS"); + char *env_io_comp = getenv("FVT_VALIDATE"); + + + int size = 4096; + int loops = 500; + int validate = 0; + + lba = 1; + + + int num_cmds = 4096; + + if (env_num_cmds && atoi(env_num_cmds )) { + num_cmds = atoi(env_num_cmds); + /** limit 4K cmds **/ + if (num_cmds > 4096) + num_cmds = 4096; + } + + if (env_num_loop && atoi(env_num_loop )) { + loops = atoi(env_num_loop); + } + + if (env_io_comp && (atoi(env_io_comp)==1)) { + validate = atoi(env_io_comp); + } + + + /* fill compare buffer with pattern */ + fd = open ("/dev/urandom", O_RDONLY); + read (fd, blk_fvt_comp_data_buf, BLK_FVT_BUFSIZE*num_cmds); + close (fd); + + for (x=0; x 0) { + t++; + if (t>num_cmds) + break; + } + if (rc == 0) + continue; + if (rc < 0) { + fprintf(stderr,"aresult cmdno = %d, error = %d, rc = 0x%x, \n", + t, errno,rc); + *ret = rc; + *err = errno; + return; + } + } + + /* read wrote buffer */ + + for (t=1, lba=1, i=0; i < num_cmds; i++,lba++) { + rc = cblk_aread(id, + 
(char*)(blk_fvt_data_buf + (i*size)), + lba,nblocks,&cmdtag[i],NULL,arflg); + if (rc < 0) { + fprintf(stderr,"awrite failed for lba = 0x%lx, rc = %d, errno = %d\n",lba,rc,errno); + *ret = rc; + *err = errno; + return; + } + } + + arflg = CBLK_ARESULT_NEXT_TAG; + + while (TRUE) { + rc = cblk_aresult(id,&rtag, &ar_status,arflg); + + if (rc > 0) { + t++; + if (t>num_cmds) + break; + } + if (rc == 0) + continue; + if (rc < 0) { + fprintf(stderr,"aresult error = %d\n",errno); + *ret = rc; + *err = errno; + return; + } + } + + if (!validate) { + ret_code = memcmp((char*)(blk_fvt_data_buf), + (char*)(blk_fvt_comp_data_buf), + BLK_FVT_BUFSIZE*size); + + if (ret_code) { + fprintf(stderr,"\n memcmp failed rc = 0x%x\n",ret_code); + *ret = ret_code; + *err = errno; + return; + } + } + } + DEBUG_2("Perf Test existing i = %d, x = %d\n", i, x ); + return; +} + + +int max_context(int *ret, int *err, int reqs, int cntx, int flags, int mode) +{ + + int i = 0; + int t = 0; + chunk_id_t j = NULL_CHUNK_ID; + chunk_ext_arg_t ext = 0; + errno = 0; + int status ; + + char *path = dev_paths[0]; + + pid_t child_pid [700]; + errno = 0; + + if (test_max_cntx) { + /* use user suppiled cntx value */ + cntx = test_max_cntx; + } + DEBUG_1("Testing max_cntx = %d\n", cntx); + + int ok_to_close = 0; + int close_now = 0; + int pid; + int ret_pid; + int child_ret = 0; + int child_err = 0; + int ret_code,errcode = 0; + int fds[2*cntx]; + int pipefds[2*cntx]; + + // create pipe to be used by child to pass back failure status + for (i=0; i< (cntx); i++) { + if (pipe(fds + (i*2) ) < 0 ) { + perror ("pipe"); + fprintf(stderr, "\nIncrease numfile count with \"ulimit -n 5000\" cmd\n"); + *ret = -1; + *err = errno; + return(-1); + } + } + + + // create pipes to be used by child to read permission to close + for (i=0; i< (cntx); i++) { + if (pipe(pipefds + (i*2) ) < 0 ) { + perror ("pipe"); + fprintf(stderr, "\nIncrease numfile count with \"ulimit -n 5000\" cmd\n"); + *ret = -1; + *err = errno; + return(-1); + 
} + } + + DEBUG_0("\nForking childrens to test max contexts\n "); + for (i = 0; i < cntx; i++) { + DEBUG_2("Opening %s opn_cnt = %d\n", path,i); + + child_pid [i] = fork(); + + if (child_pid [i] < 0 ) { + fprintf(stderr,"\nmax_context: fork %d failed err=%d\n",i,errno); + *ret = -1; + *err = errno; + return (-1); + } else if (child_pid [i] == 0) { + pid = getpid(); + j = cblk_open(path, reqs, mode, ext, flags); + if (j != -1) { + DEBUG_1("\nmax_context: Child = %d, OPENED \n", i+1 ); + close_now = 0; + while ( !close_now) { + read(pipefds[i*2], &close_now, sizeof(int)); + DEBUG_2("\nChild %d, Received %d \n",pid, close_now); + } + DEBUG_2("\nChild %d, Received Parent's OK =%d \n",pid, close_now); + cblk_close(j, 0); + /* exit success */ + exit(0); + } else { + fprintf(stderr,"\nmax_context: child =%d ret = 0x%x,open error = %d\n",i+1, j, errno); + child_ret = j; + child_err = errno; + /* Send errcode thru ouput side */ + write(fds[(i*2)+1],&child_ret, sizeof(int)); + write(fds[(i*2)+1],&child_err, sizeof(int)); + + /* exit error */ + exit(1); + } + } + DEBUG_1("\nmax_context: loops continue..opened = %d\n",i+1); + } + + sleep (5); + for (i = 0; i < cntx; i++) { + ok_to_close = 1; + write(pipefds[(i*2)+1], &ok_to_close, sizeof(int)); + DEBUG_1("\nparent sends ok_to_close to %d \n",i+1); + } + ret_code = errcode = 0; + /* Check all childs exited */ + for (i = 0; i < cntx; i++) { + for (t = 0; t < 5; t++) { + ret_pid = waitpid(child_pid[i], &status, 0); + if (ret_pid == -1) { + /* error */ + ret_code = -1; + fprintf(stderr,"\nChild exited with error %d \n",i); + break; + } else if (ret_pid == child_pid[i]) { + DEBUG_1("\nChild exited %d \n",i); + if (WIFEXITED(status)) { + DEBUG_2("child =%d, status 0x%x \n",i,status); + if (WEXITSTATUS(status)) { + read(fds[(i*2)], &ret_code, sizeof(ret_code)); + read(fds[(i*2)], &errcode, sizeof(errcode)); + DEBUG_3("\nchild %d errcode %d, ret_code %d\n", + i, errcode, ret_code); + } else { + DEBUG_1("child =%d, exited normally 
\n",i); + } + } + break; + } else { + if ( t == 4) { + errcode =ETIMEDOUT; + fprintf(stderr,"\nChild %d didn't exited Timed out\n",child_pid[i]); + break; + } + DEBUG_1("\nwaitpid returned = %d, give more time \n",ret_pid); + DEBUG_1("\nChild %d give need more time \n",i); + /* still running */ + } + sleep (1); + } /* give max 5 sec */ + /* end test on any error */ + if (ret_code || errcode) { + fprintf(stderr,"\nmax_context: Child = %d, ret = 0x%x, err = %d\n", + i+1, ret_code,errcode); + break; + } + } + + *ret = ret_code; + *err = errcode; + + // Close all pipes fds + + for (i=0; i< 2*(cntx); i++) { + close (pipefds [i]); + } + // Close all pipes fds + + for (i=0; i< 2*(cntx); i++) { + close (fds [i]); + } + + return(0); +} + +int child_open(int c, int max_reqs, int flags, int mode) +{ + chunk_id_t j = NULL_CHUNK_ID; + chunk_ext_arg_t ext = 0; + errno = 0; + + DEBUG_1 ("\nchild_open: opening for child %d\n", c); + + char *path = dev_paths[0]; + + j = cblk_open (path, max_reqs, mode, ext, flags); + if (j != NULL_CHUNK_ID) { + chunks[c] = j; + num_opens += 1; + return (0); + } else { + /* + *id = j; + *er_no = errno; + */ + DEBUG_2("child_open: Failed: open i = %d, errno = %d\n", c, errno); + return (-1); + } +} +int fork_and_clone_mode_test(int *ret, int *err, int pmode, int cmode) +{ + chunk_id_t id = 0; + int flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int open_cnt= 1; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int rc; + int er; + + size_t temp_sz; + pid_t child_pid; + int child_status; + pid_t ret_pid; + + int child_ret; + int child_err; + int ret_code=0; + int t; + int errcode = 0; + int fd[2]; + + + // create pipe to be used by child to pass back status + pipe(fd); + + if (blk_fvt_setup(1) < 0) + return (-1); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er, open_cnt, flags, pmode); + + if (id == NULL_CHUNK_ID) + return (-1); + temp_sz = 64; + get_set_size_flag = 2; + 
blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &rc, &er); + if (rc | er) { + DEBUG_0("fork_and_clone: set_size failed\n"); + *ret = rc; + *err = er; + return (0); + } + + DEBUG_0("\nForking a child process "); + child_pid = fork(); + if (child_pid < 0 ) { + DEBUG_2("fork_and_clone: fork failed rc=%x,err=%d\n",rc,errno); + *ret = -1; + *err = errno; + return (0); + } + if (child_pid == 0) { + DEBUG_0("\nFork success,Running child process "); + + /* Close read side */ + close (fd[0]); + // child process + rc = cblk_clone_after_fork(id,cmode,0); + if (rc) { + DEBUG_2("\nfork_and_clone: clone failed rc=%x, err=%d\n",rc, errno); + child_ret = rc; + child_err = errno; + /* Send errcode thru ouput side */ + write(fd[1],&child_ret, sizeof(int)); + write(fd[1],&child_err, sizeof(int)); + DEBUG_1("Sending child_ret %d\n",child_ret); + cblk_close(id,0); + exit (1); + } + + DEBUG_0("\nfork_and_clone: Exiting child process normally "); + cblk_close(id,0); + exit (0); + + } else { + // parent's process + DEBUG_0("\nfork_and_clone:Parent waiting for child proc "); + ret_code = errcode = 0; + for (t = 0; t < 5; t++) { + ret_pid = waitpid(child_pid, &child_status, 0); + if (ret_pid == -1) { + *ret = -1; + *err = errno; + DEBUG_1("\nwaitpid error = %d \n",errno); + return(0); + } else if (ret_pid == child_pid) { + DEBUG_0("\nChild exited Check child_status \n"); + if (WIFEXITED(child_status)) { + DEBUG_2("child =%d, child_status 0x%x \n",child_pid,child_status); + if (WEXITSTATUS(child_status)) { + read(fd[0], &ret_code, sizeof(ret_code)); + DEBUG_2("\nchild %d retcode %d\n", + child_pid, ret_code); + read(fd[0], &errcode, sizeof(errcode)); + DEBUG_2("\nchild %d errcode %d\n", + child_pid, errcode); + } else { + DEBUG_1("child =%d, exited normally \n",child_pid); + } + } + break; + } else { + if ( t == 4) { + ret_code = -1; + errcode =ETIMEDOUT; + fprintf(stderr,"\nChild %d didn't exited Timed out\n",child_pid); + break; + } + fprintf(stderr,"\nwaitpid returned 
= %d, give more time \n",ret_pid); + /* still running */ + } + sleep (1); + } /* give max 5 sec */ + + *ret = ret_code; + *err = errcode; + } + + return(0); +} + +int fork_and_clone(int *ret, int *err, int mode) +{ + chunk_id_t id = 0; + int flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int open_cnt= 1; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int rc; + int er; + + uint64_t lba; + int io_flags = 0; + int open_flag = 0; + size_t temp_sz,nblks; + int cmd; + pid_t child_pid; + int child_status; + pid_t w_ret; + + int child_ret; + int child_err; + int ret_code=0; + int ret_err=0; + int fd[2]; + + + // create pipe to be used by child to pass back status + + pipe(fd); + + if (blk_fvt_setup(1) < 0) + return (-1); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er, open_cnt, flags, mode); + + if (id == NULL_CHUNK_ID) + return (-1); + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &rc, &er); + if (rc | er) { + DEBUG_0("fork_and_clone: set_size failed\n"); + *ret = rc; + *err = er; + return (0); + } + + cmd = FV_WRITE; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &rc, &er, io_flags, open_flag); + + if (rc != 1) { + DEBUG_0("fork_and_clone: blk_fvt_io failed\n"); + *ret = rc; + *err = er; + return (0); + } + + DEBUG_0("\nForking a child process "); + child_pid = fork(); + if (child_pid < 0 ) { + DEBUG_0("fork_and_clone: fork failed\n"); + *ret = -1; + *err = errno; + return (0); + } + if (child_pid == 0) { + DEBUG_0("\nFork success,Running child process "); + + /* Close read side */ + close(fd[0]); + + // child process + rc = cblk_clone_after_fork(id,O_RDONLY,0); + if (rc) { + + DEBUG_2("\nfork_and_clone: clone failed rc=%x, err=%d\n",rc, errno); + child_ret = rc; + child_err = errno; + /* Send errcode thru ouput side */ + write(fd[1],&child_ret, sizeof(int)); + write(fd[1],&child_err, sizeof(int)); + 
DEBUG_2("Sending child ret=%d, err=%d\n",child_ret, child_err); + cblk_close(id,0); + exit (1); + } + + cmd = FV_READ; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &rc, &er, io_flags, open_flag); + if (rc != 1) { + // error + DEBUG_0("fork_and_clone: child I/O failed\n"); + child_ret = rc; + child_err = errno; + /* Send errcode thru ouput side */ + write(fd[1],&child_ret, sizeof(int)); + write(fd[1],&child_err, sizeof(int)); + DEBUG_2("Sending child ret=%d, err=%d\n",child_ret, child_err); + cblk_close(id,0); + exit (1); + } + blk_fvt_cmp_buf(nblks, &rc); + if (rc) { + // error + DEBUG_0("fork_and_clone: child I/O compare failed\n"); + child_ret = rc; + child_err = errno; + /* Send errcode thru ouput side */ + write(fd[1],&child_ret, sizeof(int)); + write(fd[1],&child_err, sizeof(int)); + DEBUG_2("Sending child ret=%d, err=%d\n",child_ret, child_err); + cblk_close(id,0); + exit (1); + } + DEBUG_1("fork_and_clone: Child buf compare ret rc = %d\n",rc); + DEBUG_0("\nfork_and_clone: Exiting child process normally "); + cblk_close(id,0); + exit (0); + + } else { + // parent's process + + DEBUG_0("\nfork_and_clone:Parent waiting for child proc "); + w_ret = wait(&child_status); + + if (w_ret == -1) { + DEBUG_1("fork_and_clone: wait failed %d\n",errno); + *ret = -1; + *err = errno; + return (0); + } else { + + DEBUG_0("\nfork_and_clone: Child process returned "); + if (WIFEXITED(child_status)) { + DEBUG_1("\nfork_and_clone:1 child exit status = 0x%x\n",child_status); + if (WEXITSTATUS(child_status)) { + DEBUG_1("\nfork_and_clone: Error child exit status = 0x%x\n",child_status); + /* Close output side */ + close(fd[1]); + rc = read(fd[0], &ret_code, sizeof(ret_code)); + DEBUG_1("Received child status %d\n",ret_code); + rc = read(fd[0], &ret_err, sizeof(ret_err)); + DEBUG_1("Received child errcode=%d \n",ret_err); + *ret = ret_code; + *err = ret_err; + } else { + DEBUG_1("\nfork_and_clone: Successfullchild exit status = 0x%x\n",child_status); + *ret = rc; + 
*err = errno; + } + } + } + } + + return(0); +} + +void blk_list_io_arg_test(chunk_id_t id, int arg_tst, int *err, int *ret) +{ + int rc = 0; + int i; + int num_complt; + uint64_t timeout = 0; + uint64_t lba = 1; + int uflags = 0; + size_t size = 1; + + cblk_io_t cblk_issue_io[1]; + cblk_io_t *cblk_issue_list[1]; + cblk_io_t cblk_complete_io[1]; + cblk_io_t *cblk_complete_list[1]; + + cblk_io_t cblk_wait_io[1]; + cblk_io_t *cblk_wait_list[1]; + + cblk_io_t cblk_pending_io[1]; + + num_complt = 1; + + // allocate buffer for 1 IO + + for (i=0; i<1; i++) { + bzero(&cblk_issue_io[i],sizeof(cblk_io_t)); + bzero(&cblk_complete_io[i],sizeof(cblk_io_t)); + bzero(&cblk_wait_io[i],sizeof(cblk_io_t)); + bzero(&cblk_pending_io[i],sizeof(cblk_io_t)); + } + i = 0; + lba = 1; + if (arg_tst == 9) + uflags = CBLK_IO_USER_STATUS; + else + uflags = 0; + size = 1; + cblk_issue_io[i].request_type = CBLK_IO_TYPE_WRITE; + cblk_issue_io[i].buf = (void *)(blk_fvt_comp_data_buf ); + cblk_issue_io[i].lba = lba; + cblk_issue_io[i].flags = uflags; + cblk_issue_io[i].nblocks = size; + cblk_issue_list[i] = &cblk_issue_io[i]; + + cblk_complete_list[i] = &cblk_complete_io[i]; + + cblk_wait_io[i].request_type = CBLK_IO_TYPE_WRITE; + cblk_wait_io[i].buf = (void *)(blk_fvt_comp_data_buf); + cblk_wait_io[i].lba = lba; + cblk_wait_io[i].flags = uflags; + cblk_wait_io[i].nblocks = size; + cblk_wait_list[i] = &cblk_wait_io[i]; + + cblk_pending_io[i].request_type = CBLK_IO_TYPE_WRITE; + cblk_pending_io[i].buf = (void *)(blk_fvt_comp_data_buf); + cblk_pending_io[i].lba = lba; + cblk_pending_io[i].flags = uflags; + cblk_pending_io[i].nblocks = size; + + + errno = 0; + switch (arg_tst) { + case 1: + /* ALL list NULL */ + rc = cblk_listio(id, + NULL,0, + NULL,0, + NULL,0, + NULL,&num_complt, + timeout,0); + break; + case 2: + rc = cblk_listio(id, + cblk_issue_list, 0, + NULL,0, + NULL,0, + NULL,&num_complt, + timeout,0); + break; + case 3: + rc = cblk_listio(id, + cblk_issue_list, 1, + NULL,0, + NULL,0, + 
cblk_complete_list,0, + timeout,0); + break; + case 4: + rc = cblk_listio(id, + cblk_issue_list, 1, + NULL,0, + NULL,0, + NULL,&num_complt, + timeout,0); + break; + case 5: + rc = cblk_listio(id, + NULL,0, + NULL,0, + NULL,0, + cblk_complete_list,&num_complt, + timeout,0); + break; + case 6: + /* Test null buffer report EINVAL error */ + cblk_issue_io[i].buf = (void *)NULL; + rc = cblk_listio(id, + cblk_issue_list, 1, + NULL,0, + cblk_wait_list,1, + cblk_complete_list,&num_complt, + timeout,0); + break; + case 7: + /* Test null num_complt report EINVAL */ + num_complt = 0; + rc = cblk_listio(id, + + cblk_issue_list, 1, + NULL,0, + NULL,1, + cblk_complete_list,&num_complt, + timeout,0); + break; + case 8: + /* Test -1 num_complt report EINVAL */ + num_complt = -1; + rc = cblk_listio(id, + cblk_issue_list, 1, + NULL,0, + NULL,1, + cblk_complete_list,&num_complt, + timeout,0); + break; + case 9: + /* Test T/O with CBLK_USER_STATUS report EINVAL */ + timeout = 1; + rc = cblk_listio(id, + cblk_issue_list, 1, + NULL,0, + NULL,1, + cblk_complete_list,&num_complt, + timeout,0); + break; + default: + break; + } + + DEBUG_1("Invalid arg test %d Complete\n",arg_tst); + *ret = rc; + *err = errno; + return; +} + +void blk_list_io_test(chunk_id_t id, int cmd, int t_type, int uflags, uint64_t timeout, int *err, int *ret, int num_listio) +{ + int rc = 0; + int i; + uint64_t lba; + size_t size = 1; + int ret_code = 0; + int num_complt = num_listio; + int cmplt_cnt = 0; + + cblk_io_t cblk_issue_io[num_listio]; + cblk_io_t *cblk_issue_list[num_listio]; + cblk_io_t cblk_complete_io[num_listio]; + cblk_io_t *cblk_complete_list[num_listio]; + + cblk_io_t cblk_wait_io[num_listio]; + cblk_io_t *cblk_wait_list[num_listio]; + + cblk_io_t cblk_pending_io[num_listio]; + cblk_io_t *cblk_pending_list[num_listio]; + + + for (i=0; i 50) { + fprintf(stderr, "Waited 50 sec, fail = %d, cmplt =%d\n", + cmds_fail,cmds_cmplt); + break; + } + sleep_cnt ++; + sleep(1); + } + + return (ret); + +} + +int 
check_completions(cblk_io_t *io, int num_listio ) +{ + int i; + int cmds_fail,cmds_invalid,cmds_cmplt,cmds_pend; + cmds_fail=cmds_invalid=cmds_cmplt=cmds_pend=0; + + + for (i=0; i +#include +#include + +#include +#include +#include +#ifndef _MACOSX +#include +#endif /* !_MACOS */ +#include +#include +#include + + +extern int blk_verbosity; +extern char *dev_path; +extern chunk_id_t chunks[]; +extern void *blk_fvt_data_buf; +extern void *blk_fvt_comp_data_buf; + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#define MAX_OPENS 512 +#define MAX_LUNS 512 +#define MAX_NUM_THREADS 4096 +#define BLK_FVT_BUFSIZE 4096 +#define NUM_LIST_IO 500 + +#define FILESZ 4096*4096*64 + + +#define FV_READ 0 +#define FV_WRITE 1 +#define FV_AREAD 2 +#define FV_AWRITE 3 +#define FV_RW_COMP 4 +#define FV_RW_AWAR 5 + +/* io_flags used in blk_fvt_io, to force conditions */ +#define FV_ALIGN_BUFS 0x1 +#define FV_ARESULT_BLOCKING 0x2 +#define FV_ARESULT_NEXT_TAG 0x4 +#define FV_NO_INRPT 0x8 + + +#define DEBUG_0(A) \ + do \ + { \ + if (blk_verbosity) \ + { \ + fprintf(stderr,A); \ + fflush(stderr); \ + } \ + } while (0) + +#define DEBUG_1(A,B) \ + do \ + { \ + if (blk_verbosity) \ + {fprintf(stderr,A,B);fflush(stderr);} \ + } while (0) + +#define DEBUG_2(A,B,C) \ + do \ + { \ + if (blk_verbosity) \ + {fprintf(stderr,A,B,C);fflush(stderr);} \ + } while (0) + +#define DEBUG_3(A,B,C,D) \ + do \ + { \ + if (blk_verbosity) \ + {fprintf(stderr,A,B,C,D);fflush(stderr);} \ + } while (0) + +#define DEBUG_4(A,B,C,D,E) \ + do \ + { \ + if (blk_verbosity) \ + {fprintf(stderr,A,B,C,D,E);fflush(stderr);} \ + } while (0) + +typedef struct blk_thread_status { + int ret; + int errcode; +} blk_thread_status_t; + +typedef struct blk_thread_data { + chunk_id_t chunk_id[MAX_LUNS]; + blk_thread_status_t status; + int flags; + size_t size; +} blk_thread_data_t; + + + +int blk_fvt_alloc_bufs(int size); +void blk_open_tst(int *id, int max_reqs, int *er_no, int opn_cnt, int flags, int 
mode); + +void blk_open_tst_inv_path(const char* path,int *id, int max_reqs, int *er_no, int opn_cnt, int flags, int mode); + +void blk_open_tst_close ( int id); + +void blk_open_tst_cleanup (); + +void blk_close_tst(int id, int *ret, int *er_no, int close_flag); + +void blk_fvt_get_set_lun_size(chunk_id_t id, size_t *size, int sz_flags, int get_set_size_flag, int *ret, int *err); + +void blk_fvt_io (chunk_id_t id, int cmd, uint64_t lba, size_t nblocks, int *ret, int *err, int io_flag, int open_flag); + +int blk_fvt_setup(int size); +int multi_lun_setup(); + +void blk_fvt_cmp_buf(int size, int *ret); +void blk_get_statistics (chunk_id_t id, int flags, int *ret, int *err); +void blk_thread_tst(int *ret, int *err); +void blk_multi_luns_thread_tst(int *ret, int *err); +void blocking_io_tst (chunk_id_t id, int *ret, int *err); +void io_perf_tst (chunk_id_t id, int *ret, int *err); +int fork_and_clone(int *ret, int *err, int mode); +int fork_and_clone_mode_test(int *ret, int *err, int pmode, int cmode); +int fork_and_clone_my_test(int *ret, int *err, int pmode, int cmode); +void blk_fvt_intrp_io_tst(chunk_id_t id, int testflag, int open_flag, int *ret, int *err); +void check_astatus(chunk_id_t id, int *tag, int arflag, int open_flag, int io_flag, cblk_arw_status_t *ardwr_status, int *rc, int *err); +void initialize_blk_tests(); +void terminate_blk_tests(); +void blk_list_io_test(chunk_id_t id, int cmd, int t_type, int uflags, uint64_t timeout, int *er_no, int *ret, int num_listio); +void blk_list_io_arg_test(chunk_id_t id, int arg_tst, int *err, int *ret); +int poll_arw_stat(cblk_io_t *cblk_issue_io, int num_listio); +int poll_completion(int t_type, int *cmplt, cblk_io_t *wait_io[], cblk_io_t *pending_io[], cblk_io_t *cmplt_io[]); +int check_completions(cblk_io_t *io , int num_listio); +int blk_fvt_alloc_list_io_bufs(int size); +int blk_fvt_alloc_large_xfer_io_bufs(size_t nblks); +char *find_parent(char *device_name); +int validate_share_context(); +int max_context(int 
*ret, int *err, int reqs, int cntx, int flags, int mode); +int child_open(int c, int reqs, int flags, int mode); +#endif diff --git a/src/block/test/block_perf_check b/src/block/test/block_perf_check new file mode 100755 index 00000000..337f59ec --- /dev/null +++ b/src/block/test/block_perf_check @@ -0,0 +1,169 @@ +#!/bin/ksh +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/block/test/block_perf_check $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG +if [[ $1 = "-h" ]] +then + echo "Usage: block_perf_check [libpath] [binpath]" + echo " ex: block_perf_check" + echo " ex: block_perf_check /.../surelock-sw/img /.../surelock-sw/obj/tests" + exit +fi + +if [[ $3 == int* || $3 == INT* ]] +then + _intrps="-i" +fi + +_uname=$(uname -a|awk '{print $1}') +if [[ $(whoami) != root ]]; +then + echo "must be run as root" + exit +fi + +######## Set these DEVx vars to the correct luns for the best results ########## +######## 1 lun per FC port, across two cards ########## +DEVS=4 +if [[ $_uname = "AIX" ]] +then + DEV1=/dev/hdisk0 + DEV2=/dev/hdisk1 + DEV3=/dev/hdisk2 + DEV4=/dev/hdisk4 + _64=64 + for d in 0 1 2 3 + do + if [[ $(lsmpio -l hdisk$d|grep hdisk$d|wc -l) -ne 2 ]]; + then + echo "running to only one port for hdisk$d" + fi + done +else + # use only /dev/sg* devices + if [[ $(hostname) == cougar* ]] + then + DEV1=/dev/sg14 + DEV2=/dev/sg15 + DEV3=/dev/sg19 + DEV4=/dev/sg20 + elif [[ $(hostname) == p8tul2* ]] + then + DEV1=/dev/sg34 + DEV2=/dev/sg38 + DEV3=/dev/sg39 + DEV4=/dev/sg43 + fi + _64= +fi + +if [[ ! 
-z $1 ]] +then + if [[ $_uname = "AIX" ]] + then + export LIBPATH=$1 + cmd="$2" + else + cmd="LD_LIBRARY_PATH=$1 $2" + fi + cmd_dir=$cmd +else + cmd_dir="/opt/ibm/capikv/test" +fi + +function blockio +{ + cmd="$cmd_dir/blockio$_64 -d $DEV1 -s 15 $_intrps > /tmp/out_p" + eval $cmd + iops=$(cat /tmp/out_p|grep iops|awk '{print $10}'|awk -F : '{print $2}') + echo "blockio: $iops" + + cmd="$cmd_dir/blockio$_64 -d $DEV1 -s 15 -q 1 $_intrps > /tmp/out_p" + eval $cmd + iops=$(cat /tmp/out_p|grep iops|awk '{print $10}'|awk -F : '{print $2}') + echo "blockio: QD=1: $iops" + + rm -f /tmp/out_p + c=0 + while [ $c -lt 25 ] + do + cmd="$cmd_dir/blockio$_64 -d $DEV1 -q 1 -s 15 $_intrps >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV2 -q 1 -s 15 $_intrps >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV3 -q 1 -s 15 $_intrps >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV4 -q 1 -s 15 $_intrps >> /tmp/out_p" + eval $cmd& + let c=c+1 + PID=$! + done + wait + iops=0 + for d in $(cat /tmp/out_p|grep iops|awk '{print $10}'|awk -F : '{print $2}') + do + ((iops+=$d)) + done + printf "blockio: 100P: QD=1: %-6d\n" $iops +} + +function blockplistio +{ + cmd="$cmd_dir/blockplistio$_64 -d $DEV1 -s 15 $_intrps > /tmp/out_p" + eval $cmd + iops=$(cat /tmp/out_p|grep iops|awk '{print $9}'|awk -F : '{print $2}') + echo "blockplistio: $iops" + + cmd="$cmd_dir/blockplistio$_64 -d $DEV1 -s 15 -l 1 -c 1 $_intrps > /tmp/out_p" + eval $cmd + iops=$(cat /tmp/out_p|grep iops|awk '{print $9}'|awk -F : '{print $2}') + echo "blockplistio: QD=1: $iops" + + rm -f /tmp/out_p + c=0 + while [ $c -lt 25 ] + do + cmd="$cmd_dir/blockplistio$_64 -d $DEV1 -l 1 -c 1 -s 15 $_intrps >> /tmp/out_p" + eval "$cmd"& + cmd="$cmd_dir/blockplistio$_64 -d $DEV2 -l 1 -c 1 -s 15 $_intrps >> /tmp/out_p" + eval "$cmd"& + cmd="$cmd_dir/blockplistio$_64 -d $DEV3 -l 1 -c 1 -s 15 $_intrps >> /tmp/out_p" + eval "$cmd"& + cmd="$cmd_dir/blockplistio$_64 -d $DEV4 -l 1 -c 1 -s 15 $_intrps >> 
/tmp/out_p" + eval "$cmd"& + let c=c+1 + PID=$! + done + wait + iops=0 + for d in $(cat /tmp/out_p|grep iops|awk '{print $9}'|awk -F : '{print $2}') + do + ((iops+=$d)) + done + printf "blockplistio: 100P: QD=1: %-6d\n" $iops +} + +echo "BLOCK Performance" +blockio +blockplistio diff --git a/src/block/test/blockio.c b/src/block/test/blockio.c new file mode 100644 index 00000000..d0c4f190 --- /dev/null +++ b/src/block/test/blockio.c @@ -0,0 +1,345 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/block/test/blockio.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief Block Interface I/O Driver + * \details + * This runs I/O to the capi Block Interface using aread/awrite/aresult. The + * expected iops are 300k-400k per capi card. \n + * Using the queuedepth (-q) option affects iops, as there are less cmds. 
\n + * Example: \n + * \n + * blockio -d /dev/sg10 \n + * d:/dev/sg10 r:100 q:300 s:4 p:0 n:1 i:o err:0 mbps:1401 iops:358676 \n + * \n + * blockio -d /dev/sg10 -s 20 -q 1 -r 70 -p \n + * d:/dev/sg10 r:70 q:1 s:20 p:1 n:1 i:0 err:0 mbps:26 iops:6905 + ******************************************************************************* + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _8K 8*1024 +#define _4K 4*1024 +#define NBUFS _8K + +#define TIME_INTERVAL _8K +#define SET_NBLKS TIME_INTERVAL*4 + +#ifdef _AIX +#define USLEEP 200 +#else +#define USLEEP 100 +#endif + +/** +******************************************************************************** +** \brief print the usage +** \details +** -d device *the device name to run Block IO \n +** -r %rd *the percentage of reads to issue (0..100) \n +** -q queuedepth *the number of outstanding ops to maintain \n +** -n nblocks *the number of 4k blocks in each I/O request \n +** -s secs *the number of seconds to run the I/O \n +** -p *run in physical lun mode \n +** -i *run using interrupts, not polling +*******************************************************************************/ +void usage(void) +{ + printf("Usage:\n"); + printf(" \ +[-d device] [-r %%rd] [-q queuedepth] [-n nblocks] [-s secs] [-p] [-i]\n"); + exit(0); +} + +/** +******************************************************************************** +** \brief print errno and exit +** \details +** An IO has failed, print the errno and exit +*******************************************************************************/ +void io_error(int id, int err) +{ + fprintf(stderr, "io_error: errno:%d\n", err); + cblk_close(id,0); + cblk_term(NULL,0); + exit(err); +} + +/** +******************************************************************************** +** \brief main +** \details +** process input parms \n +** open device \n +** alloc memory \n +** loop running IO until secs expire 
\n +** print IO stats \n +** cleanup +*******************************************************************************/ +int main(int argc, char **argv) +{ + struct timeval start, delta; + long int mil = 1000000; + float esecs = 0; + uint8_t **rbuf = NULL; + uint8_t **wbuf = NULL; + int *tags = NULL; + char *dev = NULL; + char FF = 0xFF; + char c = '\0'; + chunk_ext_arg_t ext = 0; + int flags = 0; + int rc = 0; + int id = 0; + char *_secs = NULL; + char *_QD = NULL; + char *_RD = NULL; + char *_nblocks = NULL; + uint32_t plun = 0; + uint32_t nsecs = 4; + uint32_t QD = 500; + uint32_t nRD = 100; + uint32_t RD = 0; + uint32_t WR = 0; + uint32_t intrp_thds = 0; + int tag = 0; + int rtag = 0; + uint32_t lba = 0; + size_t nblks = 0; + uint32_t nblocks = 1; + uint32_t cnt = 0; + uint32_t pollN = 0; + uint64_t status = 0; + uint32_t TI = TIME_INTERVAL; + uint32_t N = 0; + uint32_t TIME = 1; + uint32_t COMP = 0; + + /*-------------------------------------------------------------------------- + * process and verify input parms + *------------------------------------------------------------------------*/ + while (FF != (c=getopt(argc, argv, "d:r:q:n:s:phi"))) + { + switch (c) + { + case 'd': dev = optarg; break; + case 'r': _RD = optarg; break; + case 'q': _QD = optarg; break; + case 'n': _nblocks = optarg; break; + case 's': _secs = optarg; break; + case 'p': plun = 1; break; + case 'i': intrp_thds = 1; break; + case 'h': + case '?': usage(); break; + } + } + if (_secs) nsecs = atoi(_secs); + if (_QD) QD = atoi(_QD); + if (_nblocks) nblocks = atoi(_nblocks); + if (_RD) nRD = atoi(_RD); + + if (QD > _8K) QD = _8K; + if (nRD > 100) nRD = 100; + if (!plun) nblocks = 1; + if (dev == NULL) usage(); + + srand48(time(0)); + + N = QD; + COMP = QD < 5 ? 
1 : QD/5; + + /*-------------------------------------------------------------------------- + * open device and set lun size + *------------------------------------------------------------------------*/ + rc = cblk_init(NULL,0); + if (rc) + { + fprintf(stderr,"cblk_init failed with rc = %d and errno = %d\n", + rc,errno); + exit(1); + } + if (!plun) flags = CBLK_OPN_VIRT_LUN; + if (!intrp_thds) flags |= CBLK_OPN_NO_INTRP_THREADS; + id = cblk_open(dev, QD, O_RDWR, ext, flags); + if (id == NULL_CHUNK_ID) + { + if (ENOSPC == errno) fprintf(stderr,"cblk_open: ENOSPC\n"); + else if (ENODEV == errno) fprintf(stderr,"cblk_open: ENODEV\n"); + else fprintf(stderr,"cblk_open: errno:%d\n",errno); + cblk_term(NULL,0); + exit(errno); + } + + rc = cblk_get_lun_size(id, &nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_get_lun_size failed: errno: %d\n", errno); + exit(errno); + } + if (!plun) + { + nblks = nblks > SET_NBLKS ? SET_NBLKS : nblks; + rc = cblk_set_size(id, nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_set_size failed, errno: %d\n", errno); + exit(errno); + } + } + + /*-------------------------------------------------------------------------- + * alloc data for IO + *------------------------------------------------------------------------*/ + tags = malloc(QD*sizeof(int)); + rbuf = malloc(QD*sizeof(uint8_t*)); + wbuf = malloc(QD*sizeof(uint8_t*)); + + for (tag=0; tag= nblks) lba=cnt%2;} + else if (EBUSY == errno) {++N; usleep(USLEEP); continue;} + else {io_error(id,errno);} + } + /*---------------------------------------------------------------------- + * send up to WR writes, as long as the queuedepth N is not max + *--------------------------------------------------------------------*/ + while (TIME && WR && N) + { + tag = tags[--N]; + rc = cblk_awrite(id, wbuf[tag], lba, nblocks, &tag, NULL, + CBLK_ARW_WAIT_CMD_FLAGS | CBLK_ARW_USER_TAG_FLAG); + if (0 == rc) {--WR; lba+=2; if (lba >= nblks) lba=cnt%2;} + else if (EBUSY == errno) {++N; usleep(USLEEP); continue;} + 
else {io_error(id,errno);} + } + + /* if the queuedepth is 1, don't immediately pound aresult */ + if (QD==1) usleep(USLEEP); + + /*---------------------------------------------------------------------- + * complete cmds until queue depth is QD-COMP + *--------------------------------------------------------------------*/ + while (N < COMP) + { + rtag=0; + rc = cblk_aresult(id, &rtag, &status, CBLK_ARESULT_BLOCKING| + CBLK_ARESULT_NEXT_TAG); + if (rc == 0) {++pollN; usleep(USLEEP); continue;} + else if (rc < 0) {io_error(id,errno);} + ++cnt; tags[N++] = rtag; + } + + /*---------------------------------------------------------------------- + * at an interval which does not impact performance, check if secs + * have expired, and randomize lba + *--------------------------------------------------------------------*/ + if (cnt > TI) + { + TI += TIME_INTERVAL; + gettimeofday(&delta, NULL); + if (delta.tv_sec - start.tv_sec >= nsecs) {TIME=0; COMP = QD;} + lba = lrand48() % TIME_INTERVAL; + } + } + while (TIME || QD-N); + + /*-------------------------------------------------------------------------- + * print IO stats + *------------------------------------------------------------------------*/ + gettimeofday(&delta, NULL); + esecs = ((float)((delta.tv_sec*mil + delta.tv_usec) - + (start.tv_sec*mil + start.tv_usec))) / (float)mil; + printf("d:%s r:%d q:%d s:%d p:%d n:%d i:%d pollN:%d mbps:%d iops:%d", + dev, nRD, QD, nsecs, plun, nblocks, intrp_thds, pollN, + (uint32_t)((float)((cnt*nblocks*4)/1024)/esecs), + (uint32_t)((float)(cnt/esecs))); + if (plun && nblocks > 1) + printf(" 4k-iops:%d", (uint32_t)((float)(cnt*nblocks)/esecs)); + printf("\n"); + + /*-------------------------------------------------------------------------- + * cleanup + *------------------------------------------------------------------------*/ + for (cnt=0; cnt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _8K 8*1024 +#define 
_4K 4*1024 + +#define TIME_INTERVAL _8K +#define SET_NBLKS TIME_INTERVAL*4 + +#ifdef _AIX +#define USLEEP 100 +#else +#define USLEEP 50 +#endif + +typedef struct +{ + cblk_io_t **ioI; + cblk_io_t **ioP; + cblk_io_t **ioC; + cblk_io_t *p_ioI; + cblk_io_t *p_ioP; + cblk_io_t *p_ioC; + uint8_t **rbuf; + uint32_t do_send; + uint32_t do_cmp; +} listio_t; + +/** +******************************************************************************** +** \brief print the usage +** \details +** -d device *the device name to run Block IO \n +** -l #lists *the number of lists \n +** -c #cmds *the number of cmds per list \n +** -s secs *the number of seconds to run the I/O \n +** -p *run in physical lun mode \n +** -i *run using interrupts, not polling \n +*******************************************************************************/ +void usage(void) +{ + printf("Usage:\n"); + printf(" [-d device] [-l lists] [-c cmdsperlist] [-p] [-s secs]\n"); + exit(0); +} + +/** +******************************************************************************** +** \brief print errno and exit +** \details +** an IO has failed, print the errno and exit +*******************************************************************************/ +void io_error(int id, char *str, int err) +{ + fprintf(stderr, "io_error: %s errno:%d\n", str, err); + cblk_close(id,0); + cblk_term(NULL,0); + exit(err); +} + +/** +******************************************************************************** +** \brief main +** \details +** process input parms \n +** open device \n +** alloc memory \n +** loop running IO until secs expire \n +** print IO stats \n +** cleanup +*******************************************************************************/ +int main(int argc, char **argv) +{ + struct timeval start, delta; + long int mil = 1000000; + float esecs = 0; + char *dev = NULL; + char FF = 0xFF; + char c = '\0'; + chunk_ext_arg_t ext = 0; + int rc = 0; + int i,j = 0; + int id = 0; + char *_secs = NULL; + char *_NL = NULL; 
+ char *_LD = NULL; + uint32_t nsecs = 5; + uint32_t NL = 40; + uint32_t LD = 10; + uint32_t plun = 0; + int tag = 0; + uint32_t cnt = 0; + size_t nblks = 0; + uint32_t pollN = 0; + int32_t ncmp = 0; + uint32_t TI = TIME_INTERVAL; + uint32_t N = 0; + uint32_t TIME = 1; + uint32_t COMP = 0; + uint32_t lba = 0; + listio_t *lio = NULL; + + /*-------------------------------------------------------------------------- + * process and verify input parms + *------------------------------------------------------------------------*/ + while (FF != (c=getopt(argc, argv, "d:l:c:s:ph"))) + { + switch (c) + { + case 'd': dev = optarg; break; + case 'l': _NL = optarg; break; + case 'c': _LD = optarg; break; + case 's': _secs = optarg; break; + case 'p': plun = 1; break; + case 'h': + case '?': usage(); break; + } + } + if (_secs) nsecs = atoi(_secs); + if (_NL) NL = atoi(_NL); + if (_LD) LD = atoi(_LD); + + if (dev == NULL) usage(); + + srand48(time(0)); + + if (NL*LD > _8K) + { + fprintf(stderr, "-l %d * -c %d cannot be greater than 8k\n", NL, LD); + usage(); + } + N = NL; + COMP = N < 3 ? 1 : N/3; + + /*-------------------------------------------------------------------------- + * open device and set lun size + *------------------------------------------------------------------------*/ + rc = cblk_init(NULL,0); + if (rc) + { + fprintf(stderr,"cblk_init failed with rc = %d and errno = %d\n", + rc,errno); + exit(1); + } + id = cblk_open(dev, NL*LD, O_RDWR, ext, CBLK_OPN_VIRT_LUN); + if (id == NULL_CHUNK_ID) + { + if (ENOSPC == errno) fprintf(stderr,"cblk_open: ENOSPC\n"); + else if (ENODEV == errno) fprintf(stderr,"cblk_open: ENODEV\n"); + else fprintf(stderr,"cblk_open: errno:%d\n",errno); + cblk_term(NULL,0); + exit(errno); + } + + rc = cblk_get_lun_size(id, &nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_get_lun_size failed, errno:%d\n", errno); + exit(errno); + } + if (!plun) + { + nblks = nblks > SET_NBLKS ? 
SET_NBLKS : nblks; + rc = cblk_set_size(id, nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_set_size failed, errno:%d\n", errno); + exit(errno); + } + } + + /*-------------------------------------------------------------------------- + * alloc data for IO + *------------------------------------------------------------------------*/ + lio = malloc(NL*sizeof(listio_t)); + assert(lio != NULL); + + for (i=0; irequest_type = CBLK_IO_TYPE_READ; + lio[i].ioI[j]->flags = CBLK_IO_USER_STATUS; + lio[i].ioI[j]->buf = lio[i].rbuf[j]; + lio[i].ioI[j]->lba = lba; + lio[i].ioI[j]->nblocks = 1; + } + } + + /*-------------------------------------------------------------------------- + * loop running IO until secs expire + *------------------------------------------------------------------------*/ + gettimeofday(&start, NULL); + + do + { + /*---------------------------------------------------------------------- + * send up to RD reads, as long as the queuedepth N is not max + *--------------------------------------------------------------------*/ + while (TIME && N) + { + if (!lio[tag].do_send) {if (++tag == NL) tag = 0; continue;} + + lio[tag].do_send = 0; + lio[tag].do_cmp = 1; + ncmp = LD; + N -= 1; + memset(lio[tag].p_ioC, 0, LD*sizeof(cblk_io_t)); + rc = cblk_listio(id, lio[tag].ioI,LD, + NULL,0, + NULL,0, + lio[tag].ioC, &ncmp, + 0,0); + if (rc == -1) io_error(id, "SEND", errno); + } + if (NL==1) usleep(USLEEP); + + /*---------------------------------------------------------------------- + * complete cmds until queue depth is QD-COMP + *--------------------------------------------------------------------*/ + while (N < COMP) + { + if (!lio[tag].do_cmp) {if (++tag == NL) {tag=0;} continue;} + + ncmp = 0; + + for (i=0; istat.status == CBLK_ARW_STATUS_PENDING) + {continue;} + else if (lio[tag].ioI[i]->stat.status == -1) + {++ncmp;} + else if (lio[tag].ioI[i]->stat.status ==CBLK_ARW_STATUS_SUCCESS) + { + ++ncmp; + lba+=2; if (lba >= nblks) lba=cnt%2; + lio[tag].ioI[i]->lba = lba; 
+ lio[tag].ioI[i]->stat.status = -1; + } + else if (lio[tag].ioI[i]->stat.status == CBLK_ARW_STATUS_FAIL) + {io_error(id, "CMD FAIL",lio[tag].ioI[i]->stat.fail_errno);} + else + {io_error(id, "CMD ELSE",lio[tag].ioI[i]->stat.status);} + } + if (ncmp == LD) + { + /* all cmds complete */ + lio[tag].do_send = 1; + lio[tag].do_cmp = 0; + cnt += LD; + N += 1; + } + else + { + usleep(USLEEP); + ++pollN; + } + if (++tag == NL) {tag = 0;} + } + + /*---------------------------------------------------------------------- + * at an interval which does not impact performance, check if secs + * have expired, and randomize lba + *--------------------------------------------------------------------*/ + if (cnt >= TI) + { + TI += TIME_INTERVAL; + gettimeofday(&delta, NULL); + if (delta.tv_sec - start.tv_sec >= nsecs) {TIME=0; COMP=NL;} + lba = lrand48() % TIME_INTERVAL; + } + } + while (TIME || NL-N); + + /*-------------------------------------------------------------------------- + * print IO stats + *------------------------------------------------------------------------*/ + gettimeofday(&delta, NULL); + esecs = ((float)((delta.tv_sec*mil + delta.tv_usec) - + (start.tv_sec*mil + start.tv_usec))) / (float)mil; + printf("d:%s l:%d c:%d p:%d s:%d pollN:%d mbps:%d iops:%d\n", + dev, NL, LD, plun, nsecs, pollN, + (uint32_t)((float)(cnt*4)/1024/esecs), + (uint32_t)((float)cnt/esecs)); + + /*-------------------------------------------------------------------------- + * cleanup + *------------------------------------------------------------------------*/ + for (i=0; i +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _8K 8*1024 +#define _4K 4*1024 + +#define TIME_INTERVAL _8K +#define SET_NBLKS TIME_INTERVAL*4 + +#ifdef _AIX +#define USLEEP 100 +#else +#define USLEEP 50 +#endif + +typedef struct +{ + cblk_io_t **ioI; + cblk_io_t **ioP; + cblk_io_t **ioC; + cblk_io_t *p_ioI; + cblk_io_t *p_ioP; + cblk_io_t *p_ioC; + 
uint8_t **rbuf; + uint32_t do_send; + uint32_t do_cmp; + uint32_t ncmp; + uint32_t nP; +} listio_t; + +/** +******************************************************************************** +** \brief print the usage +** \details +** -d device *the device name to run Block IO \n +** -l #lists *the number of lists \n +** -c #cmds *the number of cmds per list \n +** -s secs *the number of seconds to run the I/O \n +** -p *run in physical lun mode \n +** -i *run using interrupts, not polling \n +*******************************************************************************/ +void usage(void) +{ + printf("Usage:\n"); + printf(" [-d device] [-l lists] [-c cmdsperlist] [-p] [-s secs] [-i]\n"); + exit(0); +} + +/** +******************************************************************************** +** \brief print errno and exit +** \details +** an IO has failed, print the errno and exit +*******************************************************************************/ +void io_error(int id, char *str, int err) +{ + fprintf(stderr, "io_error: %s errno:%d\n", str, err); + cblk_close(id,0); + cblk_term(NULL,0); + exit(err); +} + +/** +******************************************************************************** +** \brief main +** \details +** process input parms \n +** open device \n +** alloc memory \n +** loop running IO until secs expire \n +** print IO stats \n +** cleanup +*******************************************************************************/ +int main(int argc, char **argv) +{ + struct timeval start, delta; + long int mil = 1000000; + float esecs = 0; + char *dev = NULL; + char FF = 0xFF; + char c = '\0'; + chunk_ext_arg_t ext = 0; + int flags = 0; + int rc = 0; + int i,j = 0; + int id = 0; + char *_secs = NULL; + char *_NL = NULL; + char *_LD = NULL; + uint32_t nsecs = 5; + uint32_t NL = 40; + uint32_t LD = 100; + uint32_t intrp_thds = 0; + uint32_t plun = 0; + int tag = 0; + uint32_t cnt = 0; + size_t nblks = 0; + uint32_t pollN = 0; + int32_t ncmp = 0; + 
uint32_t TI = TIME_INTERVAL; + uint32_t N = 0; + uint32_t TIME = 1; + uint32_t COMP = 0; + uint32_t lba = 0; + uint32_t p_i = 0; + listio_t *lio = NULL; + + /*-------------------------------------------------------------------------- + * process and verify input parms + *------------------------------------------------------------------------*/ + while (FF != (c=getopt(argc, argv, "d:l:c:s:phi"))) + { + switch (c) + { + case 'd': dev = optarg; break; + case 'l': _NL = optarg; break; + case 'c': _LD = optarg; break; + case 's': _secs = optarg; break; + case 'p': plun = 1; break; + case 'i': intrp_thds = 1; break; + case 'h': + case '?': usage(); break; + } + } + if (_secs) nsecs = atoi(_secs); + if (_NL) NL = atoi(_NL); + if (_LD) LD = atoi(_LD); + + if (dev == NULL) usage(); + + srand48(time(0)); + + if (NL*LD > _8K) + { + fprintf(stderr, "-l %d * -c %d cannot be greater than 8k\n", NL, LD); + usage(); + } + N = NL; + COMP = N < 3 ? 1 : N/3; + + /*-------------------------------------------------------------------------- + * open device and set lun size + *------------------------------------------------------------------------*/ + rc = cblk_init(NULL,0); + if (rc) + { + fprintf(stderr,"cblk_init failed with rc = %d and errno = %d\n", + rc,errno); + exit(1); + } + if (!plun) flags = CBLK_OPN_VIRT_LUN; + if (!intrp_thds) flags |= CBLK_OPN_NO_INTRP_THREADS; + id = cblk_open(dev, NL*LD, O_RDWR, ext, flags); + if (id == NULL_CHUNK_ID) + { + if (ENOSPC == errno) fprintf(stderr,"cblk_open: ENOSPC\n"); + else if (ENODEV == errno) fprintf(stderr,"cblk_open: ENODEV\n"); + else fprintf(stderr,"cblk_open: errno:%d\n",errno); + cblk_term(NULL,0); + exit(errno); + } + + rc = cblk_get_lun_size(id, &nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_get_lun_size failed, errno:%d\n", errno); + exit(errno); + } + if (!plun) + { + nblks = nblks > SET_NBLKS ? 
SET_NBLKS : nblks; + rc = cblk_set_size(id, nblks, 0); + if (rc) + { + fprintf(stderr, "cblk_set_size failed, errno:%d\n", errno); + exit(errno); + } + } + + /*-------------------------------------------------------------------------- + * alloc data for IO + *------------------------------------------------------------------------*/ + lio = malloc(NL*sizeof(listio_t)); + assert(lio != NULL); + + for (i=0; irequest_type = CBLK_IO_TYPE_READ; + lio[i].ioI[j]->buf = lio[i].rbuf[j]; + lio[i].ioI[j]->lba = lba; + lio[i].ioI[j]->nblocks = 1; + } + } + + /*-------------------------------------------------------------------------- + * loop running IO until secs expire + *------------------------------------------------------------------------*/ + gettimeofday(&start, NULL); + + do + { + /*---------------------------------------------------------------------- + * send up to RD reads, as long as the queuedepth N is not max + *--------------------------------------------------------------------*/ + while (TIME && N) + { + if (!lio[tag].do_send) {if (++tag == NL) tag = 0; continue;} + + lio[tag].do_send = 0; + lio[tag].do_cmp = 1; + lio[tag].ncmp = 0; + ncmp = LD; + N -= 1; + memset(lio[tag].p_ioC, 0, LD*sizeof(cblk_io_t)); + rc = cblk_listio(id, lio[tag].ioI,LD, + NULL,0, + NULL,0, + lio[tag].ioC, &ncmp, + 0,0); + if (rc == -1) io_error(id, "SEND", errno); + } + if (NL==1) usleep(USLEEP); + + /*---------------------------------------------------------------------- + * complete cmds until queue depth is QD-COMP + *--------------------------------------------------------------------*/ + while (N < COMP) + { + if (!lio[tag].do_cmp) {if (++tag == NL) tag = 0; continue;} + + if (0 == lio[tag].nP) + { + memcpy(lio[tag].p_ioP, lio[tag].p_ioI, LD*sizeof(cblk_io_t)); + lio[tag].ncmp = 0; + lio[tag].nP = LD; + ncmp = LD; + } + else + { + ncmp = lio[tag].nP; + } + + memset(lio[tag].p_ioC, 0, LD*sizeof(cblk_io_t)); + rc = cblk_listio(id, NULL,0, + lio[tag].ioP, lio[tag].nP, + NULL,0, + 
lio[tag].ioC, &ncmp, + 0,0); + if (rc == -1) io_error(id, "COMP", errno); + + for (i=0; istat.status ==CBLK_ARW_STATUS_PENDING) + {continue;} + else if (lio[tag].ioC[i]->stat.status == -1) + {io_error(id, "status==-1", -1);} + else if (lio[tag].ioC[i]->stat.status ==CBLK_ARW_STATUS_SUCCESS) + {lba+=2; if (lba >= nblks) lba=cnt%2; + ++lio[tag].ncmp; lio[tag].ioI[i]->lba = lba;} + else if (lio[tag].ioC[i]->stat.status == CBLK_ARW_STATUS_FAIL) + {io_error(id, "CMD FAIL",lio[tag].ioC[i]->stat.fail_errno);} + else + {io_error(id, "CMD ELSE",lio[tag].ioC[i]->stat.status);} + } + if (lio[tag].ncmp == LD) + { + /* all cmds complete */ + lio[tag].do_send = 1; + lio[tag].do_cmp = 0; + lio[tag].nP = 0; + cnt += LD; + N += 1; + } + else + { + /*-------------------------------------------------------------- + * collapse the pending list to only the pending cmds + *------------------------------------------------------------*/ + p_i = 0; + for (i=0; istat.status == CBLK_ARW_STATUS_PENDING) + { + if (i==p_i) {++p_i; continue;} + else {memcpy(lio[tag].ioP[p_i++], + lio[tag].ioP[i], + sizeof(cblk_io_t));} + } + } + lio[tag].nP = p_i; + usleep(USLEEP); + ++pollN; + } + if (++tag == NL) tag = 0; + } + + /*---------------------------------------------------------------------- + * at an interval which does not impact performance, check if secs + * have expired, and randomize lba + *--------------------------------------------------------------------*/ + if (cnt >= TI) + { + TI += TIME_INTERVAL; + gettimeofday(&delta, NULL); + if (delta.tv_sec - start.tv_sec >= nsecs) {TIME=0; COMP=NL;} + lba = lrand48() % TIME_INTERVAL; + } + } + while (TIME || NL-N); + + /*-------------------------------------------------------------------------- + * print IO stats + *------------------------------------------------------------------------*/ + gettimeofday(&delta, NULL); + esecs = ((float)((delta.tv_sec*mil + delta.tv_usec) - + (start.tv_sec*mil + start.tv_usec))) / (float)mil; + printf("d:%s l:%d 
c:%d p:%d s:%d i:%d pollN:%d mbps:%d iops:%d\n", + dev, NL, LD, plun, nsecs, intrp_thds, pollN, + (uint32_t)((float)(cnt*4)/1024/esecs), + (uint32_t)((float)cnt/esecs)); + + /*-------------------------------------------------------------------------- + * cleanup + *------------------------------------------------------------------------*/ + for (i=0; i + +extern "C" +{ +#include "blk_tst.h" +int num_opens = 0; +uint32_t thread_count = 0; +uint64_t block_number; +uint64_t max_xfer = 0; +int num_loops; +int thread_flag; +int num_threads; +int virt_lun_flags=0; +int share_cntxt_flags=0; +int io_bufcnt = 1; +int num_listio = 500; +int test_max_cntx = 0; +chunk_id_t chunks[MAX_OPENS+15]; +void *blk_fvt_data_buf = NULL; +void *blk_fvt_comp_data_buf = NULL; +char *env_filemode = getenv("BLOCK_FILEMODE_ENABLED"); +char *env_max_xfer = getenv("CFLSH_BLK_MAX_XFER"); +char *env_num_cntx = getenv("MAX_CNTX"); +} + +int mode = O_RDWR; + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_open_phys_lun) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst(&id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + blk_open_tst_cleanup(); + + +} +TEST(Block_FVT_Suite, BLK_API_FVT_open_phys_lun_RDONLY_mode_test) +{ + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; + + mode = O_RDONLY; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + + // Try to write , it should fail + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 
, ret); + EXPECT_NE(0 , er_no); + + // Try to read it should pass + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_open_phys_lun_WRONLY_mode_test) +{ + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; + + mode = O_WRONLY; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + // Try to read , it should fail + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + // Try to write it should pass + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_open_phys_lun_RDWR_mode_test) +{ + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; + + mode = O_RDWR; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + // Try to write , it should pass + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + // Try to read it should pass + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, 
open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_open_virt_lun_RDONLY_mode_test) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + mode = O_RDONLY; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + + temp_sz = 1; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Try to write , it should fail + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + // Try to read it should pass + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_open_virt_lun_WRONLY_mode_test) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + mode = O_WRONLY; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + + temp_sz = 1; + get_set_size_flag = 2; + 
blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Try to read , it should fail + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + // Try to write it should pass + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_virt_lun_RDWR_mode_test) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + mode = O_RDWR; + + // Setup dev name and allocated test buffers + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + + temp_sz = 1; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Try to write , it should pass + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + // Try to read it should pass + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_virt_lun) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + + 
ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + EXPECT_EQ(0, er_no ); + + blk_open_tst_cleanup(); +} +/** Need to check max_requests no, if valid */ + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_open_phys_lun_exceed_max_reqs) +{ + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs = 8192; + int er_no = 0; + int open_cnt = 1; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + // expect success + + EXPECT_NE(NULL_CHUNK_ID, id ); + EXPECT_EQ(0, er_no ); + + if (id == NULL_CHUNK_ID) { + blk_open_tst_cleanup(); + return; + } + + max_reqs = (8192 +1); + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + // expect failure + + EXPECT_EQ(NULL_CHUNK_ID, id ); + + EXPECT_EQ(12, er_no ); + + blk_open_tst_cleanup(); + +} + + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_virt_lun_exceed_max_reqs) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs = 8192; + int er_no = 0; + int open_cnt = 1; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + // expect success + + EXPECT_NE(NULL_CHUNK_ID, id ); + EXPECT_EQ(0, er_no ); + + if (id == NULL_CHUNK_ID) { + blk_open_tst_cleanup(); + return; + } + + max_reqs = (8192 +1); + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + // expect failure + + EXPECT_EQ(NULL_CHUNK_ID, id ); + + EXPECT_EQ(12, er_no ); + + blk_open_tst_cleanup(); + +} + +// verify using physical lun, max_contex 508 succeeds + +TEST(Block_FVT_Suite, BLK_API_FVT_UMC_Max_context_tst) +{ + + int open_flags = 0; + int max_reqs = 64; + int er_no = 0; + int ret = 0; + + int max_cntxt = 508; + +#ifdef _AIX + max_cntxt = 494; +#endif + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + max_context(&ret, &er_no, max_reqs, max_cntxt, open_flags,mode); + + EXPECT_EQ(0, ret ); + EXPECT_EQ(0, er_no ); + + blk_open_tst_cleanup (); 
+ +} + +// verify using physical lun, max_contex 509 (for AIX 495) exceeds max , should fail + + +TEST(Block_FVT_Suite, BLK_API_FVT_UMC_Exceed_Max_context_tst) +{ + + int open_flags = 0; + int max_reqs = 64; + int er_no = 0; + int ret = 0; + + int max_cntxt = 509; +#ifdef _AIX + max_cntxt = 496; /* Should fail on 495 */ +#endif + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + if (test_max_cntx ) { + max_cntxt = test_max_cntx; + } + + max_context(&ret, &er_no, max_reqs, max_cntxt, open_flags,mode); + + EXPECT_NE(0, ret ); + EXPECT_NE(0, er_no ); + + blk_open_tst_cleanup (); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_invalid_dev_path) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs = 64; + const char *dev_path = "junk"; + int er_no = 0; + int open_cnt = 1; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + + blk_open_tst_inv_path(dev_path, &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + EXPECT_EQ(NULL_CHUNK_ID, id); + EXPECT_NE(0, er_no); + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_close_phys_lun) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + int close_flag = 0; + int ret = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_open_close_virt_lun) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + int close_flag = 0; + int ret = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); +} + + + +TEST(Block_FVT_Suite, 
BLK_API_FVT_FM_UMC_close_unopen_phys_lun) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + int close_flag = 0; + int ret = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +// Close un-opened lun + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(EINVAL , er_no); + EXPECT_EQ(-1 , ret); + + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_close_unopen_virt_lun) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs = 64; + int er_no = 0; + int open_cnt = 1; + int close_flag = 0; + int ret = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_close_tst(id, &ret, &er_no, close_flag); + + EXPECT_EQ(-1 , ret); + EXPECT_EQ(EINVAL , er_no); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_get_lun_size_physical) +{ + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t phys_lun_sz = 0; + int get_set_size_flag = 0; // 0 = get phys + // 1 = get chunk + // 2 = set size + size_t ret_size = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Get phys lun size + + blk_fvt_get_set_lun_size(id, &phys_lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_NE(0, phys_lun_sz); + +// get size on phys lun should report whole phys lun size + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &ret_size, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + 
EXPECT_EQ(0 , er_no); + +// test chunk size should be equal whole phys lun size + EXPECT_EQ(phys_lun_sz , ret_size); + + blk_open_tst_cleanup(); + +} +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_get_lun_siz_virtual) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t lun_sz = 0; + int get_set_size_flag = 0; // 0 = get phys + // 1 = get chunk + // 2 = set size + + // open virtual lun + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Get virtual lun size , it should fail, since it is not set + + get_set_size_flag = 1; + + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(0, lun_sz); + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_set_size_physical) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t lun_sz = 0; + int get_set_size_flag = 0; // 0 = get phys + // 1 = get chunk + // 2 = set size + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open phys lun + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Try to set size on phy lun , it should fail, since it can not be change + + get_set_size_flag = 2; + lun_sz = 4096; + + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(-1 , ret); + EXPECT_EQ(EINVAL , er_no); + + blk_open_tst_cleanup(); + +} + + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_set_size_virt_lun) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys + // 1 = get chunk + // 2 = set size + size_t chunk_sz = 
0; + + size_t temp_sz = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Try to set size on virt lun e + + get_set_size_flag = 2; + chunk_sz = 4096; + + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Try to get size on virt lun and verify the set size + + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(chunk_sz, temp_sz); + + blk_open_tst_cleanup(); + +} + +// open virt, get lun size, set size > lunsize, should fail + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_set_invalid_chunk_size) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys + // 1 = get chunk + // 2 = set chunk size + + size_t lun_sz = 0; + size_t temp_sz = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open phys lun + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 1; // get phy lun size as 1 chunk + + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cblk_close (id, 0); + num_opens --; + + // open virtual lun + + open_flags = CBLK_OPN_VIRT_LUN; + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Set size > lun size + + temp_sz = lun_sz + 1; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(-1 , ret); + EXPECT_EQ(EINVAL ,er_no); + + blk_open_tst_cleanup(); + +} + +// TODO current block layer code donot support this functions + +TEST(Block_FVT_Suite, 
BLK_API_FVT_FM_UMC_increase_decrease_chunk_size) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + size_t temp_sz = 0; + size_t chunk_sz = 0; + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Get virt lun size , it sould be 0 + + get_set_size_flag = 1; + + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(0, chunk_sz); + + // set chunk size (100) + + temp_sz = chunk_sz + 100; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Get the chunk size and Verify it set correctly + + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(temp_sz, chunk_sz ); + + // Increase chunk sz by 20 blocks + // set size () + + temp_sz += 20; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + + // Get size and Verify it is increased correctly to 220 + chunk_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(temp_sz, chunk_sz); + + // decrease chunk sz by 10 blocks + // set size () + + temp_sz = (chunk_sz - 10); + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // Get the size and Verify it is 
decreased correctly to 210 + chunk_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + EXPECT_EQ(temp_sz, chunk_sz); + + + blk_open_tst_cleanup(); + +} + + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_increase_size_preserve_data) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t temp_sz,nblks; + uint64_t lba; + int cmd; // 0 = read, 1, write + int io_flags = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + if ((env_filemode) && (atoi(env_filemode) == 1)) { + /* + * This test will not work with filemode and no MC. + */ + return; + } + + ASSERT_EQ(0,blk_fvt_setup(1)); + + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // write on block 0 + cmd = FV_WRITE; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + // Increase virt lun size 1 additional block + temp_sz = 2; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + + // Read block 0 and see if it is protected + cmd = FV_READ; + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // Compare buffs, it should not be overwritten + blk_fvt_cmp_buf (nblks, &ret); + EXPECT_EQ(0,ret); + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_get_chunk_status) +{ + + chunk_id_t id = 0; + int open_flags = 
CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 0; + size_t chunk_sz = 0; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Virt lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + // Try to set size on virt lun e + + get_set_size_flag = 2; + chunk_sz = 4096; + + blk_fvt_get_set_lun_size(id, &chunk_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_READ; + + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + blk_get_statistics(id, open_flags, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + + +// Test read succeed on phy lun, prior to set size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_read_phys_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 0; + + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_READ; + + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + // expect fail ret code + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + + +} + +// Test read fails on virt lun, prior to set size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_read_virt_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 0; + + int cmd; // 0 = read, 1, write + + + 
ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_READ; + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); + +} + + +// Test read fails on read outside of lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_read_vir_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1; + lba = 0; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_READ; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + lba = 1; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); + +} + +// Test read fails on read outside of phys lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_read_phys_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + size_t temp_sz,nblks; + int io_flags = 0; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + 
// open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_READ; + + lba = temp_sz+1; + nblks = 1; + + // verify read outside lun size fails + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); +} +// Verify read fails when tried to read more than 1 block size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_read_greater_than_1_blk) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + size_t temp_sz, nblks; + int cmd; // 0 = read, 1, write + int io_flags = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(2)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_READ; + + lba = 0; + nblks = 2; + + // Verify read fails when block size greater than 1 + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(2 , ret); + EXPECT_EQ(22 , er_no); + + blk_open_tst_cleanup(); + +} + +// write data , read data and then compare + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_read_write_compare) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int 
io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_WRITE; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + cmd = FV_READ; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + + blk_open_tst_cleanup(); +} +// Run I/O with xfersize 1M thru 16M + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_1M_thru_16M_xfersize_physical) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t nblks = 0; + size_t lun_sz = 0; + size_t xfersz = 0; + int cmd; + size_t i = 0; + + + if ((env_filemode) && (atoi(env_filemode) == 1)) { + if (env_max_xfer && (atoi(env_max_xfer))) + nblks = atoi(env_max_xfer); + else { + /* + * This test will not work filemode and no CFLSH_BLK_MAX_XFER set. 
+ */ + fprintf(stderr, "Test Skipped :env CFLSH_BLK_MAX_XFER is _not_ set\n"); + return; + + } + } else { + nblks = 4096; /* 256 = 1 M, 4096 = 16M */ + } + + + ASSERT_EQ(0,blk_fvt_setup(nblks)); + + // open physical lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 0; + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + if (lun_sz < nblks) + nblks = lun_sz; + + for (i = 256; i < nblks ; i+= 256) { + xfersz = i; + cmd = FV_WRITE; + lba = 0; + + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Write failed xfersz 0x%lx\n",i); + blk_open_tst_cleanup(); + return; + } + + cmd = FV_READ; + lba = 0; + // read what was wrote xfersize + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Read failed xfersz 0x%lx\n",i); + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(xfersz, &ret); + if (ret != 0) { + fprintf(stderr,"Compare failed xfersz 0x%lx\n",i); + blk_open_tst_cleanup(); + } + ASSERT_EQ(0, ret); + } + + blk_open_tst_cleanup(); +} + +// Run I/O with 1M xfersize + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_1M_xfersize_physical) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t nblks = 0; + size_t lun_sz = 0; + size_t xfersz = 0; + int cmd; + if ((env_filemode) && (atoi(env_filemode) == 1)) { + if (env_max_xfer && (atoi(env_max_xfer))) + nblks = atoi(env_max_xfer); + else { + /* + * This test will not work filemode and no CFLSH_BLK_MAX_XFER set. 
+ */ + fprintf(stderr, "Test Skipped :env CFLSH_BLK_MAX_XFER is _not_ set\n"); + return; + + } + } else { + nblks = 256; /* 256 = 1 M, 4096 = 16M */ + } + + + ASSERT_EQ(0,blk_fvt_setup(nblks)); + + // open physical lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 0; + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + if (lun_sz < nblks) + nblks = lun_sz; + + xfersz = nblks; + cmd = FV_WRITE; + lba = 1; + + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Write failed 1M xfersz \n"); + blk_open_tst_cleanup(); + return; + } + + cmd = FV_READ; + lba = 1; + // read what was wrote xfersize + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Read failed 1M xfersz \n"); + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(xfersz, &ret); + if (ret != 0) { + fprintf(stderr,"Compare failed 1M xfersz \n"); + blk_open_tst_cleanup(); + } + ASSERT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + +// Run I/O with variable xfersize , use env_max_xfer if set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_phys_lun_large_xfer_test) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t nblks = 0; + size_t lun_sz = 0; + size_t xfersz = 0; + int cmd; + + + // Use user supplied max xfer size, if provided + + if (env_max_xfer && (atoi(env_max_xfer))) + nblks = atoi(env_max_xfer); + + // filemode test must have set env_max_xfer variable + if ((env_filemode) && (atoi(env_filemode) == 1) && !nblks) { + 
/* + * This test will not work filemode and no CFLSH_BLK_MAX_XFER set. + */ + fprintf(stderr, "Test Skipped :env CFLSH_BLK_MAX_XFER is _not_ set\n"); + return; + + } else if (!nblks){ + nblks = 513; /* 2M +*/ + } + + + + ASSERT_EQ(0,blk_fvt_setup(nblks)); + + // open physical lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 0; + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + if (lun_sz < nblks) + nblks = lun_sz; + + xfersz = nblks; + cmd = FV_WRITE; + lba = 1; + + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Write failed 1M+ xfersz \n"); + blk_open_tst_cleanup(); + return; + } + + cmd = FV_READ; + lba = 1; + // read what was wrote xfersize + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Read failed 1M+ xfersz \n"); + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(xfersz, &ret); + if (ret != 0) { + fprintf(stderr,"Compare failed 1M+ xfersz \n"); + blk_open_tst_cleanup(); + } + ASSERT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + +// Run I/O to debug failing block xfer size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_514_blksz_physical) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t nblks = 0; + size_t lun_sz = 0; + size_t xfersz = 0; + int cmd; + if ((env_filemode) && (atoi(env_filemode) == 1)) { + if (env_max_xfer && (atoi(env_max_xfer))) + nblks = atoi(env_max_xfer); + else { + /* + * This test will not work filemode and no CFLSH_BLK_MAX_XFER set. 
+ */ + fprintf(stderr, "Test Skipped :env CFLSH_BLK_MAX_XFER is _not_ set\n"); + return; + + } + } else { + nblks = 514; /* 2M +*/ + } + + + ASSERT_EQ(0,blk_fvt_setup(nblks)); + + // open physical lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 0; + blk_fvt_get_set_lun_size(id, &lun_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + if (lun_sz < nblks) + nblks = lun_sz; + + xfersz = nblks; + cmd = FV_WRITE; + lba = 1; + + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Write failed 1M+ xfersz \n"); + blk_open_tst_cleanup(); + return; + } + + cmd = FV_READ; + lba = 1; + // read what was wrote xfersize + blk_fvt_io(id, cmd, lba, xfersz, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(xfersz , ret); + if ((int)xfersz != ret) { + fprintf(stderr,"Read failed 1M+ xfersz \n"); + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(xfersz, &ret); + if (ret != 0) { + fprintf(stderr,"Compare failed 1M+ xfersz \n"); + } + EXPECT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + + +// Verify read fails on not aligned (16 byte) boundaries + +TEST(Block_FVT_Suite, BLK_API_FVT_read_not_16_byte_aligned) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int sz_flags= 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 1; + + size_t temp_sz; + int get_set_size_flag; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + temp_sz = 1; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_READ; + + lba = 0; + nblks = 1; + + 
blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + + +// Test write succeed on phy lun, prior to set size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_write_phys_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 0; + + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_WRITE; + + lba = 0; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + + +} +// Test write fails on virt lun, prior to set size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_write_virt_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 0; + + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_WRITE; + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); + +} + + + +// Test write fails on write outside of lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_write_virt_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int 
cmd; // 0 = read, 1, write + + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_WRITE; + nblks = 1; + lba = 0; + + // This write should succeed + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + lba = 2; + nblks = 1; + + + // This write should fail + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); + +} + +// Test write fails on write outside of phys lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_write_phys_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_WRITE; + + lba = temp_sz+1; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); +} +// Verify write fails when tried to write more than 1 block size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_write_greater_than_1_blk) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + 
int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz, nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(2)); + + // open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_WRITE; + + lba = 0; + nblks = 2; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + +/** + if (!max_xfer && ((env_filemode) && (atoi(env_filemode) == 1))) { + EXPECT_NE(2 , ret); + EXPECT_NE(0 , er_no); + } else { + EXPECT_EQ(2 , ret); + EXPECT_EQ(0 , er_no); + } +**/ + EXPECT_NE(2 , ret); + EXPECT_EQ(22 , er_no); + + blk_open_tst_cleanup(); + +} + + +// Verify write fails when data buffer is not 16 byte aligned + +TEST(Block_FVT_Suite, BLK_API_FVT_write_not_16_byte_aligned) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int sz_flags= 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 1; + + size_t temp_sz; + int get_set_size_flag; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + temp_sz = 1; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_WRITE; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +// Verify async read issued prior to set size succeed on physical lun + +TEST(Block_FVT_Suite, 
BLK_API_FVT_FM_async_read_phys_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_AREAD; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + +} + +// Verify async read issued prior to set size fails on virtual lun + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_read_virt_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; // 0 = read, 1, write + + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_AREAD; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_EQ(EINVAL , er_no); + + blk_open_tst_cleanup(); +} + +// Test async read fails on read outside of lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_read_vir_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + 
temp_sz = 1; + lba = 0; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AREAD; + nblks = 1; + lba = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); + +} + +// Test async read fails on read outside of phys lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_read_phys_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AREAD; + + lba = temp_sz+1; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + +// TODO shouldn't be expecting 0 + + EXPECT_NE(0 , ret); + EXPECT_EQ(22 , er_no); + + blk_open_tst_cleanup(); +} +// Verify async read fails when tried to read more than 1 block size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_read_virt_lun_greater_than_1_blk) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz, nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + ASSERT_EQ(0,blk_fvt_setup(2)); + + // open physocal lun + 
blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AREAD; + + lba = 0; + nblks = 2; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + +/** + if (!max_xfer && ((env_filemode) && (atoi(env_filemode) == 1))) { + EXPECT_NE(2 , ret); + EXPECT_NE(0 , er_no); + } else { + EXPECT_EQ(2 , ret); + EXPECT_EQ(0 , er_no); + } +**/ + EXPECT_NE(2 , ret); + EXPECT_EQ(22 , er_no); + + blk_open_tst_cleanup(); + +} + +// Verify async write issued prior to set size succeed on physical lun + +TEST(Block_FVT_Suite, BLK_API_FVT_FM__async_write_phys_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open physical lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_AWRITE; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + +// TODO Shuldn't be NE 1 ? 
expect failure + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + +} + + +// Verify async write issued prior to set size fails on virtual lun + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_write_virt_lun_wo_setsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t nblks; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + cmd = FV_AWRITE; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_EQ(EINVAL , er_no); + + blk_open_tst_cleanup(); +} + + +// Test async write fails on write outside of lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_write_vir_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1; + lba = 0; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + lba = 1; + nblks = 1; + + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + 
blk_open_tst_cleanup(); + +} + +// Test async write fails on write outside of phys lun size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_write_phys_lun_outside_lunsz) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 0; + get_set_size_flag = 1; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + + lba = temp_sz+1; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_NE(1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); +} +// Verify async write fails when tried to write more than 1 block size + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_write_greater_than_1_blk) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz, nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(2)); + + // open physocal lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + + lba = 0; + nblks = 2; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + 
EXPECT_NE(2 , ret); + EXPECT_EQ(22 , er_no); + + blk_open_tst_cleanup(); + +} + +// async write data , async read data and then compare + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_async_write_read_compare) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + cmd = FV_AREAD; + lba = 1; + nblks = 1; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + +// Verify async write fails when data buffer is not 16 byte aligned. 
+ +TEST(Block_FVT_Suite, BLK_API_FVT_awrite_not_16_byte_aligned) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int sz_flags= 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 1; + + size_t temp_sz; + int get_set_size_flag; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + temp_sz = 1; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +// Verify async read fails when data buffer is not 16 byte aligned. + +TEST(Block_FVT_Suite, BLK_API_FVT_aread_not_16_byte_aligned) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 64; + int er_no = 0; + int sz_flags= 0; + int open_cnt= 1; + int ret = 0; + size_t nblks; + uint64_t lba; + int io_flags = 1; + + size_t temp_sz; + int get_set_size_flag; + int cmd; // 0 = read, 1, write + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + temp_sz = 1; + get_set_size_flag = 2; + + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AREAD; + + lba = 0; + nblks = 1; + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + + +// Verify CBLK_ARESULT_BLOCKING . 
waits till cmd completes +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_aresult_blocking) +{ + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + uint64_t lba; + int io_flags = 0; + size_t temp_sz, nblks; + int cmd; // 0 = read, 1, write + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open Virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + cmd = FV_AWRITE; + + lba = 0; + nblks = 1; + io_flags = FV_ARESULT_BLOCKING; // exercise the blocking-completion path; the previous "io_flags = 0;" immediately after this line wiped the flag and defeated the test's purpose + + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +// Verify that if CBLK_ARESULT_NEXT_TAG is set, the call doesn't return till +// the async req completes + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_aresult_next_tag) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int max_reqs= 1024; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int sz_flags= 0; + size_t temp_sz; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virt lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_NE(-1 , ret); + + blocking_io_tst ( id, &ret, &er_no); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_virt_lun_perf_test) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + 
int max_reqs= 4096; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int sz_flags= 0; + size_t temp_sz; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int num_cmds = 4096; + + ASSERT_EQ(0,blk_fvt_setup(num_cmds+1)); + + // open virt lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 10000; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_NE(-1 , ret); + + io_perf_tst (id, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_phy_lun_perf_test) +{ + + chunk_id_t id = 0; + int open_flags = 0; + int max_reqs= 4096; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int sz_flags= 0; + size_t temp_sz; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int num_cmds = 4096; + + ASSERT_EQ(0,blk_fvt_setup(num_cmds+1)); + + // open phys lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + ASSERT_NE(NULL_CHUNK_ID, id ); + + get_set_size_flag = 0; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_NE(-1 , ret); + + /* Test needs at least a 10000-block LUN */ + if (temp_sz < 10000 ) { + fprintf(stderr, "Test Skipped : Lun size less than then 10000 blks\n"); + blk_open_tst_cleanup(); + return; + } + + io_perf_tst (id, &ret, &er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); + +} + +#ifndef _AIX + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_clone_chunk) +{ + + int er_no = 0; + int ret = 0; + int rc = 0; + + mode = O_RDWR; + + rc = fork_and_clone (&ret, &er_no, mode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(1, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_RDONLY_mode_test) +{ + int er_no = 0; + int ret = 0; + 
int rc = 0; + + int pmode = O_RDONLY; + int cmode = O_RDONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_RDONLY_mode_errpath_1) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_RDONLY; + int cmode = O_RDWR; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_NE(0, ret); + EXPECT_NE(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_RDONLY_mode_errpath_2) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_RDONLY; + int cmode = O_WRONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_NE(0, ret); + EXPECT_NE(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_WRONLY_mode_test) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_WRONLY; + int cmode = O_WRONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_WRONLY_mode_errpath_1) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_WRONLY; + int cmode = O_RDWR; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_NE(0, ret); + EXPECT_NE(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_fork_clone_chunk_WRONLY_mode_errpath_2) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_WRONLY; + int cmode = O_RDONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_NE(0, ret); + EXPECT_NE(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_FM_UMC_fork_clone_chunk_RDWR_mode_test) +{ + int er_no = 0; + 
int ret = 0; + int rc = 0; + + int pmode = O_RDWR; + int cmode = O_RDWR; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_FM_UMC_fork_clone_chunk_RDWR_mode_errpath_1) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_RDWR; + int cmode = O_RDONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); + +} + +TEST(BLOCK_FVT_Suite, BLK_API_FVT_FM_UMC_fork_clone_chunk_RDWR_mode_errpath_2) +{ + int er_no = 0; + int ret = 0; + int rc = 0; + + int pmode = O_RDWR; + int cmode = O_WRONLY; + + rc = fork_and_clone_mode_test (&ret, &er_no, pmode, cmode); + ASSERT_NE(-1, rc ); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + blk_open_tst_cleanup(); + +} + +#endif + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_aresult_explicit_ret_code_tst) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int tag = max_reqs + 1; + uint64_t status; + size_t temp_sz = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virt lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_NE(-1 , ret); + + ret = cblk_aresult(id,&tag,&status,0); + + EXPECT_EQ(-1 , ret); + EXPECT_EQ(EINVAL, errno); + + blk_open_tst_cleanup(); + +} + +/*** new **/ +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_virt_multi_luns_single_thread_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + num_loops = 10; + num_threads = 1; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = 
TRUE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_phys_multi_luns_single_thread_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + num_loops = 10; + num_threads = 1; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = FALSE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + +/*** new **/ + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_virt_multi_luns_multi_thread_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + num_loops = 10; + num_threads = 10; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = TRUE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_phy_lun_multi_thread_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + num_loops = 10; + num_threads = 10; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = FALSE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_virt_lun_share_cntxt_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + ret = validate_share_context(); + if (ret) { + fprintf(stderr,"SKIPPING Test, context _not_ sharable \n"); + if (blk_fvt_data_buf != NULL) + free(blk_fvt_data_buf); + if (blk_fvt_comp_data_buf != NULL) + free(blk_fvt_comp_data_buf); + return; + } + num_loops = 10; + num_threads = 10; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = TRUE; + share_cntxt_flags = TRUE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_phys_lun_share_cntxt_rw_tst) +{ + + int er_no = 0; + int ret = 0; + + ASSERT_EQ(0,blk_fvt_setup(1)); + ret = validate_share_context(); + if (ret) { + fprintf(stderr,"SKIPPING Test, context _not_ sharable \n"); + if (blk_fvt_data_buf != NULL) + 
free(blk_fvt_data_buf); + if (blk_fvt_comp_data_buf != NULL) + free(blk_fvt_comp_data_buf); + return; + } + num_loops = 10; + num_threads = 10; + num_opens = 0; + thread_flag = 1; + virt_lun_flags = FALSE; + share_cntxt_flags = TRUE; + blk_thread_tst(&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_read_write_compare_loop_1000) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + nblks = 1; + for ( lba = 1; lba <= 1000; lba++ ) { + cmd = FV_WRITE; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + cmd = FV_READ; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + } + // get statistics + blk_get_statistics(id,open_flags,&ret,&er_no); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_get_chunk_status_num_commands) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + chunk_stats_t stats; + + + 
+ ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_EQ(0 , ret); + ASSERT_EQ(0 , er_no); + + nblks = 1; + for ( lba = 1; lba <= 1000; lba++ ) { + cmd = FV_WRITE; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + cmd = FV_READ; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + } + // get statistics + + ret = cblk_get_stats (id, &stats, 0); + + EXPECT_EQ(0 , ret); + EXPECT_EQ(1000,stats.num_reads); + EXPECT_EQ(1000,stats.num_writes); + EXPECT_EQ(1000,stats.num_blocks_read); + EXPECT_EQ(1000,stats.num_blocks_written); + + + blk_open_tst_cleanup(); + +} + + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_aread_awrite_compare_loop_1000) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + ASSERT_EQ(0 , ret); + ASSERT_EQ(0 , er_no); + + nblks = 1; + for ( lba = 1; lba <= 1000; lba++ ) { + cmd = FV_AWRITE; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + cmd = FV_AREAD; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 
, ret); + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + } + + blk_open_tst_cleanup(); +} + +// Test NO_INTRP async io tests + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_virt_lun_NO_INTRP_THREAD_SET_async_io) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + open_flags |= CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + nblks = 1; + lba = 1; + cmd = FV_AWRITE; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + blk_open_tst_cleanup(); + return; + } + cmd = FV_AREAD; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + +// Test NO_INTRP synchronous io tests + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_virt_lun_NO_INTRP_THREAD_SET_sync_io) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + 
open_flags |= CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + nblks = 1; + lba = 1; + cmd = FV_WRITE; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + blk_open_tst_cleanup(); + return; + } + cmd = FV_READ; + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + blk_open_tst_cleanup(); + return; + } + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + + blk_open_tst_cleanup(); +} + +// testflag 1 - NO_INTRP set, null status, io_flags ARW_USER set +// verify it ret -1 and errno set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_1) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 1; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 1; + open_flags |= CBLK_OPN_VIRT_LUN | CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(-1 , ret); + EXPECT_NE(0 , er_no); + + blk_open_tst_cleanup(); +} + +// testflag 2 - NO_INTRP _not set, status, io_flags ARW_USER not set +// expect 
success , verify arw_status is not updated. + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_2) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 0; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 2; + open_flags |= CBLK_OPN_VIRT_LUN ; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +// testflag 3 - NO_INTRP flag _not set, status, io_flags ARW_USER_STATUS set +// Write should complete ,pass test, no errno set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_3) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 0; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 3; + open_flags |= CBLK_OPN_VIRT_LUN ; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(1 , ret); + EXPECT_EQ(0 , er_no); + + blk_open_tst_cleanup(); +} + +// testflag 4 - NO_INTRP flag set, status, 
io_flags ARW_USER set +// expect failure ret code -1 and errno set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_4) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 0; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 4; + open_flags |= CBLK_OPN_VIRT_LUN | CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(-1 , ret); + EXPECT_NE( 0 , er_no); + + blk_open_tst_cleanup(); +} + +// testflag 5 - NO_INTRP flag set, status, ARW_USER | ARW_WAIT set +// expect failure ret code -1 and errno set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_5) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 0; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 5; + open_flags |= CBLK_OPN_VIRT_LUN | CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(-1 , ret); + EXPECT_NE( 0, er_no); + + 
blk_open_tst_cleanup(); +} + +// testflag 6 - NO_INTRP flag set, status, ARW_USER | ARW_WAIT| ARW_USER_TAG set +// expect failure ret = -1 and errno set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_NO_INTRP_THREAD_ARG_TEST_6) +{ + + + chunk_id_t id = 0; + int open_flags = 0; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + int testflag = 0; + size_t temp_sz; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + testflag = 6; + open_flags |= CBLK_OPN_VIRT_LUN | CBLK_OPN_NO_INTRP_THREADS; + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 64; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + blk_fvt_intrp_io_tst(id, testflag, open_flags, &ret, &er_no); + + EXPECT_EQ(-1 , ret); + EXPECT_NE( 0, er_no); + + blk_open_tst_cleanup(); +} + +// Mix read/write compare with async I/O and sync I/O +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_mix_i_o_loop_1000) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + nblks = 1; + for ( lba = 1; lba <= 1000; lba++ ) { + (lba & 0x1) ? 
(cmd = FV_WRITE):(cmd = FV_AWRITE); + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + break; + } + + (lba & 0x1) ? (cmd = FV_READ):(cmd = FV_AREAD); + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + break; + } + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + } + + blk_open_tst_cleanup(); +} + +// Mix read/write compare with async I/O and sync I/O with NO_INTRP +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_mix_i_o__no_intrp_loop_1000) +{ + + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN | CBLK_OPN_NO_INTRP_THREADS; + int sz_flags= 0; + int max_reqs= 64; + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + uint64_t lba; + int io_flags = 0; + size_t temp_sz,nblks; + int cmd; + + + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + nblks = 1; + for ( lba = 1; lba <= 1000; lba++ ) { + (lba & 0x1) ? (cmd = FV_WRITE):(cmd = FV_AWRITE); + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + break; + } + + (lba & 0x1) ? 
(cmd = FV_READ):(cmd = FV_AREAD); + blk_fvt_io(id, cmd, lba, nblks, &ret, &er_no, io_flags, open_flags); + + EXPECT_EQ(1 , ret); + + // If errors no reason to continue + if (ret != 1) { + break; + } + + // compare buffers + + blk_fvt_cmp_buf(nblks, &ret); + + EXPECT_EQ(0, ret); + } + + blk_open_tst_cleanup(); +} + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_list_io_args_test) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 1000; // will be using list with 100 i/o requests + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int i; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + size_t temp_sz; + int arg_tst = 0; + + + mode = O_RDWR; + + ASSERT_EQ(0,blk_fvt_setup(1)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 16; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + for (i=1 ; i<10; i++) { + arg_tst +=1 ; + er_no=ret=0; + blk_list_io_arg_test(id, arg_tst, &er_no, &ret); + + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, er_no); + + // If errors no reason to continue + if ((ret != -1) || (er_no != EINVAL)) { + break; + } + + } + + blk_open_tst_cleanup(); + + +} + +// Test listio on virt lun, without Timout, and with CBLK_IO_USER_STATUS flag set + +TEST(Block_FVT_Suite, BLK_API_FVT_FM_UMC_virt_lun_list_io_test) +{ + + chunk_id_t id = 0; + int open_flags = CBLK_OPN_VIRT_LUN; + int sz_flags= 0; + int max_reqs= 1000; // will be using list with 500 i/o requests + int er_no = 0; + int open_cnt= 1; + int ret = 0; + int get_set_size_flag = 0; // 0 = get phys lun sz + // 1 = get chunk sz + // 2 = set chunk sz + + size_t temp_sz; + int t_type = 1; + uint64_t timeout = 0; + int uflags = CBLK_IO_USER_STATUS; + int cmd ; + int i=0; + + /* Number of 4K bufs */ + io_bufcnt = 500; + + + 
ASSERT_EQ(0,blk_fvt_setup(num_listio)); + + // open virtual lun + blk_open_tst( &id, max_reqs, &er_no, open_cnt, open_flags, mode); + + ASSERT_NE(NULL_CHUNK_ID, id ); + + temp_sz = 1024; + get_set_size_flag = 2; + blk_fvt_get_set_lun_size(id, &temp_sz, sz_flags, get_set_size_flag, &ret, &er_no); + EXPECT_EQ(0 , ret); + EXPECT_EQ(0 , er_no); + + // If errors no reason to continue + if ((ret != 0) || (er_no != 0)) { + blk_open_tst_cleanup(); + return; + } + + + cmd = FV_WRITE; + blk_list_io_test(id, cmd, t_type, uflags, timeout, &er_no, &ret,num_listio); + + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + + // If errors no reason to continue + if ((ret != 0) || (er_no != 0)) { + blk_open_tst_cleanup(); + return; + } + + + cmd = FV_READ; + blk_list_io_test(id, cmd, t_type, uflags, timeout, &er_no, &ret, num_listio); + + EXPECT_EQ(0, ret); + EXPECT_EQ(0, er_no); + + // If errors no reason to continue + if ((ret != 0) || (er_no != 0)) { + blk_open_tst_cleanup(); + return; + } + + + /* compare 1blk at a time */ + for (i=0; i +#include "blk_tst.h" + +//DO NOT ADD TEST CASES IN THIS FILE + +class Block_FVT_Suite : public testing::Test +{ + void SetUp() + { + initialize_blk_tests(); + } + void TearDown() + { + terminate_blk_tests(); + } +}; diff --git a/src/build/doxygen/doxygen.conf b/src/build/doxygen/doxygen.conf new file mode 100644 index 00000000..f2f5fc8f --- /dev/null +++ b/src/build/doxygen/doxygen.conf @@ -0,0 +1,1465 @@ +# Doxyfile 1.6.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] 
+# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = Surelock + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = obj/doxygen/ + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. 
Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. 
This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it parses. +# With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this tag. +# The format is ext=language, where ext is a file extension, and language is one of +# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, +# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat +# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. 
Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen to replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. 
+ +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. 
+ +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. 
If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. 
This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. 
If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by +# doxygen. The layout file controls the global structure of the generated output files +# in an output format independent way. The create the layout file that represents +# doxygen's defaults, run doxygen with the -l option. 
You can optionally specify a +# file name after the option, if omitted DoxygenLayout.xml will be used as the name +# of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = YES + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. 
Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = doxywarnings.log + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = src + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. 
This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = */obj/* \ + */img/* \ + */.git/* + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = */gtest*/* + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. 
+ +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. 
+ +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. 
+ +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# If the HTML_TIMESTAMP tag is set to YES then the generated HTML +# documentation will contain the timestamp. 
+ +HTML_TIMESTAMP = NO + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. 
com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER +# are set, an additional index file will be generated that can be used as input for +# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated +# HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. +# For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's +# filter section matches. +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. 
+ +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) +# there is already a search function so this one should typically +# be disabled. + +SEARCHENGINE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. 
+ +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. 
Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. 
+# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = YES + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. 
+ +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. 
+# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. 
+ +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. 
+# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. 
+ +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. 
+ +UML_LOOK = YES + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. 
+ +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. 
Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/src/build/install/makefile b/src/build/install/makefile new file mode 100644 index 00000000..1bca9ed6 --- /dev/null +++ b/src/build/install/makefile @@ -0,0 +1,254 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +UNAME=$(shell uname) +ROOTPATH = ../../.. 
+ +APPNAME?=capikv + +PREFIX?=${PKGDIR}/install_root +TEST_PREFIX?=${PKGDIR}/test_root +AFUPREFIX?=${PKGDIR}/afu_root + +ifeq ($(UNAME),AIX) +INSTALL_ROOT=${PREFIX}/usr/local/capikv +TEST_ROOT=${TEST_PREFIX}/usr/local/capikv +INSTALL=/usr/bin/cp +AFU_ROOT=${AFUPREFIX}/usr/local/capikv/afu +else +INSTALL_ROOT=${PREFIX}/opt/ibm/capikv +TEST_ROOT=${TEST_PREFIX}/opt/ibm/capikv +INSTALL=install +AFU_ROOT=${AFUPREFIX}/opt/ibm/capikv/afu +endif + +_LIBSRCDIR=${IMGDIR} +_TESTSRCDIR=${TESTDIR} +_DOCSRCDIR=${ROOTPATH}/obj/doxygen +_RESOURCEDIR=${ROOTPATH}/src/build/install/resources + +_BIN_DIR=${INSTALL_ROOT}/bin +_ETC_DIR=${INSTALL_ROOT}/etc +_EXAMPLES_DIR=${INSTALL_ROOT}/examples +_LIC_DIR=${INSTALL_ROOT}/license +_LIB_DIR=${INSTALL_ROOT}/lib +#_DOC_DIR=${INSTALL_ROOT}/doc +_TEST_DIR=${TEST_ROOT}/test +_TEST_DATA_DIR=${TEST_ROOT}/test/data +_INC_DIR=${INSTALL_ROOT}/include + +_AFU_IMAGES_DIR=${AFU_ROOT}/images + +_MAN_DIR=${PREFIX}/usr/share/man/man3 +_SYSTEMD_DIR=${PREFIX}/usr/lib/systemd/system +_UDEV_RULES_DIR=${PREFIX}/lib/udev/rules.d +all: + rm -rf ${INSTALL_ROOT} + ${MAKE} capikv capikv-test afuimage + +capikv: + @mkdir -p ${_BIN_DIR} + @mkdir -p ${_ETC_DIR} + @mkdir -p ${_LIB_DIR} + @mkdir -p ${_LIC_DIR} + @mkdir -p ${_EXAMPLES_DIR} + @mkdir -p ${_MAN_DIR} + @mkdir -p -m 755 ${_INC_DIR} + @mkdir -p ${_SYSTEMD_DIR} + @mkdir -p ${_UDEV_RULES_DIR} +ifeq ($(UNAME),AIX) + ${INSTALL} ${ROOTPATH}/src/kv/arkdb.h ${_INC_DIR} + ${INSTALL} ${ROOTPATH}/src/include/zmalloc.h ${_INC_DIR} + ${INSTALL} ${ROOTPATH}/src/include/capiblock.h ${_INC_DIR} + ${INSTALL} ${ROOTPATH}/src/include/cflash_tools_user.h ${_INC_DIR} +else + ${INSTALL} -m 644 ${ROOTPATH}/src/kv/arkdb.h ${_INC_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/include/zmalloc.h ${_INC_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/include/capiblock.h ${_INC_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/include/cflash_tools_user.h ${_INC_DIR} + +endif + + + @#Bins +ifeq ($(UNAME),AIX) + ${INSTALL} ${_TESTSRCDIR}/asyncstress ${_BIN_DIR} 
+ ${INSTALL} ${_TESTSRCDIR}/_tst_ark ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/asyncstress64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/_tst_ark64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blockio64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blocklistio64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blockplistio64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blockio ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blocklistio ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/blockplistio ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_kv_async64 ${_BIN_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_kv_sync64 ${_BIN_DIR} +else + ${INSTALL} -s ${PGMDIR}/cxlfd ${_BIN_DIR} + ${INSTALL} -s ${PGMDIR}/cxlflashutil ${_BIN_DIR} + ${INSTALL} -s ${PGMDIR}/provtool ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/asyncstress ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/_tst_ark ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/blockio ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/blocklistio ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/blockplistio ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/run_kv_sync ${_BIN_DIR} + ${INSTALL} -s ${_TESTSRCDIR}/run_kv_async ${_BIN_DIR} + + ${INSTALL} ${_RESOURCEDIR}/cxlfrefreshluns ${_BIN_DIR} + ${INSTALL} ${_RESOURCEDIR}/cablecheck ${_BIN_DIR} + ${INSTALL} ${_RESOURCEDIR}/cxlfsetlunmode ${_BIN_DIR} + ${INSTALL} ${_RESOURCEDIR}/cxlfstatus ${_BIN_DIR} + ${INSTALL} ${_RESOURCEDIR}/capikvutils.sh ${_BIN_DIR} + ${INSTALL} ${_RESOURCEDIR}/setup.sh ${_BIN_DIR} + + + #TEMPORARY install - remove this after kernel driver can run workaround + ${INSTALL} ${_RESOURCEDIR}/afucfg.sh ${_BIN_DIR} + + + @#Dev Permissions for cxl - set mode to 644 explicitly + ${INSTALL} -m 644 ${_RESOURCEDIR}/80-cxl.rules ${_UDEV_RULES_DIR} + ${INSTALL} -m 644 ${_RESOURCEDIR}/80-cxlflash.rules ${_UDEV_RULES_DIR} +endif + + @#Libs +ifeq ($(UNAME),AIX) + ${INSTALL} ${_LIBSRCDIR}/lib*.a ${_LIB_DIR} +else + ${INSTALL} -s ${_LIBSRCDIR}/lib*.so ${_LIB_DIR} +endif + + @#Sample Code +ifeq ($(UNAME),AIX) + ${INSTALL} ${ROOTPATH}/src/block/test/blockio.c 
${_EXAMPLES_DIR} + ${INSTALL} ${ROOTPATH}/src/block/test/blocklistio.c ${_EXAMPLES_DIR} + ${INSTALL} ${ROOTPATH}/src/block/test/blockplistio.c ${_EXAMPLES_DIR} + ${INSTALL} ${ROOTPATH}/src/kv/test/run_kv_async.c ${_EXAMPLES_DIR} + ${INSTALL} ${ROOTPATH}/src/kv/test/run_kv_sync.c ${_EXAMPLES_DIR} + + @#Licenses + ${INSTALL} ${_RESOURCEDIR}/license/*.txt ${_LIC_DIR} +else + ${INSTALL} -m 644 ${ROOTPATH}/src/block/test/blockio.c ${_EXAMPLES_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/block/test/blocklistio.c ${_EXAMPLES_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/block/test/blockplistio.c ${_EXAMPLES_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/kv/test/run_kv_async.c ${_EXAMPLES_DIR} + ${INSTALL} -m 644 ${ROOTPATH}/src/kv/test/run_kv_sync.c ${_EXAMPLES_DIR} + + @#Licenses + ${INSTALL} -m 644 ${_RESOURCEDIR}/license/*.txt ${_LIC_DIR} + + @#SystemD Daemons + ${INSTALL} -m 644 ${_RESOURCEDIR}/cxlfd.service ${_SYSTEMD_DIR} + + @#Linux Readme + ${INSTALL} -m 644 ${_RESOURCEDIR}/readme.txt ${INSTALL_ROOT} +endif + + @#Version tags and useful info + echo "${GITREVISION}" > ${INSTALL_ROOT}/version.txt + +capikv-test: + + @#TEST ONLY CONTENT - DO NOT SHIP + @mkdir -p ${_TEST_DIR} + @mkdir -p ${_TEST_DATA_DIR} + ${INSTALL} ${_TESTSRCDIR}/blk_test ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/pvtestauto ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/pblkread ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/transport_test ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_cflash_fvt ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_fvt_kv ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_fvt ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_block_fvt ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_kv_async_multi ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/run_kv_benchmark ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/fvt_ark_io ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/fvt_ark_mcio ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/fvt_ark_mc_aio ${_TEST_DIR} + ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf ${_TEST_DIR} + ${INSTALL} 
${_TESTSRCDIR}/fvt_ark_perf2 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf_check ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf_tool ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_kv_tst_ark ${_TEST_DIR}
+ ${INSTALL} ${ROOTPATH}/src/test/multi_process_perf ${_TEST_DIR}
+ ${INSTALL} ${ROOTPATH}/src/block/test/block_perf_check ${_TEST_DIR}
+ @#Enable factory flash if the test image is installed
+ ${INSTALL} ${_RESOURCEDIR}/flash_factory_image ${_TEST_DIR}
+
+
+ifeq ($(UNAME),AIX)
+ ${INSTALL} ${_TESTSRCDIR}/blk_test64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/pvtestauto64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/pblkread64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/transport_test64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_cflash_fvt64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_fvt_kv64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_fvt64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_block_fvt64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_kv_async_multi64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/run_kv_benchmark64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_io64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_mcio64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_mc_aio64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf_check64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_ark_perf_tool64 ${_TEST_DIR}
+ ${INSTALL} ${_TESTSRCDIR}/fvt_kv_tst_ark64 ${_TEST_DIR}
+ ${INSTALL} ${ROOTPATH}/src/test/multi_process_perf ${_TEST_DIR}
+ ${INSTALL} ${ROOTPATH}/src/block/test/block_perf_check ${_TEST_DIR}
+endif
+ifeq ($(UNAME),AIX)
+ @#Manpages - this is highly repetitive
+else
+ @#seems like it could become its own make rule / recipe... 
+ ${INSTALL} ${_DOCSRCDIR}/man/man3/afu.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/cxl.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/capiblock.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/libcxl.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/arkdb.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/ark.h.3 ${_MAN_DIR} + ${INSTALL} ${_DOCSRCDIR}/man/man3/cflash_tools_user.h.3 ${_MAN_DIR} + @#compress manpages in destination + gzip -f ${_MAN_DIR}/*.h.3 +endif + +afuimage: + @mkdir -p ${_AFU_IMAGES_DIR} +ifeq ($(UNAME),AIX) +# need to have a valid ffdc mechanism for AIX... +else #Linux only + ${INSTALL} -s ${_TESTSRCDIR}/cxl_afu_dump ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/capi_flash.pl ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/blacklist-cxlflash.conf ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/cxlffdc ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/flash_all_adapters ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/reload_all_adapters ${AFU_ROOT} + ${INSTALL} ${_RESOURCEDIR}/psl_trace_dump ${AFU_ROOT} +endif + ${INSTALL} ${_RESOURCEDIR}/corsa* ${_AFU_IMAGES_DIR} + +include ${ROOTPATH}/config.mk diff --git a/src/build/install/resources/80-cxl.rules b/src/build/install/resources/80-cxl.rules new file mode 100644 index 00000000..a65817f8 --- /dev/null +++ b/src/build/install/resources/80-cxl.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", GROUP="cxl", MODE="0660" +SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", KERNEL=="afu[0-9]*.[0-9]*m", OWNER="cxl", GROUP="cxl", MODE="0600" +SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", KERNEL=="afu[0-9]*.[0-9]*s", OWNER="cxl", GROUP="cxl", MODE="0600" diff --git a/src/build/install/resources/80-cxlflash.rules b/src/build/install/resources/80-cxlflash.rules new file mode 100644 index 00000000..ad28376c --- /dev/null +++ b/src/build/install/resources/80-cxlflash.rules @@ -0,0 +1,4 @@ +#match the cxlflash driver, and locate all sg* devices. 
Note that this cannot depend on the sysfs attributes for cxlflash +#as they may not be set up yet (e.g. listing all sg* devices under cxlflash) +SUBSYSTEM=="scsi_generic", DRIVERS=="cxlflash", GROUP="cxl", MODE="0660", RUN+="/opt/ibm/capikv/bin/cxlflashutil --config --device=/dev/$kernel" +SUBSYSTEM=="block", DRIVERS=="cxlflash", GROUP="cxl", MODE="0660" diff --git a/src/build/install/resources/afucfg.sh b/src/build/install/resources/afucfg.sh new file mode 100755 index 00000000..18678189 --- /dev/null +++ b/src/build/install/resources/afucfg.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/afucfg.sh $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +#run capi-specific adapter configs +echo "INFO: Running adapter-specific configuration." 
+ +accels=`lspci -d 1014:04cf | awk '{print $1}'` +for accel in ${accels}; do + setpci -s ${accel} 808.L=440000 + readback=`setpci -s ${accel} 808.L` + echo "INFO: Accelerator ${accel} cfg register 808: ${readback}" +done + diff --git a/src/build/install/resources/blacklist-cxlflash.conf b/src/build/install/resources/blacklist-cxlflash.conf new file mode 100644 index 00000000..03d00ef0 --- /dev/null +++ b/src/build/install/resources/blacklist-cxlflash.conf @@ -0,0 +1,9 @@ +#Prevent cxlflash from loading automatically if an eligible CAPI Accelerator (CXL) adapter is found +#This is needed typically during maintenance of multiple adapters, such as an adapter microcode +#update which updates all present CXL adapters. + +#If this file is found in /etc/modprobe.d then the kernel will respect the blacklist setting until +#subsequent reboots occur. If an initramfs update is made while this file is present in /etc +#then the cxlflash driver will be disabled on boot, requiring a manual "modprobe" of the driver. + +blacklist cxlflash diff --git a/src/build/install/resources/cablecheck b/src/build/install/resources/cablecheck new file mode 100755 index 00000000..8d965fad --- /dev/null +++ b/src/build/install/resources/cablecheck @@ -0,0 +1,89 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/cxlfrefreshluns $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +CAPIKV=/opt/ibm/capikv +source $CAPIKV/bin/capikvutils.sh + +KERNELMOD=cxlflash + +rmmod_invoked=0 + +showhelp() +{ + echo "Usage: $0" + echo " Perform a connectivty and cable check for all present IBM Data Engine for NoSQL accelerators." +} + +disablemodule() +{ + echo "INFO: Ensuring $KERNELMOD is unloaded to guarantee that all accelerators are idle." + if lsmod | grep cxlflash + then + echo "INFO: Temporarily unloading ${KERNELMOD}..." + rmmod_invoked=1; + rmmod $KERNELMOD || + #multipath typically causes rmmod to fail for cxlflash + if pgrep "multipathd" > /dev/null + then + die "ERROR: Linux multipathing is enabled, and must be disabled prior to running this tool as it typically prevents the unloading of the $KERNELMOD driver. Please temporarily flush the multipath tables by running \"multipath -F\" and try again." 4 + else + die "Unable to remove the $KERNELMOD driver. Please ensure that all applications that exploit $KERNELMOD or ibmcapikv are terminated." + fi; + fi; + echo "INFO: $KERNELMOD unloaded." +} + +enablemodule() +{ + echo "INFO: re-enabling cxlflash" + modprobe $KERNELMOD; +} + +doloopback() +{ + echo "INFO: Performing cable checks for all present accelerators." + $CAPIKV/bin/provtool --loopback +} +#show help if we haven't gotten a correct # of args +if [ "$#" -ne 0 ]; then + showhelp; + exit $EINVAL; +fi + +echo "INFO: Performing diagnostic cable check for all present IBM Data Engine for NoSQL accelerators." +disablemodule; + +if ! doloopback +then + echo "ERROR: Cable verification checks failed. 
Please check all cables and SFPs between this system's accelerators and the FlashSystem." +else + echo "Cable verification check result: PASS" + if [[ $rmmod_invoked -eq "1" ]]; then + enablemodule + else + echo "INFO: $KERNELMOD was not loaded on invocation of this script, so this script will not automatically reload it. You may manually reload it by running \"sudo modprobe cxlflash\"" + fi +fi diff --git a/src/build/install/resources/capi_flash.pl b/src/build/install/resources/capi_flash.pl new file mode 100755 index 00000000..f5786fb2 --- /dev/null +++ b/src/build/install/resources/capi_flash.pl @@ -0,0 +1,587 @@ +#! /usr/bin/perl +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/capi_flash.pl $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG +## +use Fcntl; +use Getopt::Long; + +# ------------------------------------------------------------------------------- +# Variables +# ------------------------------------------------------------------------------- +my $a; # Flash Address +my $ra; # Flash Read Address +my $dat; # Data +my $d; # Packed data +my $edat; # Expected Data +my $fdat; # Flash Data +my $cfgadr; # Configuration Register Address +my $rc; # Return Code from PSL when performing an operation +my $bc; # Block Counter +my $dif; # Return code from file read +my $cp; # Continue to poll flag +my $st; # Start Time +my $lt; # Last Poll Time +my $ct; # Current Time + +my $dif; # Data in file flag (End of File) +my $i; # Loop Counter + +my $et; # Total Elasped Time + +my $eet; # Total/End Erase Time +my $set; # Start Erase Time + +my $ept; # Total Program Time +my $spt; # Start Program Time +my $ept; # End Program Time + +my $evt; # Total Verify Time +my $svt; # Start Verify Time +my $evt; # End Verify Time + +my $fsize; # File size +my $n; # Number of Blocks to Program +my $numdev; # Number of CORSA Devices in the lspci output +my $devstr; # Device String from lspci output +my @dsev; # Device String split apart to find device location + +# ------------------------------------------------------------------------------- +# Defaults: +# ------------------------------------------------------------------------------- +my $prthelp = 0; # Print Help Flag +my $partition0 = 0; # Partition 0 Flag +my $partition1 = 1; # Partition 1 Flag +my $partition = "User Partition"; # Programming Partition Name +my $list = "0"; +my $optOK = 0; # Options are OK +my $filename = ""; # RBF File Name +my $target = ""; # pcie target +my $cfgfile = ""; # RBF File Name + +my $ec = 0; # Error Code + +my $ADDR_REG = 0x920; # Flash Address Configuration Register +my $SIZE_REG = 0x924; # Flash Size Configuration Register +my $CNTL_REG = 0x928; # Flash Control / Status Configuration Register +my $DATA_REG = 
0x92C; # Flash Data Configuration Register + +# ------------------------------------------------------------------------------- +# Parse Options +# ------------------------------------------------------------------------------- +$optOK = GetOptions ( "f|rbf=s" => \$filename, + "p0" , \$partition0, + "p1" , \$partition1, + "v" , \$vpd, + "l" , \$list, + "t|target=s" => \$target, + "h|help!" , \$prthelp + ); + +if ($ARGV[0]) { + print "\nUnknown Command Line Options:\n"; + foreach(@ARGV) { + print " $_\n"; + } + print "\n"; + $prthelp = 1; +} + +if (!($optOK) | $prthelp) { + print "\n"; + print "Usage: capi_flash_pgm [-h | --help] [-p0 | -p1 | -v] [-f | --rbf] \n"; + print " -p0 : Program Partition 0 - Factory Image \n"; + print " -p1 : Program Partition 1 - User Image {Default} \n"; + print " -v : Program VPD Information \n"; + print " -f or --rbf : Raw Binary File Containing the Bitstream \n"; + print " -l : List pci location of capi devices \n"; + print " -t or --target : Target pcie device to flash \n"; + print " -h or --help : Help - Print this message \n"; + die "\n"; +} + +if ( !(($partition == 0) | ($partition == 1)) ) { + print "\nPartition can only be 0 or 1\n"; + die; +} + + +#added to support programming VPD - Charlie Johns +if ( $vpd == 1 ) { + #note we override the "address" set by the user above, so we cannot program VP + #simultaneously with another flash region (factory or user). + $a = 0x1FF0000 +} elsif ($partition0) { + #if we are not programming VPD, see if we should program the "partition 0" image + $a = 0x10000; + $partition = "Factory Image" +} else { + #default to programming partition 1 otherwise. 
+ $a = 0x850000;
+}
+
+
+
+
+# -------------------------------------------------------------------------------
+# Open Bitstream File
+# -------------------------------------------------------------------------------
+if (-e $filename) {
+ open(IN, "< $filename");
+ binmode(IN);
+} elsif (!$list){
+ die "Raw Binary Bitstream File $filename does not exist.\n";
+}
+
+# -------------------------------------------------------------------------------
+# Make stdout autoflush
+# -------------------------------------------------------------------------------
+select(STDOUT); # default
+$| = 1;
+
+if ($list) {
+
+ my @files = </sys/bus/pci/devices/*>;
+ my ($vendor, $device);
+
+ $numdev = 0;
+ for my $file ( @files ) {
+ open(F, $file . "/vendor") or die "Can't open $filename: $!";
+ read(F, $vendor, 6);
+ close (F);
+ open(F, $file . "/device") or die "Can't open $filename: $!";
+ read(F, $device, 6);
+ close (F);
+
+ if (($vendor eq "0x1014") && (($device eq "0x0477") || ($device eq "0x04cf"))) {
+ $cfgfile = $file;
+ print "Found CAPI Adapter : $cfgfile\n";
+ $numdev++;
+ }
+ }
+exit;
+}
+# -------------------------------------------------------------------------------
+# Find the CAPI Device
+# -------------------------------------------------------------------------------
+my @files = </sys/bus/pci/devices/*>;
+my ($vendor, $device);
+
+if ($target){
+ print "Target specified: $target\n";
+ $cfgfile = $target;
+}
+else{
+$numdev = 0;
+for my $file ( @files ) {
+ open(F, $file . "/vendor") or die "Can't open $filename: $!";
+ read(F, $vendor, 6);
+ close (F);
+ open(F, $file . 
"/device") or die "Can't open $filename: $!"; + read(F, $device, 6); + close (F); + + if (($vendor eq "0x1014") && (($device eq "0x0477") || ($device eq "0x04cf"))) { + $cfgfile = $file; + $numdev++; + } + } +} + +if ($numdev == 0 and $cfgfile eq "") { + die "CAPI Device (ID = 0x0477) does not exist.\n"; +} + +if ($numdev > 1) { + die "\n $numdev capi devices detected"; +} + +print "\nCAPI Adapter is : $cfgfile\n"; + +# ------------------------------------------------------------------------------- +# Open the CAPI Device's Configuration Space +# ------------------------------------------------------------------------------- +sysopen(CFG, $cfgfile . "/config", O_RDWR) or die $!; + +# ------------------------------------------------------------------------------- +# Read the Device/Vendor ID from the Configuration Space +# ------------------------------------------------------------------------------- +sysseek(CFG, 0, SEEK_SET); +sysread(CFG,$dat,4); +$d = unpack("V",$dat); + +printf(" Device/Vendor ID: 0x%08x\n\n", $d); + +# ------------------------------------------------------------------------------- +# Read the VSEC Length / VSEC ID from the Configuration Space +# ------------------------------------------------------------------------------- +sysseek(CFG, 0x904, SEEK_SET); +sysread(CFG,$dat,4); +$d = unpack("V",$dat); + +printf(" VSEC Length/VSEC Rev/VSEC ID: 0x%08x\n", $d); +if ( ($d & 0x08000000) == 0x08000000 ) { + printf(" Version 0.12\n\n"); + $ADDR_REG = 0x950; # Flash Address Configuration Register + $SIZE_REG = 0x954; # Flash Size Configuration Register + $CNTL_REG = 0x958; # Flash Control / Status Configuration Register + $DATA_REG = 0x95C; # Flash Data Configuration Register +} else { + printf(" Version 0.10\n\n"); +} + +# ------------------------------------------------------------------------------- +# Reset Any Previously Aborted Sequences +# ------------------------------------------------------------------------------- +$cfgadr = $CNTL_REG; 
+$dat = 0; +$d = pack("V",$dat); +sysseek CFG, $cfgadr, seek_set; +syswrite(CFG,$d,4); + +# ------------------------------------------------------------------------------- +# Wait for Flash to be Ready +# ------------------------------------------------------------------------------- +sysseek CFG, $cfgadr, seek_set; +sysread(CFG,$d,4); +$dat = unpack("V",$d); + +$st = $lt = time(); +$cp = 1; + +$cfgadr = $CNTL_REG; +while ($cp == 1) { + sysseek(CFG, $cfgadr, SEEK_SET); + sysread(CFG,$d,4); + $rc = unpack("V",$d); + if ( ($rc & 0x80000000) == 0x80000000 ) { + $cp = 0; + } + $ct = time(); + if (($ct - $lt) > 5) { + print "."; + $lt = $ct; + } + if (($ct - $st) > 120) { + print "\nFAILURE --> Flash not ready after 2 min\n"; + $cp = 0; + $ec = 1; + } +} + +# ------------------------------------------------------------------------------- +# Calculate the number of blocks to write +# ------------------------------------------------------------------------------- +$fsize = -s $filename; + +$n = $fsize / (64 * 1024 * 4); +$n = (($n == int($n)) ? $n : int($n + 1)); + +printf("Programming %s with %s\n",$partition, $filename); +printf(" Program -> Address: 0x%x for Size: %d in blocks (32K Words or 128K Bytes)\n\n",$a,$n); + +$n -= 1; + +$set = time(); + +if ($ec == 0) { +# ------------------------------------------------------------------------------- +# Setup for Program From Flash +# ------------------------------------------------------------------------------- + $cfgadr = $ADDR_REG; + $d = pack("V",$a); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + + $cfgadr = $SIZE_REG; + $d = pack("V",$n); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + + $cfgadr = $CNTL_REG; + $dat = 0x04000000; + $d = pack("V",$dat); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + + print "Erasing Flash\n"; + +# ------------------------------------------------------------------------------- +# Wait for Flash Erase to complete. 
+# ------------------------------------------------------------------------------- + $st = $lt = time(); + $cp = 1; + + $cfgadr = $CNTL_REG; + while ($cp == 1) { + sysseek(CFG, $cfgadr, SEEK_SET); + sysread(CFG,$d,4); + $rc = unpack("V",$d); + if ( (($rc & 0x00008000) == 0x00000000) && + (($rc & 0x00004000) == 0x00004000) ) { + $cp = 0; + } + $ct = time(); + if (($ct - $lt) > 5) { + print "."; + $lt = $ct; + } + if (($ct - $st) > 240) { + print "\nFAILURE --> Erase did not complete in 4 min\n"; + $cp = 0; + $ec += 2; + } + } +} + +$eet = $spt = time(); + +# ------------------------------------------------------------------------------- +# Program Flash +# ------------------------------------------------------------------------------- +if ($ec == 0) { + print "\n\nProgramming Flash\n"; + + $bc = 0; + print "Writing Block: $bc \r"; + $cfgadr = $DATA_REG; + for($i=0; $i<(64*1024*($n+1)); $i++) { + $dif = read(IN,$d,4); + $dat = unpack("V",$d); + if (!($dif)) { + $dat = 0xFFFFFFFF; + } + $d = pack("V",$dat); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + + if ((($i+1) % (512)) == 0) { + print "Writing Buffer: $bc \r"; + $bc++; + } + } +} + +print "\n\n"; + +# ------------------------------------------------------------------------------- +# Wait for Flash Program to complete. 
+# ------------------------------------------------------------------------------- +$st = $lt = time(); +$cp = 1; + +$cfgadr = $CNTL_REG; +while ($cp == 1) { + sysseek(CFG, $cfgadr, SEEK_SET); + sysread(CFG,$d,4); + $rc = unpack("V",$d); + if ( ($rc & 0x40000000) == 0x40000000 ) { + $cp = 0; + } + $ct = time(); + if (($ct - $lt) > 5) { + print "."; + $lt = $ct; + } + if (($ct - $st) > 120) { + print "\nFAILURE --> Programming did not complete after 2 min\n"; + $cp = 0; + $ec += 4; + } +} + +$ept = time(); + +# ------------------------------------------------------------------------------- +# Reset Program Sequence +# ------------------------------------------------------------------------------- +$cfgadr = $CNTL_REG; +$dat = 0; +$d = pack("V",$dat); +sysseek CFG, $cfgadr, seek_set; +syswrite(CFG,$d,4); + +# ------------------------------------------------------------------------------- +# Wait for Flash to be Ready +# ------------------------------------------------------------------------------- +sysseek CFG, $cfgadr, seek_set; +sysread(CFG,$d,4); +$dat = unpack("V",$d); + +$st = $lt = time(); +$cp = 1; + +$cfgadr = $CNTL_REG; +while ($cp == 1) { + sysseek(CFG, $cfgadr, SEEK_SET); + sysread(CFG,$d,4); + $rc = unpack("V",$d); + if ( ($rc & 0x80000000) == 0x80000000 ) { + $cp = 0; + } + $ct = time(); + if (($ct - $lt) > 5) { + print "."; + $lt = $ct; + } + if (($ct - $st) > 120) { + print "\nFAILURE --> Flash not ready after 2 min\n"; + $cp = 0; + $ec += 8; + } +} + +$svt = time(); + +# ------------------------------------------------------------------------------- +# Verify Flash Programmming +# ------------------------------------------------------------------------------- +if ($ec == 0) { + print "Verifying Flash\n"; + + seek IN, 0, SEEK_SET; # Reset to beginning of file + #close(IN); + #open(IN, "< $filename"); + #binmode(IN); + + $bc = 0; + $ra = $a; + print "Reading Block: $bc \r"; + for($i=0; $i<(64*1024*($n+1)); $i++) { + + $dif = read(IN,$d,4); + $edat = 
unpack("V",$d); + if (!($dif)) { + $edat = 0xFFFFFFFF; + } + + if (($i % 512) == 0) { + $cfgadr = $CNTL_REG; + $dat = 0; + $d = pack("V",$dat); + sysseek CFG, $cfgadr, seek_set; + syswrite(CFG,$d,4); + + # ------------------------------------------------------------------------------- + # Wait for Flash to be Ready + # ------------------------------------------------------------------------------- + $st = $lt = time(); + $cp = 1; + + $cfgadr = $CNTL_REG; + while ($cp == 1) { + sysseek(CFG, $cfgadr, SEEK_SET); + sysread(CFG,$d,4); + $rc = unpack("V",$d); + if ( ($rc & 0x80000000) == 0x80000000 ) { + $cp = 0; + } + $ct = time(); + if (($ct - $lt) > 5) { + print "."; + $lt = $ct; + } + if (($ct - $st) > 120) { + print "\nFAILURE --> Flash not ready after 2 min\n"; + $cp = 0; + $ec += 16; + last; + } + } + + # ------------------------------------------------------------------------------- + # Setup for Reading From Flash + # ------------------------------------------------------------------------------- + $cfgadr = $ADDR_REG; + $d = pack("V",$ra); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + $ra += 0x200; + + $cfgadr = $SIZE_REG; + $d = pack("V",0x1FF); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + + $cfgadr = $CNTL_REG; + $dat = 0x08000000; + $d = pack("V",$dat); + sysseek CFG, $cfgadr, SEEK_SET; + syswrite(CFG,$d,4); + } + + $cfgadr = $DATA_REG; + sysseek CFG, $cfgadr, SEEK_SET; + sysread(CFG,$d,4); + $fdat = unpack("V",$d); + + if ($edat != $fdat) { + $ma = $ra + ($i % 512) - 0x200; + printf("Data Miscompare @: %08x --> %08x expected %08x\r",$ma, $fdat, $edat); + $rc = ; + } + + if ((($i+1) % (64*1024)) == 0) { + print "Reading Block: $bc \r"; + $bc++; + } + } +} + +$evt = time(); + +print "\n\n"; + +# ------------------------------------------------------------------------------- +# Check for Errors during Programming +# ------------------------------------------------------------------------------- +if ($ec != 0) { + print "\nErrors 
Occurred : Error Code => $ec\n";
+}
+
+# -------------------------------------------------------------------------------
+# Calculate and Print Elapsed Times
+# -------------------------------------------------------------------------------
+$et = $evt - $set;
+$eet = $eet - $set;
+$ept = $ept - $spt;
+$evt = $evt - $svt;
+
+print "Erase Time: $eet seconds\n";
+print "Program Time: $ept seconds\n";
+print "Verify Time: $evt seconds\n";
+print "Total Time: $et seconds\n\n";
+
+# -------------------------------------------------------------------------------
+# Reset Read Sequence
+# -------------------------------------------------------------------------------
+$cfgadr = $CNTL_REG;
+$dat = 0;
+$d = pack("V",$dat);
+sysseek CFG, $cfgadr, SEEK_SET;
+syswrite(CFG,$d,4);
+
+close(IN);
+close(CFG);
+exit;
diff --git a/src/build/install/resources/capikvutils.sh b/src/build/install/resources/capikvutils.sh
new file mode 100755
index 00000000..ea7b1dc9
--- /dev/null
+++ b/src/build/install/resources/capikvutils.sh
@@ -0,0 +1,271 @@
+#!/bin/bash -e
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: src/build/install/resources/capikvutils.sh $
+#
+# IBM Data Engine for NoSQL - Power Systems Edition User Library Project
+#
+# Contributors Listed Below - COPYRIGHT 2014,2015
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+# +# IBM_PROLOG_END_TAG + +CAPIKVROOT=/opt/ibm/capikv +SIOTABLE=$CAPIKVROOT/etc/sioluntable.ini +LOGFILE=/tmp/cxlflog.${USER}.log + +#cxlflash LUN modes +LUNMODE_LEGACY=0; +LUNMODE_SIO=1; + +#common return codes +ENOENT=2; +EIO=5; +EACCES=13; +EINVAL=22; + + +die() +{ + echo "$1"; + exit 1; +} + +#@desc manipulate the SIO LUN table, adding or deleting an entry +#@param $1 - LUN +#@param $2 - mode +#@returns rc to 0 on success, OR sets return code to non-zero value on error +chluntable() +{ + local targetlun=$1; + local targetmode=$2; + local currmode="unknown"; + getluntablestate currmode $targetlun; + case "$currmode-$targetmode" in + "$LUNMODE_LEGACY-$LUNMODE_LEGACY" ) + echo "INFO: LUN $targetlun is already in legacy mode. No action needed."; + ;; + "$LUNMODE_LEGACY-$LUNMODE_SIO" ) + echo "INFO: Adding LUN $targetlun to Super IO table."; + echo "$targetlun=1" >> $SIOTABLE; + ;; + "$LUNMODE_SIO-$LUNMODE_LEGACY" ) + echo "INFO: Removing LUN $targetlun from Super IO table."; + sed "/$targetlun=.*/d" $SIOTABLE > ${SIOTABLE}.new; + mv $SIOTABLE ${SIOTABLE}.bak; + mv ${SIOTABLE}.new $SIOTABLE; + ;; + "$LUNMODE_SIO-$LUNMODE_SIO" ) + echo "INFO: LUN $targetlun is already in SIO mode. No action needed."; + ;; + * ) + echo "LUN $targetlun has unknown LUN Status ($currmode) or target mode ($targetmode)." + return $EINVAL; + ;; + esac + #check if the LUN's there, and if we're deleting, delete it. If its not and adding, add it. + #exclusive lock +} + +#@desc map a block device to its underlying sg device +#@param $1 - output variable +#@param $2 - block device (e.g. "sdc") +#@returns sg device string (e.g. 
"sg4") and sets rc to 0 on success, OR sets return code to non-zero value on error +getsgfromblock() +{ + local __resultvar=$1; + local blockdev=$2; + local sgdev="unknown"; + if [[ -e /sys/class/block/$blockdev/device/scsi_generic ]]; then + sgdev=`ls /sys/class/block/$blockdev/device/scsi_generic`; + else + echo "Invalid sg device: $dev" + return $ENOENT; + fi + eval $__resultvar="'$sgdev'" +} + +#@desc get the 32-character hex LUN identifier that backs a given sg device +#@param $1 - output variable +#@param $2 - sg device (e.g. "sg12") +#@returns hex string LUN ID and sets rc to 0 on success, OR sets return code to non-zero value on error +getlunid() +{ + local __resultvar=$1; + local lun=`/lib/udev/scsi_id -d /dev/$2 --page=0x83 --whitelisted`; + lun=`echo $lun | awk '{print substr($1,2); }'`; #cut off the first digit ONLY, since we want the LUN ID, not the inquiry response which leads with a nibble indicating some scsi data we don't care about. + local lunlength=${#lun}; + if [[ "$lunlength" -eq "0" ]]; then + lun="unknown"; + return $ENOENT; + fi + eval $__resultvar="'$lun'" +} + +#@desc get the cxlflash device mode for the given sg device +#@param $1 - output variable +#@param $2 - sg device (e.g. "sg12") +#@returns lun mode integer and sets rc to 0 on success, OR sets return code to non-zero value on error +getmode() +{ + local __resultvar=$1; + local mode="unknown"; + if [[ -e /sys/class/scsi_generic/$2/device/mode ]]; then + mode=`cat /sys/class/scsi_generic/$2/device/mode`; + else + return $ENOENT; + fi + eval $__resultvar="'$mode'" +} + +#@desc get the block device for the given sg device +#@param $1 - output variable +#@param $2 - sg device (e.g. "sg12") +#@returns block device (e.g. 
"sdc" and sets rc to 0 on success, OR sets return code to non-zero value on error +getblockdev() +{ + local __resultvar=$1; + local dev=$2; + local block="unknown"; + if [[ -e /sys/class/scsi_generic/$dev/device/block ]]; then + local block=`ls /sys/class/scsi_generic/$dev/device/block`; + else + #echo "Invalid block device: $dev" + #this will naturally occur if we are in superpipe mode, or if the LUN mappings are discontinuous (e.g. LUN1 exists, but LUN0 does not). + return $ENOENT; + fi + eval $__resultvar="'$block'" +} + + +#@desc get the scsi topology for a given sg device (e.g. a:b:c:d) +#@param $1 - output variable +#@param $2 - sg device (e.g. "sg12") +#@returns scsi topology string, colon separated, e.g. '1:2:3:4' and sets rc to 0 on success, OR sets return code to non-zero value on error +getscsitopo() +{ + local __resultvar=$1; + local dev=$2; + local pathname=`ls -d /sys/class/scsi_generic/$dev/device/scsi_device/*`; + local rslt=`basename $pathname`; + eval $__resultvar="'$rslt'" +} + +ctrlblockdevmap() +{ + local dev=$1; + local scsitopo=0; + local tgmode=$2; + local scsiaction="noop"; + local blockdev="unknown"; + getblockdev blockdev $dev || blockdev="unknown"; + getscsitopo scsitopo $dev; + #only unmap the device if the block device is NOT unknown, and we want to go to SIO mode + if [[ ( "$tgmode" == "$LUNMODE_SIO" ) && ( "$blockdev" != "unknown" ) ]]; then + scsiaction="unbind"; + #only map the device if the block device is unknown, and want to go to LEGACY mode + elif [[ ("$tgmode" == "$LUNMODE_LEGACY") && ("$blockdev" == "unknown") ]]; then + scsiaction="bind"; + fi + #if there's something to do, call either bind or unbind appropriately + if [ "$scsiaction" != "noop" ]; then + echo "INFO: ${scsiaction}ing $dev's block device, currently $blockdev" >> $LOGFILE; + echo -n "$scsitopo" > /sys/bus/scsi/drivers/sd/$scsiaction || echo "WARNING: error attempting to control block device for $dev" >> $LOGFILE; + fi +} + + +#@desc print out a status table of 
the current LUN modes / mappings +#@returns sets rc to 0 on success, OR sets return code to non-zero value on error +printstatus() +{ + #list of all known SG devices - local to prevent this from causing side effects / problems in udev handler + local _SGDEVS=`ls /sys/module/cxlflash/drivers/pci:cxlflash/*:*:*.*/host*/target*:*:*/*:*:*:*/scsi_generic | grep sg` + local lunid=0; + local lunmode=0; + local blockdev=0; + local scsitopo=0; + echo "CXL Flash Device Status" + printf "%10s: %10s %5s %9s %32s\n" "Device" "SCSI" "Block" "Mode" "LUN WWID"; + for dev in $_SGDEVS; do + getlunid lunid $dev; + getmode lunmode $dev; + getblockdev blockdev $dev || blockdev="n/a"; + getscsitopo scsitopo $dev; + printf "%10s: %10s, %5s, %9s, %32s\n" "$dev" "$scsitopo" "$blockdev" "$lunmode" "$lunid"; + done +} + +#@desc set the mode for a given block device +#@param $1 - sg device +#@returns sets rc to 0 on success, OR sets return code to non-zero value on error +setdevmode() +{ + #$1: target sg device + #$2: target mode + local dev=$1; + #causes the block device above this sg device to become mapped or unmapped + #ctrlblockdevmap $dev $targetmode; #@TODO: disable to keep HTX happy + echo "INFO: Setting $dev mode" >> $LOGFILE; + #call with the config option, which causes the tool to read the config + $CAPIKVROOT/bin/cxlflashutil -d /dev/$dev --config >> $LOGFILE; +} + +#@desc Determine if a given LUN is in the SIO LUN Table +#@param $1 - output variable +#@param $2 - lunid +#@returns rc to 0 on success and sets outputvar with LUN mode of device as per table, OR sets return code to non-zero value on error +getluntablestate() +{ + local __resultvar=$1; + local lunid=$2; + local targetmode=$LUNMODE_LEGACY; #default to legacy + + if [[ ! 
-f $SIOTABLE ]]; then + echo "Unable to access '$SIOTABLE'"; + return $ENOENT; + fi + #echo "searching for '$lunid'"; + #match any LUN entry that is lun=1 or lun=01 + if grep -xq "$lunid=0\?*1.\?*" $SIOTABLE ; then + #echo "lun in SIO mode"; + targetmode=$LUNMODE_SIO; #set to SIO mode since it's in the SIO table + fi + eval $__resultvar="'$targetmode'" +} + + +#@desc walk through all cxlflash LUNs and send updates to device driver for legacy / sio mode +#@returns rc to 0 on success, OR sets return code to non-zero value on error +dotableupdate() +{ + #list of all known SG devices - local to prevent this from causing side effects / problems in udev handler + local _SGDEVS=`ls /sys/module/cxlflash/drivers/pci:cxlflash/*:*:*.*/host*/target*:*:*/*:*:*:*/scsi_generic | grep sg` + date >> $LOGFILE; + if [[ ! -f $SIOTABLE ]]; then + echo "Unable to access '$SIOTABLE'"; + return $ENOENT; + else + echo "SIO LUN TABLE CONTENTS:" >> $LOGFILE; + cat $SIOTABLE >> $LOGFILE; + fi + echo "Refer to $LOGFILE for detailed table update logs." + for dev in $_SGDEVS; do + setdevmode $dev $targetmode + done +} + diff --git a/src/build/install/resources/corsa_surelock.150930D1.bin b/src/build/install/resources/corsa_surelock.150930D1.bin new file mode 120000 index 00000000..12028af7 --- /dev/null +++ b/src/build/install/resources/corsa_surelock.150930D1.bin @@ -0,0 +1 @@ +/gsa/ausgsa/projects/s/surelock/images/corsa_surelock.150930D1.bin \ No newline at end of file diff --git a/src/build/install/resources/cxlfd.service b/src/build/install/resources/cxlfd.service new file mode 100644 index 00000000..c8cbadb3 --- /dev/null +++ b/src/build/install/resources/cxlfd.service @@ -0,0 +1,48 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+#
+# $Source: src/build/install/resources/cxlfd.service $
+#
+# IBM Data Engine for NoSQL - Power Systems Edition User Library Project
+#
+# Contributors Listed Below - COPYRIGHT 2015
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+[Unit]
+Description="Data Engine for NoSQL - Power Systems Edition CXL Flash LUN Management Daemon"
+
+[Service]
+Group=cxl
+Type=simple
+Restart=on-failure
+#wait 30s due to adapter reset intervals
+RestartSec=30
+#Limit respawns to five times in five minutes
+StartLimitInterval=300
+StartLimitBurst=5
+
+#reduce the chance of daemon death due to low memory
+OOMScoreAdjust=-700
+
+#run the daemon in the background at the lowest possible niceness priority
+Nice=19
+ExecStart=/opt/ibm/capikv/bin/cxlfd
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/src/build/install/resources/cxlffdc b/src/build/install/resources/cxlffdc
new file mode 100755
index 00000000..c2799349
--- /dev/null
+++ b/src/build/install/resources/cxlffdc
@@ -0,0 +1,64 @@
+#!/bin/bash
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: src/build/install/resources/cxlffdc $
+#
+# IBM Data Engine for NoSQL - Power Systems Edition User Library Project
+#
+# Contributors Listed Below - COPYRIGHT 2014,2015
+# [+] International Business Machines Corp.
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +set -e +DESTDIR=/tmp/cxlffdc +TARBALL=cxlffdc.tgz +AFUDIR=/opt/ibm/capikv/afu + +die() +{ + echo "$1" 1>&2; + exit $2; +} + +if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root" 1>&2 + exit 1 +fi + +mkdir $DESTDIR || die "ERROR: Delete previous FFDC at $DESTDIR and try again." 1 +pushd $DESTDIR +cardnums=`ls -d /sys/class/cxl/card* | awk -F"/sys/class/cxl/card" '{ print $2 }'` +for i in $cardnums; do + mkdir card$i; + $AFUDIR/psl_trace_dump card$i $i || echo "WARNING: Error occcurred dumping card$i"; #psl_trace_dump needs to be on our path... +done + +dmesg > dmesg.txt +cp /var/log/syslog $DESTDIR +cp /sys/firmware/opal/msglog $DESTDIR/opal_msglog + +mcontexts=`ls /dev/cxl/afu*m` +for mcontext in $mcontexts; do + $AFUDIR/cxl_afu_dump $mcontext > $(basename $mcontext).txt +done + +popd +tar -cvzf $TARBALL $DESTDIR +rm -rf $DESTDIR +echo "INFO: FFDC Collected below." +ls -l $TARBALL + diff --git a/src/build/install/resources/cxlfrefreshluns b/src/build/install/resources/cxlfrefreshluns new file mode 100755 index 00000000..800239d8 --- /dev/null +++ b/src/build/install/resources/cxlfrefreshluns @@ -0,0 +1,44 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/build/install/resources/cxlfrefreshluns $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +CAPIKV=/opt/ibm/capikv +source $CAPIKV/bin/capikvutils.sh + + +showhelp() +{ + echo "Usage: $0" + echo " Refresh the cxlflash driver's internal LUN tables and set LUN modes for all cxlflash devices." +} + +#show help if we haven't gotten a correct # of args +if [ "$#" -ne 0 ]; then + showhelp; + exit $EINVAL; +fi + +dotableupdate; +printstatus; diff --git a/src/build/install/resources/cxlfsetlunmode b/src/build/install/resources/cxlfsetlunmode new file mode 100755 index 00000000..65ec6e85 --- /dev/null +++ b/src/build/install/resources/cxlfsetlunmode @@ -0,0 +1,49 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/cxlfrefreshluns $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +CAPIKV=/opt/ibm/capikv +source $CAPIKV/bin/capikvutils.sh + + +showhelp() +{ + echo "Usage: $0 " + echo " Change a LUN's CXLFlash Mode." +} + +#show help if we haven't gotten a correct # of args +if [ "$#" -ne 2 ]; then + showhelp; + exit $EINVAL; +fi + +TARGETLUN=`echo $1 | awk '{print tolower($0)}'` +TARGETMODE=$2 + +#set up the SIO table +chluntable $TARGETLUN $TARGETMODE; +#immediately set the desired LUNs to the correct mode +$CAPIKV/bin/cxlflashutil -l $TARGETLUN -m $TARGETMODE diff --git a/src/build/install/resources/cxlfstatus b/src/build/install/resources/cxlfstatus new file mode 100755 index 00000000..6bcea04f --- /dev/null +++ b/src/build/install/resources/cxlfstatus @@ -0,0 +1,43 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/cxlfstatus $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +CAPIKV=/opt/ibm/capikv +source $CAPIKV/bin/capikvutils.sh + + +showhelp() +{ + echo "Usage: $0" + echo " Print LUN modes for all cxlflash devices." +} + +#show help if we haven't gotten a correct # of args +if [ "$#" -ne 0 ]; then + showhelp; + exit $EINVAL; +fi + +printstatus; diff --git a/src/build/install/resources/flash_all_adapters b/src/build/install/resources/flash_all_adapters new file mode 100755 index 00000000..c900ca8b --- /dev/null +++ b/src/build/install/resources/flash_all_adapters @@ -0,0 +1,130 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/flash_all_adapters $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG +#Constants +KERNEL_MOD="cxlflash" +CAPI_FLASH=capi_flash.pl +CAPI_IMAGE=corsa_surelock.150930D1.bin +IMAGES_DIR=images #relative to the cwd of the script +FLASH_ROOT=/opt/ibm/capikv/afu + +MINCAPMASK="0003" +CAPABILITYKW="V3" +#Globals +factory="" + +#allow us to die with a specific error code +die() +{ + echo "AFUFLASHERR$2: $1" 1>&2; + exit $2; +} + +if [ "$1" == "--factory" ]; then + factory="-p0" +fi + + +checkidle() +{ + echo "INFO: Checking for CAPI Master Context..." + master_offline=true + lsmod | grep $KERNEL_MOD && master_offline=false #if the cxlflash module is found, we set the master_offline flag to false +#otherwise, shows the module info + + if $master_offline ; then + echo "INFO: CXL Flash Module is not running. Device is assumed to be idle."; + else + die "CXL Flash is running (see above). Cannot flash while the device is active. Run \"rmmod cxlflash\" to unload it." 1; + fi +} + +readvpd() +{ + local pciid=$1 + local keyword=$2 + local value=`lspci -s $pciid -vv | sed -n -e '/Vital Product Data/,/End/ p' | grep $keyword | awk -F": " '{print $2}'` + if [[ -z "$value" ]]; then + die "Unable to read PCI device $pciid, VPD keyword $keyword. This afu image is not compatible with this adapter." 2; + fi + echo $value +} + +findloc() +{ + devspecpath="/sys/bus/pci/devices/$1/devspec" + if [[ ! -f "$devspecpath" ]]; then + die "Unable to find devspec for device $1. Could not find '$devspecpath'" 3; + fi + local devspec=`cat $devspecpath` + local loccodepath="/proc/device-tree$devspec/ibm,loc-code" + if [[ ! -f "$loccodepath" ]]; then + die "Unable to find device tree special file path for PCI device $1: $loccodepath" 4; + fi + local loccode=`cat $loccodepath`; + if [[ -z "$loccode" ]]; then + die "Location code is invalid for PCI device $1" 5; + fi + echo $loccode; + } + +checkcompat() +{ + echo "INFO: Checking card / image compatibility for image $CAPI_IMAGE" + #enumerate all adapters, get the PCI id (e.g. 
"0000:01:00.0" from below) + #parse something like "Found CAPI Adapter : /sys/bus/pci/devices/0000:01:00.0" + for adapter in `$FLASH_ROOT/$CAPI_FLASH -l | awk -F/ '{print $NF}'`; + do + echo "INFO: Checking adapter / firmware compatibility for $(findloc $adapter)"; + adaptermask=$(readvpd $adapter $CAPABILITYKW); + local resultmask=$(($MINCAPMASK & $adaptermask)) + if [[ "$resultmask" -ne "$MINCAPMASK" ]]; then + die "Adapter $(findloc $adapter) firmware is incompatible with this adapter. Please use a different firmware image. Minimum capability mask for this firmware is 0x$MINCAPMASK and adapter VPD capability is 0x$adaptermask." 6; + fi + done + echo "INFO: All present IBM DataEngine for NoSQL adapters are capable of loading this image." + +} + + +flashadapters() +{ + echo "INFO: Now flashing all CAPI devices..." + + #enumerate all adapters, get the path (item #5 from the awk command) + #parse something like "Found CAPI Adapter : /sys/bus/pci/devices/0000:01:00.0" + for adapter in `$FLASH_ROOT/$CAPI_FLASH -l | awk '{print $5}'`; + do + echo "INFO: Flashing adapter: $adapter"; + $FLASH_ROOT/$CAPI_FLASH $factory -t $adapter -f $FLASH_ROOT/$IMAGES_DIR/$CAPI_IMAGE || die "Unable to flash. Halting script." 2; + done +} + +#Main Code +checkidle; +checkcompat; +flashadapters; + +echo "INFO: Adapter update complete. To restart the adapters on the new image, please run ${FLASH_ROOT}/reload_all_adapters" diff --git a/src/build/install/resources/flash_factory_image b/src/build/install/resources/flash_factory_image new file mode 100755 index 00000000..2df6ccd9 --- /dev/null +++ b/src/build/install/resources/flash_factory_image @@ -0,0 +1,15 @@ +#Constants +FLASH_ALL=flash_all_adapters +FLASH_ROOT=/opt/ibm/capikv/afu + +#Globals +factory="" + +#allow us to die with a specific error code +die() +{ + echo "$1" 1>&2; + exit $2; +} + +$FLASH_ROOT/$FLASH_ALL --factory || die "ERROR: Unable to flash factory image for at least one adapter." 
1 diff --git a/src/build/install/resources/license/LICENSE b/src/build/install/resources/license/LICENSE new file mode 100644 index 00000000..68c771a0 --- /dev/null +++ b/src/build/install/resources/license/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/src/build/install/resources/postafuinstall b/src/build/install/resources/postafuinstall new file mode 100644 index 00000000..7f9bc17b --- /dev/null +++ b/src/build/install/resources/postafuinstall @@ -0,0 +1,33 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/postafuinstall $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +set -e #halt on error +USERNAME=cxl +GROUPNAME=$USERNAME +FLASH_ROOT=/opt/ibm/capikv/afu +DATA_DIR=$CAPIKV_ROOT/data +INIFILE=$DATA_DIR/capikv.ini + +echo "INFO: Please remove the \"cxlflash\" module and run \"$FLASH_ROOT/flash_all_adapters\" to perform the CAPI accelerator microcode update." diff --git a/src/build/install/resources/postinstall b/src/build/install/resources/postinstall new file mode 100755 index 00000000..c20e4a98 --- /dev/null +++ b/src/build/install/resources/postinstall @@ -0,0 +1,52 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/postinstall $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. 
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +set -e #halt on error +USERNAME=cxl +GROUPNAME=$USERNAME +CAPIKV_ROOT=/opt/ibm/capikv +ETC_DIR=$CAPIKV_ROOT/etc +SIO_FILE=$ETC_DIR/sioluntable.ini +#Creating empty SIO lun table and etc dir +echo "INFO: Ensuring SIO LUN Table exists..." +echo "INFO: Creating system cxl user / group..." +adduser --system --group $USERNAME +if ! egrep -i "^$GROUPNAME" /etc/group >/dev/null; then + echo "INFO: Creating cxl group..." + groupadd -r $GROUPNAME +fi +echo "INFO: setting permissions on cxl and cxlflash device nodes." +udevadm trigger -s cxl +udevadm trigger -s cxlflash + +chown $USERNAME:$GROUPNAME $ETC_DIR +chmod 770 $ETC_DIR +touch $SIO_FILE +chown $USERNAME:$GROUPNAME $SIO_FILE +chmod 660 $SIO_FILE + +echo "INFO: enabling cxlfd service for LUN Management" +systemctl enable cxlfd || echo "WARNING: Unable to enable the cxlfd service via systemctl. Please enable the cxlfd daemon for LUN management." +systemctl start cxlfd || echo "WARNING: Unable to start the cxlfd service via systemctl. Please enable the cxlfd daemon for LUN management." 
diff --git a/src/build/install/resources/psl_trace_dump b/src/build/install/resources/psl_trace_dump new file mode 100755 index 00000000..13b54e46 Binary files /dev/null and b/src/build/install/resources/psl_trace_dump differ diff --git a/src/build/install/resources/readme.resources.txt b/src/build/install/resources/readme.resources.txt new file mode 100644 index 00000000..880ce367 --- /dev/null +++ b/src/build/install/resources/readme.resources.txt @@ -0,0 +1,45 @@ +resources contains various external tools and applications needed by surelock. + +80-cxl.rules +Purpose: udev rules for /dev/cxl/afuX.0s and X.0m that enable members of the cxl group to access the slave device(s) + +80-cxlflash.rules +Purpose: udev rules for /dev/sdX and sgX that enable user space to notify the cxlflash module about device status + +blacklist-cxlflash.conf +Purpose: temporarily blacklist the cxlflash driver by placing this in /etc/modprobe.d . this is used by reload_all_adapters (see below). + +capi_flash.pl +Purpose: flash AFU factory or user images. 
+ +corsa_surelock.xxx.bin +Purpose: Accelerator binary image + +cxlffdc +Purpose: Gather PSL and AFU debug data, and place into a tarball for HW diagnosis + +flash_all_adapters +Purpose: Wrapper script for capi_flash.pl, used by debian package post-installer + + +reload_all_adapters +Purpose: Simple Wrapper to enable perst / reset of the adapters after a flash update without a reboot + +flash_factory_image +Purpose: Wrapper script for capi_flash.pl, used by CSC manufacturing to write factory image prior to shipment + +license/* +Purpose: License terms / conditions for Surelock + +postinstall +Purpose: initial setup of system - used by clients or ssrs + +psl_trace_dump +Purpose: collects PSL FFDC / traces on a failed system + + +capikvutils.sh +Purpose: Utility code for cxlflash and ibmcapikv tooling + +cxlfrefreshluns cxlfsetlunmode cxlfstatus +Purpose: suite of utilities for manipulating the cxlflash driver diff --git a/src/build/install/resources/readme.txt b/src/build/install/resources/readme.txt new file mode 100644 index 00000000..41dd66ce --- /dev/null +++ b/src/build/install/resources/readme.txt @@ -0,0 +1,69 @@ +IBM Data Engine for NoSQL - Power System Edition + +Please review the licenses available in /opt/ibm/capikv/license/ + +FlashSystem ssh key configuration is no longer required for this solution. Mapping or unmapping LUNS may be performed via the FlashSystem GUI. + +Please ensure that all fiber channel cables are connected point-to-point between the accelerator cards and the FlashSystem, and ensure that all LUNs (or vdisks) mapped to the accelerators are 4096-byte block formatted disks. 
+ +This software includes: + include Header files for application development + lib Libraries for exploitation of CAPI Flash APIs + license Licenses for the IBM Data Engine for NoSQL - Power Systems Edition + examples Example binaries and source code for application development + +Common Tasks +============ + +Host Creation in the FlashSystem +-------------------------------- +To determine the CAPI Flash World Wide Port Names (WWPN) for each of the present accelerator's ports (to allow one to provision them in FlashSystem to specific ports), one must use the lspci command. First, use it without arguments as "lspci |grep 'IBM Device 04cf'", which will output similar to the following: + 0000:01:00.0 Processing accelerators: IBM Device 04cf (rev 01) + 0002:01:00.0 Processing accelerators: IBM Device 04cf (rev 01) + +From the above list, select an adapter using the identifiers in the left column. Thus to get WWPN for the first adapter listed one would use the command "lspci -s 0000:01:00.0 -vv |grep -e V5 -e V6", which will show output similar to: + lspci -s 0000:01:00.0 -vv |grep -e V5 -e V6 + [V5] Vendor specific: 5005076069800230 + [V6] Vendor specific: 5005076069800231 +Thus the WWPNs of this adapter are: 5005076069800230, and 5005076069800231. Use these WWPNs to create a new host (or hosts) in the FlashSystem GUI or CLI. + + +Controlling Access to the Accelerator +------------------------------------- + +By default, installation of this package creates a "cxl" user and group, and adds udev rules to restrict read / write to the accelerator's volumes to members of the "cxl" system group. To enable an account to read/write the accelerator, add the account to the cxl group. For example: + sudo usermod -a -G cxl userid + + + +Viewing the status of the accelerator +------------------------------------- + +This package includes convenience scripts to display the status of the volumes mapped to the accelerator. 
To view the status of each adapter's LUN, use: + sudo /opt/ibm/capikv/bin/cxlfstatus + +Volumes may be in either "legacy" or "superpipe" mode. Volumes will default to legacy mode. Volumes must be in 'superpipe' mode for exploitation by the CAPI Flash block or arkdb APIs. + +The below example shows two accelerators, each with two ports, and a single volume mapped to each port's WWPN. One volume is in "legacy" mode, and three volumes are in "superpipe" mode. The WWID for each volume in the FlashSystem is displayed for convenience of administration. This matches the WWID shown in the FlashSystem GUI or CLI. + +ibm@power8:~$ sudo /opt/ibm/capikv/bin/cxlfstatus +CXL Flash Device Status +Device: SCSI Block Mode LUN WWID + sg9: 33:0:0:0, sdc, legacy, 60050768218b0818200000000400006e + sg10: 33:1:0:0, sdd, superpipe, 60050768218b0818200000000600006f + sg11: 34:0:0:0, sde, superpipe, 60050768218b08182000000007000070 + sg12: 34:1:0:0, sdf, superpipe, 60050768218b0818200000000300006d + + +Setting the mode for a volume +----------------------------- + +As shown in the example above, each volume may be in either "legacy" or "superpipe" mode. To set the mode for a volume, use the following command: + sudo /opt/ibm/capikv/bin/cxlfsetlunmode + +For example, LUN modes may be "0" for legacy, or "1" for superpipe. As an example: + ibm@power8:~$ /opt/ibm/capikv/bin/cxlfsetlunmode 60050768218b0818200000000400006e 1 + INFO: Adding LUN 60050768218b0818200000000400006e to Super IO table. + SUCCESS + +After a LUN is set to superpipe mode, all paths to that volume will also be set to superpipe. diff --git a/src/build/install/resources/reload_all_adapters b/src/build/install/resources/reload_all_adapters new file mode 100755 index 00000000..3b8cae1b --- /dev/null +++ b/src/build/install/resources/reload_all_adapters @@ -0,0 +1,160 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/build/install/resources/flash_all_adapters $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +#Constants +KERNEL_MOD="cxlflash" +FLASH_ROOT=/opt/ibm/capikv/afu +RELOADDELAY=0.5 +MAXRELOADCOUNT=120 #120 .5 second intervals +MODPROBEDIR=/etc/modprobe.d +BLACKLISTFILE=blacklist-${KERNEL_MOD}.conf + +#allow us to die with a specific error code +die() +{ + echo "AFUFLASHERR$2: $1" 1>&2; + exit $2; +} + +checkidle() +{ + echo "INFO: Checking for CAPI Master Context..." + master_offline=true + lsmod | grep $KERNEL_MOD && master_offline=false #if the cxlflash module is found, we set the master_offline flag to false +#otherwise, shows the module info + + if $master_offline ; then + echo "INFO: CXL Flash Module is not running. Device is assumed to be idle."; + else + die "CXL Flash is running (see above). Cannot flash while the device is active. Run \"rmmod cxlflash\" to unload it." 1; + fi +} + +disablemodule() +{ + if [ ! 
-e $MODPROBEDIR/$BLACKLISTFILE ]; then + echo "INFO: Temporarily blacklisting ${KERNEL_MOD}" + cp $FLASH_ROOT/$BLACKLISTFILE $MODPROBEDIR/$BLACKLISTFILE + fi +} + +enablemodule() +{ + if [ -e $MODPROBEDIR/$BLACKLISTFILE ]; then + echo "INFO: Removing ${KERNEL_MOD} from blacklist" + rm $MODPROBEDIR/$BLACKLISTFILE + fi +} + + +findloc() +{ + devspecpath="/sys/bus/pci/devices/$1/devspec" + if [[ ! -f "$devspecpath" ]]; then + die "Unable to find devspec for device $1. Could not find '$devspecpath'" 3; + fi + local devspec=`cat $devspecpath` + local loccodepath="/proc/device-tree$devspec/ibm,loc-code" + if [[ ! -f "$loccodepath" ]]; then + die "Unable to find device tree special file path for PCI device $1: $loccodepath" 4; + fi + local loccode=`cat $loccodepath`; + if [[ -z "$loccode" ]]; then + die "Location code is invalid for PCI device $1" 5; + fi + echo $loccode; + } + +writeTo_perst_reloads_same_image() +{ + local cxlsysfs="/sys/class/cxl" + local val=$1 + + for adapter in `ls $cxlsysfs | grep card`; do + echo "Writing $val to perst_reloads_same_image for $adapter" + echo "$val" > $cxlsysfs/$adapter/perst_reloads_same_image; + done +} + +reloadadapters() +{ + local rebootrequired="false" + local cxlsysfs="/sys/class/cxl" + + # Disable perst_reloads_same_image for all cards + writeTo_perst_reloads_same_image "0"; + + #Disable the cxlflash module temporarily + disablemodule; + + #enumerate all cxl cards, and issue a PERST to each one + #should result in card0 card1 card2.. etc. + for adapter in `ls $cxlsysfs | grep card`; do + if [[ (-f $cxlsysfs/$adapter/load_image_on_perst) && (-f $cxlsysfs/$adapter/reset) ]]; then + echo "Attempting to reload $adapter" + echo "user" > $cxlsysfs/$adapter/load_image_on_perst; + echo "1" > $cxlsysfs/$adapter/reset; + local pollcount=0; + while [ ! 
-f $cxlsysfs/$adapter/image_loaded ] + do + if [[ "$pollcount" -eq "$MAXRELOADCOUNT" ]]; then + #re-enable the cxlflash module + enablemodule; + # Re-enable perst_reloads_same_image for all cards + writeTo_perst_reloads_same_image "1"; + die "Unable to reload $adapter" 100; + fi + pollcount=$((pollcount+1)) + echo -n "."; + sleep $RELOADDELAY; + done + echo "done" + + local imgselected=`cat $cxlsysfs/$adapter/image_loaded`; + if [[ "$imgselected" -ne "user" ]]; then + echo "Card $adapter did not boot on the user image. Image selected was $imgselected."; + rebootrequired="true"; + fi + else + rebootrequired="true"; + fi + done + + #re-enable the cxlflash module + enablemodule; + # Re-enable perst_reloads_same_image for all cards + writeTo_perst_reloads_same_image "1"; + + if [[ "$rebootrequired" == "true" ]]; then + echo "WARNING: This kernel does not support a warm reload of the CAPI adapter firmware. A cold restart of the system is required to reinitialize the adapter. Please power off, then power on (do not \"reboot\")." + else + echo "INFO: All present IBM DataEngine for NoSQL adapters have reloaded on this image." + fi + +} + +#Main Code +checkidle; +reloadadapters; diff --git a/src/build/install/resources/setup.sh b/src/build/install/resources/setup.sh new file mode 100755 index 00000000..c82072bc --- /dev/null +++ b/src/build/install/resources/setup.sh @@ -0,0 +1,55 @@ +#!/bin/bash -e +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/install/resources/cxlfrefreshluns $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +set -e #halt on error +CAPIKV_ROOT=/opt/ibm/capikv + + +#allow us to die with a specific error code +die() +{ +echo "$1" 1>&2; +exit $2; +} + +echo "If you have not done so already, you may answer \"n\" and review the agreements in your local language, then re-run this script." +while true; do +read -p "Do you agree to the terms of the license agreements contained in $CAPIKV_ROOT/license/ ? [Y/n] " licagree +case $licagree in +[Yy]* ) break;; +[Nn]* ) exit 0;; +* ) echo "Please answer 'y' or 'n' at the prompt.";; +esac +done +echo "INFO: This software package no longer requires ssh access to the FlashSystem for LUN provisioning and management." +echo "" +echo "INFO: If not already completed, please:" +echo " 1. Create a host (or hosts) in the FlashSystem for the accelerator's WWPNs." +echo " 2. Map a 4096-byte formatted volume to that new host(s)." +echo " 3. Set each volume to 'superpipe' mode." +echo "" +echo "INFO: Please read $CAPIKV_ROOT/readme.txt for additional details." +echo "INFO: Refer to the readme initial administration steps. Setup is complete." \ No newline at end of file diff --git a/src/build/makefile b/src/build/makefile new file mode 100644 index 00000000..5d301132 --- /dev/null +++ b/src/build/makefile @@ -0,0 +1,29 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. 
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../.. + +SUBDIRS = install.d packaging.d + +include ${ROOTPATH}/config.mk diff --git a/src/build/packaging/makefile b/src/build/packaging/makefile new file mode 100644 index 00000000..4829c35a --- /dev/null +++ b/src/build/packaging/makefile @@ -0,0 +1,85 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/packaging/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +UNAME=$(shell uname) + +ROOTPATH = ../../.. 
+ifeq ($(UNAME),AIX) +all: aixcapikv aixcapikv-test aixafuimage +else +all: ibmcapikv ibmcapikv-test afuimage +endif + +VERSIONMAJOR=2 +VERSIONMINOR=0 +VERSION=${VERSIONMAJOR}.${VERSIONMINOR}-${GITREVISION} + +ibmcapikv: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + fpm -f -s dir -t rpm -n $@ -v ${VERSION} \ + --depends 'advance-toolchain-at7.1-runtime' \ + --depends 'libudev1' \ + -C ./install_root \ + --after-install ${SURELOCKROOT}/src/build/install/resources/postinstall .; \ + fpm -f -s dir -t deb -n $@ -v ${VERSION} \ + --depends 'advance-toolchain-at7.1-runtime' \ + --depends 'libudev1' \ + -C ./install_root \ + --after-install ${SURELOCKROOT}/src/build/install/resources/postinstall .; \ + tar -cvzf $@-${GITREVISION}.tar.gz -C ./install_root . + +ibmcapikv-test: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + fpm -f -s dir -t rpm -n $@ -v ${VERSION} -C ./test_root \ + --depends 'ibmcapikv = ${VERSION}' \ + --depends 'libudev-dev' .; \ + fpm -f -s dir -t deb -n $@ -v ${VERSION} -C ./test_root \ + --depends 'ibmcapikv = ${VERSION}' \ + --depends 'libudev-dev' .; \ + tar -cvzf $@-${GITREVISION}.tar.gz -C ./test_root . + +afuimage: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + fpm -f -s dir -t deb -a all -n $@ -v ${VERSION} -C ./afu_root \ + --after-install ${SURELOCKROOT}/src/build/install/resources/postafuinstall . + +aixcapikv: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + tar -cvf ibmcapikv_${VERSION}.ppc64be.tar -C ./install_root . + +aixcapikv-test: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + tar -cvf ibmcapikv-test_${VERSION}.ppc64be.tar -C ./test_root . + +aixafuimage: + mkdir -p ${PKGDIR} + cd ${PKGDIR}; \ + tar -cvf afuimage_${VERSION}.ppc64be.tar -C ./afu_root . + +include ${ROOTPATH}/config.mk diff --git a/src/build/tools/build_be_mem_nomc b/src/build/tools/build_be_mem_nomc new file mode 100755 index 00000000..0eb4bd05 --- /dev/null +++ b/src/build/tools/build_be_mem_nomc @@ -0,0 +1,105 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/build/tools/build_be_mem_nomc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +if [[ $1 = "help" ]] +then + echo "options: cleanall tests run_unit run_fvt" + echo "default: build only shipped targets" + exit 0 +fi + +if [[ $(uname) = "Linux" ]] +then + LINUX=1 + MAKE=make +else + LINUX=0 + MAKE=gmake + SURELOCKROOT= + echo "NOT LINUX" +fi + +if [[ $LINUX -eq 1 ]] +then + if [[ $(basename $PWD) = "surelock-sw" ]] + then + source env.bash + else + if [[ -z $SURELOCKROOT ]] + then + echo "set SURELOCKROOT or source env.bash before running" + exit 1 + fi + fi + cd $SURELOCKROOT +else + if [[ $(basename $PWD) != "surelock-sw" ]] + then + echo "must be in surelock-sw dir to execute" + exit 1 + fi +fi + +unset CUSTOMFLAGS +unset BLOCK_FILEMODE_ENABLED +unset BLOCK_MC_ENABLED +unset TARGET_PLATFORM + +if [[ -e customrc ]] +then + rm -f customrc +fi + +if [[ $1 = "cleanall" && -z $2 ]] +then + $MAKE cleanall + $MAKE -j12 +fi + +if [[ $1 = "cleanall" || $2 = "cleanall" || $3 = "cleanall" ]] +then + $MAKE cleanall +fi + +if [[ $1 = "tests" || $2 = "tests" || $3 = "tests" ]] +then + $MAKE tests -j12 +fi + +if [[ $1 = "run_unit" || $2 = "run_unit" || $3 = "run_unit" ]] +then + $MAKE run_unit -j12 +fi + +if [[ $1 = "run_fvt" || $2 = "run_fvt" || $3 = "run_fvt" 
]] +then + $MAKE run_fvt -j12 +fi + +if [[ -z $1 ]] +then + $MAKE -j12 +fi diff --git a/src/build/tools/build_ker_hw b/src/build/tools/build_ker_hw new file mode 100755 index 00000000..7ce9ce71 --- /dev/null +++ b/src/build/tools/build_ker_hw @@ -0,0 +1,167 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/build/tools/build_ker_hw $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +function check_rc +{ + if [[ $1 -ne 0 ]] + then + echo "LAST CMD FAILED, EXITING" + __exit $1 + fi +} + +function __exit +{ + exit $1 +} + +if [[ $1 = "help" ]] +then + echo "options: cleanall tests packaging run_unit run_fvt" + echo "default: build only shipped targets" + exit 0 +fi + +if [[ $(uname) = "Linux" ]] +then + if [[ $(basename $PWD) = "surelock-sw" ]] + then + source env.bash + else + if [[ -z $SURELOCKROOT ]] + then + echo "set SURELOCKROOT or source env.bash before running" + exit 1 + fi + fi + + cd $SURELOCKROOT + LINUX=1 + MAKE=make + + UNAME=$(uname --all) + if [[ $UNAME =~ "ppc64le" ]] + then + LE=1 + else + LE=0 + fi +else + #AIX + if [[ ! 
-z $SURELOCKROOT ]] + then + cd $SURELOCKROOT + fi + if [[ $(basename $PWD) != "surelock-sw" ]] + then + echo "must be in surelock-sw dir to execute" + exit 1 + fi + if [[ -z $SURELOCKROOT ]] + then + SURELOCKROOT= + fi + LINUX=0 + LE=0 + MAKE=gmake +fi + +unset CUSTOMFLAGS +unset BLOCK_FILEMODE_ENABLED +unset BLOCK_MC_ENABLED +unset TARGET_PLATFORM +ulimit -c unlimited #allow core files to be generated +ulimit -n 5000 + +if [[ -h customrc ]] +then + rm -f customrc +fi + +if [[ $LE -eq 1 ]] +then + ln -s customrc.p8elblkkermc customrc + source env.bash +else + ln -s customrc.p8beblkkermc customrc + . ./customrc +fi + +if [[ -z $1 ]] +then + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" && -z $2 ]] +then + $MAKE cleanall + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" || $2 = "cleanall" || $3 = "cleanall" || $4 = "cleanall" ]] +then + $MAKE cleanall +fi + +if [[ $1 = "tests" || $2 = "tests" || $3 = "tests" || $4 = "tests" ]] +then + $MAKE tests -j16 + rc=$? + check_rc $rc +fi + +if [[ $1 = "packaging" || $2 = "packaging" || $3 = "packaging" || $4 = "packaging" ]] +then + $MAKE tests -j16 + rc=$? + check_rc $rc + $MAKE docs + $MAKE install + rc=$? + check_rc $rc + $MAKE packaging + rc=$? + check_rc $rc + __exit $? +fi + +if [[ $1 = "run_unit" || $2 = "run_unit" || $3 = "run_unit" || $4 = "run_unit" ]] +then + $MAKE tests -j16 + $MAKE run_unit + __exit $? +fi + +if [[ $1 = "run_fvt" || $2 = "run_fvt" || $3 = "run_fvt" || $4 = "run_fvt" ]] +then + $MAKE tests -j16 + $MAKE run_fvt + __exit $? +fi + +__exit $? diff --git a/src/build/tools/build_mc b/src/build/tools/build_mc new file mode 100755 index 00000000..71aa028b --- /dev/null +++ b/src/build/tools/build_mc @@ -0,0 +1,168 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/build/tools/build_mc $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +function check_rc +{ + if [[ $1 -ne 0 ]] + then + echo "LAST CMD FAILED, EXITING" + __exit $1 + fi +} + +function __exit +{ + exit $1 +} + +if [[ $1 = "help" ]] +then + echo "options: cleanall tests packaging run_unit run_fvt" + echo "default: build only shipped targets" + exit 0 +fi + +if [[ $(uname) = "Linux" ]] +then + if [[ $(basename $PWD) = "surelock-sw" ]] + then + source env.bash + else + if [[ -z $SURELOCKROOT ]] + then + echo "set SURELOCKROOT or source env.bash before running" + exit 1 + fi + fi + + cd $SURELOCKROOT + LINUX=1 + MAKE=make + + UNAME=$(uname --all) + if [[ $UNAME =~ "ppc64le" ]] + then + LE=1 + else + LE=0 + fi +else + #AIX + if [[ ! 
-z $SURELOCKROOT ]] + then + cd $SURELOCKROOT + fi + if [[ $(basename $PWD) != "surelock-sw" ]] + then + echo "must be in surelock-sw dir to execute" + exit 1 + fi + if [[ -z $SURELOCKROOT ]] + then + SURELOCKROOT= + fi + LINUX=0 + LE=0 + MAKE=gmake +fi + +unset CUSTOMFLAGS +unset BLOCK_FILEMODE_ENABLED +unset BLOCK_MC_ENABLED +unset TARGET_PLATFORM +ulimit -c unlimited #allow core files to be generated + +if [[ -h customrc ]] +then + rm -f customrc +fi + +if [[ $LE -eq 1 ]] +then + ln -s customrc.p8elblkmc customrc + source env.bash +else + echo "NO BE MC BUILD AVAILABLE" + exit + ln -s customrc.p8beblkmc customrc + . customrc +fi + +if [[ -z $1 ]] +then + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" && -z $2 ]] +then + $MAKE cleanall + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" || $2 = "cleanall" || $3 = "cleanall" || $4 = "cleanall" ]] +then + $MAKE cleanall +fi + +if [[ $1 = "tests" || $2 = "tests" || $3 = "tests" || $4 = "tests" ]] +then + $MAKE tests -j16 + rc=$? + check_rc $rc +fi + +if [[ $1 = "packaging" || $2 = "packaging" || $3 = "packaging" || $4 = "packaging" ]] +then + $MAKE tests -j16 + rc=$? + check_rc $rc + $MAKE docs + $MAKE install + rc=$? + check_rc $rc + $MAKE packaging + rc=$? + check_rc $rc + __exit $? +fi + +if [[ $1 = "run_unit" || $2 = "run_unit" || $3 = "run_unit" || $4 = "run_unit" ]] +then + $MAKE tests -j16 + $MAKE run_unit + __exit $? +fi + +if [[ $1 = "run_fvt" || $2 = "run_fvt" || $3 = "run_fvt" || $4 = "run_fvt" ]] +then + $MAKE tests -j16 + $MAKE run_fvt + __exit $? +fi + +__exit $? diff --git a/src/build/tools/build_mem b/src/build/tools/build_mem new file mode 100755 index 00000000..f6e54d53 --- /dev/null +++ b/src/build/tools/build_mem @@ -0,0 +1,150 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/build/tools/build_mem $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +function check_rc +{ + if [[ $1 -ne 0 ]] + then + __exit $1 + fi +} + +function __exit +{ + exit $1 +} + +if [[ $1 = "help" ]] +then + echo "options: cleanall tests run_unit run_fvt" + echo "default: build only shipped targets" + exit 0 +fi + +if [[ $(uname) = "Linux" ]] +then + if [[ $(basename $PWD) = "surelock-sw" ]] + then + source env.bash + else + if [[ -z $SURELOCKROOT ]] + then + echo "set SURELOCKROOT or source env.bash before running" + exit 1 + fi + fi + + cd $SURELOCKROOT + LINUX=1 + MAKE=make + + UNAME=$(uname --all) + if [[ $UNAME =~ "ppc64le" ]] + then + LE=1 + else + LE=0 + fi +else + #AIX + if [[ ! 
-z $SURELOCKROOT ]] + then + cd $SURELOCKROOT + fi + if [[ $(basename $PWD) != "surelock-sw" ]] + then + echo "must be in surelock-sw dir to execute" + exit 1 + fi + if [[ -z $SURELOCKROOT ]] + then + SURELOCKROOT= + fi + LINUX=0 + LE=0 + MAKE=gmake +fi + +unset CUSTOMFLAGS +unset BLOCK_FILEMODE_ENABLED +unset BLOCK_MC_ENABLED +unset TARGET_PLATFORM +ulimit -c unlimited #allow core files to be generated + +if [[ -h customrc ]] +then + rm -f customrc +fi + +if [[ $LE -eq 1 ]] +then + ln -s customrc.p8el customrc + source env.bash +else + ln -s customrc.p8be customrc + . customrc +fi + +if [[ -z $1 ]] +then + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" && -z $2 ]] +then + $MAKE cleanall + $MAKE -j16 + __exit $? +fi + +if [[ $1 = "cleanall" || $2 = "cleanall" || $3 = "cleanall" ]] +then + $MAKE cleanall +fi + +if [[ $1 = "tests" || $2 = "tests" || $3 = "tests" ]] +then + $MAKE tests -j16 + rc=$? + check_rc $rc +fi + +if [[ $1 = "run_unit" || $2 = "run_unit" || $3 = "run_unit" ]] +then + $MAKE tests -j16 + $MAKE run_unit + __exit $? +fi + +if [[ $1 = "run_fvt" || $2 = "run_fvt" || $3 = "run_fvt" ]] +then + $MAKE tests -j16 + $MAKE run_fvt + __exit $? +fi + +__exit $? diff --git a/src/cflash/cxlfcommon.c b/src/cflash/cxlfcommon.c new file mode 100644 index 00000000..4b8cf0cf --- /dev/null +++ b/src/cflash/cxlfcommon.c @@ -0,0 +1,525 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/cxlfcommon.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include // open/close +#include // ioctl cmds +#include //offsetof +#include // open/close +#include //basename +#include +#include +#include + +#define MAX_FOPEN_RETRIES 100 +#define FOPEN_SLEEP_INTERVAL_US 100000 //100 ms sleep interval + + + + + +int set_lun_mode(lun_table_entry_t* lun_entry_ptr, MODE_T mode) +{ + TRACEV("Setting %s to %02x\n",lun_entry_ptr->sgdev, mode); + return cxlf_set_mode(lun_entry_ptr->sgdev, mode, lun_entry_ptr->lun); +} + +/* + need to pass down wwn (get from scsi inquiry data) - does this have a "3" on the front or not??? + need to save off wwns in a file + need to call utility on udev startup / plugging... 
see ethernet code + */ + + +bool cxlf_set_mode(char* target_device, uint8_t target_mode, uint8_t* wwid) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + int fd = NULL; + bool rc = false; + struct dk_cxlflash_manage_lun manage_lun = {{0}}; + int retrycount=0; + uint8_t empty_wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN] = {0}; + int i = 0; + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + TRACEV("Setting target mode for '%s' to %d\n",target_device, target_mode); + + do + { + if((target_device == NULL) || (strlen(target_device)==0)) + { + TRACED("Invalid target device.\n"); + rc = false; + break; + } + if(wwid == NULL) + { + TRACED("NULL wwid is invalid\n"); + rc = false; + break; + } + if(memcmp(empty_wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN) == 0) + { + TRACED("Invalid empty WWID for device '%s'\n", target_device); + rc = false; + break; + } + if(g_traceV) + { + TRACEV("WWID: "); + for(i=0; i < DK_CXLFLASH_MANAGE_LUN_WWID_LEN; i++) + { + printf("%02x", wwid[i]); + } + printf("\n"); + } + + + //open the device exclusively so that others are no longer able to manipulate it + do + { + if(retrycount != 0) + { + TRACEV("Failed to open device - retrying... %s %d\n", target_device, retrycount); + usleep(FOPEN_SLEEP_INTERVAL_US); + } + fd = open(target_device, (O_EXCL|O_NONBLOCK|O_RDWR)); + retrycount++; + } while((fd < 0) && (retrycount < MAX_FOPEN_RETRIES)); + int errsv = errno; + if (fd < 0) + { + TRACED("Unable to open device / special file.: '%s' (errno %d)\n", target_device, errsv); + rc = false; + break; + } + + //we've successfully opened the device now. 
+ + switch(target_mode) + { + case MODE_LEGACY: + manage_lun.hdr.flags = DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE; + break; + + case MODE_SIO: + manage_lun.hdr.flags = DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE; + break; + default: + TRACED("Unknown target mode: %d\n",target_mode); + break; + } + + + memcpy(manage_lun.wwid, wwid, sizeof(manage_lun.wwid)); + + int rslt = ioctl(fd, DK_CXLFLASH_MANAGE_LUN, &manage_lun); + TRACEV("ioctl result: %d\n",rslt); + if(rslt == 0) + { + //only set the "true" return code if we were able to successfully make the call to the DD + rc=true; + } + } while(0); + + TRACEV("Closing fd = %d\n",fd); + //close on all paths + close(fd); + + return rc; +} + +bool cxlf_parse_wwid(uint8_t* o_buffer, char* i_string, uint8_t i_buffer_sz) +{ + //this is simplistic, and not really apt to check for errors. + if(strlen(i_string) != (DK_CXLFLASH_MANAGE_LUN_WWID_LEN*2)) + { + TRACEV("WWIDs must be %d characters long. '%s' is invalid.\n",(DK_CXLFLASH_MANAGE_LUN_WWID_LEN*2),i_string); + return false; + } + char* nextchar = i_string; + int i=0; + for(i = 0; i < DK_CXLFLASH_MANAGE_LUN_WWID_LEN; i++) + { + int upper = 0; + int lower = 0; + //read in two characters (representing 4 bits each) + int validchars = sscanf(nextchar, "%1x%1x", &upper, &lower); //consume two characters + //assemble a valid 8-bit byte + o_buffer[i] = (char)upper << 4 | lower; + //TRACED("parsed %c%c, valid chars: %d, resulting in %02x\n", *nextchar, *(nextchar+1), validchars, o_buffer[i]); + //check to see if we read 2 characters successfully + if(validchars != 2) + { + TRACED("Error parsing '%s'\n", i_string); + return false; + } + nextchar+=2;//advance by two characters + } + //if we got this far, the wwid is valid + //TRACED("Controlling WWID: 0x"); + //for(i = 0; ipage_code != 0x83) + { + TRACED("Didn't get valid page 83 data for %s", l_pci_path); + } + if(l_vpd_header->page_length != 0x55) + { + TRACED("Didn't get valid page data length for %s", l_pci_path); + } + 
i=offsetof(pg83header_t, data); + l_pg83data = (void*)(l_vpd_header->data); + + //scan and look for LUN ID data... + while(ilength == 16) && + (l_pg83data->prot_info == 0x01) && + (l_pg83data->piv_assoc_id == 0x03)) + { + //TRACEV("Found valid wwid for this LUN\n"); + break; + } + i+=l_pg83data->length + offsetof(pg83data_t, data); + l_pg83data = (void*)l_pg83data + l_pg83data->length + offsetof(pg83data_t, data); + } + memcpy(o_lun, l_pg83data->data, 16); + + + + }while(0); + if(l_vpd_file) + { + fclose(l_vpd_file); + l_vpd_file = NULL; + } + return 0; +} + +void printentry(lun_table_entry_t* entry) +{ + int i = 0; + uint64_t* lhs_ptr = (uint64_t*)&entry->lun[0]; + uint64_t* rhs_ptr = (uint64_t*)&entry->lun[8]; + uint64_t lhs_val = *lhs_ptr; + uint64_t rhs_val = *rhs_ptr; + TRACED("%-16s",entry->sgdev); + for(i=0; i < DK_CXLFLASH_MANAGE_LUN_WWID_LEN; i++) + { + TRACED("%02x",entry->lun[i]); + } + TRACED(" aka %"PRIx64"%"PRIx64"\n",lhs_val,rhs_val); + +} + +int compare_luns(const void* item1, const void* item2) +{ + //quickly compare 128-bit entries + //memcmp exists, but is slower + lun_table_entry_t* lhs = (lun_table_entry_t*) item1; + lun_table_entry_t* rhs = (lun_table_entry_t*) item2; + + if((lhs == NULL) || (rhs == NULL)) + { + TRACEI("Invalid lhs = %p or rhs = %p", lhs, rhs); + return 0; + } + //memcmp is 30% slower in a synthetic benchmark + //return memcmp(lhs->lun, rhs->lun, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); + uint64_t* lhs_ptr = (uint64_t*)&lhs->lun[0]; + uint64_t* rhs_ptr = (uint64_t*)&rhs->lun[0]; + uint64_t lhs_val = *lhs_ptr; + uint64_t rhs_val = *rhs_ptr; + + if(lhs_val != rhs_val) + { + if(lhs_val < rhs_val) + { + return -1; + } + else + { + return 1; + } + } + else + { + //check the low order bytes + lhs_ptr++; + rhs_ptr++; + lhs_val = *lhs_ptr; + rhs_val = *rhs_ptr; + + if(lhs_val != rhs_val) + { + if(lhs_val < rhs_val) + { + return -1; + } + else + { + return 1; + } + } + else + { + return 0; + } + } +} + + + +int 
update_siotable(lun_table_entry_t* o_lun_table, int* o_lun_table_sz) +{ + //todo - static to select whether we do the update or not, based on stat data + ini_dict_t* dict = NULL; + ini_dict_t* curr_entry = NULL; + uint32_t linefail = 0; + uint16_t luncount = 0; + int curr_mode; + dict = cxlfIniParse("/opt/ibm/capikv/etc/sioluntable.ini", &linefail ); + curr_entry = dict; + bool rslt = false; + *o_lun_table_sz=0; + while((curr_entry != NULL) && (luncount < MAX_NUM_LUNS)) + { + //atoi will return zero for any non-successful conversion. this is + //ideal, since we will treat anything that doesn't convert to "1" integer + //as legacy mode. + curr_mode = atoi(curr_entry->value); + if(curr_mode == 1) + { + //attempt to parse any key we find that's the right size, and put it + //into the appropriate place in the LUN table, assuming LUNs are stored + //in hex with no leading 0x + if(strlen(curr_entry->key) == (DK_CXLFLASH_MANAGE_LUN_WWID_LEN*2)) + { + rslt = cxlf_parse_wwid(o_lun_table[luncount].lun, curr_entry->key, strlen(curr_entry->key)); + if(rslt == true) + { + luncount++; + } + } + + } + curr_entry = curr_entry->next; + } + *o_lun_table_sz = luncount; + TRACEI("Read %d LUNs from the table\n", *o_lun_table_sz); + qsort(o_lun_table, luncount, sizeof(lun_table_entry_t), compare_luns); + if(dict) + { + cxlfIniFree(dict); + } + + return 0; +} + + +int update_cxlflash_devs(lun_table_entry_t* o_cxldevices, int* o_cxldevices_sz, lun_table_entry_t* filter_lun) +{ + //todo- do something to determine if we make a change... perhaps based on stats? 
+ int i = 0; + char* devname = NULL; + char sgdevpath[DEV_STRING_SZ]; + glob_t globbuf; + int rc = 0; + int num_devs = 0; + + TRACEV("Starting update...\n"); + + do + { + *o_cxldevices_sz=0; + memset(sgdevpath,0,DEV_STRING_SZ); + + rc = glob( "/sys/module/cxlflash/drivers/pci:cxlflash/*:*:*.*/host*/target*:*:*/*:*:*:*/scsi_generic/sg*", GLOB_ONLYDIR, NULL, &globbuf); + if(rc != 0) + { + TRACEI("Error on glob() call\n"); + break; + } + + if(globbuf.gl_pathc > MAX_NUM_SGDEVS) + { + TRACED("This application supports up to %d devices, but %d were found.\n", MAX_NUM_SGDEVS, (int)globbuf.gl_pathc); + break; + } + for(i = 0; i < globbuf.gl_pathc; i++ ) + { + devname = basename(globbuf.gl_pathv[i]); + if(strcmp(devname, ".")==0) + { + TRACEI("Found invalid SG device name for '%s'\n", globbuf.gl_pathv[i]); + break; + } + else + { + snprintf(sgdevpath, DEV_STRING_SZ, "/dev/%s",devname); + strncpy(o_cxldevices[num_devs].sgdev, sgdevpath, DEV_STRING_SZ); + if(extract_lun_from_vpd(devname, o_cxldevices[num_devs].lun) != 0) + { + TRACED("Unable to extract VPD for device %s\n", devname); + rc = -2; + break; + } + if(filter_lun != NULL) + { + if(compare_luns(filter_lun, &o_cxldevices[num_devs]) != 0) + { + continue; + } + } //if filter lun + num_devs++; + } //else + } //for(i...) 
+ *o_cxldevices_sz = num_devs; + qsort(o_cxldevices, *o_cxldevices_sz, sizeof(lun_table_entry_t), compare_luns); + TRACEI("Read %d Device's properties from sysfs\n", *o_cxldevices_sz); + }while(0); + + globfree(&globbuf); //cleanup from glob() must always occur + + + return rc; +} + + +int cxlf_refresh_luns(lun_table_entry_t* i_luntable, int i_luntable_sz, lun_table_entry_t* i_sgdevs, int i_sgdevs_sz) +{ + + int32_t curr_sg=0; + int32_t curr_lun=0; + int rslt = 0; + + //there are likely more SG devices than LUN table entries + //assume the list is sorted, and start comparing the + //sorted lists + TRACEI("Max SG Devs: %d, MAX Luns: %d\n", i_sgdevs_sz, i_luntable_sz); + while((curr_sg < i_sgdevs_sz) && (curr_lun < i_luntable_sz)) + { + TRACEV("curr_sg = %d, curr_lun= %d\n", curr_sg, curr_lun); + //TRACED("SG: "); + //printentry(&i_sgdevs[curr_sg]); + //TRACED("Table: "); + //printentry(&i_sgdevs[curr_lun]); + rslt = compare_luns(&i_sgdevs[curr_sg], &i_luntable[curr_lun]); + //negative indicates we are NOT in the LUN table and + //should try and advance the device table pointer + if(rslt < 0) + { + set_lun_mode(&i_sgdevs[curr_sg], MODE_LEGACY); + curr_sg++; + } + //positive indicates the current LUN wasn't found, so we should + //advance until we are less than or equal to a table entry + else if(rslt > 0) + { + curr_lun++; + } + //indicates we found a match! 
+ else + { + set_lun_mode(&i_sgdevs[curr_sg], MODE_SIO); + curr_sg++; + } + } + TRACEV("Cleaning up any LUN NOT in the LUN table...\n"); + while(curr_sg +#include +#include +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + +#define TRACE_ENABLED +#define KWDATA_SZ 256 +#define MAX_VPD_SIZE 0x58 +#define DEV_STRING_SZ 128 +#define MAX_NUM_SGDEVS 4096 +#define MAX_NUM_LUNS 4096 + + +/*@}*/ // Ending tag for external constants in doxygen + +/*----------------------------------------------------------------------------*/ +/* Enumerations */ +/*----------------------------------------------------------------------------*/ +typedef enum MODE +{ + MODE_LEGACY = 0, + MODE_SIO = 1, + MODE_INVALID = 2 +} MODE_T; + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +extern int32_t g_traceE; /* error traces */ +extern int32_t g_traceI; /* informative 'where we are in code' traces */ +extern int32_t g_traceF; /* function exit/enter */ +extern int32_t g_traceV; /* verbose trace...lots of information */ + + +#pragma pack(1) +typedef struct pg83header +{ + uint8_t peripherial_type; + uint8_t page_code; + uint8_t reserved1; + uint8_t page_length; + uint8_t data[1]; +}pg83header_t; + +typedef struct pg83data +{ + uint8_t prot_info; + uint8_t piv_assoc_id; + uint8_t reserved2; + uint8_t length; + uint8_t data[1]; +}pg83data_t; + +typedef struct lun_table_entry +{ + char sgdev[DEV_STRING_SZ]; + uint8_t lun[DK_CXLFLASH_MANAGE_LUN_WWID_LEN]; + MODE_T mode; +} lun_table_entry_t; + + 
+/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + +#define TRACEE(FMT, args...) if(g_traceE) \ +do \ +{ \ +char __data__[256]; \ +memset(__data__,0,256); \ +sprintf(__data__,"%s %s: " FMT, __FILE__, __func__, ## args); \ +perror(__data__); \ +} while(0) + +#define TRACEF(FMT, args...) if(g_traceF) \ +{ \ +printf("%s %s: " FMT, __FILE__, __func__ ,## args); \ +} + +#define TRACEI(FMT, args...) if(g_traceI) \ +{ \ +printf("%s %s: " FMT, __FILE__, __func__ ,## args); \ +} + +#define TRACEV(FMT, args...) if(g_traceV) printf("%s %s: " FMT, __FILE__, __func__ ,## args) + +#define TRACED(FMT, args...) printf(FMT,## args) + + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ +uint32_t convert_to_binary (uint8_t **output_buffer, + uint32_t *output_buffer_length, + char *input_buffer); + + + +bool cxlf_get_mode(char* target_device); +bool cxlf_set_mode(char* target_device, uint8_t target_mode, uint8_t* wwid); +bool cxlf_parse_wwid(uint8_t* o_buffer, char* i_string, uint8_t i_buffer_sz); +int extract_lun_from_vpd(const char* i_sgdevpath, uint8_t* o_lun); +int compare_luns(const void* item1, const void* item2); +int update_siotable(lun_table_entry_t* o_lun_table, int* o_lun_table_sz); +int update_cxlflash_devs(lun_table_entry_t* o_cxldevices, int* o_cxldevices_sz, lun_table_entry_t* filter_lun); +int cxlf_refresh_luns(lun_table_entry_t* i_luntable, int i_luntable_sz, lun_table_entry_t* i_sgdevs, int i_sgdevs_sz); +void printentry(lun_table_entry_t* entry); +#endif //_CXLFLASHUTIL_H + diff --git a/src/cflash/cxlfd.c b/src/cflash/cxlfd.c new file mode 100644 index 00000000..6db2a1b9 --- /dev/null +++ b/src/cflash/cxlfd.c @@ -0,0 +1,233 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/cflash/cxlfd.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/*! + * @file cxlflashutil.c + * @brief utility tooling + */ + + +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include + + +#include + +#include +#include + +#include //delete this! 
+/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +error_t parse_opt (int key, + char *arg, + struct argp_state *state); + + + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + + +const char *argp_program_version = "cxlfd\n"; +const char *argp_program_bug_address = "IBM Support"; +char doc[] = +"\ncxlfd -- LUN management daemon for IBM Data Engine for NoSQL Software\n\v"; + + +/*----------------------------------------------------------------------------*/ +/* Struct / Typedef */ +/*----------------------------------------------------------------------------*/ + +// +enum argp_char_options { + + // Note that we need to be careful to not re-use char's + CXLF_DEBUG = 'D', + CXLF_TIMERFREQ = 't', + +}; + +static struct argp_option options[] = { + {"timer", CXLF_TIMERFREQ, "", OPTION_HIDDEN, "Update interval for commands to be sent to cxlflash driver (in seconds)."}, + {"debug", CXLF_DEBUG, "", 0, "Internal trace level for tool"}, + {0} +}; + + +static struct argp argp = { options, parse_opt, 0, doc }; + + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +struct arguments g_args = {0}; +int32_t g_traceE = 1; /* error traces */ +int32_t g_traceI = 0; /* informative 'where we are in code' traces */ +int32_t g_traceF = 0; /* function exit/enter */ +int32_t g_traceV = 0; /* verbose trace...lots of information */ +lun_table_entry_t g_cxldevs[MAX_NUM_SGDEVS] = {{{0}}}; +int32_t g_cxldevs_sz = 0; +lun_table_entry_t g_luntable[MAX_NUM_LUNS] = {{{0}}}; +int32_t g_luntable_sz = 0; +/*----------------------------------------------------------------------------*/ +/* Defines */ 
+/*----------------------------------------------------------------------------*/ + +/* + need to pass down wwn (get from scsi inquiry data) - does this have a "3" on the front or not??? + need to save off wwns in a file + need to call utility on udev startup / plugging... see ethernet code +*/ + +error_t parse_opt (int key, + char *arg, + struct argp_state *state) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + char* endptr = NULL; + + /*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + switch (key) + { + // case CXLF_GET_MODE: + //g_args.get_mode = 1; + //break; + + case CXLF_TIMERFREQ: + if(((uint16_t)strtol(arg,&endptr, 10)<= 0) || (endptr != (arg+strlen(arg)))) + { + TRACED("Interval must be a positive integer in seconds. '%s' is invalid.\n", arg); + exit(EINVAL); + } + else + { + g_args.timer_override = atoi(arg); + TRACEV("Set timer override to %d\n", g_args.timer_override); + } + break; + + case CXLF_DEBUG: + g_args.verbose = atoi (arg); + TRACEI ("Set verbose level to %d\n", g_args.verbose); + if (g_args.verbose >= 1) + g_traceI = 1; + if (g_args.verbose >= 2) + g_traceF = 1; + if (g_args.verbose >= 3) + g_traceV = 1; + break; + + case 0 : + + TRACEV("Got a naked argument: '%s'\n", arg); + break; + + default: + return (ARGP_ERR_UNKNOWN); + } + + return (0); + +} + + + + + + + + + +#define BILLION 1000000000L +int main (int argc, char *argv[]) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + int32_t rc = 0; + int16_t timer_interval = DEFAULT_TIMER_INTERVAL; + //int i = 0; + bool skip_trace = false; //prevent us from filling logs + 
/*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + memset(&g_args,0,sizeof(g_args)); + + argp_parse (&argp, argc, argv, ARGP_IN_ORDER, 0, &g_args); + + + if(g_args.timer_override != 0) + { + timer_interval = g_args.timer_override; + } + + TRACED("Starting up with %d second interval\n", timer_interval); + + while(1) + { + update_cxlflash_devs(g_cxldevs, &g_cxldevs_sz, NULL); + update_siotable(g_luntable, &g_luntable_sz); + + if(g_cxldevs_sz != 0) + { + skip_trace = false; + cxlf_refresh_luns(g_luntable, g_luntable_sz, g_cxldevs, g_cxldevs_sz); + } + else + { + if(skip_trace != true) + { + TRACED("No CXL Devices were found; waiting...\n"); + skip_trace = true; + } + } + + sleep(timer_interval); + continue; + + + } + + + return(rc); +} + + + diff --git a/src/cflash/cxlfd.h b/src/cflash/cxlfd.h new file mode 100644 index 00000000..a2431646 --- /dev/null +++ b/src/cflash/cxlfd.h @@ -0,0 +1,81 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/cxlfd.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _CXLFD_H +#define _CXLFD_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +#define LUNTABLEINI "/opt/ibm/capikv/etc/sioluntable.ini" +#define DEFAULT_TIMER_INTERVAL 30 //seconds + +/*----------------------------------------------------------------------------*/ +/* Enumerations */ +/*----------------------------------------------------------------------------*/ +/** + * \defgroup ExternalEnum External Enumerations + */ +/*@{*/ // Special tag to say everything between it and the ending + // brace is a part of the external enum module in doxygen. + +struct arguments +{ + uint16_t timer_override; + uint8_t verbose; +}; + + + + + +/*@}*/ // Ending tag for external structure module in doxygen + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +#endif //_CXLFD_H + diff --git a/src/cflash/cxlfini.c b/src/cflash/cxlfini.c new file mode 100644 index 00000000..b7aa6ab1 --- /dev/null +++ b/src/cflash/cxlfini.c @@ -0,0 +1,387 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/cflash/cxlfini.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include +#include //isspace +#include +#include + + +//forward prototypes +bool section_is_valid(char* i_section); + +char* trim_whitespace_inplace(char* io_buffer); + +void trim_comments(char* io_buffer); + +ini_dict_t* append_ini_entry(ini_dict_t** io_entry_list, char* section, char* key, char* value); + + + +bool section_is_valid(char* i_section) +{ + bool l_rc = true; + int i = 0; + do + { + //if the pointer's invalid, the section is obv. invalid. + if(i_section == NULL) + { + TRACEE("invalid input\n"); + l_rc = false; + break; + } + //if the string is empty, it is not valid + if(strlen(i_section) == 0) + { + l_rc = false; + break; + } + //check each char to see if it's whitespace. 
+ for(i = 0; i < strlen(i_section); i++) + { + //if whitespace is found, isspace returns a non-zero + if(isspace(i_section[i]) != 0) + { + TRACEE("Found whitespace in section title.\n"); + l_rc = false; + break; + } + else if(i_section[i] == ';') + { + TRACEE("Found a comment char in the section title.\n"); + l_rc = false; + break; + } + } + + } while (0); + if((l_rc == false) && (i_section != NULL)) + { + TRACEE("Section string '%s' is invalid.\n", i_section); + } + else if(l_rc ==true) + { + //TRACEV("Section string '%s' is valid.\n", i_section); + } + return l_rc; +} + +char* trim_whitespace_inplace(char* io_buffer) +{ + bool l_found_char = false; + char* l_start = io_buffer; + int i=0; + if(l_start != NULL) + { + //TRACEV("Input string to trim is '%s'\n",l_start); + //search the string for white space + for(i = 0; inext != NULL)) + { + l_curr_entry = l_curr_entry->next; + } + if(l_curr_entry == NULL) + { + TRACEE("Error occurred parsing ini entry structs - we fell off the end of the list.\n"); + l_new_entry = NULL; + } + else + { + l_new_entry = malloc(sizeof(ini_dict_t)); + l_curr_entry->next = l_new_entry; + } + }//end else - list exists + + //add the new entry! 
+ if(l_new_entry != NULL) + { + l_new_entry->section = strdup(section); + l_new_entry->key = strdup(key); + l_new_entry->value = strdup(value); + l_new_entry->next = NULL; + } + }//end else - entry is valid + return l_new_entry; +} + +ini_dict_t* cxlfIniParse(char* i_inifilename, uint32_t* o_failed_line) +{ + FILE* l_file = NULL; + ini_dict_t* l_first_entry = NULL; + char l_curr_line[CXLF_MAX_LINE_SZ] = ""; + char l_section[CXLF_MAX_LINE_SZ] = "default"; + uint32_t l_line_num = 0; + bool l_err = false; + do + { + //do some basic input validation + if((i_inifilename == NULL) || (o_failed_line == NULL)) + { + TRACEE("Invalid argument %p or %p\n", i_inifilename, o_failed_line); + break; + } + + //open the file handle + l_file = fopen(i_inifilename, "r"); + if(l_file == NULL) + { + TRACEE("Unable to find file '%s'\n",i_inifilename); + l_err = true; + break; + } + + //read lines until we get to the end + while(fgets(l_curr_line, CXLF_MAX_LINE_SZ, l_file) != NULL) + { + //advance our line counts = for humans, files start on line 1, not zero. + l_line_num++; + + trim_comments(l_curr_line); + if(strlen(l_curr_line) < 2) + { + //ini files are not allowed to have fewer than 2 characters ('a=') + //if we find something that's shorter, consider it a comment or empty line + //TRACEV("skipping line #%d.\n",l_line_num); + //note we don't "continue" since we want to bump our line number counts... 
+ } + else if(l_curr_line[0] == '[') + { + char* l_section_end = NULL; + //found a section start element, so search for the matching end + l_section_end = strchr(l_curr_line, ']'); + if(l_section_end == NULL) + { + TRACEE("Parsing error: unmatched []'s found on line %d: '%s'", (uint32_t)l_line_num, l_curr_line); + l_err = true; + break; + } + else + { + //artificially strip off the end ']' by marking it as a null char + *l_section_end = '\0'; + } + //copy the section name out to our section buffer + //note we +1 to avoid copying the leading '[' character + strncpy(l_section, l_curr_line+1, CXLF_MAX_LINE_SZ); + + if(!section_is_valid(l_section)) + { + TRACEE("Error: section field was invalid. Unable to parse '%s'\n",l_curr_line); + l_err = true; + break; + } + TRACEV("New section: '%s'\n", l_section); + } + else + { + //TRACEV("Found a k-v pair line\n"); + //must be a key/value pair + char* l_equalsign = NULL; + char* l_value = NULL; + char* l_key = NULL; + l_equalsign = strchr(l_curr_line, '='); + if(l_equalsign == NULL) + { + TRACEE("No '=' found in key/value pair line. string = '%s'\n",l_curr_line); + l_err = true; + break; + } + //we must have a null terminator at the end of the string buffer + //if we assume the = is the last char, then l_value will be a + //null character. + l_value = l_equalsign+1; + //artificially stick a 'null' in the middle of the 'key = value' string. + *l_equalsign = '\0'; + l_key = trim_whitespace_inplace(l_curr_line); + if(strlen(l_key) == 0) + { + TRACEE("Invalid key/value pair. Parsed and found key length == 0.\n"); + l_err = true; + break; + } + l_value = trim_whitespace_inplace(l_value); + //value is allowed to be empty + append_ini_entry(&l_first_entry, l_section, l_key, l_value); + } + + }//end while(fgets...) 
+ if(l_err) + { + TRACEE("Error occured parsing cfg file.\n"); + break; + } + + } while (0); + + + if(l_file) + { + //always close the file if it was opened + fclose(l_file); + } + //if something broke, NULL and empty the entire linked list. it's input data + //was not valid, so it (itself) is not valid. + if(l_err) + { + cxlfIniFree(l_first_entry); + l_first_entry = NULL; + *o_failed_line = l_line_num; + } + return l_first_entry; +} + + + +void cxlfIniFree(ini_dict_t* i_ini_entry) +{ + //Locals + ini_dict_t* l_curr = i_ini_entry; + ini_dict_t* l_next = NULL; + + while(l_curr != NULL) + { + //free the elements from our current entry + free(l_curr->section); + free(l_curr->key); + free(l_curr->value); + //free our book-keeping structures, but keep 'next' so we can proceed down the list + l_next = l_curr->next; + free(l_curr); + //advance our way through the linked list + l_curr = l_next; + + } + + return; +} + +char* cxlfFindIniValue(ini_dict_t* i_ini_entry, char* i_section, char* i_key) +{ + //Locals + ini_dict_t* l_curr = i_ini_entry; + char* l_value = NULL; + TRACEV("Searching for %s.%s \n", i_section, i_key); + //Code + while(l_curr != NULL) + { + if((strcmp(l_curr->section, i_section) == 0) && + (strcmp(l_curr->key, i_key) == 0)) + { + l_value = l_curr->value; + //found the entry + break; + } + //advance our way through the linked list + l_curr = l_curr->next; + } + return l_value; +} + + diff --git a/src/cflash/cxlfini.h b/src/cflash/cxlfini.h new file mode 100644 index 00000000..93812128 --- /dev/null +++ b/src/cflash/cxlfini.h @@ -0,0 +1,102 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/cxlfini.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + * @file cxlfini.h + * @brief Contains all internal headers for cxlfisioning afu logic + * + * + */ + +#ifndef _CXLFINI_H +#define _CXLFINI_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +#define CXLF_MAX_LINE_SZ 150 +/*----------------------------------------------------------------------------*/ +/* Structures */ +/*----------------------------------------------------------------------------*/ + + +/* Each key=value of an INI file is parsed into an ini_element. The pointers + * here are dynamically-allocated, which means we must free() the indiv. element + * members prior to freeing the linked list overall! 
Destroy this list by calling + */ +typedef struct ini_dict +{ + char* section; + char* key; + char* value; + struct ini_dict* next; +} ini_dict_t; + + + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + + +/** + * @brief Free an ini_dict_t* structure and associated members + * Frees the internal data structures that make up the ini dictionary that is + * returned by cxlfIniParse. cxlfIniFree must be called to avoid memory leaks. + * @param i_ini_entry ini_dict_t* to be freed. + */ + +void cxlfIniFree(ini_dict_t* i_ini_entry); + + +/** + * @brief Parse an ini file and create a dictionary of the found contents + * This dynamically-allocates data structures to describe the contents of + * an ini file. The caller MUST call cxlfIniFree() on the returned pointer + * when the dictionary is no longer needed to avoid a memory leak. + * @param i_inifilename file to parse + * @param o_failed_line Line we failed to parse on (if any). Check this if + * the return value is NULL. + * @returns a valid dictionary on sucess, NULL on failure. + */ +ini_dict_t* cxlfIniParse(char* i_inifilename, uint32_t* o_failed_line); + + +/** + * @brief find a desired ini value, given a section and key + * @param i_ini_entry dictionary to process + * @param i_section section header (if any) for the key. Enter "default" if no section is specified in the source file. + * @param i_key key to be found + * @returns valid char* pointer on success, or NULL on error or key / section not found. Note that empty strings e.g. "" may be returned if a key is present, but not set in the ini file. 
+ */ +char* cxlfFindIniValue(ini_dict_t* i_ini_entry, char* i_section, char* i_key); + + +#endif diff --git a/src/cflash/cxlflashutil.c b/src/cflash/cxlflashutil.c new file mode 100644 index 00000000..93ffd17d --- /dev/null +++ b/src/cflash/cxlflashutil.c @@ -0,0 +1,350 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/cxlflashutil.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/*! 
+ * @file cxlflashutil.c + * @brief utility tooling + */ + + +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include //basename +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ +error_t parse_opt (int key, + char *arg, + struct argp_state *state); + + + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +const char *argp_program_version = "cxlflashutil 2.0\n"; +const char *argp_program_bug_address = "IBM Support"; +char doc[] = +"\ncxlflashutil -- Control utility for IBM Data Engine for NoSQL Software\n\v"; + + +/*----------------------------------------------------------------------------*/ +/* Struct / Typedef */ +/*----------------------------------------------------------------------------*/ + +// +enum argp_char_options { + + // Note that we need to be careful to not re-use char's + //CXLF_GET_MODE = 'm', + CXLF_CONFIG = 'c', + CXLF_DEV = 'd', + CXLF_DEBUG = 'D', + CXLF_LUN_ID = 'l', + CXLF_SET_MODE = 'm', + +}; + +static struct argp_option options[] = { + {"set-mode", CXLF_SET_MODE, "<0=legacy/1=sio>", 0, "Set the IO mode for ALL instances and paths to a target LUN's disk(s)."}, + {"lun", CXLF_LUN_ID, "", 0, "Target LUN to operate on (16 byte hex value)"}, + //{"get-mode", CXLF_GET_MODE, 0, 0, "Get the IO mode for ALL instances and paths to a target LUN's disk(s)."}, + {"device", CXLF_DEV, "", 0, "Target device (e.g. 
/dev/sg123)"}, + {"debug", CXLF_DEBUG, "", 0, "Internal trace level for tool"}, + {"config", CXLF_CONFIG, 0, 0, "Configure device according to the LUN table"}, + {0} +}; + + +static struct argp argp = { options, parse_opt, 0, doc }; + + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +struct arguments g_args = {{0}}; +int32_t g_traceE = 1; /* error traces */ +int32_t g_traceI = 0; /* informative 'where we are in code' traces */ +int32_t g_traceF = 0; /* function exit/enter */ +int32_t g_traceV = 0; /* verbose trace...lots of information */ +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + + +error_t parse_opt (int key, + char *arg, + struct argp_state *state) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + //bool rc = false; + char* endptr = NULL; + int rc = 0; + /*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + switch (key) + { + // case CXLF_GET_MODE: + //g_args.get_mode = 1; + //break; + case CXLF_CONFIG: + if(g_args.set_mode != 0) + { + //disallow these args simultaneously + TRACED("Cannot set mode and configure-default option at the same time.\n"); + exit(EINVAL); + } + g_args.configure_default = 1; + break; + + case CXLF_LUN_ID: + + if(strlen(g_args.target_device) != 0) + { + //disallow these args simultaneously + TRACED("Cannot specify LUN config for multiple devices and a specific device simultaneously.\n"); + exit(EINVAL); + } + rc = cxlf_parse_wwid(g_args.wwid, arg, sizeof(g_args.wwid)); + if(rc == false) + { + TRACED("LUN ID is 
invalid.\n"); + //zero out the wwid just in case + memset(g_args.wwid,0,sizeof(g_args.wwid)); + g_args.wwid_valid = false; + exit(EINVAL); + } + else + { + TRACEV("Got a valid WWID\n"); + g_args.wwid_valid = true; + } + break; + + case CXLF_DEV: + if(g_args.wwid_valid != 0) + { + //disallow these args simultaneously + TRACED("Cannot specify LUN config for multiple devices and a specific device simultaneously.\n"); + exit(EINVAL); + } + memset(g_args.target_device, 0, DEV_STRING_SZ); + //leave a null byte on the end to avoid buffer overruns + strncpy(g_args.target_device, arg, DEV_STRING_SZ-1); + TRACEV("Set target device to '%s'\n",g_args.target_device); + break; + + case CXLF_SET_MODE: + if(g_args.configure_default != 0) + { + //disallow configure and set_mode at the same time. + TRACED("Cannot set mode and configure-default option at the same time.\n"); + exit(EINVAL); + } + if((uint8_t)strtol(arg,&endptr, 10)>= MODE_INVALID || (endptr != (arg+strlen(arg)))) + { + TRACED("Mode argument must be '0' (LEGACY) or '1' (SIO). 
'%s' is invalid.\n", arg); + exit(EINVAL); + } + else + { + g_args.set_mode = 1; + g_args.target_mode = atoi(arg); + TRACEV("Set mode to %d\n", g_args.target_mode); + } + break; + + case CXLF_DEBUG: + g_args.verbose = atoi (arg); + TRACEI ("Set verbose level to %d\n", g_args.verbose); + if (g_args.verbose >= 1) + g_traceI = 1; + if (g_args.verbose >= 2) + g_traceF = 1; + if (g_args.verbose >= 3) + g_traceV = 1; + break; + + case 0 : + + TRACEI("Got a naked argument: '%s'\n", arg); + break; + + default: + return (ARGP_ERR_UNKNOWN); + } + + return (0); + +} + + +int main (int argc, char *argv[]) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + int32_t rc = 0; + char devname[DEV_STRING_SZ] = {0}; + char* devbasename = NULL; + lun_table_entry_t device_entry = {{0}}; + lun_table_entry_t luntable[MAX_NUM_LUNS] = {{{0}}}; + lun_table_entry_t devtable[MAX_NUM_SGDEVS] = {{{0}}}; + int luntable_sz = 0; + int devtable_sz = 0; + int i = 0; + /*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + memset(&g_args,0,sizeof(g_args)); + + argp_parse (&argp, argc, argv, ARGP_IN_ORDER, 0, &g_args); + + + do + { + //if a LUN was specified, we need to make up a single-item LUN table + //and then take some action. To do this, we get all SG devs, and + //filter based on the matching LUN entry. + //we then either keep or delete that entry so that we can set the + //LUN to SIO mode or LEGACY. 
+ if(g_args.wwid_valid) + { + TRACEV("Setting LUN %s to mode %d\n",g_args.wwid, g_args.target_mode); + //make a single LUN table entry + luntable_sz=1; + memcpy(luntable[0].lun, g_args.wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); + + //get all possible wwids + rc = update_cxlflash_devs(devtable, &devtable_sz, &luntable[0]); + if(rc!= 0) + { + TRACED("Error processing device tables.\n"); + break; + } + + if(g_args.target_mode == MODE_LEGACY) + { + //"delete" the entry in the LUN table so that we can + //trigger a refresh, and effectively set all LUNs to legacy + //otherwise allow the single entry to continue to exist + luntable_sz = 0; + } + //refresh the device(s) that matched + rc = cxlf_refresh_luns(luntable, luntable_sz, devtable, devtable_sz); + if(rc!= 0) + { + TRACED("Error refreshing LUNs.\n"); + break; + } + } + else + { + //set a single device's values... + if(strlen(g_args.target_device) == 0) + { + TRACED("Error: device special file name required.\n"); + break; + } + + //copy this since basename() may corrupt the original string depending + //on the implementation + strncpy(devname, g_args.target_device, DEV_STRING_SZ); + devbasename = basename(devname); + if(strcmp(devname, ".")==0) + { + TRACED("Error: device name '%s' appears to be invalid.",g_args.target_device); + break; + } + rc = extract_lun_from_vpd(devbasename, device_entry.lun); + if(rc != 0) + { + TRACED("Error: Unable to find a matching LUN ID for device '%s' - is this a cxlflash device, and are you able to manage it?\n", g_args.target_device); + break; + } + + //if the default config option is set, then read the LUN table, + //and set a mode based on its contents + if(g_args.configure_default != 0) + { + + g_args.set_mode = 1; + rc = update_siotable(luntable, &luntable_sz); + if(rc != 0) + { + TRACED("Error: unable to read LUN table successfully.\n"); + break; + } + //scan the sio table for this disk's LUN, and if found, + //set the mode to SIO + g_args.target_mode = MODE_LEGACY; + for(i = 0; i < 
luntable_sz; i++) + { + if(compare_luns(&luntable[i], &device_entry) == 0) + { + + g_args.target_mode = MODE_SIO; + break; + } + } + } + + + + bool l_success = cxlf_set_mode(g_args.target_device, g_args.target_mode, device_entry.lun); + if(!l_success) + { + TRACED("ERROR: Device driver call returned an error.\n"); + rc = EIO; //arbitrary non-zero RC + break; + } + + } + + TRACED("SUCCESS\n"); + rc = 0; + } while(0); + + + return(rc); +} + + + + diff --git a/src/cflash/cxlflashutil.h b/src/cflash/cxlflashutil.h new file mode 100644 index 00000000..a3531cc1 --- /dev/null +++ b/src/cflash/cxlflashutil.h @@ -0,0 +1,87 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/cxlflashutil.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _CXLFLASHUTIL_H +#define _CXLFLASHUTIL_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + + + +/*----------------------------------------------------------------------------*/ +/* Enumerations */ +/*----------------------------------------------------------------------------*/ +/** + * \defgroup ExternalEnum External Enumerations + */ +/*@{*/ // Special tag to say everything between it and the ending + // brace is a part of the external enum module in doxygen. + + +struct arguments +{ + char target_device[DEV_STRING_SZ]; + uint8_t set_mode; + uint8_t configure_default; + uint8_t target_mode; + uint8_t get_mode; + uint8_t verbose; + bool wwid_valid; + uint8_t wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN]; +}; + +/*@}*/ // Ending tag for external structure module in doxygen + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + + + + +#endif //_CXLFLASHUTIL_H + diff --git a/src/cflash/makefile b/src/cflash/makefile new file mode 100644 index 00000000..4789c611 --- /dev/null +++ b/src/cflash/makefile @@ -0,0 +1,59 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an 
automatically generated prolog. +# +# $Source: src/cflash/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +ROOTPATH = ../.. + +UNAME=$(shell uname) + +USER_DIR = . +SUBDIRS = test.d +TESTDIR = ${ROOTPATH}/obj/tests + +LIBPATHS = -L${ROOTPATH}/img +LINKLIBS = + +PGMDIR = ${ROOTPATH}/obj/programs +TESTDIR = ${ROOTPATH}/obj/tests + +#cxlflashutil is only used on Linux +ifeq ($(UNAME),AIX) +PGMS = + +#Linux +else +PGMS = cxlflashutil cxlfd +cxlfd_OFILES = cxlfini.o cxlfcommon.o +cxlflashutil_OFILES = cxlfcommon.o cxlfini.o + +#ifeq ... +endif + +PROGRAMS = $(addprefix ${PGMDIR}/, ${PGMS}) + + +all: $(PROGRAMS) +test: $(BIN_TESTS) + +include ${ROOTPATH}/config.mk diff --git a/src/cflash/test/119.c b/src/cflash/test/119.c new file mode 100644 index 00000000..aabb82e2 --- /dev/null +++ b/src/cflash/test/119.c @@ -0,0 +1,159 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/119.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; +extern int long_run; + + +// create VLUN +int create_vluns_119(char *dev, dev64_t devno_1, + struct ctx *p_ctx) +{ + int rc,i; + pthread_t thread; + __u64 chunk = 0x10; + __u64 stride= 0x8000, nlba=0; + __u64 flags=0 ;//TBD + + pid = getpid(); + rc = ctx_init2(p_ctx, dev, flags, devno_1); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + // create VLUN + nlba = chunk * (p_ctx->chunk_size); + p_ctx->flags=DK_UVF_ALL_PATHS; + rc = create_resource(p_ctx, nlba, p_ctx->flags, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + for ( i=0; i< long_run; i++) + { + stride=0x1000; + rc = do_io(p_ctx, stride); + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int create_lun_direct_120(char *dev, struct ctx *p_ctx, dev64_t devno_1) +{ + int rc,i=0; + + pthread_t thread; + __u64 stride= 0x100, nlba=0, flags=0; + debug("create_lun_direct_120\n"); + + rc = ctx_init2(p_ctx, dev, flags, devno_1); + CHECK_RC(rc, "Context init failed"); + // CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + rc = create_resource(p_ctx, nlba, DK_UDF_ASSIGN_PATH , 
LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + // do io on context + for ( i=0; i< long_run; i++) + { + stride=0x1000; + rc = do_io(p_ctx, stride); + } + sleep(3); + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + + +int ioctl_7_1_119_120( int flag ) +{ + + int rc=0,i,j=2; + struct ctx myctx[10]; + struct ctx *p_ctx[10]; + dev64_t devno[10]={ '0' }; + struct flash_disk fdisk[MAX_FDISK]; + //int cfdisk = MAX_FDISK; + pid = getpid(); + rc = get_flash_disks(fdisk, FDISKS_SHARED); + if ( rc == 0 ) + return 1; + + for ( i=0;i<10;i++) + { + p_ctx[i]=&myctx[i]; + } + + rc=0; + // I will use the first disk here for the path + // need to call dk_query_first to get all path + // i am assuming my disk to come from 2 adapter for now. This test can be enhanced for any number later. + // get number of path +#ifdef _AIX + strcpy(p_ctx[0]->dev,fdisk[0].dev); + j=ioctl_dk_capi_query_path_get_path(p_ctx[0],devno); +#endif + switch ( flag ) + { + case 119: + for ( i=0; i + +extern int g_error; +extern pid_t pid; +extern char cflash_path[MC_PATHLEN]; +int MAX_LIMIT=1; + +int test_traditional_IO( int flag, int disk_num ) +{ + + + int rc; + struct ctx myctx[20]; + struct ctx *p_ctx[20]; + char *disk_name,*disk_name1,temp[MC_PATHLEN], *str=NULL; + __u64 chunk = 0x10; + + + pid = getpid(); + str = (char *) malloc(100); + + pthread_t thread[20]; + __u64 nlba=0; + __u64 flags=0; + + struct flash_disk disks[MAX_FDISK]; // flash disk struct + strcpy(temp,cflash_path); + get_flash_disks(disks, FDISKS_ALL); + pid = getpid(); + disk_name = strtok(temp,"/"); + disk_name = strtok(NULL,"/"); + disk_name1 = strtok(disks[1].dev,"/"); + disk_name1 = strtok(NULL,"/"); + debug("cflash_path=%s\n", cflash_path); + int i=0,j=0; + + + switch ( flag ) + { + + // 7.1.180: Do rmdev -l hdisk# while super-pipe IO(root user) + case 180 : + // virtual LUN + // create multiple vlun and do io + i=0; + p_ctx[i]=&myctx[i]; + rc = ctx_init(p_ctx[i]); + //rc = 
ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); + nlba=p_ctx[i]->chunk_size; + rc = create_resource(p_ctx[i], nlba, flags, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + // do io on context + // We wish to do IO in a different thread... Setting up for that ! + + // try to detach the disk using rmdev command + sprintf(str, "rmdev -l %s >rmdev.log 2>&1 ", disk_name); + rc=system(str); + + if ( rc == 0 ) + { + debug(" rmdev succeeded, though super pipe io was ongoing \n"); + rc=1; + } + else + { + debug("rmdev failed as expected \n"); + rc=system("cat rmdev.log | grep \"device is busy\""); + if ( rc !=0 ) + { + debug("error message is not proper \n"); + rc=1; + } + else + { + debug(" error message was thrown properly \n"); + rc=0; + } + } + sprintf(str, "cfgmgr -l %s ", disk_name); + system(str); + // Wait for IO thread to complete + break; + + // 7.1.180 with direct LUN. This is a new scenario, and not part of testcase. 
Repeat of 180 with DIRECT_LUN + case 1801: + // Direct LUN + // create DIRECT_LUN and do io + i=0; + p_ctx[i]=&myctx[i]; + rc = ctx_init(p_ctx[i]); + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + nlba=p_ctx[i]->lun_size; + rc = create_resource(p_ctx[i], nlba, flags, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + // do io on context + // try to detach the disk using rmdev command + sprintf(str, "rmdev -l %s >rmdev.log 2>&1 ", disk_name); + debug("%s\n",str); + rc=system(str); + if ( rc == 0 ) + { + debug(" rmdev succeeded, though super pipe io was ongoing \n"); + rc=1; + } + else + { + debug("rmdev failed as expected \n"); + rc=system("cat rmdev.log | grep \"device is busy\""); + if ( rc !=0 ) + { + debug("error message is not proper \n"); + rc=1; + } + else + { + debug(" error message was thrown properly \n"); + rc=0; + } + } + sprintf(str, "cfgmgr -l %s ", disk_name); + system(str); + + break; + + // 7.1.181 + // Test lsmpio, devrsrv & iostat commands while super-pipe IO + + case 181: + // create VLUns + i=0; + p_ctx[i]=&myctx[i]; + + rc = ctx_init(p_ctx[i]); + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); + nlba=p_ctx[i]->chunk_size; + rc = create_resource(p_ctx[i], nlba, flags, LUN_DIRECT); + sleep(10); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc=0; + // now do lsmpio on flash disk + sprintf(str, "lsmpio -l %s >/tmp/lsmpio.log 2>&1", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=1; + else + { sprintf(str, "grep %s /tmp/lsmpio.log", disk_name); + rc=system(str); + + if ( rc !=0 ) + rc=2; + } + + // devrsrv + sprintf(str, "devrsrv -c query -l %s>/tmp/devrsrv.log 2>&1", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=1; + else + { sprintf(str, "grep %s /tmp/devrsrv.log", disk_name); + rc=system(str); 
+ + if ( rc !=0 ) + rc=2; + } + + // iostat + sprintf(str, "iostat %s>/tmp/iostat.log 2>&1", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=1; + else + { + sprintf(str, "grep %s /tmp/iostat.log", disk_name); + rc=system(str); + + if ( rc !=0 ) + rc=2; + } + + + break; + + // 7.1.182 + // Test VLUN creation, if flash disk inactive opened + case 182: + // create a VG on flash disk and then varyoff it + sprintf(str, "mkvg -f -y NEW_VG %s; varyoffvg NEW_VG", disk_name); + rc=system(str); + + i=0; + p_ctx[i]=&myctx[i]; + + rc = ctx_init(p_ctx[i]); + if ( rc == 0 ) + { + debug("context creation succeeded, should have failed \n"); + ctx_close(p_ctx[i]); + rc=1; + } + else rc=0; + close(p_ctx[i]->fd); + + system("varyonvg NEW_VG; lspv"); + system("varyoffvg NEW_VG; exportvg NEW_VG"); + sprintf(str, "chdev -l %s -a pv=clear", disk_name); + system(str); + + return rc; + //7.1.187 + //While super-pipe IO on a LUN, keep configuring/unconfiguring other hdisk (root user) + case 187: + // creating many VLUN + i=0; + p_ctx[i]=&myctx[i]; + + rc = ctx_init(p_ctx[i]); + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); + nlba = chunk * (p_ctx[i]->chunk_size); + rc = create_resource(p_ctx[i], nlba, flags, LUN_VIRTUAL); + + + // now initiate rmdev and cfgdev for other // need to run it for longer time + for ( i=0; i<10; i++ ) + { + for ( j=0;j<10;j++) + { + sprintf(str, "rmdev -l %s", disk_name1); + rc=system(str); + + if ( rc != 0 ) + return 1; + sleep(1); + sprintf(str, "cfgmgr -l %s", disk_name1); + rc=system(str); + + if ( rc != 0 ) + return 1; + } + } + // Wait for IO thread to complete + i=0; + break; + + + // 7.1.212 + //To call IOCTL DK_CAPI_PATH_QUERY on a adapter with disks reserve_policy set to no_reserve and one disk reserve_policy set to single_path + + case 212: + // setting disk2 reserve policy to single_path + i=0; + + 
sprintf(str, "chdev -l %s -a reserve_policy=no_reserve", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=1; + else + { + + sprintf(str, "chdev -l %s -a reserve_policy=single_path", disk_name1); + rc=system(str); + + if ( rc != 0 ) + rc=2; + else + { + // call dk_capi_query_path +#ifdef _AIX + p_ctx[i]=&myctx[i]; + + rc = ctx_init(p_ctx[i]); + rc=ioctl_dk_capi_query_path_check_flag(p_ctx[0],0,0); + if ( rc == 1 ) + return 1; + else + return 0; +#endif + // will handle for Linux later + } + } + //7.1.213 + case 213: + + p_ctx[0]=&myctx[0]; + + // disabling disk 1 + // changing reserve policy to no_reserve + sprintf(str," chdev -l %s -a reserve_policy=no_reserve", disk_name); + system(str); + sprintf(str, "chpath -s disable -l %s -i 0", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=1; + else + { + sprintf(str, "lspath -l %s | grep -i Disabled", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=2; + else + { + // call dk_capi_query_path +#ifdef _AIX + ctx_init(p_ctx[0]); + ioctl_dk_capi_detach(p_ctx[0]); + + rc=ioctl_dk_capi_query_path_check_flag(p_ctx[0],DK_CPIF_DISABLED,0); + if ( rc == 1 ) + rc=3; + else + rc=0; +#endif + // will handle for Linux later + } + } + sprintf(str, "chpath -s enable -l %s -i 0", disk_name); + system(str); + + return rc; + //7.1.214 + case 214: + // cable pull adapter + rc=system("cable_pull"); + if ( rc != 0 ) + rc=1; + else + { + sprintf(str, "lspath %s | grep -i failed", disk_name); + rc=system(str); + + if ( rc != 0 ) + rc=2; + else + { + // call dk_capi_query_path +#ifdef _AIX + rc=ioctl_dk_capi_query_path_check_flag(p_ctx[0],DK_CPIF_CLOSED,DK_CPIF_CLOSED); + if ( rc == 1 ) + rc=3; + else + rc=0; +#endif + // will handle for Linux later + } + } + return rc; + default: + debug( "please enter correct flag \n"); + rc=1; + break; + } + for (j=0;j<=i;j++ ) + { + pthread_cancel(thread[j]); + close_res(p_ctx[j]); + ctx_close(p_ctx[j]); + } + return rc; + + +} + diff --git a/src/cflash/test/174_175.c 
b/src/cflash/test/174_175.c new file mode 100644 index 00000000..932b543c --- /dev/null +++ b/src/cflash/test/174_175.c @@ -0,0 +1,116 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/174_175.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; + +int ioctl_7_1_174_175( int flag ) +{ + int rc=0,rc1=0; + struct ctx myctx; + struct ctx *p_ctx; + + struct flash_disk disks[MAX_FDISK]; + int cfdisk = MAX_FDISK; + p_ctx=&myctx; + cfdisk = get_flash_disks(disks, FDISKS_ALL); + //need to check the number of disks + if (cfdisk < 1) + { + debug("Must have 2 flash disks..\n"); + return -1; + } + + switch ( flag ) + { + case 1 : + // 7.1.174 + // start super pipe io and then try to do traditional io. 
+ // traditional i/o should fail + if ( 0 == fork()) + { + rc=create_multiple_vluns(p_ctx); + if ( rc != 0 ) + { + debug("create_multiple_vluns failed\n"); + return 1; + } + exit(rc); + } + + if ( 0 == fork()) + { + rc=traditional_io(1); + if ( rc != 0 ) + { + debug(" traditional I/O failed \n"); + rc=0; + } + else + { + debug("traditional io successful \n"); + rc=1; + } + exit(rc); + } + wait4all(); + return rc; + + // 7.1.175 + case 2 : + // start traditional I/O and then try to create multiple context. context creation should fail + if ( 0 == fork()) + { + rc=traditional_io(1); + if ( rc != 0 ) + { + debug(" traditional I/O failed \n"); + } + else + { + debug("traditional io successful \n"); + rc=0; + } + exit(rc); + } + sleep(6); + // try to create luns with multiple context. all should fail + + rc1=create_multiple_vluns(p_ctx); + if ( rc1 == 0 ) + { + debug(" creation of VLUN succeeded, should have failed"); + return 1; + } + + wait4all(); + return 0; + + } + return 0; +} diff --git a/src/cflash/test/188.c b/src/cflash/test/188.c new file mode 100644 index 00000000..901149df --- /dev/null +++ b/src/cflash/test/188.c @@ -0,0 +1,280 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/188.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; + +int do_microcode_update( ) +{ + int rc; + +#ifdef _AIX + rc=system("diag -d capi0 -T \"download -fl latest -s /etc/microcode\" -c"); +#else + sleep(5); + system("sync;sync;sync;sync"); + + rc=system("echo 10000000 > /sys/kernel/debug/powerpc/eeh_max_freezes"); + CHECK_RC(rc, "Failed to make max_freezes"); + + sleep(5); + + rc=system("lsmod | grep cxlflash && rmmod -v cxlflash"); + CHECK_RC(rc, "Failed to unload cxlflash driver"); + + rc=system("/opt/ibm/capikv/afu/flash_all_adapters"); + CHECK_RC(rc, "Failed in afu update"); + + rc=system("/opt/ibm/capikv/afu/reload_all_adapters"); + CHECK_RC(rc, "Failed to reload updated afu image"); + + rc=system("modprobe -v cxlflash"); + CHECK_RC(rc, "Failed to load cxlflash driver"); + + // enable kernel traces + system("echo \"module cxlflash +p\" > /sys/kernel/debug/dynamic_debug/control"); + + set_spio_mode(); +#endif + + return rc; +} + +#ifndef _AIX + +int do_perst() +{ + int rc; + + sleep(5); + + rc=system("echo 10000000 > /sys/kernel/debug/powerpc/eeh_max_freezes"); + CHECK_RC(rc, "Failed to make max_freezes"); + + sleep(5); + + rc=system("lsmod | grep cxlflash && rmmod -v cxlflash"); + CHECK_RC(rc, "Failed to unload cxlflash driver"); + + // reload_all_adapters will perform the PESRT + + rc=system("/opt/ibm/capikv/afu/reload_all_adapters"); + CHECK_RC(rc, "Failed to reload updated afu image"); + + rc=system("modprobe -v cxlflash"); + CHECK_RC(rc, "Failed to load cxlflash driver"); + + // enable kernel traces + system("echo \"module cxlflash +p\" > /sys/kernel/debug/dynamic_debug/control"); + + set_spio_mode(); + + return rc; +} + + +#endif + + +int ioctl_7_1_188( int flag ) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t threadId; + __u64 chunk; + __u64 nlba; + __u64 
stride; + + // pid used to create unique data patterns & logging from util ! + pid = getpid(); + + // just for sake of cleanup ! + set_spio_mode(); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + switch ( flag ) + { + case 188: +#ifndef _AIX + chunk = (p_ctx->last_phys_lba+1)/p_ctx->chunk_size; + nlba = chunk * p_ctx->chunk_size; + //create vlun + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + stride=p_ctx->block_size; + // Check that IO works + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); + + // flash disk must be released before afu update + pthread_cancel(threadId); + rc=close_res(p_ctx); + CHECK_RC(rc, "close_res() failed"); + rc=ctx_close(p_ctx); + CHECK_RC(rc, "ctx_close() failed"); + + // Perform microcode update + rc = do_microcode_update(); + CHECK_RC(rc, "do_microcode_update failed"); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + //create vlun + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + // Check if IO is back to normal after afu update + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); +#else + rc=create_multiple_vluns(p_ctx); + if ( rc != 0 ) + return 2; + rc=do_microcode_update(); +#endif + break; + + case 189: +#ifndef _AIX + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + stride=0x16; + // Just perform io write now. 
+ rc = do_write_or_read(p_ctx, stride, WRITE); + CHECK_RC(rc, "io write failed"); + + pthread_cancel(threadId); + rc=close_res(p_ctx); + CHECK_RC(rc, "resource cleanup failed"); + rc=ctx_close(p_ctx); + CHECK_RC(rc, "context cleanup failed"); + + rc = do_microcode_update(); + CHECK_RC(rc, "do_microcode_update failed"); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // Create a pLUN over same disk with new context again + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + // Now perform io read & data compare test. + rc = do_write_or_read(p_ctx, stride, READ); + CHECK_RC(rc, "io read failed"); +#else + rc=create_direct_lun(p_ctx); + if ( rc != 0 ) + return 2; + rc=do_microcode_update(); +#endif + break; + +#ifndef _AIX + case E_CAPI_LINK_DOWN: + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + /* be default IO will happen; until we export NO_IO */ + char * noIOP = getenv("NO_IO"); + + if ( noIOP == NULL ) + { + stride=0x16; + // Just perform io write now. 
+ rc = do_write_or_read(p_ctx, stride, WRITE); + CHECK_RC(rc, "io write failed"); + } + + pthread_cancel(threadId); + rc=close_res(p_ctx); + CHECK_RC(rc, "resource cleanup failed"); + rc=ctx_close(p_ctx); + CHECK_RC(rc, "context cleanup failed"); + + rc = do_perst(); + CHECK_RC(rc, "do_perst() failed"); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // Create a pLUN over same disk with new context again + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + if ( noIOP == NULL ) + { + // Now perform io read & data compare test. + rc = do_write_or_read(p_ctx, stride, READ); + CHECK_RC(rc, "io read failed"); + } + + break; + +#endif + default: + + printf("Flag not correct \n"); + CHECK_RC(1,"Usage failed"); + break; + + } + + pthread_cancel(threadId); + rc=close_res(p_ctx); + CHECK_RC(rc, "close_res() failed"); + rc=ctx_close(p_ctx); + CHECK_RC(rc, "ctx_close() failed"); + + return rc; +} diff --git a/src/cflash/test/190.c b/src/cflash/test/190.c new file mode 100644 index 00000000..f87a6f41 --- /dev/null +++ b/src/cflash/test/190.c @@ -0,0 +1,293 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/190.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include +extern int MAX_RES_HANDLE; +extern int g_error; +extern pid_t pid; + +//int MAX_LIMIT=1; + + + +void *max_ctx_res(void *arg) +{ + +#ifndef _AIX + + return 1; + +#endif + int rc; + res_hndl_t my_res_hndl[MAX_RES_HANDLE]; + struct ctx *p_ctx = (struct ctx *)arg; + int i; + int long_run = atoi(getenv("LONG_RUN")); + pthread_t pthread_id = pthread_self(); + __u64 size = 16; + __u64 actual_size; + __u64 nlba, st_lba; + __u64 stride; + mc_stat_t l_mc_stat; + + pid = getpid(); + rc = ctx_init(p_ctx); + if (rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + g_error =-1; + return NULL; + } + + //open max allowed res for a context + size = (rand()%10+1)*16; + for (i=0;ires_hndl,O_RDWR,&my_res_hndl[i]); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + debug("ctx:%d res hndl:%u\n",p_ctx->ctx_hndl,my_res_hndl[i]); + } + for (i=0;ires_hndl,my_res_hndl[i],size,&actual_size); + if (rc != 0) + { + fprintf(stderr, "thread : %lx:mc_size: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + rc = mc_stat(p_ctx->res_hndl,my_res_hndl[i], &l_mc_stat); + if (rc != 0) + { + fprintf(stderr, "thread : %lx:mc_stat: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + nlba = 0; //NO IO here + for (st_lba = 0;st_lba < nlba; st_lba += (NUM_CMDS*stride)) + { + 
rc = send_write(p_ctx, st_lba, stride, pid); + if ((rc != 0) && (actual_size == 0)) + { + printf("%d : Fine, IO @(0X%lX) but range is(0X%lX)\n",pid, st_lba, nlba-1); + size = 16; + break; + } + else + { + fprintf(stderr,"%d : Send write failed @ (0X%lX) LBA\n", pid, st_lba); + g_error = -1; + return NULL; + } + rc = send_read(p_ctx, st_lba, stride); + rc += rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + g_error = -1; + return NULL; + } + } + + debug("ctx:%d res_hand:%d size:%lu\n",p_ctx->ctx_hndl,my_res_hndl[i],actual_size); + size += 16; + } + for (i=0;ires_hndl,my_res_hndl[i]); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + } + + rc = mc_unregister(p_ctx->res_hndl); + if (rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %p\n", p_ctx->res_hndl); + g_error = -1; + return NULL; + } + debug("mc unregistered for ctx:%d\n",p_ctx->ctx_hndl); + return 0; +} +int max_ctx_n_res_190_1() +{ + int rc; + int i; + pthread_t threads[MAX_OPENS]; + struct ctx *p_ctx[MAX_OPENS]; + int MAX_CTX; + int long_run = atoi(getenv("LONG_RUN")); + + if (mc_init() !=0 ) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + + debug("mc_init success.\n"); + + rc = posix_memalign((void **)&p_ctx, 0x1000, sizeof(struct ctx)*MAX_OPENS); + if (rc != 0) + { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + //Creating threads for ctx_init with nchan value + // calculate how many ctx can be created + + MAX_CTX=MAX_OPENS/2; + + for (i = 0; i < MAX_CTX; i++) + { + rc = pthread_create(&threads[i],NULL, &max_ctx_res, (void *)p_ctx[i]); + if (rc) + { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + free(p_ctx); + return -1; + } + } + + //joining + for ( i=0; i< long_run;i++) + { + for (i = 0; i < MAX_CTX; i++) + { + pthread_join(threads[i], NULL); + } + } + for (i = 0; i < MAX_CTX; i++) + { + ctx_close(p_ctx[i]); + } + free(p_ctx); + rc = 
g_error; + g_error = 0; + return rc; +} + + +int max_ctx_n_res_190_2( ) +{ + int rc; + int i; + pthread_t threads[MAX_OPENS]; + struct ctx *p_ctx[MAX_OPENS]; + int MAX_CTX; + int long_run = atoi(getenv("LONG_RUN")); + if (mc_init() !=0 ) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + + debug("mc_init success.\n"); + + // calculate how many ctx can be created + + MAX_CTX=MAX_OPENS/2; + + + rc = posix_memalign((void **)&p_ctx, 0x1000, sizeof(struct ctx)*MAX_CTX); + if (rc != 0) + { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + for (i = 0; i < MAX_CTX; i++) + { + rc=ctx_init(p_ctx[i]); + if (rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + g_error =-1; + return NULL; + } + } + + for ( i=0;i + +extern int g_error; +extern pid_t pid; +extern int long_run; + +int ioctl_7_1_191( int flag ) +{ + int rc,j,i; + struct ctx myctx; + struct ctx *p_ctx; + char *disk_name=NULL, *str=NULL; + + pid = getpid(); + str = (char *) malloc(100); + + disk_name = (char *)getenv("FVT_DEV"); + disk_name = strtok(disk_name,"/"); + disk_name = strtok(NULL,"/"); + + + p_ctx=&myctx; + //pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + // attach and detach of same LUN + for (j=0; j + +extern int g_error; +extern pid_t pid; +extern int long_run; + + + +int ioctl_7_1_192( int flag ) +{ + int rc,i,j; + struct ctx myctx; + struct ctx *p_ctx=&myctx; + + struct flash_disk fldisks[MAX_FDISK]; +#ifdef _AIX + char *disk_name, *str; + disk_name = (char *)getenv("FVT_DEV"); + get_flash_disks(fldisks, FDISKS_ALL); + str = (char *) malloc(100); +#endif + + + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + // attach and detach of same LUN + for (j=0; j + +extern int g_error; +extern pid_t pid; + +//int MAX_LIMIT=1; + +int change_nchan( int nchan ) +{ + int rc; + char str[80]; + char *disk_name = (char *)getenv("FLASH_DISK"); + 
sprintf(str, "chdev -l %s -attr nchn=%d", disk_name,nchan); + rc=system(str); + if ( rc != 0 ) + fprintf(stderr,"changing nchn value failed"); + return rc; +} + + +int max_ctx_n_res_nchan(int nchan) +{ + int rc; + int i; + pthread_t threads[MAX_OPENS]; + struct ctx *p_ctx; + int MAX_CTX; + + if (mc_init() !=0 ) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + + debug("mc_init success.\n"); + + rc = posix_memalign((void **)&p_ctx, 0x1000, sizeof(struct ctx)*MAX_OPENS); + if (rc != 0) + { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + //Creating threads for ctx_init with nchan value + // calculate how many ctx can be created + + MAX_CTX=MAX_OPENS-nchan; + + for (i = 0; i < MAX_CTX; i++) + { + rc = pthread_create(&threads[i],NULL, &max_ctx_res, (void *)&p_ctx[i]); + if (rc) + { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + free(p_ctx); + return -1; + } + } + + //joining + for (i = 0; i < MAX_CTX; i++) + { + pthread_join(threads[i], NULL); + } + + for (i = 0; i < MAX_CTX; i++) + { + ctx_close(&p_ctx[i]); + } + free(p_ctx); + rc = g_error; + g_error = 0; + return rc; +} + + +int ioctl_7_1_193_1( int flag ) +{ + + return 1; // test case need to re-implemented + + int rc,i; + + // Create MAX context & resource handlers based on default Nchn value + // first change the nchan value + rc=change_nchan(atoi(getenv("NCHAN_VALUE"))); + CHECK_RC(rc, "changing nchan value failed"); + + // now create max context and max resource and after than detach + //rc=test_max_ctx_n_res_nchan("NCHAN_VALUE"); TBD no defination of the function + //CHECK_RC(rc, "creating max context and resource handler failed"); + + // change the nchan value again + for ( i=1; i<=atoi(getenv("MAX_NCHAN_VALUE")); i++ ) + { + rc=change_nchan(atoi(getenv("NCHAN_VALUE"))+i); + CHECK_RC(rc, "changing nchan value failed"); + // now again create max context and max resource and after than detach + 
//rc=test_max_ctx_n_res_nchan(atoi(getenv("NCHAN_VALUE"))+1); TBD no defination of the function + CHECK_RC(rc, "creating max context and resource handler failed"); + } + + return rc; + +} +// this is 5ht proicedure in 7.1.193. This is written in different case, as dont make sense with super pipe I/O +int ioctl_7_1_193_2( int flag ) +{ + struct flash_disk disks[MAX_FDISK]; + int cfdisk = MAX_FDISK,rc,i; + + cfdisk = get_flash_disks(disks, FDISKS_ALL); + // need to check the number of disks + if (cfdisk < 2) + { + fprintf(stderr,"Must have 2 flash disks..\n"); + return -1; + } + // change nchan on one of the disk, using the first disk to 1 and other as default + rc=change_nchan(1); + // initiate tradition IO on both the disk. + + for ( i=0; i < 2 ; i++ ) + { + if (0 == fork()) //child process + { + rc = traditional_io(i); + } + exit(rc); + } + return 0; +} + + + diff --git a/src/cflash/test/196.c b/src/cflash/test/196.c new file mode 100644 index 00000000..0b395149 --- /dev/null +++ b/src/cflash/test/196.c @@ -0,0 +1,252 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/196.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; +extern int long_run; + +//int MAX_LIMIT=1; + +// creating thread for creation VLUN or PLUN +void *create_lun1(void *arg ) +{ + struct ctx *p_ctx = (struct ctx *)arg; + int rc; + __u64 stride=0x8; + rc = create_resource(p_ctx, p_ctx->lun_size, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + if ( rc == 0 ) + { + rc = do_io(p_ctx, stride); + if ( rc !=0 ) + { fprintf(stderr,"io failed on attached LUN\n"); + //TBD Fix this return 2; + return NULL; + } + } + //TBD Fix this return 1; + //return 1; + return NULL; +} + +int ioctl_7_1_196() +{ + int rc,i,j; + struct ctx myctx[21],myctx_1, myctx_2; + struct ctx *p_ctx[21],*p_ctx_1,*p_ctx_2; + __u64 stride=0x1000,st_lba=0; + pthread_t thread[20]; + struct flash_disk disks[MAX_FDISK]; + int cfdisk = MAX_FDISK; + + pid = getpid(); + + cfdisk = get_flash_disks(disks, FDISKS_SAME_ADPTR); + //need to check the number of disks + if (cfdisk < 2) + { + fprintf(stderr,"Must have 2 flash disks..\n"); + return -1; + } + // creating first context + + for (i=0;i<21;i++) + { + p_ctx[i]=&myctx[i]; + } + p_ctx_1=&myctx_1; + p_ctx_2=&myctx_2; + debug("1ST PROCEDURE\n"); + // using p_ctx[[0] for LUN direct for firect disk + /* rc = ctx_init2(p_ctx[0], disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); + pthread_create(&thread[0], NULL, ctx_rrq_rx, p_ctx[0]); + */ + /* rc = create_resource(p_ctx[0], 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + */ + // creating another 19 context LUN VIRTUAL + for ( i=2;i<21;i++) + { + sleep(2); + rc = ctx_init2(p_ctx[i], disks[1].dev, DK_AF_ASSIGN_AFU, disks[1].devno[0]); + rc=create_resource(p_ctx[i], p_ctx[i]->chunk_size, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + } + + + // do context reuse for direct LUN + strcpy(p_ctx[0]->dev,disks[0].dev); + strcpy(p_ctx[1]->dev,disks[1].dev); + p_ctx[0]->fd = open_dev(disks[0].dev, O_RDWR); + if (p_ctx[0]->fd < 0) + { + fprintf(stderr, 
"open() failed: device %s, errno %d\n", disks[0].dev, errno); + g_error = -1; + return -1; + } + p_ctx[1]->fd = open_dev(disks[1].dev, O_RDWR); //Hoping to open second disk + if (p_ctx[1]->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); + g_error = -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx[0]); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#else + //TBD for linux +#endif + p_ctx[0]->work.num_interrupts = p_ctx[1]->work.num_interrupts = 4; + + + rc=ioctl_dk_capi_attach_reuse(p_ctx[0],p_ctx[1],LUN_DIRECT); + + // CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + + + if ( rc != 0 ) + { + fprintf(stderr,"LUN DIRECT got attached to new disk with VLUN, should have succeeded"); + return rc; + } + + + // initiate I/O on all the LUNs + for (i=2;i<21;i++) + { + pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); + rc = do_io(p_ctx[i], stride); + } + if ( rc != 0 ) + { fprintf(stderr,"io on some LUN failed"); + return rc; + } + + for (i=2;i<21;i++) + { + pthread_cancel(thread[i]); + close_res(p_ctx[i]); + } + + ctx_close(p_ctx[2]); + debug("2nd PROCEDURE\n"); + + // procedure 2 of the same case + debug("%d: ........Phase 1 done.. Starting 2nd Phase........\n",getpid()); + memset(p_ctx_1, 0, sizeof(struct ctx)); + + memset(p_ctx_2, 0, sizeof(struct ctx)); + // open the first flash disk in write mode and create a DIRECT LUN + p_ctx_1->fd = open_dev(disks[0].dev, O_WRONLY); + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); + return -1; + } + rc = ctx_init2(p_ctx_1, disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); + pthread_create(&thread[0], NULL, ctx_rrq_rx, p_ctx_1); + CHECK_RC(rc, "create context failed"); + + rc = create_resource(p_ctx_1, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // open the same flash disk in read mode again. 
+ p_ctx_2->fd = open_dev(disks[0].dev, O_RDONLY); + if (p_ctx_2->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); + return -1; + } + rc = ctx_init2(p_ctx_2, disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); + pthread_create(&thread[1], NULL, ctx_rrq_rx, p_ctx_2); + CHECK_RC(rc, "create context failed"); + rc = create_resource(p_ctx_2, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // now write to the disk and then read + for (st_lba = 0; st_lba <= p_ctx_1->last_lba; st_lba += (NUM_CMDS*stride)) + { + rc = send_write(p_ctx_1, st_lba, stride, pid); + CHECK_RC(rc, "send_write failed"); + rc = send_read(p_ctx_2, st_lba, stride); + CHECK_RC(rc, "send_read failed"); + if (rc !=0 ) + { + rc = rw_cmp_buf(p_ctx_1, st_lba); + if (rc != 0) + { + fprintf(stderr,"buf cmp failed for lba 0x%lX,rc =%d\n",st_lba,rc); + break; + } + } + } + if ( rc != 0 ) + return rc; + + for (i=0;i<2;i++) + { + pthread_cancel(thread[i]); + } + + close_res(p_ctx_1); + ctx_close(p_ctx_1); + close_res(p_ctx_2); + ctx_close(p_ctx_2); + + debug("3rd PROCEDURE\n"); + + + debug("%d: ........Phase 2 done.. Starting 3rd Phase........\n",getpid()); + // case 3 of the same case + // creating multiple process for LUN_DIRECT creation. 
+ for (j=0;j + +extern int g_error; +extern pid_t pid; + + +int ioctl_7_1_197( int flag ) +{ + int rc; + struct ctx myctx, myctx1, myctx2; + struct ctx *p_ctx = &myctx; + struct ctx *p_ctx1 = &myctx1, *temp=&myctx2; + pthread_t thread,thread1; + pid = getpid(); + flag=0; + // creating first context + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + // creating another context + rc = ctx_init(p_ctx1); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread1, NULL, ctx_rrq_rx, p_ctx1); + + // Creating resource for both ctx token + rc = create_resource(p_ctx, p_ctx->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + rc = create_resource(p_ctx1, p_ctx1->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + rc = create_resource(p_ctx1, p_ctx1->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + // use the resource handler of process 2 with context_token of process 1 and call all ioctls + temp->rsrc_handle=p_ctx->rsrc_handle; + p_ctx->rsrc_handle=p_ctx1->rsrc_handle; + // resize + rc = vlun_resize(p_ctx, (p_ctx->lun_size)*2); + if ( rc == 0 ) + flag=1; + // detach + // release + rc = ioctl_dk_capi_release(p_ctx); + if ( rc == 0 ) + flag=1; + //lun_create + rc = create_resource(p_ctx, p_ctx->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + if ( rc != 0 ) + flag=1; + // log report +#ifdef _AIX + rc = ioctl_dk_capi_log(p_ctx,""); + if ( rc == 0 ) + flag=1; + // dk_exception + rc=ioctl_dk_capi_query_exception(p_ctx); + if ( rc == 0 ) + flag=1; +#endif + //dk_recover + rc=ioctl_dk_capi_recover_ctx(p_ctx); + if ( rc != 0 ) + flag=1; + //using attach again +#ifdef _AIX + p_ctx->flags=DK_AF_REUSE_CTX; +#else + p_ctx->flags=DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + rc=ioctl_dk_capi_attach(p_ctx); + if ( rc 
== 0 ) + flag=1; + rc = ioctl_dk_capi_detach(p_ctx); + if ( rc != 0 ) + flag=1; + p_ctx->context_id=temp->context_id; + pthread_cancel(thread); + pthread_cancel(thread1); + close_res(p_ctx1); + ctx_close(p_ctx1); + return flag; +} + + diff --git a/src/cflash/test/198.c b/src/cflash/test/198.c new file mode 100644 index 00000000..102d25c7 --- /dev/null +++ b/src/cflash/test/198.c @@ -0,0 +1,157 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/198.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; + +// create VLUN +int create_vluns_198(char *dev, dev64_t devno, + __u16 lun_type, struct ctx *p_ctx, pthread_t thread) +{ + int rc,i,flag=0; + + __u64 nlba; + __u64 stride= 0x10000; + int long_run = atoi(getenv("LONG_RUN")); + nlba=p_ctx->lun_size; + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + if ( LUN_VIRTUAL == lun_type) + { + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + + for ( i=0; ichunk_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Context init failed"); + + if ( 0 == fork() ) + { + i=0; + p_ctx[i]=&myctx[i]; + rc = ctx_init(p_ctx[i]); + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + rc = create_resource(p_ctx[i], p_ctx[i]->chunk_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Context init failed"); + + // rc = create_vluns_198(fldisks[0].dev, fldisks[0].devno[0], + // LUN_VIRTUAL, p_ctx[i],thread[i]); + + sleep(1); + debug("%lu\n", p_ctx[1]->context_id); + // use the resource handler of process 2 with context_token of process 1 and call all ioctls + temp=p_ctx[0]->context_id; + p_ctx[0]->context_id=p_ctx[1]->context_id; + // resize + rc = vlun_resize(p_ctx[0], (p_ctx[0]->chunk_size)*2); + // CHECK_RC(rc, "vlun_resize failed"); + // detach + if ( rc == 0 ) + flag=1; + rc = ioctl_dk_capi_detach(p_ctx[0]); + // CHECK_RC(rc, "ctx_close failed"); + if ( rc == 0 ) + flag=1; + + // release + rc = ioctl_dk_capi_release(p_ctx[0]); + if ( rc == 0 ) + flag=1; + // CHECK_RC(rc, "close_res failed"); + //lun_create + rc = create_resource(p_ctx[0], p_ctx[0]->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + if ( rc == 0 ) + flag=1; + // CHECK_RC(rc, "create LUN_VIRTUAL failed"); + // log report + rc = ioctl_dk_capi_log(p_ctx[0],""); + if ( rc == 0 ) + flag=1; + // CHECK_RC(rc, "ioctl_dk_capi_log failed"); + // dk_exception + 
rc=ioctl_dk_capi_query_exception(p_ctx[0]); + if ( rc == 0 ) + flag=1; + // CHECK_RC(rc, "ioctl_dk_capi_exception failed"); + //dk_recover + rc=ioctl_dk_capi_recover_ctx(p_ctx[0]); + if ( rc == 0 ) + flag=1; + // CHECK_RC(rc, "ioctl_dk_capi_recover failed"); + //using attach againd +#ifdef _AIX + p_ctx[0]->flags=DK_AF_REUSE_CTX; +#else + p_ctx[0]->flags=DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + rc=ioctl_dk_capi_attach(p_ctx[0]); + if ( rc == 0 ) + flag=1; + //CHECK_RC(rc, "ioctl_dk_capi_attach failed"); + debug("flag=%d\n",flag); + p_ctx[0]->context_id=temp; + p_ctx[0]->context_id=temp; + close_res(p_ctx[0]); + ctx_close(p_ctx[0]); + exit(flag); + } + sleep(30); + close_res(p_ctx[1]); + ctx_close(p_ctx[1]); + return flag; +} + diff --git a/src/cflash/test/203.c b/src/cflash/test/203.c new file mode 100644 index 00000000..c9f40d3d --- /dev/null +++ b/src/cflash/test/203.c @@ -0,0 +1,426 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/193.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern char cflash_path[MC_PATHLEN]; +extern pid_t pid; +extern int MAX_RES_HANDLE; +#ifdef _AIX +int MAX_OPEN_1=494; +int MAX_NCHAN_VALUE=8; +#else +int MAX_OPEN_1=200; +#endif + +static int res_num=1,ctx_num=1; +/*int traditional_io(int disk_num) + { + int rc,i; + + char *disk_name, *str; + if ( disk_num == 1 ) + disk_name = (char *)getenv("FLASH_DISK_1"); + else + disk_name = (char *)getenv("FLASH_DISK_2"); + + sprintf(str, "dd if=/dev/hdisk0 of=/dev/%s >/tmp/read_write.log &", disk_name); + system(str); + for (i=0;i<360;i++) + { + sleep(1); + rc=system("cat /tmp/read_write.log | grep -i failed "); + if ( rc == 0 ) + return 1; + } + } + */ +int change_nchan( int nchan ) +{ + int rc; + char *str; + char *flash_name = (char *)getenv("FLASH_ADAPTER"); + str = (char *) malloc(100); + // removing all disk associalted with flash + sprintf(str, "rmdev -l %s -R", flash_name); + rc=system(str); + if ( rc != 0 ) + { + fprintf(stderr,"removing all child disk failed\n"); + return rc; + } + + sprintf(str, "chdev -l %s -a num_channel=%d", flash_name,nchan); + debug("%s\n",str); + rc=system(str); + if ( rc != 0 ) + { + fprintf(stderr,"changing nchn value failed"); + return rc; + } + sprintf(str, "cfgmgr -l %s", flash_name); + rc=system(str); + if ( rc != 0 ) + exit(rc); + return rc; +} + +int max_ctx_res() +{ + int rc; + __u64 my_res_hndl[MAX_RES_HANDLE]; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + int i; + pthread_t thread; + + pid = getpid(); + + MAX_RES_HANDLE=get_max_res_hndl_by_capacity(cflash_path); + + debug("**********************creatin CTX NO %d******************************\n",ctx_num++); + sleep(1); + rc = ctx_init(p_ctx); + CHECK_RC(rc,"ctx_init failed\n"); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + debug("pid=%d ctx:%d is attached..sahitya\n",pid,p_ctx->ctx_hndl); + //open max allowed res for a context + for (i=0;ictx_hndl,rc); + g_error = -1; + 
pthread_cancel(thread); + return errno; + } + my_res_hndl[i]=p_ctx->rsrc_handle; + debug("ctx:%d res hndl:%lu\n", p_ctx->ctx_hndl,my_res_hndl[i]); + debug("********res_num=%d\n", res_num++); + } + + for (i=0;irsrc_handle=my_res_hndl[i]; + debug("ctx:%d res hndl:%lu\n",p_ctx->ctx_hndl,my_res_hndl[i]); + rc = vlun_resize(p_ctx,p_ctx->chunk_size); + if (rc != 0) + { + fprintf(stderr, "%d:resize: failed,rc %d\n", pid,rc); + g_error = -1; + pthread_cancel(thread); + return errno; + } + //NO IO here + //rc=do_io(p_ctx,0x100); + //CHECK_RC(rc, "do_io failed\n"); + } + + for (i=0;irsrc_handle=my_res_hndl[i]; + debug("ctx:%d res hndl:%lu\n",p_ctx->ctx_hndl,my_res_hndl[i]); + rc = close_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + pthread_cancel(thread); + return errno; + } + } + + sleep(3); + pthread_cancel(thread); + rc = ctx_close(p_ctx); + if (rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %x\n", p_ctx->ctx_hndl); + g_error = -1; + return errno; + } + debug("mc unregistered for ctx:%d\n",p_ctx->ctx_hndl); + + return 0; +} + + +int max_ctx_n_res_nchan(int nchan) +{ + int rc; + int i; + int MAX_CTX; + + //Creating threads for ctx_init with nchan value + // calculate how many ctx can be created + + MAX_CTX=(MAX_OPEN_1-(8 - nchan)); + debug("*****MAX_CTX=%d\n", MAX_CTX); + sleep(3); + for (i = 0; i < MAX_CTX; i++) + { + if ( 0 == fork() ) + { + rc = max_ctx_res(); + exit(rc); + } + } + rc=wait4all(); + return rc; +} + +int ioctl_7_1_193_1( int flag ) +{ + + int rc; +#ifdef _AIX + int i; +#endif + + // Create MAX context & resource handlers based on default Nchn value + // first change the nchan value + if ( flag != 1 ) + { + rc=change_nchan(1); + CHECK_RC(rc, "changing nchan value failed"); + } + // now create max context and max resource and after than detach + rc=max_ctx_n_res_nchan(1); + CHECK_RC(rc, "creating max context and resource handler failed"); + // change the nchan 
value again +#ifdef _AIX + if ( flag != 1 ) + { + for (i=2;ictx_hndl); + //open max allowed res for a context + for (i=0;ictx_hndl,rc); + g_error = -1; + return errno; + } + my_res_hndl[i]=p_ctx->rsrc_handle; + debug("ctx:%d res hndl:%lu\n", p_ctx->ctx_hndl,my_res_hndl[i]); + debug("********res_num=%d\n", res_num++); + } + + + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + nlba = 0; //NO IO here + for (st_lba = 0;st_lba < nlba; st_lba += (NUM_CMDS*stride)) + { + rc = send_write(p_ctx, st_lba, stride, pid); + if ((rc != 0) && (actual_size == 0)) + { + printf("%d : Fine, IO @(0X%lX) but range is(0X%lX)\n",pid, st_lba, nlba-1); + break; + } + else + { + fprintf(stderr,"%d : Send write failed @ (0X%lX) LBA\n", pid, st_lba); + g_error = -1; + return errno; + } + rc = send_read(p_ctx, st_lba, stride); + rc += rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + g_error = -1; + return errno; + } + } + + debug("ctx:%d res_hand:%lu size:%lu\n",p_ctx->ctx_hndl,my_res_hndl[i],p_ctx->lun_size); + //size += 16; + + + /* for (i=0;irsrc_handle=my_res_hndl[i]; + debug("ctx:%d res hndl:%u\n",p_ctx->ctx_hndl,my_res_hndl[i]); + rc = close_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return errno; + } + } + */ + sleep(3); + /* rc = ctx_close(p_ctx); + if (rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %p\n", p_ctx->ctx_hndl); + g_error = -1; + return errno; + } + debug("mc unregistered for ctx:%d\n",p_ctx->ctx_hndl); + */ + return 0; +} + + + +int max_ctx_n_res_190() +{ + int rc; + int i; + int MAX_CTX; + + MAX_CTX=MAX_OPEN_1; + debug("*****MAX_CTX=%d\n", MAX_CTX); + sleep(3); + for (i = 0; i < MAX_CTX; i++) + { + if ( 0 == fork() ) + { + rc = max_ctx_res(); + exit(rc); + } + } + rc=wait4all(); + return rc; +} + + +int ioctl_7_1_190( ) +{ + + // Create MAX context & resource handlers based on default Nchn value + // first change the nchan value + // 
rc=change_nchan(atoi(getenv("NCHAN_VALUE"))); + // CHECK_RC(rc, "changing nchan value failed"); + + // now create max context and max resource and after than detach + + // change the nchan value again + debug("E_test_SPIO_RLS_DET\n"); + return max_ctx_n_res_190(); +} + + +int ioctl_7_1_203() +{ + + int rc=0; + rc = setenv("NCHAN_VALUE", "0", true); + CHECK_RC(rc, "NCHAN_VALUE env value setting failed \n"); + MAX_RES_HANDLE=get_max_res_hndl_by_capacity(cflash_path); + if (MAX_RES_HANDLE <= 0) + { + fprintf(stderr,"Unable to run ioctl_7_1_203.. refere prior error..\n"); + return -1; + } + rc=ioctl_7_1_193_1(1); + CHECK_RC(rc,"ioctl_7_1_193_1 1st call failed..\n"); + rc=ioctl_7_1_190(); + CHECK_RC(rc,"ioctl_7_1_190 1st call failed..\n"); + rc=ioctl_7_1_190(); + CHECK_RC(rc,"ioctl_7_1_190 2nd call failed..\n"); + rc=ioctl_7_1_190(); + CHECK_RC(rc,"ioctl_7_1_190 3rd call failed..\n"); + rc=ioctl_7_1_193_1(1); + CHECK_RC(rc,"ioctl_7_1_193_1 last called failed\n"); + return 0; +} diff --git a/src/cflash/test/209.c b/src/cflash/test/209.c new file mode 100644 index 00000000..743f00f6 --- /dev/null +++ b/src/cflash/test/209.c @@ -0,0 +1,90 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/209.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; + + +int ioctl_7_1_209( int flag ) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx; + char *disk_name, *str=NULL; + struct flash_disk disks[MAX_FDISK]; // flash disk struct + + + pid = getpid(); + str = (char *) malloc(100); + p_ctx=&myctx; + get_flash_disks(disks, FDISKS_ALL); + pid = getpid(); + + disk_name = strtok(disks[0].dev,"/"); + disk_name = strtok(NULL,"/"); + + // create a VG on flash disk and then varyoff it + sprintf(str, "mkvg -f -y NEW_VG %s", disk_name); + rc=system(str); + + if ( rc !=0 ) + return 1; + + rc=system("lspv | grep NEW_VG | grep active"); + /*if ( rc !=0 ) + return 2;*/ + // varyoff vg + rc|=system("varyoffvg NEW_VG; exportvg NEW_VG"); + /*if ( rc !=0 ) + return 3;*/ + // now try creating super pipes + sprintf(str, "chdev -l %s -a pv=clear", disk_name); + rc|=system(str); + if ( rc !=0 ) return 3; + rc=create_multiple_vluns(p_ctx); + if ( rc != 0 ) + flag=1; + rc=create_direct_lun(p_ctx); + if ( rc != 0 ) + flag=2; + if ( flag == 1 ) + { + printf( "some error with LUN_VIRTUAL \n"); + return 1; + } + if ( flag == 2 ) + { + printf("some error with LUN_DIRECT \n"); + return 1; + } + + printf("all case succeeded \n"); + return 0; +} + + diff --git a/src/cflash/test/210.c b/src/cflash/test/210.c new file mode 100644 index 00000000..ffd6c981 --- /dev/null +++ b/src/cflash/test/210.c @@ -0,0 +1,141 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/210.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern int g_errno; +extern pid_t pid; + +int FREE_SIZE=1; +int DISK_SIZE=1; + + +int ioctl_7_1_210( int flag ) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + struct flash_disk disks[MAX_FDISK]; + pthread_t thread; + __u64 nlba=0,stride=0x10; + get_flash_disks(disks, FDISKS_ALL); + memset(p_ctx, 0, sizeof(struct ctx)); + // open capi flash disk in read mode first + strcpy(p_ctx->dev,disks[0].dev); + pid=getpid(); + debug("%s\n",p_ctx->dev); + p_ctx->fd = open_dev(p_ctx->dev, O_RDONLY); + + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", p_ctx->dev, errno); + g_error = -1; + return -1; + } + +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query path failed") +#endif +#ifdef _AIX + rc = ctx_init_internal(p_ctx, DK_AF_ASSIGN_AFU, p_ctx->devno); +#else + rc = ctx_init_internal(p_ctx, 0x0, p_ctx->devno); + // Linux wouldn't support context creation + // until disk is opened in O_RDWR mode, Refer def# SW325500 + if (rc == -1 && g_errno == 1) rc=0; + else rc=-1; + g_errno=0; + return rc; +#endif + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + nlba=p_ctx->block_size; + 
rc = create_resource(p_ctx, nlba, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + + // now try to write on the disk, should fail. + rc = send_write(p_ctx, p_ctx->st_lba, stride, pid); + //CHECK_RC(rc, "send_write failed"); + + if ( rc == 0 ) + { + debug("write operation succeeded in read mode\n"); + // return 1; + } + // continue with closing the disk and opening in write mode + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + // now open the disk in write mode and try to read. + p_ctx->fd = open_dev(disks[0].dev, O_WRONLY); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); + g_error = -1; + return -1; + } + +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query path failed") +#endif /*_AIX */ + +#ifdef _AIX + rc = ctx_init_internal(p_ctx, DK_AF_ASSIGN_AFU, p_ctx->devno); +#else + rc = ctx_init_internal(p_ctx, 0x1, p_ctx->devno); +#endif + //rc = ctx_init2(p_ctx, flash_dev, flags, path_id); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + nlba=p_ctx->block_size; + rc = create_resource(p_ctx, nlba, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + debug("E_test_RD_PRM_WRITE\n"); + // write to disk, which need to be read.. + rc = send_write(p_ctx, p_ctx->st_lba, stride, pid); + CHECK_RC(rc, "send_write failed"); + // send read command. 
+ // system("/dtest_final r cflash0 -S '-port_num 0x1 -lunid 0x500507605e839c53' -a 1 -f 1 -b"); + rc = send_read(p_ctx, p_ctx->st_lba, stride); + // CHECK_RC(rc, "send_read failed"); + if ( rc == 0 ) + { + debug("read operation succeeded in write only mode\n"); + return 1; + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return 0; +} diff --git a/src/cflash/test/211.c b/src/cflash/test/211.c new file mode 100644 index 00000000..ee0b71f1 --- /dev/null +++ b/src/cflash/test/211.c @@ -0,0 +1,77 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/211.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + + +extern int g_error; +extern pid_t pid; + +struct ctx *p_ctx,*p_ctx1; +struct ctx myctx, myctx1; + +// creating thread for creation VLUN or PLUN +void *create_lun(void *arg) +{ + int *ptr = (int *)arg; + int flag = *ptr; + // int rc; + if ( flag == LUN_DIRECT ) + create_resource(p_ctx, p_ctx->lun_size, DK_UDF_ASSIGN_PATH, flag); + else + create_resource(p_ctx, p_ctx->lun_size, DK_UVF_ALL_PATHS, flag); + // return &rc; + return NULL; +} + + +int ioctl_7_1_211( int flag ) +{ + int rc,i; + struct ctx myctx; + struct ctx *p_ctx =&myctx; + for (i=0;i<2;i++) + { + if (0 == fork()) + { + //child process + pid =getpid(); + rc = ctx_init(p_ctx); + CHECK_RC_EXIT(rc, "ctx_init failed\n"); + usleep(1000); + if ( i == 0 ) + rc = create_resource(p_ctx, p_ctx->lun_size, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + else + rc = create_resource(p_ctx, p_ctx->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + sleep(1); + close_res(p_ctx); + ctx_close(p_ctx); + exit(rc); + } + } + rc = wait4all(); + return rc; +} diff --git a/src/cflash/test/215.c b/src/cflash/test/215.c new file mode 100644 index 00000000..c3482858 --- /dev/null +++ b/src/cflash/test/215.c @@ -0,0 +1,133 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/215.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + + +extern int g_error; +extern pid_t pid; +extern int long_run; +// to create multiple context + +// create VLUN +int create_vluns_215(char *dev, dev64_t devno, + __u16 lun_type, __u64 chunk, struct ctx *p_ctx) +{ + int rc,i,flag=0; + + __u64 nlba; + __u64 stride= 0x10000; + nlba=p_ctx->lun_size; + if ( LUN_DIRECT == lun_type) + { + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + for ( i=0; idevno); + // initializing a dummy context for second disk, to use it as temporary, context + rc = ctx_init2(p_ctx, disks[1].dev, flags, p_ctx->devno); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + // now using REUSE flag to extend the created context to another disk. +#ifdef _AIX + p_ctx->flags=DK_AF_REUSE_CTX; +#else + p_ctx->flags=DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + + temp->fd=p_ctx->fd; + p_ctx->fd=p_ctx1->fd; // initiating fd to second disk. + rc=ioctl_dk_capi_attach(p_ctx); // doing detach + CHECK_RC(rc, "Context with REUSE failed"); + + // if reuse succeeded. 
detach the context + p_ctx->flags=0; + p_ctx->fd=temp->fd; // re initiating fd to first disk + rc=ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc, "Context detach after REUSE failed"); + + /* spawn the VLUNs */ + p_ctx->fd=p_ctx1->fd; // reinitiating fd to second disk + rc=create_multiple_vluns_215(p_ctx); + pthread_cancel(thread); + close_res(p_ctx1); + ctx_close(p_ctx1); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; + +} + + diff --git a/src/cflash/test/216.c b/src/cflash/test/216.c new file mode 100644 index 00000000..fdbc2302 --- /dev/null +++ b/src/cflash/test/216.c @@ -0,0 +1,153 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/216.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + + +extern int g_error; +extern pid_t pid; +extern int long_run; +// to create multiple context + +// create VLUN +int create_vluns_216(char *dev, + __u16 lun_type, __u64 chunk, struct ctx *p_ctx) +{ + int rc,i,flag=0; + + __u64 nlba; + __u64 stride= 0x10000; + nlba=p_ctx->lun_size; + if ( LUN_DIRECT == lun_type) + { + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + for ( i=0; idevno); + // initializing a dummy context for second disk, to use it as temporary, context + rc = ctx_init2(p_ctx, disks[1].dev, flags, p_ctx->devno); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + temp->fd=p_ctx->fd; + temp1->fd=p_ctx1->fd; + + // need to do following in a loop... + for (i=1;iflags=DK_AF_REUSE_CTX; +#else + p_ctx->flags=DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + // initiating fd to second disk. + if ( i/2 == 0 ) + p_ctx->fd=temp->fd; // for every even iteration, use fd of first disk + else + p_ctx->fd=temp1->fd; // for every odd iteration, use fd of second disk + + rc=ioctl_dk_capi_attach(p_ctx); // doing detach + CHECK_RC(rc, "Context with REUSE failed"); + + // if reuse succeeded. 
detach the context for other disk, reverse the fd for detaching + p_ctx->flags=0; + if ( i/2 == 0 ) + p_ctx->fd=temp1->fd; + else + p_ctx->fd=temp->fd; + + + rc=ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc, "Context detach after REUSE failed"); + + /* spawn the VLUNs */ + // now re initiating again + if ( i/2 == 0 ) + p_ctx->fd=temp->fd; // for every even iteration, use fd of first disk + else + p_ctx->fd=temp1->fd; // for every odd iteration, use fd of second disk + + rc=create_multiple_vluns_216(p_ctx); + close_res(p_ctx); + } + + pthread_cancel(thread); + ctx_close(p_ctx1); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; + +} + + diff --git a/src/cflash/test/asmrw.h b/src/cflash/test/asmrw.h new file mode 100644 index 00000000..9ec4a00b --- /dev/null +++ b/src/cflash/test/asmrw.h @@ -0,0 +1,104 @@ +#ifndef _ASMRW_H +#define _ASMRW_H + +#ifdef _AIX +#include +#else +#include +#endif + +typedef struct mc_stat_s { + __u32 blk_len; /* length of 1 block in bytes as reported by device */ + __u8 nmask; /* chunk_size = (1 << nmask) in device blocks */ + __u8 rsvd[3]; + __u64 size; /* current size of the res_hndl in chunks */ + __u64 flags; /* permission flags */ +} mc_stat_t; + +/* The write_nn or read_nn routines can be used to do byte reversed MMIO + or byte reversed SCSI CDB/data. 
+*/ +static inline void write_64(volatile __u64 *addr, __u64 val) +{ + __u64 zero = 0; +#ifndef _AIX + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else +#ifndef __64BIT__ + *((volatile __u32 *)(addr)) = (val & 0xffffffff); +#else + *((volatile __u64 *)(addr)) = val; +#endif +#endif /* _AIX */ +} + +static inline void write_32(volatile __u32 *addr, __u32 val) +{ + __u32 zero = 0; +#ifndef _AIX + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u32 *)(addr)) = val; +#endif /* _AIX */ +} + +static inline void write_16(volatile __u16 *addr, __u16 val) +{ + __u16 zero = 0; +#ifndef _AIX + asm volatile ( "sthbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u16 *)(addr)) = val; +#endif /* _AIX */ +} + +static inline __u64 read_64(volatile __u64 *addr) +{ + __u64 val; + __u64 zero = 0; +#ifndef _AIX + asm volatile ( "ldbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else +#ifndef __64BIT__ + val = *((volatile __u32 *)(addr)); +#else + val = *((volatile __u64 *)(addr)); +#endif +#endif /* _AIX */ + + return val; +} +static inline __u32 read_32(volatile __u32 *addr) +{ + __u32 val; + __u32 zero = 0; +#ifndef _AIX + asm volatile ( "lwbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u32 *)(addr)); +#endif /* _AIX */ + return val; +} + +static inline __u16 read_16(volatile __u16 *addr) +{ + __u16 val; + __u16 zero = 0; +#ifndef _AIX + asm volatile ( "lhbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u16 *)(addr)); +#endif /* _AIX */ + return val; +} +static inline void write_lba(__u64* addr, __u64 lba) +{ + #ifndef __64BIT__ + __u32 *p_u32 = (__u32*)addr; + write_32(p_u32,lba >>32); + write_32(p_u32+1, lba & 0xffffffff); +#else + write_64(addr, lba); +#endif +} +#endif /*_ASMRW_H*/ diff --git a/src/cflash/test/cflash_test.h b/src/cflash/test/cflash_test.h new file mode 100755 index 
00000000..75956e0e --- /dev/null +++ b/src/cflash/test/cflash_test.h @@ -0,0 +1,829 @@ + /* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __CFLASH_TEST_H__ +#define __CFLASH_TEST_H__ + +// If MANUAL flag enabled then semi automated test also get build +// For jenkins run, we won't build manual tests by default ! 
+//#define MANUAL + +#include +#include +#ifdef _AIX +#include +#include +#define DK_QEF_ALL_RESOURCE 0x0 +#else +#include +typedef __u64 dev64_t; //no use in Linux, its dummy +//wil be removed once defined in Linux scope +//it will be dummy for Linux, these are useful in AIX +#define DK_UDF_ASSIGN_PATH 0x0000000000000001LL +#define DK_UVF_ASSIGN_PATH 0x0000000000000001LL +#define DK_UVF_ALL_PATHS 0x0000000000000002LL +#define DK_CAPI_REATTACHED 0x0000000000000001LL +#define DK_AF_ASSIGN_AFU 0x0000000000000002LL + +// dummy definition to compile cflash_test_scen.c on linux +// TBD: cleanup once cflash_ioctl.h is updated +#define DK_VF_LUN_RESET 0x50 +#define DK_RF_IOCTL_FAILED 0x70 +#define DK_RF_REATTACHED 0x0 +#define DK_VF_HC_TUR 0x0 +#define DK_VF_HC_INQ 0x0 + +#define DK_RF_LUN_NOT_FOUND 0x1 +#define DK_RF_ATTACH_NOT_FOUND 0x2 +#define DK_RF_DUPLICATE 0x3 +#define DK_RF_CHUNK_ALLOC 0x4 +#define DK_RF_PATH_ASSIGNED 0x5 + +#endif /*_AIX */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MC_PATHLEN 64 + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#define CFLASH_ADAP_POLL_INDX 0 +#define CFLASH_DISK_POLL_INDX 1 + +// below flags are going to be used in do_write_or_read() +// depending on the user need + +#define WRITE 1 +#define READ 2 + +#define RES_HNDLR_MASK 0xffffffff +#define CTX_HNDLR_MASK 0xffffffff + +#define FDISKS_ALL 1 +#define FDISKS_SAME_ADPTR 2 +#define FDISKS_DIFF_ADPTR 3 +#define FDISKS_SHARED 4 + +//IOCTL +#define MAX_PATH 8 //check later & change based on that +#define SCSI_VERSION_0 0 //Check where is defined in scsi headers +#define NMASK 0x10 //(1<= 1) printf + +#define debug_2\ + if(DEBUG ==2) printf + +// all required for EEH automation + +#define MAXBUFF 400 +#define MAXNP 10 +#define KEYLEN 5 + +typedef struct eehCmd +{ + char cmdToRun[MAXBUFF]; + int ieehLoop; + int eehSync; 
+ pthread_mutex_t eeh_mutex; + pthread_cond_t eeh_cv; + +} eehCmd_t; + +int diskToPCIslotConv( char *, char * ); +int prepEEHcmd(char *, char * ); +void * do_trigger_eeh_cmd( void * ); + +//Context structure. + +struct ctx +{ + /* Stuff requiring alignment go first. */ + + /* Command & data for AFU commands issued by test. */ + char rbuf[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + char wbuf[NUM_CMDS][0x1000]; // 4K write data buffer (page aligned) + __u64 rrq_entry[NUM_RRQ_ENTRY]; // 128B RRQ (page aligned) + + struct afu_cmd + { + volatile sisl_ioarcb_t rcb; // IOARCB (cache line aligned) + volatile sisl_ioasa_t sa; // IOASA follows RCB + pthread_mutex_t mutex; + pthread_cond_t cv; + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t) + + sizeof(pthread_mutex_t) + + sizeof(pthread_cond_t)) & CL_SIZE_MASK)]; + } cmd[NUM_CMDS]; + + // AFU interface + int fd; + char dev[MC_PATHLEN]; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; + volatile struct sisl_host_map *p_host_map; + __u64 mmio_size; + + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + + + //for IOARCB request + res_hndl_t res_hndl; + ctx_hndl_t ctx_hndl; + + //for IOCTL + uint16_t path_id; + dev64_t devno; + uint64_t flags; + uint64_t return_flags; + uint64_t block_size; /*a block size in bytes*/ + uint64_t chunk_size; /*size in blocks of one chunk */ + uint64_t last_lba; /* last lba of dev or vlun */ + uint64_t verify_last_lba; /* last lba of dev or vlun returned by verify ioctl */ + uint64_t last_phys_lba; /* last lba of physical disk */ + uint64_t lun_size; /* requested size in blocks */ + uint64_t req_size; /* New requested size, blocks */ + uint64_t hint; /* Reasons for verify DK_HINT_* */ + uint64_t reason; /* Reason code for error */ + uint16_t version; + uint64_t new_ctx_token; /* returned new_ctx_token by recover ioctl */ + uint64_t context_id; + uint64_t rsrc_handle; + uint32_t adap_fd; + uint32_t 
new_adap_fd; + uint32_t path_id_mask; + uint64_t exceptions; + char sense_data[512]; + int dummy_sense_flag; + uint64_t adap_except_type; + uint64_t adap_except_time; + uint64_t adap_except_data; + uint64_t adap_except_count; + char verify_sense_data[SISL_SENSE_DATA_LEN]; + + __u64 st_lba; + uint8_t return_path_count; + __u64 max_xfer; + + //internal use + __u32 blk_len; /*blocks requested to RW */ + __u64 unused_lba; /*hold avaibale disk blocks */ +} __attribute__((aligned(0x1000))); + +typedef struct ctx * ctx_p; + +typedef +enum +{ + CASE_PLUN = 1, + CASE_VLUN, +}validate_en_t; + +struct validatePckt +{ + // we will continue to add member required to validate + uint64_t expt_return_flags; + uint64_t expt_last_lba; + validate_en_t obCase; + struct ctx *ctxPtr; + +}; + +struct ctx_alloc +{ + struct ctx ctx; + __u8 page_pad[0x1000 - (sizeof(struct ctx) & 0xFFF)]; +}; + +struct pthread_alloc +{ + pthread_t rrq_thread; +}; + +struct rwbuf +{ + char wbuf[NUM_CMDS][0x2000]; //8K for EA alignment + char rbuf[NUM_CMDS][0x2000]; //8K for EA alignment +}__attribute__((aligned(0x1000))); + +struct rwshmbuf +{ + char wbuf[1][0x1000]; + char rbuf[1][0x1000]; +}__attribute__((aligned(0x1000))); + + +//struct for large transfer size +struct rwlargebuf +{ + char *wbuf[NUM_CMDS]; + char *rbuf[NUM_CMDS]; +}; + +struct flash_disk +{ + char dev[MC_PATHLEN]; + __u16 path_count; + dev64_t devno[MAX_PATH]; + uint16_t path_id[MAX_PATH]; + uint32_t path_id_mask[MAX_PATH]; + +}; + + +typedef +enum +{ + TEST_SPIO_VLUN = 1, + TEST_SPIO_0_VLUN, + TEST_SPIO_A_PLUN, + TEST_SPIO_ALL_PLUN, + TEST_SPIO_ALL_VLUN, + TEST_SPIO_VLUN_PLUN, + TEST_SPIO_NORES_AFURC, + TEST_MC_SIZE_REGRESS, + TEST_MC_REGRESS_CTX_CRT_DSTR, + TEST_MC_REGRESS_CTX_CRT_DSTR_IO, + TEST_MC_REGRESS_RESOURCE, + TEST_MC_TWO_CTX_RD_WRTHRD, + TEST_MC_TWO_CTX_RDWR_SIZE, + TEST_MC_ONE_CTX_TWO_THRD, + TEST_MC_ONE_CTX_RD_WRSIZE, + TEST_MC_MAX_RES_HNDL, + TEST_ONE_UNIT_SIZE, + TEST_MAX_CTX_RES_UNIT, + TEST_MAX_CTX_RES_LUN_CAP, + 
TEST_MC_MAX_SIZE, + TEST_MC_SPIO_VLUN_ATCH_DTCH, + TEST_MC_SPIO_PLUN_ATCH_DTCH, + MC_TEST_RWBUFF_GLOBAL, + MC_TEST_RWBUFF_HEAP, + MC_TEST_RWBUFF_SHM, + MC_TEST_RW_SIZE_PARALLEL, + MC_TEST_GOOD_ERR_AFU_DEV, + TEST_MC_RW_CLS_RSH, + TEST_MC_RW_CLOSE_CTX, + TEST_MC_RW_CLOSE_DISK_FD, + TEST_MC_RW_UNMAP_MMIO, + TEST_MC_IOARCB_EA_ALGNMNT_16, + TEST_MC_IOARCB_EA_ALGNMNT_128, + TEST_MC_IOARCB_EA_INVLD_ALGNMNT, + TEST_LARGE_TRANSFER_IO, + TEST_LARGE_TRNSFR_BOUNDARY, + MAX_CTX_RCVR_EXCEPT_LAST_ONE, + MAX_CTX_RCVR_LAST_ONE_NO_RCVR, + /*** DK_CAPI_QUERY_PATH ****/ + TEST_DCQP_VALID_PATH_COUNT, + TEST_IOCTL_INVALID_VERSIONS, + TEST_DCQP_INVALID_PATH_COUNT, + TEST_DCQP_DUAL_PATH_COUNT, + TEST_DCQP_DK_CPIF_RESERVED, + TEST_DCQP_DK_CPIF_FAILED, + /**** DK_CAPI_ATTACH ***/ + TEST_DCA_OTHER_DEVNO, + TEST_DCA_INVALID_DEVNO, + TEST_DCA_INVALID_INTRPT_NUM, + TEST_DCA_VALID_VALUES, + TEST_DCA_INVALID_FLAGS, + TEST_DCA_CALL_TWICE, + TEST_DCA_CALL_DIFF_DEVNO_MULTIPLE, + TEST_DCA_REUSE_CTX_FLAG, + TEST_DCA_REUSE_CTX_FLAG_NEW_PLUN_DISK, + TEST_DCA_REUSE_CTX_NEW_VLUN_DISK, + TEST_DCA_REUSE_CTX_ALL_CAPI_DISK, + TEST_DCA_REUSE_CTX_OF_DETACH_CTX, + TEST_DCA_REUSE_CTX_OF_RELASED_IOCTL, + TEST_DCA_REUSE_CTX_NEW_DISK_AFTER_EEH, + /*** DK_CAPI_RECOVER_CTX ***/ + TEST_DCRC_NO_EEH, + TEST_DCRC_DETACHED_CTX, + TEST_DCRC_EEH_VLUN, + TEST_DCRC_EEH_PLUN_MULTI_VLUN, + TEST_DCRC_EEH_VLUN_RESUSE_CTX, + TEST_DCRC_EEH_PLUN_RESUSE_CTX, + TEST_DCRC_EEH_VLUN_RESIZE, + TEST_DCRC_EEH_VLUN_RELEASE, + TEST_DCRC_INVALID_DEVNO, + TEST_DCRC_INVALID_FLAG, + TEST_DCRC_INVALID_REASON, + TEST_DCRC_IO_EEH_VLUN, + TEST_DCRC_IO_EEH_PLUN, + /*** DK_CAPI_USER_DIRECT ***/ + TEST_DCUD_INVALID_DEVNO_VALID_CTX, + TEST_DCUD_INVALID_CTX_VALID_DEVNO, + TEST_DCUD_INVALID_CTX_INVALID_DEVNO, + TEST_DCUD_VALID_CTX_VALID_DEVNO, + TEST_DCUD_PATH_ID_MASK_VALUES, + TEST_DCUD_FLAGS, + TEST_DCUD_TWICE_SAME_CTX_DEVNO, + TEST_DCUD_VLUN_ALREADY_CREATED_SAME_DISK, + TEST_DCUD_PLUN_ALREADY_CREATED_SAME_DISK, + 
TEST_DCUD_VLUN_CREATED_DESTROYED_SAME_DISK, + TEST_DCUD_IN_LOOP, + TEST_DCUD_BAD_PATH_ID_MASK_VALUES, + /*** DK_CAPI_USER_VIRTUAL ***/ + TEST_DCUV_INVALID_DEVNO_VALID_CTX, + TEST_DCUV_INVALID_CTX_INVALID_DEVNO, + TEST_DCUV_VALID_DEVNO_INVALID_CTX, + TEST_DCUV_INVALID_FLAG_VALUES, + TEST_DCUV_INVALID_VLUN_SIZE, + TEST_DCUV_LUN_VLUN_SIZE_ZERO, + TEST_DCUV_PLUN_ALREADY_CREATED_SAME_DISK, + TEST_DCUV_VLUN_ALREADY_CREATED_SAME_DISK, + TEST_DCUV_NO_FURTHER_VLUN_CAPACITY, + TEST_DCUV_MTPLE_VLUNS_SAME_CAPACITY_SAME_DISK, + TEST_DCUV_TWICE_SAME_CTX_DEVNO, + TEST_DCUV_VLUN_MAX, + TEST_DCUV_VLUN_SIZE_MORE_THAN_DISK_SIZE, + TEST_DCUV_PLUN_CREATED_DESTROYED_SAME_DISK, + TEST_DCUV_WITH_CTX_OF_PLUN, + TEST_DCUD_WITH_CTX_OF_VLUN, + TEST_DCUV_PATH_ID_MASK_VALUES, + TEST_DCUV_INVALID_PATH_ID_MASK_VALUES, + TEST_DCUV_IN_LOOP, + /*** DK_CAPI_VLUN_RESIZE ***/ + TEST_DCVR_INVALID_DEVNO, + TEST_DCVR_INVALID_CTX_DEVNO, + TEST_DCVR_INVALID_CTX, + TEST_DCVR_NO_VLUN, + TEST_DCVR_ON_PLUN, + TEST_DCVR_GT_DISK_SIZE, + TEST_DCVR_NOT_FCT_256MB, + TEST_DCVR_EQ_CT_VLUN_SIZE, + TEST_DCVR_LT_CT_VLUN_SIZE, + TEST_DCVR_GT_CT_VLUN_SIZE, + TEST_DCVR_EQ_DISK_SIZE_NONE_VLUN, + TEST_DCVR_EQ_DISK_SIZE_OTHER_VLUN, + TEST_DCVR_INC_256MB, + TEST_DCVR_DEC_256MB, + TEST_DCVR_GT_CT_VLUN_LT_256MB, + TEST_DCVR_LT_CT_VLUN_LT_256MB, + TEST_DCVR_INC_DEC_LOOP, + G_MC_test_DCVR_ZERO_Vlun_size, + /*** DK_CAPI_RELEASE ***/ + TEST_DCR_INVALID_DEVNO, + TEST_DCR_INVALID_DEVNO_CTX, + TEST_DCR_INVALID_CTX, + TEST_DCR_NO_VLUN, + TEST_DCR_PLUN_AGIAN, + TEST_DCR_VLUN_AGIAN, + TEST_DCR_MULTP_VLUN, + TEST_DCR_VLUN_INV_REL, + /*** DK_CAPI_DETACH ***/ + TEST_DCD_INVALID_CTX_DEVNO, + TEST_DCD_INVALID_DEVNO, + TEST_DCD_INVALID_CTX, + TEST_DCD_TWICE_ON_PLUN, + TEST_DCD_TWICE_ON_VLUN, + /*** DK_CAPI_VERIFY ***/ + TEST_DCV_INVALID_DEVNO, + TEST_DCV_INVALID_FLAGS, + TEST_DCV_INVALID_RES_HANDLE, + TEST_DCV_UNEXPECTED_ERR, + TEST_DCV_NO_ERR, + TEST_DCV_UNEXPECTED_ERR_VLUN, + TEST_DCV_VLUN_RST_FlAG, + TEST_DCV_VLUN_TUR_FLAG, + 
TEST_DCV_VLUN_INQ_FLAG, + TEST_DCV_VLUN_HINT_SENSE, + TEST_DCV_PLUN_RST_FlAG, + TEST_DCV_PLUN_TUR_FLAG, + TEST_DCV_PLUN_INQ_FLAG, + TEST_DCV_PLUN_HINT_SENSE, + TEST_DCV_PLUN_RST_FlAG_EEH, + /********** DK_CAPI_LOG_EVENT ********/ + TEST_DCLE_VALID_VALUES, + TEST_DCLE_DK_LF_TEMP, + TEST_DCLE_DK_LF_PERM, + TEST_DCLE_DK_FL_HW_ERR, + TEST_DCLE_DK_FL_SW_ERR, + /*********** ERR CASE **************/ + TEST_VSPIO_EEHRECOVERY, + TEST_DSPIO_EEHRECOVERY, + TEST_IOCTL_FCP, + TEST_MMIO_ERRCASE1, + TEST_MMIO_ERRCASE2, + TEST_MMIO_ERRCASE3, + TEST_SPIO_KILLPROCESS, + TEST_SPIO_EXIT, + TEST_IOCTL_SPIO_ERRCASE, + TEST_CFDISK_CTXS_DIFF_DEVNO, + TEST_ATTACH_REUSE_DIFF_PROC, + TEST_DETACH_DIFF_PROC, + TEST_FC_PR_RESET_VLUN, + TEST_FC_PR_RESET_PLUN, + /*********** EXCP CASE *************/ + EXCP_VLUN_DISABLE, + EXCP_PLUN_DISABLE, + EXCP_VLUN_VERIFY, + EXCP_PLUN_VERIFY, + EXCP_VLUN_INCREASE, + EXCP_VLUN_REDUCE, + EXCP_PLUN_UATTENTION, + EXCP_VLUN_UATTENTION, + EXCP_EEH_SIMULATION, + EXCP_INVAL_DEVNO, + EXCP_INVAL_CTXTKN, + EXCP_INVAL_RSCHNDL, + EXCP_DISK_INCREASE, + + /*CLONE */ + TEST_DK_CAPI_CLONE, +G_ioctl_7_1_119, +E_ioctl_7_1_120, +E_ioctl_7_1_174, +E_ioctl_7_1_175, +E_ioctl_7_1_180, +E_ioctl_7_1_1801, +G_ioctl_7_1_181, +E_ioctl_7_1_182, +G_ioctl_7_1_187, +E_ioctl_7_1_212, +E_ioctl_7_1_213, +E_ioctl_7_1_215, +E_ioctl_7_1_216, +G_ioctl_7_1_188, +G_ioctl_7_1_189, +E_ioctl_7_1_190, +G_ioctl_7_1_191, +G_ioctl_7_1_192, +G_ioctl_7_1_193_1, +G_ioctl_7_1_193_2, +G_ioctl_7_1_196, +E_ioctl_7_1_197, +E_ioctl_7_1_198, +E_ioctl_7_1_209, +E_ioctl_7_1_210, +E_ioctl_7_1_211, +E_ioctl_7_1_214, +E_test_SCSI_CMDS, +E_TEST_CTX_RESET, +M_TEST_7_5_13_1, +M_TEST_7_5_13_2, +G_TEST_MAX_CTX_PLUN, +G_TEST_MAX_CTX_ONLY, +G_TEST_MAX_CTX_0_VLUN, +E_CAPI_LINK_DOWN, +G_ioctl_7_1_203, +G_TEST_MAX_VLUNS, +G_TEST_MAX_CTX_IO_NOFLG, +}mc_test_t; + +int validateFunction(struct validatePckt *); +int mc_max_open_tst(); +int mc_open_close_tst(); +int mc_register_tst(); +int mc_unregister_tst(); +int mc_open_tst(int); +int 
mc_size_tst(int); +int mc_xlate_tst(int); +int mc_hdup_tst(int cmd); +int mc_max_vdisk_thread(); +int test_mc_clone_api(__u32 flags); +int test_mc_clone_error(__u32 oflg, __u32 cnflg); +int test_mc_max_size(); +int test_max_ctx_n_res(); +int test_one_aun_size(); +int test_mc_clone_read(); +int test_mc_clone_write(); +int test_mc_lun_size(int cmd); +int test_mc_dup_api(); +int mc_close_tst(); +int mc_test_chunk_regress(int cmd); +int mc_test_chunk_regress_long(); +int mc_test_chunk_regress_both_afu(); +int test_mc_xlate_error(int); +int test_vdisk_io(); +int ctx_init_thread(void *); +int test_lun_discovery(int cmd); +int test_onectx_twothrd(int cmd); +int test_mc_reg_error(int cmd); +int test_two_ctx_two_thrd(int cmd); +int test_mc_invalid_ioarcb(int cmd); +int test_rw_close_hndl(int cmd); +int test_mc_clone_many_rht(); +int test_good_ctx_err_ctx(int cmd); +int test_mc_ioarcb_ea_alignment(int cmd); +int check_mc_null_params(int cmd); +int test_many_ctx_one_rrq_curr_null(); +int test_all_afu_devices(); +int mc_test_rwbuff_global(); +int test_mc_rwbuff_shm(); +int test_mc_rw_size_parallel(); +int test_mc_good_error_afu_dev(); +int test_mc_regress_ctx_crt_dstr(int cmd); +int test_mc_size_error(int cmd); +int test_mc_null_params(int cmd); +int test_mc_inter_prcs_ctx(int cmd); +int mc_test_ctx_regress(int cmd); +int test_large_trnsfr_boundary(); +int test_large_transfer(); +int max_ctx_rcvr_except_last_one(); +int max_ctx_rcvr_last_one_no_rcvr(); + + +int open_dev(char*, int); +int mc_test_engine(mc_test_t); +int ctx_init(struct ctx *p_ctx); +int ctx_init2(struct ctx *p_ctx, char *dev, __u64 flags, dev64_t devno); +int ctx_reinit(struct ctx *p_ctx); +int ctx_close(struct ctx *p_ctx); +void ctx_close_thread(void *); +int get_fvt_dev_env(); + +int test_init(struct ctx *p_ctx); +void *ctx_rrq_rx(void *arg); +int send_write(struct ctx *p_ctx, __u64 start_lba, + __u64 stride, __u64 data); +int send_single_write(struct ctx *p_ctx, __u64 vlba, __u64 data); +int send_read(struct 
ctx *p_ctx, __u64 start_lba, __u64 stride); +int send_single_read(struct ctx *p_ctx, __u64 vlba); +int rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba); +int rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba); +int cmp_buf_cloned(__u64* p_buf, unsigned int len); +int rw_cmp_single_buf(struct ctx *p_ctx, __u64 vlba); +int send_cmd(struct ctx *p_ctx); +int wait_resp(struct ctx *p_ctx); +int wait_single_resp(struct ctx *p_ctx); +void fill_buf(__u64* p_buf, unsigned int len, __u64 data); +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len); +int send_report_luns(struct ctx *p_ctx, __u32 port_sel, + __u64 **lun_ids,__u32 *nluns); +int send_read_capacity(struct ctx *p_ctx, __u32 port_sel, + __u64 lun_id, __u64 *lun_capacity, __u64 *blk_len); +int check_status(volatile sisl_ioasa_t *p_ioasa); +void send_single_cmd(struct ctx *p_ctx); +int send_rw_rcb(struct ctx *p_ctx, struct rwbuf *p_rwb, + __u64 start_lba, __u64 stride, + int align, int where); +int send_rw_lsize(struct ctx *p_ctx, struct rwlargebuf *p_rwb, + __u64 start_lba, __u32 blocks); +int send_rw_shm_rcb(struct ctx *p_ctx, struct rwshmbuf *p_rwb, + __u64 vlba); +void hexdump(void *data, long len, const char *hdr); +int get_flash_disks(struct flash_disk *disks, int type); +int generate_unexpected_error(void); + +int test_all_ioctl_invalid_version(void); +int test_invalid_version_ioctl(int flag); +#ifdef _AIX +int test_dcqp_ioctl(int flag); +int test_dcqp_error_ioctl(int flag); +#endif +int test_dca_ioctl(int flag); +int test_dca_error_ioctl(int flag); +int test_dcrc_ioctl(int flag); +int test_dcud_error_ioctl(int flag); +int test_dcud_ioctl(int flag); +int test_dcuv_error_ioctl(int flag); +int test_dcuv_ioctl(int flag); +int test_dcvr_error_ioctl(int flag); +int test_dcvr_ioctl(int flag); +int test_dcd_ioctl(int flag); +int test_dcv_error_ioctl(int flag); +int test_dcv_ioctl(int flag); +int test_dcle_ioctl(int flag); +int test_dcr_ioctl(int flag); + +int test_vSpio_eehRecovery(int); +int test_ioctl_fcp(); 
+// ---------------------------------------------------------------------------
+// Error-path / recovery test entry points (MMIO error cases, cross-process
+// attach/detach, process kill/exit during I/O, EEH recovery on direct LUNs).
+// ---------------------------------------------------------------------------
+int test_mmio_errcase(int);
+int test_detach_diff_proc();
+int test_attach_reuse_diff_proc();
+int test_cfdisk_ctxs_diff_devno();
+int test_ioctl_spio_errcase();
+int test_spio_exit();
+int test_spio_killprocess();
+int test_dSpio_eehRecovery(int);
+
+// DK_CAPI_QUERY_EXCEPTION ioctl test cases (valid and invalid invocations).
+int test_dcqexp_ioctl();
+int test_dcqexp_invalid();
+
+// Argument bundle passed (as void*) to do_io_thread(): the shared test
+// context, the LBA stride to use, and the number of I/O loop iterations.
+typedef struct do_io_thread_arg
+{
+    struct ctx *p_ctx;      // context the thread drives I/O against
+    __u64 stride;           // LBA stride between commands
+    int loopCount;          // number of I/O loop repetitions
+} do_io_thread_arg_t;
+
+// Thread body; arg is a do_io_thread_arg_t* (pthread_create signature).
+void * do_io_thread(void * );
+
+
+
+// Locate a non-CAPI disk; fills in the device path and device number.
+int get_nonflash_disk(char *, dev64_t *);
+
+// Wrappers around the DK_CAPI_* ioctls; each operates on the given test
+// context. (DK_CAPI_QUERY_PATH exists only on AIX, hence the guard.)
+#ifdef _AIX
+int ioctl_dk_capi_query_path(struct ctx *p_ctx);
+#endif
+int ioctl_dk_capi_attach(struct ctx *p_ctx);
+int ioctl_dk_capi_detach(struct ctx *p_ctx);
+int ioctl_dk_capi_udirect(struct ctx *p_ctx);
+int ioctl_dk_capi_uvirtual(struct ctx *p_ctx);
+int ioctl_dk_capi_release(struct ctx *p_ctx);
+int ioctl_dk_capi_vlun_resize(struct ctx *p_ctx);
+int ioctl_dk_capi_verify(struct ctx *p_ctx);
+int ioctl_dk_capi_log(struct ctx *p_ctx, char *s_data);
+int ioctl_dk_capi_recover_ctx(struct ctx *p_ctx);
+int ioctl_dk_capi_query_exception(struct ctx *p_ctx);
+int ioctl_dk_capi_clone(struct ctx *p_ctx, uint64_t old_ctx_id,int src_adap_fd);
+
+// Resource-handle and I/O helpers shared by the test cases.  mc_size1 sets
+// a vlun's size in chunks and returns the size actually granted; mc_stat1
+// reads back size/geometry.  do_io presumably performs a write/read/compare
+// sweep at the given stride — NOTE(review): confirm against the definition
+// in the test sources; only the prototypes are visible here.
+int close_res(struct ctx *p_ctx);
+int create_res(struct ctx *p_ctx);
+int mc_stat1(struct ctx *p_ctx, mc_stat_t *stat);
+int mc_size1(struct ctx *p_ctx, __u64 chunk, __u64 *actual_size);
+int create_resource(struct ctx *p_ctx, __u64 nlba,
+                    __u64 flags, __u16 lun_type);
+int vlun_resize(struct ctx *p_ctx, __u64 nlba);
+int wait4all();
+int do_io(struct ctx *p_ctx, __u64 stride);
+int do_io_nocompare(struct ctx *p_ctx, __u64 stride);
+int do_large_io(struct ctx *p_ctx, struct rwlargebuf *rwbuf, __u64 size);
+int do_poll_eeh(struct ctx *);
+int do_eeh(struct ctx *);
+// NOTE(review): "Adapater" is a long-standing spelling in these identifiers;
+// kept as-is since renaming would break every caller.
+int diskInSameAdapater( char * );
+int diskInDiffAdapater( char * );
+bool check_afu_reset(struct ctx *p_ctx);
+#ifdef _AIX
+int ioctl_dk_capi_query_path_check_flag(struct ctx *p_ctx,
+                                        int flag1, int flag2);
+#endif
+int test_spio_vlun(int);
+int test_spio_plun(); +int test_fc_port_reset_vlun(); +int test_fc_port_reset_plun(); +int test_spio_lun(char *dev, dev64_t devno, + __u16 lun_type, __u64 chunk); +int test_spio_pluns(int cmd); +int test_spio_vluns(int cmd); +int test_spio_direct_virtual(); +int max_ctx_max_res(int cmd); +int test_spio_attach_detach(int cmd); +int test_clone_ioctl(int cmd); +__u64 get_disk_last_lba(char *dev, dev64_t devno, uint64_t *chunk_size); +int compare_size(uint64_t act, uint64_t exp); +int compare_flags(uint64_t act, uint64_t exp); +int traditional_io(int disk_num); +int mc_init(); +int mc_term(); +//int max_ctx_res(struct ctx *p_ctx); +int create_multiple_vluns(struct ctx *p_ctx); +int ioctl_7_1_119_120(int); +int ioctl_7_1_174_175(int); +int test_traditional_IO(int,int); +int ioctl_7_1_188(int); +int ioctl_7_1_191(int); +int ioctl_7_1_192(int); +int ioctl_7_1_196(); +int ioctl_7_1_197(); +int ioctl_7_1_198(); +int ioctl_7_1_209(); +int ioctl_7_1_210(); +int ioctl_7_1_211(); +int ioctl_7_1_215(); +int ioctl_7_1_216(); +void *create_lun1(void *arg ); +int create_direct_lun(struct ctx *p_ctx); +int capi_open_close(struct ctx *, char *); +int create_direct_lun(struct ctx *p_ctx); +int create_vluns_216(char *dev, + __u16 lun_type, __u64 chunk, struct ctx *p_ctx); +int create_vlun_215(char *dev, dev64_t devno, + __u16 lun_type, __u64 chunk, struct ctx *p_ctx); +int ioctl_dk_capi_attach_reuse(struct ctx *, struct ctx * , __u16 ); +int ctx_init_internal(struct ctx *p_ctx, + __u64 flags, dev64_t devno); +void handleSignal(int sigNo); +void sig_handle(int sig); +int test_scsi_cmds(); +int do_write_or_read(struct ctx *p_ctx, __u64 stride, int do_W_R); +int allocate_buf(struct rwlargebuf *rwbuf, __u64 size); +void deallocate_buf(struct rwlargebuf *rwbuf); +int ioctl_dk_capi_attach_reuse_all_disk( ); +int ioctl_dk_capi_attach_reuse_loop(struct ctx *p_ctx,struct ctx *p_ctx_1 ); +int test_ctx_reset(); +int set_spio_mode(); +int ioctl_7_5_13(int ops); +int keep_doing_eeh_test(struct ctx 
*p_ctx); +int max_ctx_on_plun(int ); +int max_ctx_n_res_nchan(int nchan); +int ioctl_7_1_193_1( int flag ); +int ioctl_7_1_193_2( int flag ); +int max_ctx_n_res_190(); +int ctx_init_reuse(struct ctx *p_ctx); +int max_ctx_res_190(struct ctx *p_ctx); +int ioctl_7_1_203(); +int ioctl_7_1_190( ); +int call_attach_diff_devno(); +int max_vlun_on_a_ctx(); +void displayBuildinfo(); +int get_max_res_hndl_by_capacity(char *dev); +#endif /*__CFLASH_TEST_H__ */ diff --git a/src/cflash/test/cflash_test2.c b/src/cflash/test/cflash_test2.c new file mode 100644 index 00000000..b95c00f0 --- /dev/null +++ b/src/cflash/test/cflash_test2.c @@ -0,0 +1,1069 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test2.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" +#include +#include + +//extern char master_dev_path[MC_PATHLEN]; +extern char cflash_path[MC_PATHLEN]; +extern int MAX_RES_HANDLE; + +extern pid_t pid; +extern __u8 rrq_c_null; +extern int dont_displa_err_msg; +extern bool long_run_enable; +/*each thread can modify this value incase of any failure */ +extern int g_error; + +/* + * * Serialization is required for a mc_handle + * * which is shared by multiple threads. + * + */ +static pthread_mutex_t mutex; +static pthread_mutex_t counter=PTHREAD_MUTEX_INITIALIZER; +static int count=0; + +typedef struct max_res_thread +{ + struct ctx *p_ctx; + __u64 chunk; /* chunk size to request per thread */ + __u64 stride; +}max_res_thread_t; + +void* test_mc_api(void *arg) +{ + max_res_thread_t *res_thrd = (max_res_thread_t *)arg; + struct ctx *p_ctx = res_thrd->p_ctx; + int rc=0; + res_hndl_t res_handl; + __u64 rsrc_handle; + __u64 actual_size=0; + __u64 nlba, st_lba; + mc_stat_t l_mc_stat; + + __u64 size = res_thrd->chunk; + __u64 stride = res_thrd->stride;; + pthread_t pthread_id1 =pthread_self(); + unsigned int pthread_id =(unsigned int)pthread_id1; + + pthread_mutex_lock(&counter); + count++; + pthread_mutex_unlock(&counter); + + pthread_mutex_lock(&mutex); + rc = create_res(p_ctx); + res_handl = p_ctx->res_hndl; + rsrc_handle = p_ctx->rsrc_handle; + pthread_mutex_unlock(&mutex); + sleep(1);//lets all thread created + if (rc != 0) + { + fprintf(stderr, "thread : 0x%x:create_res: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = mc_size1(p_ctx, size, &actual_size); + l_mc_stat.size = actual_size; + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "thread : 0x%x:mc_size: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = mc_stat1(p_ctx, &l_mc_stat); + 
pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "thread : 0x%x:mc_stat: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + size = l_mc_stat.size; + if (size != actual_size) + { + fprintf(stderr,"thread : 0x%x:size mismatched: %lu : %lu\n", pthread_id,size,actual_size); + g_error = -1; + return NULL; + } + + nlba = size * (1 << l_mc_stat.nmask); + + debug("res hnd:%d send IO lba range 0X%"PRIX64"\n",res_handl, nlba); + for (st_lba = 0; st_lba < nlba; st_lba +=(NUM_CMDS*stride)) + { + pthread_mutex_lock(&mutex); + p_ctx->res_hndl = res_handl; + debug_2("res hnd: %d send IO for 0X%"PRIX64" \n",res_handl, st_lba); + + rc = send_write(p_ctx, st_lba, stride, pthread_id1); + rc += send_read(p_ctx, st_lba, stride); + rc += rw_cmp_buf(p_ctx, st_lba); + + pthread_mutex_unlock(&mutex); + if (rc) + { + g_error = rc; + break; + } + } + + sleep(1); + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = close_res(p_ctx); + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "thread : 0x%x:close_res: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + return 0; +} + +int mc_max_vdisk_thread() +{ + struct ctx myctx; + struct pthread_alloc *p_thread_a; + struct ctx *p_ctx = &myctx; + pthread_mutexattr_t mattr; + max_res_thread_t res[MAX_RES_HANDLE]; + pthread_t thread; + int rc = 0; + int i; + + pid = getpid(); + //Allocating structures for pthreads. 
+ p_thread_a = (struct pthread_alloc *) malloc(sizeof(struct pthread_alloc) * MAX_RES_HANDLE); + if (p_thread_a == NULL) + { + fprintf(stderr, " Can not allocate thread structs, errno %d\n", errno); + return -1; + } + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + //initialize the mutex + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + //create threads + for (i=0;i< MAX_RES_HANDLE; ++i) + { + res[i].p_ctx = p_ctx; + res[i].chunk = i+1; + res[i].stride = 0x1000; + + rc = pthread_create(&(p_thread_a[i].rrq_thread), NULL, &test_mc_api, (void *)&res[i]); + if (rc) + { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + //destroy mutexattr + pthread_mutexattr_destroy(&mattr); + //joining + for (i=0;i< MAX_RES_HANDLE; ++i) + { + pthread_join(p_thread_a[i].rrq_thread, NULL); + } + + //destroy the mutex + pthread_mutex_destroy(&mutex); + pthread_cancel(thread); + + ctx_close(p_ctx); + mc_term(); + //free allocated space + free(p_thread_a); + rc = g_error; + g_error =0; + return rc; +} + +int test_mc_max_size() +{ + int rc = 0; + struct ctx testctx; + pthread_t thread; + __u64 chunks =0x4; + __u64 actual_size; + __u64 max_size = 0; + __u64 rnum; + __u64 lun_size; + __u64 stride = 0x1000; //4K + __u64 st_lba = 0; + int loop_stride = 1000; + __u64 nlba; + mc_stat_t l_mc_stat; + bool is_stress = false; + + struct ctx *p_ctx = &testctx; + unsigned int i; + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG RUN Enabled....\n"); + chunks = 1; //increment one by one + loop_stride = 10; + is_stress = true; + } + + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = create_res(p_ctx); + CHECK_RC(rc, "create_res failed"); + + //allocate max allow size for a vdisk + while (1) + { + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; 
+ if (chunks != actual_size) + { + debug("now reaching extreme..chunk(0X%"PRIX64") act(0X%"PRIX64")\n", chunks,actual_size); + /*rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat1 failed");*/ + max_size = l_mc_stat.size; + break; + } + rc = mc_stat1(p_ctx,&l_mc_stat); + if (rc != 0) + { + fprintf(stderr,"mc_stat failed rc = %d\n",rc); + return -1; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + debug("chunk(0X%"PRIX64")lba (0X%"PRIX64") i/o@(0X%"PRIX64")\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid); + if (rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride); + if (rc) break; + rc = rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if (rc) break; + if (is_stress) + chunks++; + else + chunks += 0x4; + } + + if (max_size == 0) + { + debug("lets check more chunk can be allocated\n"); + rc = mc_size1(p_ctx, chunks+1, &actual_size); + l_mc_stat.size = actual_size; + debug("new chunk =0X%"PRIX64" & LBAs = 0X%"PRIX64"\n", + actual_size, actual_size*(1 << l_mc_stat.nmask)); + fprintf(stderr, "some errors happend\n"); + return -1; + } + debug("OK, I got the lun size 0X%"PRIX64" & nlba 0X%"PRIX64"\n", + max_size, max_size*(1 << l_mc_stat.nmask)); + lun_size = (max_size*(1 << l_mc_stat.nmask)*(l_mc_stat.blk_len)/(1024*1024*1024)); + + + nlba = max_size * (1 << l_mc_stat.nmask); + if (is_stress) + { + printf("%d: now do IO till 0X%"PRIX64" lbas\n", pid, nlba-1); + for (st_lba = 0; st_lba < nlba; st_lba += NUM_CMDS * stride) + { + rc = send_write(p_ctx, st_lba, stride, pid); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, st_lba, stride); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, st_lba); + CHECK_RC(rc, "rw_cmp_buf"); + } + fflush(stdout); + } + //allocate & dallocate max_size + chunks = max_size; + while (chunks > 0) + { + chunks = chunks/2; + if (chunks == 0) break; + rc = mc_size1(p_ctx, chunks, &actual_size); + l_mc_stat.size = actual_size; + if (rc != 0 || chunks != 
actual_size) + { + fprintf(stderr,"mc_size api failed.. rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + rc = mc_stat1(p_ctx, &l_mc_stat); + if (rc != 0 || chunks != actual_size) + { + fprintf(stderr,"mc_stat api failed.. rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + actual_size = l_mc_stat.size; + if (actual_size == 0) break; + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("chunk(0X%"PRIX64")lba (0X%"PRIX64") i/o@(0X%"PRIX64")\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid); + if (rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride); + if (rc) break; + rc = rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if (rc) break; + } + + for (i=0;i<=max_size;i+=loop_stride) + { + rnum = rand()% max_size +1; + if (rnum*p_ctx->chunk_size > (p_ctx->last_phys_lba +1)) continue; + rc = mc_size1(p_ctx, rnum, &actual_size); + l_mc_stat.size = actual_size; + + if ((rc != 0)||(rnum != actual_size)) + { + fprintf(stderr,"mc_size api failed.. rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + rc = mc_stat1(p_ctx, &l_mc_stat); + if ((rc != 0 )|| (rnum != actual_size)) + { + fprintf(stderr,"mc_stat api failed.. 
rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + actual_size = l_mc_stat.size; + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("chunk(0X%"PRIX64")lba (0X%"PRIX64") i/o@(0X%"PRIX64")\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid); + if (rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride); + if (rc) break; + rc = rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if (rc) break; + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + printf("LUN size is :%lu GB\n",lun_size); + return rc; +} + +int test_one_aun_size() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 aun = 1; + __u64 actual_size; + __u64 nlba; + __u64 size; + __u64 stride = 0x10; //IO on all LBAs + pthread_t thread; + mc_stat_t l_mc_stat; + + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "create_res failed"); + + rc = mc_size1(p_ctx, aun, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size1 failed"); + + rc = mc_stat1(p_ctx,&l_mc_stat); + if ((rc != 0) || (aun != l_mc_stat.size)) + { + fprintf(stderr,"mc_get_size failed rc =%d: %lu : %lu\n", rc,aun,actual_size); + return -1; + } + debug("mc_stat:blk_size=0X%X nmask=0X%X size=0X%"PRIX64" flags=0X%"PRIX64"\n", + l_mc_stat.blk_len, l_mc_stat.nmask, l_mc_stat.size, l_mc_stat.flags); + + nlba = aun*(1 << l_mc_stat.nmask); + size = nlba*(l_mc_stat.blk_len); + debug("ONE AUN = %lu(0x%lx) LBAs and One AUN size =%lu(0x%lx)Bytes\n",nlba,nlba,size,size); + debug("ONE AUN = %lu MB\n",size/(1024*1024)); + + rc |= send_single_write(p_ctx, nlba-1, pid); + rc |= send_single_read(p_ctx, nlba-1); + + stride = p_ctx->blk_len; + rc |= send_write(p_ctx, 0, stride, pid); + rc |= send_read(p_ctx, 0, stride); + rc |= rw_cmp_buf(p_ctx, 0); + + rc |= 
send_write(p_ctx, nlba/2, stride, pid); + rc |= send_read(p_ctx, nlba/2, stride); + rc |= rw_cmp_buf(p_ctx, nlba/2); + CHECK_RC(rc, "IO"); + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return 0; +} + +void *exploit_chunk(void *arg) +{ + struct ctx *p_ctx = (struct ctx*)arg; + //mc_hndl_t mc_hndl = p_ctx->mc_hndl; + int rc=0; + int i; + res_hndl_t res_handl; + __u64 rsrc_handle; + __u64 size = 1; + __u64 actual_size=0; + //__u64 plba=0; + __u64 st_lba; + __u64 nlba; + int myloop = 1; + int inner_loop =1; + __u64 stride = 0x1000; + mc_stat_t l_mc_stat; + + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + myloop = 10; + inner_loop = 10; + stride = 1; + debug("%d: %s : %d :Regress Outerloop: %d & inner loop:%d\n", + pid, __func__, __LINE__, myloop, inner_loop); + } + while (myloop > 0) + { + pthread_mutex_lock(&mutex); + rc = create_res(p_ctx); + rsrc_handle = p_ctx->rsrc_handle; + res_handl = p_ctx->res_hndl; + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + debug_2("%d : rsh %d started\n", pid, res_handl); + for (i = 0; i< inner_loop;i++) + { + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = mc_size1(p_ctx, size,&actual_size); + l_mc_stat.size = actual_size; + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_size: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = mc_stat1(p_ctx, &l_mc_stat); + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_stat: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + + debug_2("%d: R/W started for rsh %d from 0X0 to 0X%"PRIX64"\n", + pid, res_handl, nlba-1); + for (st_lba = 0; st_lba < nlba; st_lba += NUM_CMDS*stride) + { + debug_2("%d: start lba 
0X%"PRIX64" total lba 0X%"PRIX64" rsh %d\n", + pid, st_lba, nlba,res_handl); + pthread_mutex_lock(&mutex); + p_ctx->res_hndl = res_handl; + rc = send_write(p_ctx, st_lba, stride, pid); + if (rc) + { + mc_stat1(p_ctx, &l_mc_stat); + if (size == 0 || (l_mc_stat.size * (1 << l_mc_stat.nmask)) <= st_lba) + { + printf("%d: Fine, send write(0X%"PRIX64") was out of bounds, MAX LBAs(0X%"PRIX64")\n", + pid, st_lba, size * (1 << l_mc_stat.nmask)); + pthread_mutex_unlock(&mutex); + fflush(stdout); + break; + } + else + { + g_error = -1; + fprintf(stderr,"%d: chunk(0X%"PRIX64")IO failed rsh %d st_lba(0X%"PRIX64") range(0X%"PRIX64")\n", + pid, size, res_handl, st_lba, nlba-1); + pthread_mutex_unlock(&mutex); + return NULL; + } + } + else + { + rc = send_read(p_ctx, st_lba, stride); + rc += rw_cmp_buf(p_ctx, st_lba); + pthread_mutex_unlock(&mutex); + if (rc) + { + g_error = -1; + return NULL; + } + } + } + debug_2("%d: R/W done for rsh %d from 0X0 to 0X%"PRIX64"\n", + pid, res_handl, nlba-1); + size = (rand()%10+1)*16; + } + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle= rsrc_handle; + rc = mc_stat1(p_ctx, &l_mc_stat); + pthread_mutex_unlock(&mutex); + size= l_mc_stat.size; + + sleep(2); + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle= rsrc_handle; + rc = close_res(p_ctx); + pthread_mutex_unlock(&mutex); + debug_2("%d: now closing rsh %d\n", pid, res_handl); + if (rc != 0) + { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + myloop--; + debug("%d: %d loop remains was rsh %d\n", pid, myloop, res_handl); + } + return 0; +} +int chunk_regress() +{ + struct ctx_alloc p_ctx_a; + struct pthread_alloc *p_thread_a; + struct ctx *p_ctx = &(p_ctx_a.ctx); + pthread_mutexattr_t mattr; + pthread_t thread; + int rc = 0; + int i; + int MAX_NUM_THREAD=MAX_RES_HANDLE; + + pid = getpid(); + debug("%d: afu=%s",pid, cflash_path); + + //Allocating structures for pthreads. 
+ p_thread_a = (struct pthread_alloc *) malloc(sizeof(struct pthread_alloc) * MAX_RES_HANDLE); + if (p_thread_a == NULL) + { + fprintf(stderr, " Can not allocate thread structs, errno %d\n", errno); + return -1; + } + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init faild"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + //initialize the mutex + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + //create threads + for (i=0;i< MAX_NUM_THREAD; ++i) + { + rc = pthread_create(&(p_thread_a[i].rrq_thread), NULL, &exploit_chunk, (void *)p_ctx); + if (rc) + { + pthread_cancel(thread); + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + //destroy mutexattr + pthread_mutexattr_destroy(&mattr); + //joining + for (i=0;i< MAX_NUM_THREAD; ++i) + { + pthread_join(p_thread_a[i].rrq_thread, NULL); + } + + //destroy the mutex + pthread_mutex_destroy(&mutex); + + pthread_cancel(thread); + + ctx_close(p_ctx); + mc_term(); + //free allocated space + free(p_thread_a); + rc = g_error; + g_error =0; + debug("%d: I am returning %d\n", pid, rc); + return rc; +} + +int mc_size_regress_internal() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=MAX_RES_HANDLE; + __u64 actual_size=0; + //__u64 nlba; //unused + __u64 stride=0x1000; + __u32 i; + int mc_size_regrss_l = 2; + mc_stat_t l_mc_stat; + pthread_t thread; + pid = getpid(); + + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG RUN Enabled....\n"); + mc_size_regrss_l = 100; + stride = 0x1; + } + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + for (i = 1 ; i <= mc_size_regrss_l; i++) + { + chunks = chunks/2; + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; 
+ CHECK_RC(rc, "mc_size"); + if (actual_size) + { + rc=do_io(p_ctx, stride); + CHECK_RC(rc,"IO Failed..\n"); + } + rc = mc_size1(p_ctx, 0, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + if ( i % 10 == 0) + { + system("date"); + printf("%d: loop %d(%d) done...\n", pid, i, mc_size_regrss_l); + } + fflush(stdout); + } + pthread_cancel(thread); + return 0; +} + +int mc_test_chunk_regress(int cmd) +{ + int rc; + int i; + //pid_t pid1; + int max_p= MAX_OPENS; + + pid = getpid(); + MAX_RES_HANDLE=get_max_res_hndl_by_capacity(cflash_path); + if (MAX_RES_HANDLE <= 0) + { + fprintf(stderr,"Unable to run max_ctx_max_res.. refere prior error..\n"); + fflush(stdout); + return -1; + } + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + system("date"); + printf("%d: Do %s Regress for %d context processes\n", + pid, __func__, max_p); + if (4 == cmd) + debug("mc_size api(0 to value & viceversa) Regress 1000 loops...\n"); + fflush(stdout); + } + for (i = 0; i< max_p;i++) + { + if (fork() == 0) + { + debug("%d process created.........................................\n",i+1); + usleep(1000); + if (1 == cmd) // chunk regress + rc = chunk_regress(); + else if (4 == cmd) //mc_size regress + rc = mc_size_regress_internal(); + else //ctx regress create &destroy with io & wo io + rc = mc_test_ctx_regress(cmd); + + if (rc ) + { + debug("%d: exiting with rc = %d\n", pid, rc); + } + + fflush(stdout); + exit(rc); + } + } + + rc=wait4all(); + fflush(stdout); + /*rc = g_error; + g_error = 0;*/ + return rc; +} + +int mc_test_chunk_regress_long() +{ + int rc; + int i; + int lrun=1; + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG RUN Enabled....\n"); + lrun = 100; + printf("%d: Do %s Regress loop : %d\n", + pid, __func__, lrun); + fflush(stdout); + } + pid = getpid(); + for (i = 1; i <= lrun; i++) + { + debug("Loop %d(%d) started...\n", i, lrun); + rc = mc_test_chunk_regress(1); + debug("Loop %d(%d) done...\n", i, lrun); + if (i%10 == 0) + 
{ + system("date"); + printf("Loop %d(%d) done...\n", i, lrun); + fflush(stdout); + } + if (rc) + { + fprintf(stderr, "Loop %d is failed with rc = %d\n", i, rc); + break; + } + } + return rc; +} +int mc_test_chunk_regress_both_afu() +{ + int rc; + int i; + //pid_t pid; + int max_p = 4; + char l_afu[MC_PATHLEN]; + //char l_master[MC_PATHLEN]; + char buffer[MC_PATHLEN]; + char *str; + char *afu_1 = "0.0s"; + char *afu_2 = "1.0s"; + strcpy(l_afu, cflash_path); + //strcpy(l_master, master_dev_path); + for (i = 0; i < max_p; i++) + { + if (i%2) + { + strcpy(cflash_path, l_afu); + //strcpy(master_dev_path, l_master); + } + else + { + str = strstr(l_afu, afu_1); + if (str == NULL) + { + //ENV var set with 1.0 + strncpy(buffer, l_afu, strlen(l_afu)-strlen(afu_2)); + buffer[strlen(l_afu)-strlen(afu_2)]='\0'; + strcat(buffer, afu_1); + } + else + { + strncpy(buffer, l_afu, strlen(l_afu)-strlen(afu_1)); + buffer[strlen(l_afu)-strlen(afu_1)]='\0'; + strcat(buffer, afu_2); + } + strcpy(cflash_path, buffer); + //strncpy(master_dev_path, cflash_path, strlen(afu_path)-1); + //master_dev_path[strlen(cflash_path)-1] ='\0'; + //strcat(master_dev_path, "m"); + } + if (fork() == 0) + { + rc =chunk_regress(); + exit(rc); + } + } + + wait4all(); + + rc = g_error; + g_error = 0; + + return rc; +} +int test_mix_in_out_bound_lba() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + int myloop = 2; + __u64 chunks=256; + __u64 actual_size=0; + __u64 st_lba,nlba; + __u64 stride; + mc_stat_t l_mc_stat; + pthread_t thread; + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG RUN Enabled....\n"); + myloop = 100; + } + + pid = getpid(); + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx, chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); 
+ + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + debug("%d: chunk(0X%"PRIX64") & lba range(0X%"PRIX64")\n", pid, actual_size, nlba-1); + while (myloop-- > 0) + { + for (st_lba =0; st_lba < nlba; st_lba += (NUM_CMDS * stride)) + { + //in bound + send_write(p_ctx, st_lba, stride, pid); + send_read(p_ctx, st_lba, stride); + //out bound + send_write(p_ctx, nlba + st_lba, stride, pid); + send_read(p_ctx, nlba + st_lba, stride); + } + } + pthread_cancel(thread); + ctx_close(p_ctx); + mc_term(); + fflush(stdout); + return 0; +} + +/* + * Function : test_mc_good_error_afu_dev + * return : 0 success else failure + * + * Run Good path test case like Chunk regress on one AFU dev + * And Error path test case on another AFU dev + * Make sure good path test case should run smoothly + * Doesn't bother about error test cases + */ +int test_mc_good_error_afu_dev() +{ + int rc = 0;; + int status=0; + int i; + int lloop = 5; + //char buffer[MC_PATHLEN]; + int j; + struct flash_disk fldisks[MAX_FDISK]; + int cfdisk; + + cfdisk = get_flash_disks(fldisks, FDISKS_ALL); + if (cfdisk < 2) + { + fprintf(stderr, "Failed,need 2 flash disks \n"); + return -1; + } + + char *str1 = getenv("LONG_RUN"); + if (str1 != NULL) + { + printf("LONG RUN Enabled....\n"); + lloop = 100; + } + pid = getpid(); + debug("%d: Good path on %s\n", pid, cflash_path); + //now create a child process & do error path + if (fork() == 0) + { + dont_displa_err_msg = 1; + pid = getpid(); + + strcpy(cflash_path, fldisks[1].dev); + debug("%d: Error path on disk %s\n", pid, cflash_path); + debug("%d: error path process started..\n", pid); + for (i = 0; i < lloop; i++) + { + debug("%d: starting loop %d(%d)\n", pid, i , lloop); + rc = test_mix_in_out_bound_lba(); + + for (j=1; j<=13; j++) + { + rc = test_mc_invalid_ioarcb(j); + } + rc = mc_test_engine(TEST_MC_RW_CLS_RSH); + //rc = mc_test_engine(TEST_MC_UNREG_MC_HNDL); + } + 
debug("%d: error path process exited..\n", pid); + exit(rc); + } + else + { + debug("%d: Good path process started..\n", pid); + //good path on fldisks[0] + strcpy(cflash_path, fldisks[0].dev); + for (i = 0; i < lloop; i++) + { + rc += chunk_regress(); + } + wait(&status); + debug("%d: Good path process exited with rc=%d..\n", pid,rc); + } + return rc; +} + +int mc_test_ctx_regress(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=MAX_RES_HANDLE; + __u64 actual_size=0; + //__u64 nlba; //unused + __u64 stride; + mc_stat_t l_mc_stat; + pthread_t thread; + + pid = getpid(); + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + sleep(1); + CHECK_RC(rc, "ctx init failed"); + if (long_run_enable) + stride = p_ctx->blk_len; + else + stride = 0x100; + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + if (3 == cmd) + { + rc=do_io(p_ctx,stride); + } + pthread_cancel(thread); + sleep(1); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_mc_regress_ctx_crt_dstr(int cmd) +{ + int rc; + int i; + int lrun=2; + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG RUN Enabled....\n"); + lrun = 400; //for 4 Hrs + if (3 == cmd) + lrun = 100; + system("date"); + printf("%s : %d : Regress loop : %d\n", __func__, __LINE__, lrun); + fflush(stdout); + } + for (i = 1; i <= lrun; i++) + { + debug("Loop %d(%d) started...\n", i, lrun); + if (1 == cmd) //mc_regress_ctx_crt_dstr without io + rc = mc_test_chunk_regress(2); + else //mc_regress_ctx_crt_dstr with io + rc = mc_test_chunk_regress(3); + + debug("Loop %d(%d) done...\n", i, lrun); + if (i%10 == 0) + { + system("date"); + printf("%d: Loop %d(%d) done...\n", getpid(), i, lrun); + fflush(stdout); + } + if (rc) + 
{ + fprintf(stderr, "Loop %d is failed with rc = %d\n", i, rc); + break; + } + } + return rc; +} diff --git a/src/cflash/test/cflash_test_engine.c b/src/cflash/test/cflash_test_engine.c new file mode 100644 index 00000000..d5e43861 --- /dev/null +++ b/src/cflash/test/cflash_test_engine.c @@ -0,0 +1,803 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_engine.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" + +int test1() +{ + return -1; +} + +int mc_test_engine(mc_test_t test_name) +{ + int rc = 0; + if (get_fvt_dev_env()) return -1; +#ifdef _AIX + //system("ctctrl -c cflashdd -r memtraceoff"); + system("ctctrl -c cflashdd -r memtraceon"); + if (DEBUG) + system("ctctrl -c cflashdd -r -q"); +#else + if (DEBUG) + system("echo \"module cxlflash +p\" > /sys/kernel/debug/dynamic_debug/control"); + displayBuildinfo(); +#endif + + if (DEBUG) + { + system("echo;echo ---------------------- Test Start Timestamp ----------------------"); + system("date"); + system("echo ------------------------------------------------------------------; echo"); + } + + if (fork() == 0) + { + //child process + switch (test_name) + { + /*** DK_CAPI_QUERY_PATH ***/ +#ifdef _AIX + case TEST_DCQP_VALID_PATH_COUNT: + rc = test_dcqp_ioctl(1); + break; +#endif + case TEST_IOCTL_INVALID_VERSIONS: + rc = test_all_ioctl_invalid_version(); + break; +#ifdef _AIX + case TEST_DCQP_INVALID_PATH_COUNT: + rc = test_dcqp_error_ioctl(1); + break; + case TEST_DCQP_DUAL_PATH_COUNT: + rc = test_dcqp_ioctl(2); + break; + case TEST_DCQP_DK_CPIF_RESERVED: + rc = test_dcqp_ioctl(3); + break; + case TEST_DCQP_DK_CPIF_FAILED: + rc = test_dcqp_ioctl(4); + break; +#endif + /**** DK_CAPI_ATTACH ***/ + case TEST_DCA_OTHER_DEVNO: //other devno + rc = test_dca_error_ioctl(1); + break; + case TEST_DCA_INVALID_DEVNO: + rc = test_dca_error_ioctl(2);//invalid path -1 + break; + case TEST_DCA_INVALID_INTRPT_NUM: + rc = test_dca_error_ioctl(3);//invalid -1 intrpt number + break; + case TEST_DCA_VALID_VALUES: + rc = test_dca_ioctl(1);//Valid values + break; + /* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ + case TEST_DCA_INVALID_FLAGS: + rc = test_dca_error_ioctl(4); + break; + case TEST_DCA_CALL_TWICE: + rc = test_dca_error_ioctl(5); + break; + case TEST_DCA_CALL_DIFF_DEVNO_MULTIPLE: + rc = test_dca_ioctl(2); + break; + case 
TEST_DCA_REUSE_CTX_FLAG: + rc = test_dca_error_ioctl(6); + break; + case TEST_DCA_REUSE_CTX_FLAG_NEW_PLUN_DISK: + rc = test_dca_error_ioctl(7); + break; + case TEST_DCA_REUSE_CTX_NEW_VLUN_DISK: + rc = test_dca_error_ioctl(8); + break; + case TEST_DCA_REUSE_CTX_ALL_CAPI_DISK: + rc = test_dca_error_ioctl(12); + break; + case TEST_DCA_REUSE_CTX_OF_DETACH_CTX: + rc = test_dca_error_ioctl(9); + break; + case TEST_DCA_REUSE_CTX_OF_RELASED_IOCTL: + rc = test_dca_error_ioctl(11); + break; + case TEST_DCA_REUSE_CTX_NEW_DISK_AFTER_EEH: + rc = test_dca_error_ioctl(10); + break; + /*** DK_CAPI_RECOVER_CTX ***/ + case TEST_DCRC_NO_EEH: + rc = test_dcrc_ioctl(1); + break; + case TEST_DCRC_DETACHED_CTX: + rc = test_dcrc_ioctl(2); + break; + case TEST_DCRC_EEH_VLUN: + rc = test_dcrc_ioctl(3); + break; + case TEST_DCRC_EEH_PLUN_MULTI_VLUN: + rc = test_dcrc_ioctl(4); + break; + case TEST_DCRC_EEH_VLUN_RESUSE_CTX: + rc = test_dca_error_ioctl(15); + break; + case TEST_DCRC_EEH_PLUN_RESUSE_CTX: + rc = test_dca_error_ioctl(16); + break; + case TEST_DCRC_EEH_VLUN_RESIZE: + rc = test_dcrc_ioctl(7); + break; + case TEST_DCRC_EEH_VLUN_RELEASE: + rc = test_dcrc_ioctl(8); + break; + case TEST_DCRC_INVALID_DEVNO: + rc = test_dcrc_ioctl(9); + break; + case TEST_DCRC_INVALID_FLAG: + rc = test_dcrc_ioctl(10); + break; + case TEST_DCRC_INVALID_REASON: + rc = test_dcrc_ioctl(11); + break; + case TEST_DCRC_IO_EEH_VLUN: + rc = test_dcrc_ioctl(12); + break; + case TEST_DCRC_IO_EEH_PLUN: + rc = test_dcrc_ioctl(13); + break; + /*** DK_CAPI_USER_DIRECT ***/ + case TEST_DCUD_INVALID_DEVNO_VALID_CTX: + rc = test_dcud_error_ioctl(1); + break; + case TEST_DCUD_INVALID_CTX_VALID_DEVNO: + rc = test_dcud_error_ioctl(2); + break; + case TEST_DCUD_VALID_CTX_VALID_DEVNO: + rc = test_dcud_ioctl(1); + break; + case TEST_DCUD_FLAGS: + rc = test_dcud_error_ioctl(3); + break; + case TEST_DCUD_TWICE_SAME_CTX_DEVNO: + rc = test_dcud_error_ioctl(4); + break; + case TEST_DCUD_VLUN_ALREADY_CREATED_SAME_DISK: + rc = 
test_dcud_error_ioctl(5); + break; + case TEST_DCUD_PLUN_ALREADY_CREATED_SAME_DISK: + rc = test_dcud_error_ioctl(6); + break; + case TEST_DCUD_VLUN_CREATED_DESTROYED_SAME_DISK: + rc = test_dcud_ioctl(2); + break; + case TEST_DCUD_IN_LOOP: + rc = test_dcud_ioctl(3); + break; + case TEST_DCUD_PATH_ID_MASK_VALUES: + rc = test_dcud_ioctl(4); + break; + case TEST_DCUD_BAD_PATH_ID_MASK_VALUES: + rc = test_dcud_error_ioctl(7); + break; + /*** DK_CAPI_USER_VIRTUAL ***/ + case TEST_DCUV_INVALID_DEVNO_VALID_CTX: + rc = test_dcuv_error_ioctl(1); + break; + case TEST_DCUV_INVALID_CTX_INVALID_DEVNO: + rc = test_dcuv_error_ioctl(2); + break; + case TEST_DCUV_VALID_DEVNO_INVALID_CTX: + rc = test_dcuv_error_ioctl(3); + break; + case TEST_DCUV_LUN_VLUN_SIZE_ZERO: + rc = test_dcuv_ioctl(1); + break; + case TEST_DCUV_PLUN_ALREADY_CREATED_SAME_DISK: + rc = test_dcuv_error_ioctl(4); + break; + case TEST_DCUV_VLUN_ALREADY_CREATED_SAME_DISK: + rc = test_dcuv_ioctl(2); + break; + case TEST_DCUV_NO_FURTHER_VLUN_CAPACITY: + rc = test_dcuv_error_ioctl(5); + break; + case TEST_DCUV_MTPLE_VLUNS_SAME_CAPACITY_SAME_DISK: + rc = test_dcuv_ioctl(3); + break; + case TEST_DCUV_TWICE_SAME_CTX_DEVNO: + rc = test_dcuv_ioctl(4); + break; + case TEST_DCUV_VLUN_MAX: + rc = test_dcuv_ioctl(5); + break; + case TEST_DCUV_VLUN_SIZE_MORE_THAN_DISK_SIZE: + rc = test_dcuv_error_ioctl(6); + break; + case TEST_DCUV_PLUN_CREATED_DESTROYED_SAME_DISK: + rc = test_dcuv_ioctl(6); + break; + case TEST_DCUV_WITH_CTX_OF_PLUN: + rc = test_dcuv_error_ioctl(7); + break; + case TEST_DCUD_WITH_CTX_OF_VLUN: + rc = test_dcuv_error_ioctl(8); + break; + case TEST_DCUV_PATH_ID_MASK_VALUES: + rc = test_dcuv_ioctl(7); + break; + case TEST_DCUV_INVALID_PATH_ID_MASK_VALUES: + rc = test_dcuv_ioctl(8); + break; + case TEST_DCUV_IN_LOOP: + rc = test_dcuv_ioctl(9); + break; + /*** DK_CAPI_DETACH ***/ + case TEST_DCD_INVALID_CTX_DEVNO: + rc = test_dcd_ioctl(1); + break; + case TEST_DCD_INVALID_DEVNO: + rc = test_dcd_ioctl(2); + break; + 
case TEST_DCD_INVALID_CTX: + rc = test_dcd_ioctl(3); + break; + case TEST_DCD_TWICE_ON_PLUN: + rc = test_dcd_ioctl(4); + break; + case TEST_DCD_TWICE_ON_VLUN: + rc = test_dcd_ioctl(5); + break; + /*** DK_CAPI_VERIFY ****/ + case TEST_DCV_INVALID_DEVNO: + rc = test_dcv_error_ioctl(1); + break; + case TEST_DCV_INVALID_FLAGS: + rc = test_dcv_error_ioctl(2); + break; + case TEST_DCV_INVALID_RES_HANDLE: + rc = test_dcv_error_ioctl(3); + break; + case TEST_DCV_UNEXPECTED_ERR: + rc = test_dcv_ioctl(1); + break; + case TEST_DCV_NO_ERR: + rc = test_dcv_ioctl(2); + break; + case TEST_DCV_UNEXPECTED_ERR_VLUN: + rc = test_dcv_ioctl(3); + break; + case TEST_DCV_VLUN_RST_FlAG: + rc = test_dcv_ioctl(4); + break; + case TEST_DCV_VLUN_TUR_FLAG: + rc = test_dcv_ioctl(5); + break; + case TEST_DCV_VLUN_INQ_FLAG: + rc = test_dcv_ioctl(6); + break; + case TEST_DCV_VLUN_HINT_SENSE: + rc = test_dcv_ioctl(7); + break; + case TEST_DCV_PLUN_RST_FlAG: + rc = test_dcv_ioctl(8); + break; + case TEST_DCV_PLUN_TUR_FLAG: + rc = test_dcv_ioctl(9); + break; + case TEST_DCV_PLUN_INQ_FLAG: + rc = test_dcv_ioctl(10); + break; + case TEST_DCV_PLUN_HINT_SENSE: + rc = test_dcv_ioctl(11); + break; + case TEST_DCV_PLUN_RST_FlAG_EEH: + rc = test_dcv_ioctl(12); + break; + /*** DK_CAPI_VLUN_RESIZE ***/ + case TEST_DCVR_INVALID_DEVNO: + rc = test_dcvr_error_ioctl(1); + break; + case TEST_DCVR_INVALID_CTX_DEVNO: + rc = test_dcvr_error_ioctl(2); + break; + case TEST_DCVR_INVALID_CTX: + rc = test_dcvr_error_ioctl(3); + break; + case TEST_DCVR_NO_VLUN: + rc = test_dcvr_error_ioctl(4); + break; + case TEST_DCVR_ON_PLUN: + rc = test_dcvr_error_ioctl(5); + break; + case TEST_DCVR_GT_DISK_SIZE: + rc = test_dcvr_error_ioctl(6); + break; + case TEST_DCVR_NOT_FCT_256MB: + rc = test_dcvr_ioctl(1); + break; + case TEST_DCVR_EQ_CT_VLUN_SIZE: + rc = test_dcvr_ioctl(2); + break; + case TEST_DCVR_LT_CT_VLUN_SIZE: + rc = test_dcvr_ioctl(3); + break; + case TEST_DCVR_GT_CT_VLUN_SIZE: + rc = test_dcvr_ioctl(4); + break; + case 
TEST_DCVR_EQ_DISK_SIZE_NONE_VLUN: + rc = test_dcvr_ioctl(5); + break; + case TEST_DCVR_EQ_DISK_SIZE_OTHER_VLUN: + rc = test_dcvr_error_ioctl(7); + break; + case TEST_DCVR_INC_256MB: + rc = test_dcvr_ioctl(6); + break; + case TEST_DCVR_DEC_256MB: + rc = test_dcvr_ioctl(7); + break; + case TEST_DCVR_GT_CT_VLUN_LT_256MB: + rc = test_dcvr_ioctl(8); + break; + case TEST_DCVR_LT_CT_VLUN_LT_256MB: + rc = test_dcvr_ioctl(9); + break; + case TEST_DCVR_INC_DEC_LOOP: + rc = test_dcvr_ioctl(10); + break; + case G_MC_test_DCVR_ZERO_Vlun_size: + rc = test_dcvr_ioctl(11); + break; + /*** DK_CAPI_RELEASE ***/ + case TEST_DCR_INVALID_DEVNO: + rc = test_dcr_ioctl(1); + break; + case TEST_DCR_INVALID_DEVNO_CTX: + rc = test_dcr_ioctl(2); + break; + case TEST_DCR_INVALID_CTX: + rc = test_dcr_ioctl(3); + break; + case TEST_DCR_NO_VLUN: + rc = test_dcr_ioctl(4); + break; + case TEST_DCR_PLUN_AGIAN: + rc = test_dcr_ioctl(5); + break; + case TEST_DCR_VLUN_AGIAN: + rc = test_dcr_ioctl(6); + break; + case TEST_DCR_MULTP_VLUN: + rc = test_dcr_ioctl(7); + break; + case TEST_DCR_VLUN_INV_REL: + rc = test_dcr_ioctl(8); + break; + /*** DK_CAPI_LOG_EVENT ***/ +#ifdef _AIX + case TEST_DCLE_VALID_VALUES: + rc = test_dcle_ioctl(1); + break; + case TEST_DCLE_DK_LF_TEMP: + rc = test_dcle_ioctl(2); + break; + case TEST_DCLE_DK_LF_PERM: + rc = test_dcle_ioctl(3); + break; + case TEST_DCLE_DK_FL_HW_ERR: + rc = test_dcle_ioctl(4); + break; + case TEST_DCLE_DK_FL_SW_ERR: + rc = test_dcle_ioctl(5); + break; +#endif + case TEST_SPIO_VLUN: + rc = test_spio_vlun(2); + break; + case TEST_SPIO_0_VLUN: + rc = test_spio_vlun(1); + break; + case TEST_SPIO_NORES_AFURC: + rc = test_spio_vlun(3); + break; + case TEST_SPIO_A_PLUN: + rc = test_spio_plun(); + break; + case TEST_SPIO_ALL_PLUN: + rc = test_spio_pluns(2); + break; + case TEST_SPIO_ALL_VLUN: + rc = test_spio_vluns(1); + break; + case TEST_SPIO_VLUN_PLUN: + rc = test_spio_direct_virtual(); + break; + case TEST_MC_SIZE_REGRESS: + rc = mc_test_chunk_regress(4); 
+ break; + case TEST_MC_REGRESS_CTX_CRT_DSTR: + rc = test_mc_regress_ctx_crt_dstr(1); + break; + case TEST_MC_REGRESS_CTX_CRT_DSTR_IO: + rc = test_mc_regress_ctx_crt_dstr(2); + break; + case TEST_MC_REGRESS_RESOURCE: + rc = mc_test_chunk_regress_long(); + break; + case TEST_MC_TWO_CTX_RD_WRTHRD: + rc = test_two_ctx_two_thrd(1); + break; + + case TEST_MC_TWO_CTX_RDWR_SIZE: + rc = test_two_ctx_two_thrd(2); + break; + + case TEST_MC_ONE_CTX_TWO_THRD: + rc = test_onectx_twothrd(1); + break; + + case TEST_MC_ONE_CTX_RD_WRSIZE: + rc = test_onectx_twothrd(2); + break; + + case TEST_MC_MAX_RES_HNDL: + rc = mc_max_vdisk_thread(); + break; + + case TEST_ONE_UNIT_SIZE: + rc = test_one_aun_size(); + break; + + case TEST_MAX_CTX_RES_UNIT: + rc = max_ctx_max_res(1); + break; + + case TEST_MAX_CTX_RES_LUN_CAP: + rc = max_ctx_max_res(2); + break; + + case TEST_MC_MAX_SIZE: + rc = test_mc_max_size(); + break; + + case TEST_MC_SPIO_VLUN_ATCH_DTCH: + rc = test_spio_attach_detach(1); + break; + + case TEST_MC_SPIO_PLUN_ATCH_DTCH: + rc = test_spio_attach_detach(2); + break; + + case MC_TEST_RWBUFF_GLOBAL: + rc = mc_test_rwbuff_global(1); + break; + + case MC_TEST_RWBUFF_HEAP: + rc = mc_test_rwbuff_global(2); + break; + + case MC_TEST_RWBUFF_SHM: + rc = test_mc_rwbuff_shm(); + break; + + case MC_TEST_RW_SIZE_PARALLEL: + rc = test_mc_rw_size_parallel(); + break; + + case MC_TEST_GOOD_ERR_AFU_DEV: + rc = test_mc_good_error_afu_dev(); + break; + + case TEST_MC_RW_CLS_RSH: + rc = test_rw_close_hndl(1); + break; + + case TEST_MC_RW_CLOSE_DISK_FD: + rc = test_rw_close_hndl(2); + break; + + case TEST_MC_RW_CLOSE_CTX: + rc = test_rw_close_hndl(3); + break; + + case TEST_MC_RW_UNMAP_MMIO: + rc = test_rw_close_hndl(4); + break; + + case TEST_MC_IOARCB_EA_ALGNMNT_16: + rc = test_mc_ioarcb_ea_alignment(1); + break; + + case TEST_MC_IOARCB_EA_ALGNMNT_128: + rc = test_mc_ioarcb_ea_alignment(2); + break; + + case TEST_MC_IOARCB_EA_INVLD_ALGNMNT: + rc = test_mc_ioarcb_ea_alignment(3); + break; + + case 
TEST_LARGE_TRANSFER_IO: + rc = test_large_transfer(); + break; + + case TEST_LARGE_TRNSFR_BOUNDARY: + rc = test_large_trnsfr_boundary(); + break; + + case MAX_CTX_RCVR_EXCEPT_LAST_ONE: + rc = max_ctx_rcvr_except_last_one(); + break; + + case MAX_CTX_RCVR_LAST_ONE_NO_RCVR: + rc = max_ctx_rcvr_last_one_no_rcvr(); + break; + + case TEST_VSPIO_EEHRECOVERY: + rc = test_vSpio_eehRecovery(1); + break; + + case TEST_DSPIO_EEHRECOVERY: + rc = test_dSpio_eehRecovery(1); + break; + + case TEST_IOCTL_FCP: + rc = test_ioctl_fcp(); + break; + + case TEST_MMIO_ERRCASE1: + rc = test_mmio_errcase(TEST_MMIO_ERRCASE1); + break; + case TEST_MMIO_ERRCASE2: + rc = test_mmio_errcase(TEST_MMIO_ERRCASE2); + break; + case TEST_MMIO_ERRCASE3: + rc = test_mmio_errcase(TEST_MMIO_ERRCASE3); + break; + + case TEST_SPIO_KILLPROCESS: + rc = test_spio_killprocess(); + break; + + case TEST_SPIO_EXIT: + rc = test_spio_exit(); + break; + + case TEST_IOCTL_SPIO_ERRCASE: + rc = test_ioctl_spio_errcase(); + break; + case TEST_FC_PR_RESET_VLUN: + rc = test_fc_port_reset_vlun(); + break; + case TEST_FC_PR_RESET_PLUN: + rc = test_fc_port_reset_plun(); + break; + case TEST_CFDISK_CTXS_DIFF_DEVNO: + rc = test_cfdisk_ctxs_diff_devno(); + break; + + case TEST_ATTACH_REUSE_DIFF_PROC: + rc = test_attach_reuse_diff_proc(); + break; + + case TEST_DETACH_DIFF_PROC: + rc = test_detach_diff_proc(); + break; + case EXCP_VLUN_DISABLE: + rc = test_dcqexp_ioctl(EXCP_VLUN_DISABLE); + break; + case EXCP_PLUN_DISABLE: + rc = test_dcqexp_ioctl(EXCP_PLUN_DISABLE); + break; + case EXCP_VLUN_VERIFY: + rc = test_dcqexp_ioctl(EXCP_VLUN_VERIFY); + break; + case EXCP_DISK_INCREASE: + rc = test_dcqexp_ioctl(EXCP_DISK_INCREASE); + break; + case EXCP_PLUN_VERIFY: + rc = test_dcqexp_ioctl(EXCP_PLUN_VERIFY); + break; + case EXCP_VLUN_INCREASE: + rc = test_dcqexp_ioctl(EXCP_VLUN_INCREASE); + break; + case EXCP_VLUN_REDUCE : + rc = test_dcqexp_ioctl(EXCP_VLUN_REDUCE); + break; + case EXCP_PLUN_UATTENTION : + rc = 
test_dcqexp_ioctl(EXCP_PLUN_UATTENTION); + break; + case EXCP_VLUN_UATTENTION : + rc = test_dcqexp_ioctl(EXCP_VLUN_UATTENTION); + break; + case EXCP_EEH_SIMULATION : + rc = test_dcqexp_ioctl(EXCP_EEH_SIMULATION); + break; + case EXCP_INVAL_DEVNO : + rc = test_dcqexp_invalid(EXCP_INVAL_DEVNO); + break; + case EXCP_INVAL_CTXTKN : + rc = test_dcqexp_invalid(EXCP_INVAL_CTXTKN); + break; + case EXCP_INVAL_RSCHNDL : + rc = test_dcqexp_invalid(EXCP_INVAL_RSCHNDL); + break; + case TEST_DK_CAPI_CLONE : + rc = test_clone_ioctl(0); + break; + case G_ioctl_7_1_119: + rc=ioctl_7_1_119_120(119); + break; + case E_ioctl_7_1_120: + rc=ioctl_7_1_119_120(120); + break; + case E_ioctl_7_1_174: + rc=ioctl_7_1_174_175(1); + break; + case E_ioctl_7_1_175: + rc=ioctl_7_1_174_175(2); + break; +#ifdef _AIX + case E_ioctl_7_1_180: + rc=test_traditional_IO(180,1); + break; + case E_ioctl_7_1_1801: + rc=test_traditional_IO(1801,1); + break; + case G_ioctl_7_1_181: + rc=test_traditional_IO(181,1); + break; + case E_ioctl_7_1_182: + rc=test_traditional_IO(182,1); + break; + case G_ioctl_7_1_187: + rc=test_traditional_IO(187,1); + break; +#endif + case G_ioctl_7_1_188: +#ifdef _AIX + rc=test_vSpio_eehRecovery(1); +#else + rc=ioctl_7_1_188(188); +#endif + break; + case G_ioctl_7_1_189: +#ifdef _AIX + rc = test_dSpio_eehRecovery(1); +#else + rc=ioctl_7_1_188(189); +#endif + break; + case E_CAPI_LINK_DOWN : + rc=ioctl_7_1_188(E_CAPI_LINK_DOWN); + break; + case E_ioctl_7_1_190: + rc=ioctl_7_1_190(); + break; + case G_ioctl_7_1_191: + rc=ioctl_7_1_191(191); + break; + case G_ioctl_7_1_192: + rc=ioctl_7_1_192(192); + break; +#ifdef _AIX + case G_ioctl_7_1_193_1: + rc=ioctl_7_1_193_1(193); + break; + case G_ioctl_7_1_193_2: + rc=ioctl_7_1_193_2(194); + break; +#endif + case G_ioctl_7_1_196: + rc=ioctl_7_1_196(); + break; + case E_ioctl_7_1_197: + rc=ioctl_7_1_197(197); + break; + case E_ioctl_7_1_198: + rc=ioctl_7_1_198(198); + break; + case G_ioctl_7_1_203: + rc=ioctl_7_1_203(203); + break; + case 
E_ioctl_7_1_209: + rc=ioctl_7_1_209(209); + break; + case E_ioctl_7_1_210: + rc=ioctl_7_1_210(210); + break; + case E_ioctl_7_1_211: + rc=ioctl_7_1_211(211); + break; +#ifdef _AIX + case E_ioctl_7_1_212: + rc=test_traditional_IO(212,1); + break; + case E_ioctl_7_1_213: + rc=test_traditional_IO(213,1); + break; +#endif + case E_ioctl_7_1_214: + rc=test_traditional_IO(214,1); + break; + + case E_ioctl_7_1_215: + rc= test_dca_error_ioctl(13); + break; + case E_ioctl_7_1_216: + rc=test_dca_error_ioctl(14); + break; + + case E_test_SCSI_CMDS: + rc = test_scsi_cmds(); + break; + + case E_TEST_CTX_RESET: + rc = test_ctx_reset(); + break; + + case M_TEST_7_5_13_1: + rc = ioctl_7_5_13(1); + break; + + case M_TEST_7_5_13_2: + rc = ioctl_7_5_13(2); + break; + case G_TEST_MAX_CTX_PLUN: + rc = max_ctx_on_plun(3); + break; + + case G_TEST_MAX_CTX_0_VLUN: + rc = max_ctx_on_plun(2); + break; + + case G_TEST_MAX_CTX_ONLY: + rc = max_ctx_on_plun(1); + break; + + case G_TEST_MAX_CTX_IO_NOFLG: + rc = max_ctx_on_plun(4); + break; + + case G_TEST_MAX_VLUNS: + rc = max_vlun_on_a_ctx(); + break; + + default: + rc = -1; + break; + } + exit(rc); + } + wait(&rc); + if (WIFEXITED(rc)) + { + rc = WEXITSTATUS(rc); + } + + if (DEBUG) + { + system("echo;echo ---------------------- Test End Timestamp ----------------------"); + system("date"); + system("echo ----------------------------------------------------------------; echo"); + } + + return rc; +} diff --git a/src/cflash/test/cflash_test_error.c b/src/cflash/test/cflash_test_error.c new file mode 100644 index 00000000..76b4e508 --- /dev/null +++ b/src/cflash/test/cflash_test_error.c @@ -0,0 +1,950 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_error.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" +#include +#include +#include +#include + +extern int MAX_RES_HANDLE; +extern char cflash_path[MC_PATHLEN]; +extern sigjmp_buf sBuf; +extern pid_t pid; +extern int g_error; +extern __u64 lun_id; +extern __u64 fc_port; +extern uint8_t rc_flags; +extern int dont_displa_err_msg; +extern bool bad_address; +extern bool err_afu_intrpt; + +typedef int(myfunc)(int); + + +int mc_invalid_ioarcb(int cmd); +int mc_test_inter_prcs_ctx_int(int cmd); + +int test_mc_error(myfunc test1,int cmd) +{ + + int rc; + if (get_fvt_dev_env()) return -1; + + if (fork() ==0) + { + rc = test1(cmd); + exit(rc); + } + wait(&rc); + + if (WIFEXITED(rc)) + { + rc = WEXITSTATUS(rc); + } + return rc; +} + +int test_mc_invalid_ioarcb(int cmd) +{ + return test_mc_error(&mc_invalid_ioarcb, cmd); +} +int test_mc_inter_prcs_ctx(int cmd) +{ + return test_mc_error(&mc_test_inter_prcs_ctx_int, cmd); +} + + +int init_mc(struct ctx *p_ctx, res_hndl_t *res_hndl) +{ + int rc; + __u64 chunks=16; + __u64 actual_size=0; + + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + debug("mc_init success :%d\n",rc); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + *res_hndl = p_ctx->res_hndl; + + rc = 
create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx, chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + return 0; +} + +void fill_send_write(struct ctx *p_ctx, __u64 vlba, + __u64 data, __u64 stride) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 lba; + int i; + + for (i = 0 ; i < NUM_CMDS; i++) + { + lba = i * stride; + + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64),data); + + memset((void *)&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, lba); + + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } +} +void fill_send_read(struct ctx *p_ctx, __u64 lba) +{ + __u64 *p_u64; + __u32 *p_u32; + + memset(&p_ctx->rbuf[0][0], 0, sizeof(p_ctx->rbuf[0])); + + memset((void *)&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_lba(p_u64, lba); + debug("send read for vlba =0X%"PRIX64"\n",lba); + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; +} + +int place_bad_addresses(struct ctx *p_ctx, int action) +{ + int 
cnt = NUM_CMDS; + int wait_try=MAX_TRY_WAIT; + int p_cmd = 0; + int i; + __u64 room; + __u64 baddr = 0x1234; + + /* make memory updates visible to AFU before MMIO */ + asm volatile( "lwsync" : : ); + if (2 == action) + { + // set up bad HRRQ address + write_64(&p_ctx->p_host_map->rrq_start, (__u64)0x456123); + write_64(&p_ctx->p_host_map->rrq_end, (__u64)0x895e6fe); + bad_address= true; + } + + if (3 == action) + { + //cmd_room violation + room = read_64(&p_ctx->p_host_map->cmd_room); + debug("%d:placing %d cmds in 0X%"PRIX64" cmd_room...\n",pid,NUM_CMDS,room); + for (i=0;ip_host_map->ioarrin, + (__u64)&p_ctx->cmd[i].rcb); + bad_address= true; + return 0; + } + while (cnt) + { + room = read_64(&p_ctx->p_host_map->cmd_room); + if (0 == room) + { + usleep(MC_BLOCK_DELAY_ROOM); + wait_try--; + } + if (0 == wait_try) + { + fprintf(stderr, "%d: send cmd wait over %d cmd remain\n", + pid, cnt); + return -1; + } + for (i = 0; i < room; i++) + { + // add a usleep here if room=0 ? + // write IOARRIN + if (1 == action) + { + //bad RCB address + write_64(&p_ctx->p_host_map->ioarrin, baddr*i); + bad_address= true; + } + else + { + write_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[p_cmd++].rcb); + } + wait_try = MAX_TRY_WAIT; //each cmd give try max time + if (cnt-- == 1) break; + } + } + return 0; +} + +int handle_bad_ioasa(struct ctx *p_ctx, __u64 data) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 lba=0; + sisl_ioarcb_t *rcb; + rcb = (sisl_ioarcb_t *)malloc(sizeof(sisl_ioarcb_t)); + + fill_buf((__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->wbuf[0])/sizeof(__u64),data); + + memset(&(rcb->cdb[0]), 0, sizeof(rcb->cdb)); + p_u64 = (__u64*)&(rcb->cdb[2]); + + rcb->res_hndl = p_ctx->res_hndl; + rcb->req_flags = SISL_REQ_FLAGS_RES_HNDL; + rcb->req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, lba); + + rcb->data_ea = (__u64) &p_ctx->wbuf[0][0]; + + rcb->data_len = sizeof(p_ctx->wbuf[0]); + rcb->cdb[0] = 0x8A; + + p_u32 = (__u32*)&(rcb->cdb[10]); + write_32(p_u32, 
p_ctx->blk_len); + + asm volatile( "lwsync" : : ); + write_64(&p_ctx->p_host_map->ioarrin,(__u64)rcb); + return 0; + +} +int mc_invalid_ioarcb(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=32; + __u64 actual_size=0; + __u64 vlba =0; + __u32 *p_u32; + __u64 stride; + __u64 *p_u64; + pthread_t thread; + mc_stat_t l_mc_stat; + int i; + + pid = getpid(); + + signal(SIGABRT, sig_handle); + signal(SIGSEGV, sig_handle); + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + debug("mc_init success :%d\n",rc); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + if (15 == cmd) + { + //PLBA out of range + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "opening res_hndl"); + actual_size = (p_ctx->last_lba+1)/p_ctx->chunk_size; + } + else + { + p_ctx->flags = DK_UVF_ALL_PATHS; + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + rc = mc_size1(p_ctx,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + } + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + stride = 1 << l_mc_stat.nmask; + + vlba = (actual_size * (1 << l_mc_stat.nmask))-1; + fill_send_write(p_ctx, vlba, pid, stride); + for (i = 0; i < NUM_CMDS; i++) + { + if (1 == cmd) + { + //invalid upcode + debug("invalid upcode(0xFA) action = %d\n",cmd); + p_ctx->cmd[i].rcb.cdb[0] = 0xFA; + } + else if (2 == cmd) + { + //EA = NULL + debug("EA = NULL action = %d\n",cmd); + p_ctx->cmd[i].rcb.data_ea = (__u64)NULL; +#ifdef _AIX + bad_address = true; +#endif + } + else if (3 == cmd) + { + //invalid flgas + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags); + } + else if (5 == cmd) + { + //SISL_AFU_RC_RHT_INVALID + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2; + } + else if ( 6 == cmd) + { + //SISL_AFU_RC_RHT_OUT_OF_BOUNDS + 
p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE; + } + else if (7 == cmd) + { + //invalid address for page fault + debug("setting EA = 0x1234 to generate error page fault\n"); + p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234; +#ifdef _AIX + bad_address = true; +#endif + } + else if (8 == cmd) + { + //invalid ctx_id + debug("%d: sending invalid ctx id\n", pid); + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10; + } + else if (9 == cmd) + { + //test flag underrun + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2; + } + else if (10 == cmd) + { + // test flag overrun + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 2); + } + else if (11 == cmd) + { + //rc scsi_rc_check + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len +1); + } + else if (12 == cmd) + { + //data len 0 in ioarcb + p_ctx->cmd[i].rcb.data_len = 0; + } + else if (13 == cmd) + { + //NUM BLK to write 0 + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 0); + } + else if ((14 == cmd) || (15 == cmd)) + { + //test out of range LBAs + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + vlba += i+1; + write_lba(p_u64, vlba); + } + } + + //test BAD IOARCB, IOASA & CMD room violation + if (cmd >= 100) + { + if (100 == cmd) + { + //bad RCB + place_bad_addresses(p_ctx, 1); + usleep(1000); + if (err_afu_intrpt) //cool expected res + rc = 100; + else rc = -1; + goto END; + } + else if (101 == cmd) + { + //bad IOASA + handle_bad_ioasa(p_ctx, pid); + usleep(1000); //sleep sometime to process rcb cmd by AFU + //And let handle rrq event + //how to handle error, rrq thread should throw some error + return -1; + } + else if (102 == cmd) + { + //cmd_room violation + place_bad_addresses(p_ctx, 3); + usleep(1000); +#ifdef _AIX + if (err_afu_intrpt) //cool expected res + rc = 102; + else rc = -1; + goto END; +#endif + } + else if (103 == cmd) + { + //bad HRRQ + place_bad_addresses(p_ctx, 2); + usleep(1000); + if (err_afu_intrpt) 
//cool expected res + rc = 103; + else rc = -1; + goto END; + } + } + else + { + send_cmd(p_ctx); + } + rc = wait_resp(p_ctx); + if ( cmd >= 9 && cmd <= 13) + { + if (!rc_flags) + { + if (!dont_displa_err_msg) + fprintf(stderr, "%d: Expecting rc flags non zero\n", pid); + rc = -1; + } + } + if (4 == cmd) + { + //invalid fc port & lun id + debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd); + fill_send_write(p_ctx, vlba, pid, stride); + for (i = 0; i < NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.lun_id = 0x12000; + p_ctx->cmd[i].rcb.port_sel = 0xff; + } + //send_single_cmd(p_ctx); + send_cmd(p_ctx); + rc = wait_resp(p_ctx); + } +#ifdef _AIX + if ((7 == cmd || 2 == cmd)&& (err_afu_intrpt)) + rc = 7; +#endif +END: + pthread_cancel(thread); + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + //xerror: + ctx_close(p_ctx); + mc_term(); + return rc; +} +int test_mc_invalid_opcode() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=10; + __u64 actual_size=0; + __u64 vlba =0; + __u64 *p_u64; + __u32 *p_u32; + mc_stat_t l_mc_stat; + pthread_t thread; + + if (mc_init() !=0 ) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if (rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + /*rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + }*/ + + rc = create_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size1(p_ctx,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + pid = getpid(); + vlba = (actual_size * (1 << l_mc_stat.nmask))-1; + fill_buf((__u64*)&p_ctx->wbuf[0][0], + 
sizeof(p_ctx->wbuf[0])/sizeof(__u64),pid); + + memset((void *)&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, vlba); + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_ctx->cmd[0].rcb.cdb[0] = 0xFA; // invalid opcode + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +/* + * create two ctx process & 2 resource handler each ctx + * use diff ctx handler in diff process, get another process + * ctx handler through PIPE. + */ +int mc_test_inter_prcs_ctx_int(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + res_hndl_t res_hndl; + ctx_hndl_t ctx_hndl; + int pdes[2]; + pid_t cpid; + pthread_t thread; + __u64 stride = 0x1000; + int i; + //create pipe, child open for write + // parent open for read + + pipe(pdes); + cpid = fork(); + if ( 0 == cpid) + { + //child one running + pid = getpid(); + debug("%d: child do init_mc \n", pid); + rc = init_mc(p_ctx, &res_hndl); + if (rc) + { + fprintf(stderr, "%d: exiting due to init_mc\n:", pid); + exit(rc); + } + //do write into pipe & wait until parent kill me + close(pdes[0]); //close read des + write(pdes[1], &p_ctx->ctx_hndl, sizeof(ctx_hndl_t)); + while (1); + } + else + { + //parent + close(pdes[1]); //close write des + //lets child do there work & wait for me + sleep(1); + pid = getpid(); + rc = init_mc(p_ctx, &res_hndl); + if (rc) + { + kill(cpid, SIGKILL); + return rc; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + read(pdes[0], &ctx_hndl, sizeof(ctx_hndl_t)); + fill_send_write(p_ctx, 0, 
pid, stride); + //set another process ctx + debug("%d: use child(%d)process ctx hndl: %d\n", pid, cpid, ctx_hndl); + for (i = 0; i< NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.ctx_id = ctx_hndl; + } + if (2 == cmd) + { + //another test is to close one of my ctx res hndl + //and use child ctx handler here + //(child has opened 2 res handler) + p_ctx->res_hndl = res_hndl; + close_res(p_ctx); + debug("%d: close res_hndl(%d) but child (%d)has opened\n", + pid, res_hndl, cpid); + for (i = 0; i< NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.res_hndl = res_hndl; + } + } + send_cmd(p_ctx); + rc = wait_resp(p_ctx); + kill(cpid, SIGKILL); + pthread_cancel(thread); + } + return rc; +} + +int test_scsi_cmds() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunk = 16; + pthread_t thread; + __u64 stride = 0x10; + __u64 nlba; + uint8_t opcode[]={ 0x00,0xA0,0x09E,0x12,0x03,0x1B,0x5A,0x55 }; + int index; + pid = getpid(); + rc = ctx_init(p_ctx); + int i; + CHECK_RC(rc, "Context init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + p_ctx->flags = DK_UVF_ALL_PATHS; + p_ctx->lun_size = chunk * p_ctx->chunk_size; + rc = create_res(p_ctx); + CHECK_RC(rc, "create_res failed"); + + nlba = p_ctx->last_lba+1; + for (index=0;index cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.cdb[0] = opcode[index]; + } + send_cmd(p_ctx); + rc = wait_resp(p_ctx); +#ifndef _AIX + if (rc != 0x21) + { + fprintf(stderr,"%d:failed rc =%d for scsi cmd=0X%"PRIX8",exptd rc=0x21\n", + pid,rc,opcode[index]); + break; + } +#endif + debug("%d:rc =%d for scsi cmd=0X%"PRIX8" ........\n",pid,rc,opcode[index]); + usleep(1000); + } + pthread_cancel(thread); + ctx_close(p_ctx); + return rc; +} +int test_ctx_reset() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx= &myctx; + pthread_t thread; + __u64 buf_size = 0x2000000; //32MB + __u64 chunk = 10; + __u64 stride = 0x1000; + struct rwlargebuf rwbuf; + int i; + + 
pid=getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init failed"); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + rc = create_resource(p_ctx,chunk*p_ctx->chunk_size,DK_UVF_ASSIGN_PATH,LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + //do bad EA + if (1) + { + debug("%d: ........place bad EA....\n", pid); + fill_send_write(p_ctx, 0, pid, stride); + for (i = 0; i < NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234; + } + bad_address = true; + send_cmd(p_ctx); + rc = wait_resp(p_ctx); + sleep(1); + //normal IO + bad_address = false; + debug("%d: .........after bad EA, do normal IO....\n", pid); + rc = do_io(p_ctx, stride); + CHECK_RC(rc,"Normal IO failed after bad EA"); + + //do bad RCB + debug("%d: .........place bad RCB....\n", pid); + bad_address = true; + place_bad_addresses(p_ctx, 1); + sleep(2); + //normal IO + debug("%d: ......after bad RCB, do normal IO....\n", pid); + bad_address = false; + rc = do_io(p_ctx, stride); + CHECK_RC(rc,"Normal IO failed after bad RCB"); +#ifdef _AIX + system("ulimit -d unlimited"); + system("ulimit -s unlimited"); + system("ulimit -m unlimited"); +#endif + } + //do large _transfer + debug("%d: Do large transfer ....\n", pid); + rc = allocate_buf(&rwbuf, buf_size); + CHECK_RC(rc, "memory allocation failed"); + rc = do_large_io(p_ctx, &rwbuf, buf_size); + deallocate_buf(&rwbuf); + buf_size = 0x100000; //4k + rc = allocate_buf(&rwbuf, buf_size); + CHECK_RC(rc, "memory allocation failed"); + //normal io + debug("%d: after large transfer,do normal IO ....\n", pid); + rc = do_io(p_ctx, 0x10000); + //rc = do_large_io(p_ctx, &rwbuf, buf_size); + CHECK_RC(rc,"Normal IO failed after large transfer"); + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_fc_port_reset_vlun() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + int ioCounter=0; + __u64 nlba; + __u64 stride=0x1; + + pid = getpid(); +#ifdef _AIX + memset(p_ctx, 0, 
sizeof(myctx)); + strcpy(p_ctx->dev, cflash_path); + if ((p_ctx->fd =open_dev(p_ctx->dev, O_RDWR)) < 0) + { + fprintf(stderr,"open %s failed, errno=%d\n",p_ctx->dev,errno); + return -1; + } + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc,"dk_capi_query_path failed..\n"); + rc = ctx_init_internal(p_ctx, 0,p_ctx->devno); +#else + rc = ctx_init(p_ctx); +#endif + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + nlba = 1 * (p_ctx->chunk_size); + rc = create_resource(p_ctx, nlba, 0, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "failed compare_size"); + + debug("-- Going to start IO.Please do chportfc -reset at texan --\n"); + + debug("rc=%d,g_error=%d\n",rc,g_error); + do + { + rc = do_io(p_ctx, stride); + if (rc !=0 ) + { + debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter); + if ( ioCounter==1 ) + { + debug("rc=%d, Going to verify.... \n",rc); + + p_ctx->flags=DK_VF_LUN_RESET; +#ifdef _AIX + p_ctx->hint = DK_HINT_SENSE; +#else + p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + } + else + { if (ioCounter > 1) + { + rc=-1; // IO failed third time + break; + } + } + + } + else + { + debug("rc=%d,IO succeeded \n",rc); + g_error=0; + } + + ioCounter++; + rc|=g_error; + sleep(3); + + } while ( rc !=0); + + debug("rc=%d,g_error=%d\n",rc,g_error); + + if ( ioCounter <= 1) + { + debug("WARNING: Test case not excuted properly... 
Please rerun\n"); + rc =255; + } + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + rc |= g_error; + return rc; +} + +int test_fc_port_reset_plun() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + __u64 stride= 0x100; + int ioCounter=0; + + pid = getpid(); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + //for PLUN 2nd argument(lba_size) would be ignored + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "failed compare_size"); + + debug("-- Going to start IO.Please do chportfc -reset at texan --\n"); + do + { + + rc = do_io(p_ctx, stride); + if (rc !=0 ) + { + debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter); + if ( ioCounter==1 ) + { + debug("rc=%d, Going to verify.... \n",rc); + + p_ctx->flags=DK_VF_LUN_RESET; +#ifdef _AIX + p_ctx->hint = DK_HINT_SENSE; +#else + p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + } + else + { if (ioCounter > 1) + { + rc=-1; // IO failed third time + break; + } + } + + } + else + { + debug("rc=%d,IO succeeded \n",rc); + g_error=0; + } + + ioCounter++; + rc|=g_error; + sleep(3); + + } while ( rc !=0); + + debug("rc=%d,g_error=%d\n",rc,g_error); + + if ( ioCounter <= 1) + { + debug("WARNING: Test case not excuted properly... Please rerun\n"); + rc =255; + } + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} diff --git a/src/cflash/test/cflash_test_error2.c b/src/cflash/test/cflash_test_error2.c new file mode 100644 index 00000000..2254edb3 --- /dev/null +++ b/src/cflash/test/cflash_test_error2.c @@ -0,0 +1,988 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/cflash/test/cflash_test_error2.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include "cflash_test.h" +#include +#include +#include +#define KB 1024 + +// do_io() will use this pid. +extern pid_t pid; +extern int g_error; +extern sigjmp_buf sBuf; + +// Test Case Starts here .......... !! +void cleanup(struct ctx *p_ctx, pthread_t threadId) +{ + debug("\n\n%d:**************** Start cleanup ****************\n",pid); + // Useful for some -ve tests. NOOPs if thId is passed as -1. + if ( -1 != threadId ) pthread_cancel(threadId); + close_res(p_ctx); + ctx_close(p_ctx); + debug("%d:****************** End cleanup ******************\n",pid); +} + +// 7.1.185 : EEH while super-pipe IO(VIRTUAL)(root user) +int test_vSpio_eehRecovery(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t threadId, ioThreadId, thread2; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + __u64 chunk; + __u64 nlba; + __u64 stride= 0x1000; + __u64 last_lba; + + // pid used to create unique data patterns & logging from util ! 
+ pid = getpid(); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + chunk = (p_ctx->last_phys_lba+1)/p_ctx->chunk_size; + nlba = chunk * p_ctx->chunk_size; + //create vlun + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + // We wish to do IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=1000; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + //Trigger EEH + rc = do_eeh(p_ctx); + CHECK_RC(rc, "do_eeh() failed"); + + // Wait for IO thread to complete + pthread_join(ioThreadId, NULL); +#ifndef _AIX + pthread_cancel(threadId); +#endif + + // Heading for context recovery using ioctl ! 
+ //p_ctx->flags = DK_CAPI_REATTACHED; + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_recover_ctx failed"); + +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); +#ifndef _AIX + pthread_create(&thread2, NULL, ctx_rrq_rx, p_ctx); +#endif + + rc = do_io(p_ctx, stride); + if ( rc == 2) rc=0; + else CHECK_RC(rc, "1st IO attempt didn't fail"); + +#ifdef _AIX + last_lba = p_ctx->last_phys_lba; +#else + last_lba = p_ctx->last_lba; +#endif + +#ifdef _AIX + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; +#else + p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + +#ifdef _AIX + if ( 0 != p_ctx->return_flags ) + CHECK_RC(1, "ioctl_dk_capi_verify flag verification failed"); +#endif + if ( p_ctx->verify_last_lba != last_lba ) + CHECK_RC(1, "ioctl_dk_capi_verify last_lba verification failed"); + + + // After adapter reset, + // AFU interrupt monitoring thread need to be restarted. 
+ //rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + //CHECK_RC(rc, "pthread_create failed"); + + // Re-start the io using new context + if (2 == cmd) + { + //its for long run +#ifndef _AIX + pthread_cancel(thread2); +#endif + rc = keep_doing_eeh_test(p_ctx); + } + else + { + //its for one attempt, sanity check of eeh + debug("%d:Try once more IO & expecting to pass this time..\n",pid); + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); + } + +#ifndef _AIX + pthread_cancel(thread2); +#endif + + cleanup(p_ctx, threadId); + + return rc; +} + +// 7.1.186 : EEH while super-pipe IO(DIRECT)(root user) +int test_dSpio_eehRecovery(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t threadId, ioThreadId, thread2; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + __u64 last_lba; + __u64 stride= 0x1000; + + // pid used to create unique data patterns & logging from util ! + pid = getpid(); + + // ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + // for PLUN 2nd argument(lba_size) would be ignored + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // We wish to do IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=1000; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + //Trigger EEH + rc = do_eeh(p_ctx); + CHECK_RC(rc, "do_eeh() failed"); + + // Wait for IO thread to complete + pthread_join(ioThreadId, NULL); +#ifndef _AIX + pthread_cancel(threadId); +#endif + + // Heading for context recovery using ioctl ! 
+ //p_ctx->flags = DK_CAPI_REATTACHED; + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_recover_ctx failed"); + +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); +#ifndef _AIX + pthread_create(&thread2, NULL, ctx_rrq_rx, p_ctx); +#endif + + rc = do_io(p_ctx, stride); + if ( rc == 2) rc=0; + else CHECK_RC(rc, "1st IO attempt didn't fail"); + +#ifdef _AIX + last_lba = p_ctx->last_phys_lba; +#else + last_lba = p_ctx->last_lba; +#endif + +#ifdef _AIX + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; +#else + p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + +#ifdef _AIX + if ( 0 != p_ctx->return_flags ) + CHECK_RC(1, "ioctl_dk_capi_verify flag verification failed"); +#endif + + if ( p_ctx->verify_last_lba != last_lba ) + CHECK_RC(1, "ioctl_dk_capi_verify last_lba verification failed"); + + // Re-start the io using new context + if (2 == cmd) + { + //its for long run +#ifndef _AIX + pthread_cancel(thread2); +#endif + rc = keep_doing_eeh_test(p_ctx); + } + else + { + //its for one attempt, sanity check of eeh + stride = 0x1000; + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); + } +#ifndef _AIX + pthread_cancel(thread2); +#endif + cleanup(p_ctx, threadId); + + return rc; +} + +// 7.1.194 : Test DK_CAPI_QUERY_PATH & DK_CAPI_ATTACH ioctl for FCP disk +int test_ioctl_fcp() +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + + char fc_dev[MC_PATHLEN]; + dev64_t fc_devno; + + pid = getpid(); + + // Get the FC disk from env. 
+ rc = get_nonflash_disk(&fc_dev[0], &fc_devno); + CHECK_RC(rc, "get_nonflash_disk() failed to get FC disk"); + + // Open fc disk + p_ctx->fd = open(fc_dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", fc_dev, errno); + return -1; + } + +#ifdef _AIX + + // query path ioctl isn't supported on linux + // Call query path ioctl + rc = ioctl_dk_capi_query_path(p_ctx); + if ( 0 == rc ) + CHECK_RC(1, "ioctl_dk_capi_query_path didn't fail"); + + // Verify return values after ioctl + // TBD: return_path_count need to be added to ctx. + if ( p_ctx->return_path_count != 0 || + p_ctx->return_flags != DK_RF_IOCTL_FAILED ) + CHECK_RC(1, "returned_path_count/return_flags is incorrect.."); + +#endif /*_AIX */ + + // Prepare for attach ioctl + p_ctx->flags = DK_AF_ASSIGN_AFU; + p_ctx->work.num_interrupts = 4; // use num_interrupts from AFU desc +#ifdef _AIX + p_ctx->devno = fc_devno; +#endif /*_AIX */ + + // Clear the previous RF before subsequent ioctl + p_ctx->return_flags=0; + + strcpy(p_ctx->dev, fc_dev); + + //do context attach + rc = ioctl_dk_capi_attach(p_ctx); + if ( 0 == rc ) + CHECK_RC(1, "ioctl_dk_capi_attach didn't fail"); + + // Verify return values after ioctl + if ( p_ctx->return_flags != DK_RF_IOCTL_FAILED ) + CHECK_RC(1, "return_flags is incorrect.."); + + // If we reach here, we return success. + return 0; +} + +// 7.1.199 : Try to map & access MMIO space beyond the assigned 64KB. 
+int test_mmio_errcase(int cnum ) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + void * p_mmapAddr; + struct sigaction pgHandle; + + pid = getpid(); + + // ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // for PLUN 2nd argument(lba_size) would be ignored + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // ----- signal handler code ------ + + if (sigsetjmp(sBuf, 0) == 1) + { + goto xerror; + } + pgHandle.sa_handler = handleSignal; + sigemptyset(&pgHandle.sa_mask); + pgHandle.sa_flags = 0; + + if ( sigaction(SIGSEGV, &pgHandle, NULL) < 0) + { + CHECK_RC(1, "sigaction() failed"); + } + + // -------- Negative test : Start --------------------------- + switch (cnum) + { + // We will attempt IO beyond mmio_size and it should fail + case TEST_MMIO_ERRCASE1 : + p_mmapAddr = memset((void *)(p_ctx->p_host_map), + 1, 2048*(p_ctx->mmio_size)); + if ( p_mmapAddr == (void *)(p_ctx->p_host_map)) + CHECK_RC(1, "memset did not fail"); + break; + case TEST_MMIO_ERRCASE2 : + // mmap() beyond mmio_size should fail.... + p_mmapAddr = mmap((void *)p_ctx->p_host_map, + (p_ctx->mmio_size)*2048, + PROT_READ | PROT_WRITE, + MAP_SHARED, + p_ctx->adap_fd, 0); + if ( MAP_FAILED != p_mmapAddr ) + CHECK_RC(1, "mmap() did not fail"); + break; + // attempting IO just beyond mmio_size - only 10KB extra + // it should fail + case TEST_MMIO_ERRCASE3 : + // defect #SW311014 for reference + p_mmapAddr = memset((void *)(p_ctx->p_host_map), + 1, ((p_ctx->mmio_size)+65*KB)); + if ( p_mmapAddr == (void *)(p_ctx->p_host_map)) + CHECK_RC(1, "memset did not fail"); + break; + } + + // -------- Negative test : End --------------------------- + + // Just a quick sanity check for mmap() success case + // -------- Sanity: Start --------------------------- + +#ifndef _AIX + + // IO attempt directly. 
+ memset((void *)(p_ctx->p_host_map), 1, p_ctx->mmio_size); + + rc = munmap( (void *)p_ctx->p_host_map, p_ctx->mmio_size ); + CHECK_RC(rc, "munmap() failed"); +#endif + // -------- Sanity: End --------------------------- + cleanup(p_ctx, -1); + +xerror: + // Test Passed if we return from here ! + return g_error; +} + +// Signal handler for test_spio_killprocess +void callme(int sig_num) +{ + printf("\nGot Signal : %d from child.. Proceeding now..\n",sig_num); +} + +// 7.1.200 : Send signal to kill process when it has cmds queued. +int test_spio_killprocess() +{ + int rc; + int isFailed=0; + int i, nTimes; + pid_t cpid; + int cstat; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + struct sigaction action; + sigset_t sigset; + pthread_t threadId; + __u64 chunk = 0x10; + __u64 nlba; + __u64 stride=0x1000; + + pid = getpid(); + + sigemptyset(&sigset); + sigprocmask(SIG_SETMASK, &sigset, NULL); + + // Set up the signal handler + action.sa_handler = callme; + action.sa_flags = 0; + sigemptyset(&action.sa_mask); + + if (sigaction(SIGUSR1, &action, NULL) < 0) + CHECK_RC(1, "sigaction() failed"); + + char *str = getenv("LONG_RUN"); + if (str == NULL) nTimes=10; + else nTimes=100; + + for (i=0; ichunk_size; + //create vlun + rc = create_resource(p_ctx,nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + // Test with pluns one after another + else if ( i > nTimes/5 && i < 2*nTimes/5 ) + { + // Create PLUN + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + stride=0x10000; + } + // Test with vluns/pluns alternately ! 
+ else if ( i % 2 ) + { + nlba = chunk * p_ctx->chunk_size; + //create vlun + rc = create_resource(p_ctx,nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + stride=0x1000; + } + else + { + // Create PLUN + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + stride=0x10000; + } + + rc = do_io(p_ctx, stride); + + // Signal parent to kill itself after this point. + kill(getppid(), SIGUSR1); + + // Exit at this point if we failed in initial IO + CHECK_RC_EXIT(rc, "Initial IO failed"); + + // Keep driving IO till killed + for (;;) do_io(p_ctx, stride); + } + // parent process + else + { + pid = getpid(); + + cpid = rc; + // Wait for child to complete at-least 1 successful IO. + pause(); + + // Let the child IO go on some more time ! + sleep(1); + + // Send signal 9 - process can't ignore it; + kill(cpid, 9); + + // Probe child's exit status. + if ( wait(&cstat) == -1 ) + CHECK_RC(1, "Failed while wait() for child"); + + // We don't expect child to exit itself + if (WIFEXITED(cstat)) isFailed = 1; + else if (WIFSIGNALED(cstat)) + { + // We expect this ! 
+ debug("%d : killed by %d signal\n", cpid, WTERMSIG(cstat)); + if (WCOREDUMP(cstat)) + fprintf(stderr, "%d : was core dupmed ...\n", cpid); + } + + debug("pid %d exited with rc = %d\n", cpid, cstat); + } + } + + return isFailed; +} + +// 7.1.201 : Queue up commands, do not wait for completion and +// exit w/o detach/close, +// and do detach/close while commands in queue +int test_spio_exit() +{ + int rc; + int isFailed=0; + int i, nTimes; + pid_t cpid; + int cstat; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t threadId, ioThreadId; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + __u64 chunk = 0x10; + __u64 nlba; + __u64 stride= 0x10000; + + for (i=0, nTimes=50; ichunk_size; + //create vlun + rc = create_resource(p_ctx,nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + // Test with pluns one after another + else if ( i > nTimes/5 && i < 2*nTimes/5 ) + { + // Create PLUN + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + stride = 0x10000; + } + // Test with vluns/pluns alternately ! + else if ( i % 2 ) + { + nlba = chunk * p_ctx->chunk_size; + //create vlun + rc = create_resource(p_ctx,nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + else + { + // Create PLUN + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + stride = 0x10000; + } + + // Make sure at-least 1 IO is successful before proceeding ! + rc = do_io(p_ctx, stride); + CHECK_RC_EXIT(rc, "Initial IO attempt failed"); + + // We wish to do IO in a different thread... Setting up for that ! 
+ p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=100; + rc = pthread_create(&ioThreadId,NULL, + do_io_thread, (void *)p_ioThreadData); + CHECK_RC_EXIT(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a sec before exiting + sleep(1); + + if ( i % 2 ) + { + debug("%d:Exiting w/o detach/close",pid); + } + else + { + debug("%d:Exiting after detach/close",pid); + cleanup(p_ctx, threadId); + } + + exit(10); + } + // parent process + else + { + pid = getpid(); + + cpid = rc; + + // Probe child's exit status. + if ( wait(&cstat) == -1 ) + CHECK_RC(1, "Failed while wait() for child"); + + // We expect child to exit itself + if (WIFEXITED(cstat)) + { + debug("Exiting w/o getting killed %d \n",cpid); + // We expect child to exit with rc 10 only ! + if ( WEXITSTATUS(cstat) != 10 ) isFailed=1; + } + else if (WIFSIGNALED(cstat)) + { + //isFailed=1; + debug("%d : killed by %d signal\n", cpid, WTERMSIG(cstat)); + if (WCOREDUMP(cstat)) //expected if exiting without cancelling poll thread + fprintf(stderr, "%d : was core dupmed ...\n", cpid); + } + + debug("pid %d exited with rc = %d\n", cpid, cstat); + } + } + + return isFailed; +} + +// 7.1.202 : Try to send a ctx_id in some ioctl before attach +// (i.e. when no ctx is established). 
& some more scenarios +int test_ioctl_spio_errcase() +{ + int rc; + int itr, type; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + char *fvt_dev; + pthread_t threadId; + __u64 nlba; // stride; //stride not used + + pid = getpid(); + + memset((void *)p_ctx, 0, sizeof(struct ctx)); + + // Case1 + // ---------- IOCTL error cases w/o context : Start ---------- + fvt_dev = getenv("FVT_DEV"); + if (NULL == fvt_dev) + { + fprintf(stderr, "FVT_DEV ENV var NOT set, Please set...\n"); + return -1; + } + + strcpy(p_ctx->dev, fvt_dev); + + //open CAPI Flash disk device + debug("Going to open CAPI Flash disk \n"); + p_ctx->fd = open_dev(fvt_dev, O_RDWR); + if (p_ctx->fd < 0) CHECK_RC(1, "capi device open() failed"); + +#ifdef _AIX + // Get the devno. : only needed for AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query path ioctl failed"); + +#endif + +#ifdef _AIX + p_ctx->work.num_interrupts = 5; // use num_interrupts from AFU desc +#else + p_ctx->work.num_interrupts = 4; // use num_interrupts from AFU desc +#endif /*_AIX*/ + + p_ctx->context_id = 0x1; + + rc = ioctl_dk_capi_detach(p_ctx); + if ( 22 != rc ) CHECK_RC(1, "context detach ioctl did not fail"); + + p_ctx->flags = DK_UDF_ASSIGN_PATH; + + rc = ioctl_dk_capi_udirect(p_ctx); + if ( 22 != rc ) CHECK_RC(1, "pLun creation did not fail"); + + g_error=0; // reset-ing the g_error + close(p_ctx->fd); + + debug("Done. Close the fd \n"); + // ---------- IOCTL error cases w/o context : End ---------- + + // Test for both pLun & vLun ! 
+ for (itr=0; itr<2; itr++) + { + debug("\n\n\n%d:-------- Start Itr# %d -------\n", pid, itr); + // Test Case2 & Case3 + for (type=0; type<2; type++) + { + debug("\n%d:-------- Start test type# %d -------\n", pid, type); + // ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + if ( 0 == itr ) + { + // create plun + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + } + else + { + nlba = p_ctx->chunk_size; + // create vlun + rc = create_resource(p_ctx,nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + + if ( 0 == type ) + { + // -------- Case2: IO error cases after context detach ------- + pthread_cancel(threadId); + rc = ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc, "detach ioctl failed"); + } + else + { + // -------- Case3: IO error cases after fd close ------- + pthread_cancel(threadId); + rc = close(p_ctx->fd); + CHECK_RC(rc, "close(fd) failed"); + debug("%d: close(p_ctx->fd) i.e. close(%d): done !\n", pid, p_ctx->fd); + } + + // Reset rc if we reach this point. 
+ rc = 0; + g_error=0; + debug("rc %d, g_error =%d, errno =%d\n" , rc , g_error, errno ); + //open CAPI Flash disk device again for clean using ioctls + p_ctx->fd = open_dev(fvt_dev, O_RDWR); + debug("After disk reopned -- rc %d, g_error =%d, errno =%d\n" , rc , g_error, errno ); + if (p_ctx->fd < 0) CHECK_RC(1, "capi device open() failed"); + debug("%d: disk re-opened, new fd: %d\n", pid, p_ctx->fd); + cleanup(p_ctx, -1); + } + } + + return 0; +} + +// 7.1.217 : create two context for same flash disks shared between 2 adapters +int test_cfdisk_ctxs_diff_devno() +{ + int nDisk; + int rc=0; + struct flash_disk cfDisk[2]; + struct ctx myctx1, myctx2; + struct ctx *p_ctx1 = &myctx1; + struct ctx *p_ctx2 = &myctx2; + + pid = getpid(); + + nDisk = get_flash_disks(cfDisk, FDISKS_DIFF_ADPTR); + if (nDisk < 2) + { + fprintf(stderr,"Failed to find 2 flash disks from diff adapter..\n"); + return -1; + } + // On AIX both dev will have same name + // On Linux both dev will have diff name + + rc = ctx_init2(p_ctx1, cfDisk[0].dev, DK_AF_ASSIGN_AFU, cfDisk[0].devno[0]); + CHECK_RC(rc, "p_ctx1 Context init failed"); + + rc = ctx_init2(p_ctx2, cfDisk[1].dev, DK_AF_ASSIGN_AFU, cfDisk[1].devno[0]); + CHECK_RC(rc, "p_ctx2 Context init failed"); + + rc = create_resource(p_ctx1, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT for p_ctx1 failed"); + + rc = create_resource(p_ctx2, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT for p_ctx2 failed"); + + cleanup(p_ctx1, -1); + cleanup(p_ctx2, -1); + + return 0; +} + +// 7.1.218 : Pass context token to different process & do REUSE +int test_attach_reuse_diff_proc() +{ + int rc=0; + int nDisk; + struct flash_disk cfDisk[2]; + int cstat; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + + pid = getpid(); + + nDisk = get_flash_disks(cfDisk, FDISKS_SAME_ADPTR); + if (nDisk < 2) + { + fprintf(stderr,"Failed to find 2 flash disks from same adapter..\n"); + return -1; + } + + // sanity check for AIX! 
+ //#ifdef _AIX + // if ( cfDisk[0].devno != cfDisk[1].devno ) return 1; + //#endif + + rc = ctx_init2(p_ctx, cfDisk[0].dev, DK_AF_ASSIGN_AFU, cfDisk[0].devno[0]); + CHECK_RC(rc, "p_ctx Context init failed"); + + rc = fork(); + if ( rc == -1 ) CHECK_RC(1, "fork() failed"); + + // child process + if ( rc == 0 ) + { + pid = getpid(); + +#ifdef _AIX + rc = ctx_init2(p_ctx, cfDisk[1].dev, + DK_AF_REUSE_CTX, cfDisk[0].devno[0]); + if ( 0 == rc ) + CHECK_RC_EXIT(1, "Context init with DK_AF_REUSE_CTX did not fail"); +#else + rc = ctx_init2(p_ctx, cfDisk[1].dev, + DK_CXLFLASH_ATTACH_REUSE_CONTEXT, cfDisk[0].devno[0]); + if ( 0 == rc ) + CHECK_RC_EXIT(1, "Context init with DK_CXLFLASH_ATTACH_REUSE_CONTEXT did not fail"); +#endif + + exit(0); + } + else + { + // Probe child's exit status. + if ( wait(&cstat) == -1 ) + CHECK_RC(1, "Failed while wait() for child"); + + // We expect child to exit itself + if (WIFEXITED(cstat)) + { + // We expect child to exit with rc 0 only ! + if ( WEXITSTATUS(cstat) != 0 ) rc=1; + else rc=0; + } + } + + cleanup(p_ctx, -1); + + return rc; +} + +// 7.1.219 : Pass context token to different process & do detach/release. +int test_detach_diff_proc() +{ + int rc=0; + int cstat; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + + pid = getpid(); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + rc = fork(); + if ( rc == -1 ) CHECK_RC(1, "fork() failed"); + + // child process + if ( rc == 0 ) + { + pid = getpid(); + + rc = ctx_close(p_ctx); + if ( 22 != rc ) + CHECK_RC_EXIT(1, "Context detach did not fail"); + + exit(0); + } + else + { + // Probe child's exit status. + if ( wait(&cstat) == -1 ) + CHECK_RC(1, "Failed while wait() for child"); + + // We expect child to exit itself + if (WIFEXITED(cstat)) + { + // We expect child to exit with rc 0 only ! 
+ if ( WEXITSTATUS(cstat) != 0 ) rc=1; + else rc=0; + } + } + + rc |= ctx_close(p_ctx); + + return rc; +} diff --git a/src/cflash/test/cflash_test_excp.c b/src/cflash/test/cflash_test_excp.c new file mode 100644 index 00000000..b388f91a --- /dev/null +++ b/src/cflash/test/cflash_test_excp.c @@ -0,0 +1,782 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_excp.c$ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include +#ifdef _AIX +#include +#else +#include +#include +#endif +#include +#include + +extern int g_error; +extern char cflash_path[]; +extern pid_t pid; + +#define POLL_TIME_OUT -1 + + +#ifdef _MOREDEBUG +#define MOREDEBUG_FLAG 1 +#else +#define MOREDEBUG_FLAG 0 +#endif + +#ifdef _AIX + +#define POLL_FD_INIT(pctx,myFds) \ +struct pollfd (myFds)[2] = { { (pctx)->adap_fd, POLLPRI, 0},\ + { (pctx)->fd, POLLPRI, 0} } +#define POLL_CALL(myFds) (poll(&((myFds)[0]),2,POLL_TIME_OUT)) + + +#define REVENTS_COMP(myFds, DISK_INDX, ADAP_INDX) \ + POLLPRI == myFds[DISK_INDX].revents || \ + POLLPRI == myFds[ADAP_INDX].revents +#else + +#define POLL_FD_INIT(pctx,myFds) \ +struct pollfd (myFds)[2] = { { (pctx)->fd, POLLPRI, 0} } + +#define POLL_CALL(myFds) (poll(&((myFds)[0]),1,POLL_TIME_OUT)) + +#define REVENTS_COMP(myFds, DISK_INDX, ADAP_INDX) \ + POLLPRI == myFds[DISK_INDX].revents +#endif + +#define DEBUG_MORE(tell_me, ...) \ +{ \ + if(MOREDEBUG_FLAG == 1)\ + printf("---------:%s:%s:%d--" tell_me \ + "---------\n",__FILE__,__FUNCTION__,__LINE__,## __VA_ARGS__); \ +} + + +#define MAX_LENGTH 1024 +#define MSG_LENGTH 512 + +struct exceptionPacket +{ + pthread_mutex_t mutex; + pthread_cond_t cv; + struct ctx * excpCtx; +}; + + +static int exceptionDoneFlag = 0; + +void * do_poll_for_event( void * args) +{ + + debug("------ call do_poll_for_event() -----------\n"); + + struct exceptionPacket * excpPrt = args; + POLL_FD_INIT(excpPrt->excpCtx,myFds); + + DEBUG_MORE("Start do_poll_for_event thread"); + while (1) + { + /*assumption -1 timeout for infinite poll*/ + if (POLL_CALL(myFds) < 0) + { + DEBUG_MORE("poll failed"); + perror("poll failed"); + exceptionDoneFlag=1; + return NULL; + } + + if ( REVENTS_COMP(myFds, CFLASH_DISK_POLL_INDX, CFLASH_ADAP_POLL_INDX)) + { + pthread_mutex_lock( &excpPrt->mutex ); + exceptionDoneFlag=1; + if (POLLPRI == myFds[CFLASH_DISK_POLL_INDX].revents) + debug("Disk POLLPRI ....\n"); + if 
(POLLPRI == myFds[CFLASH_ADAP_POLL_INDX].revents) + debug("Adap POLLPRI ....\n"); + DEBUG_MORE("lock acuired and flag set"); + pthread_cond_signal( &excpPrt->cv); + pthread_mutex_unlock(&excpPrt->mutex); + + } + else + { + debug(" ..... its not POLLPRI or might be timed out...... \n"); + debug("..... disk.revents = %d ... \n",myFds[CFLASH_DISK_POLL_INDX].revents); + debug("..... adap.revents = %d ... \n",myFds[CFLASH_ADAP_POLL_INDX].revents); + if ( POLL_TIME_OUT > 0 ) + debug("..... timed out in %d sec ..... \n",POLL_TIME_OUT /1000 ); + pthread_mutex_lock( &excpPrt->mutex ); + exceptionDoneFlag=1; + g_error=-1; + pthread_cond_signal( &excpPrt->cv); + pthread_mutex_unlock(&excpPrt->mutex); + } + + + + } +} + +int test_dcqexp_ioctl(int cnum) +{ + + DEBUG_MORE("inside test_dcqexp_ioctl"); + int rc=0; +#ifdef _AIX + __u64 stride=0x10; + + struct ctx u_ctx; + struct exceptionPacket excpVar; + struct exceptionPacket * excpPrt =&excpVar; + + pthread_t thread_intr; + uint64_t verify_exception; + + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_backup = &u_ctx; + struct dk_capi_exceptions exceptions; + char errorMsg[MSG_LENGTH]; + pthread_t thread; + pthread_mutexattr_t mattrVar; + pthread_condattr_t cattrVar; + + pthread_mutexattr_init(&mattrVar); + pthread_condattr_init(&cattrVar); + + pthread_mutex_init(&excpPrt->mutex , &mattrVar); + pthread_cond_init(&excpPrt->cv , &cattrVar); + + __u64 chunk =0; + __u64 nlba =0; + + memset(p_ctx, 0, sizeof(struct ctx)); + memset(excpPrt, 0, sizeof(struct exceptionPacket)); + memset(errorMsg, 0, MSG_LENGTH+1); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + pthread_create(&thread_intr, NULL, ctx_rrq_rx, p_ctx); + + excpPrt->excpCtx = p_ctx ; + + /* Started do_poll_for_event thread until desired exception generated*/ + pthread_create(&thread,NULL,do_poll_for_event, excpPrt); + + sleep(5); // its rare but still avoiding race condition + + switch (cnum) + { + case EXCP_VLUN_DISABLE: // 7.1.230 + + nlba = 
p_ctx->last_phys_lba + 1; + + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + + debug(" ----------- Please unmap disk from the host now -------\n"); + + *p_ctx_backup=*p_ctx; + + debug(" ------ Let the I/O start and then do UA stuff at texan--------\n"); + + do + { + rc = do_io(p_ctx, stride); + if (rc !=0) + { + debug("rc=%d,IO failed..... bye from loop\n",rc); + break; + } + else + { + debug("rc=%d,IO succeeded \n",rc); + } + *p_ctx=*p_ctx_backup; + + }while ( rc ==0); + + + g_error=0; + + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "dk_capi_verify FAILED\n"); + + pthread_mutex_lock( &excpPrt->mutex ); + + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + + + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + verify_exception=DK_CE_PATH_LOST|DK_CE_VERIFY_IN_PROGRESS; + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + debug("%d: expected : 0x%llx and recieved : 0x%llx\n", pid, verify_exception, p_ctx->exceptions); + strcpy(errorMsg, "Fail:EXCP_VLUN_DISABLE:bad excp"); + goto xerror; + } + + break ; + + case EXCP_PLUN_DISABLE: // 7.1.230 + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + debug(" ----------- Please unmap disk from the host now -------\n"); + + *p_ctx_backup=*p_ctx; + + debug(" ------ Let the I/O start and then do UA stuff at texan--------\n"); + + do + { + rc = do_io(p_ctx, stride); + if (rc !=0) + { + debug("rc=%d,IO failed..... 
bye from loop\n",rc); + break; + } + else + { + debug("rc=%d,IO succeeded \n",rc); + } + *p_ctx=*p_ctx_backup; + + }while ( rc ==0); + + + g_error=0; + + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "dk_capi_verify FAILED\n"); + + pthread_mutex_lock( &excpPrt->mutex ); + + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + verify_exception=DK_CE_PATH_LOST|DK_CE_VERIFY_IN_PROGRESS; + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_VLUN_DISABLE:bad excp"); + goto xerror; + } + + break ; + case EXCP_VLUN_VERIFY: // 7.1.232 //7.1.225 + + chunk = 0x10; + rc = create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + nlba = chunk * (p_ctx->chunk_size); + rc = vlun_resize(p_ctx, nlba); + + //TBD input need to check once + // Heading for verification using ioctl + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + //strcpy(p_ctx->sense_data,"TBD"); + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "failed : ioctl_dk_capi_verify()"); + pthread_mutex_lock( &excpPrt->mutex ); + + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + // reset the flag + exceptionDoneFlag=0; + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + rc = ioctl_dk_capi_query_exception(p_ctx); + + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED; + + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_VLUN_VERIFY:bad excp"); + goto xerror; + + } + + + break; + + case EXCP_PLUN_VERIFY : // 7.1.232 // 7.1.225 + rc=ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "PLUN 
resource failed \n"); + + //TBD input need to check once + // Heading for verification using ioctl + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "failed : ioctl_dk_capi_verify()"); + + pthread_mutex_lock( &excpPrt->mutex ); + + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + // reset the flag + exceptionDoneFlag=0; + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED; + + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_VLUN_VERIFY:bad excp"); + goto xerror; + + } + + break ; + + case EXCP_VLUN_INCREASE : //7.1.231 + rc = create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + // Just increasing by 10 chunk + nlba = 10 * (p_ctx->chunk_size); + + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failedi\n"); + + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + if ( p_ctx->exceptions != DK_CE_SIZE_CHANGE ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_PLUN_VERIFY:bad excp"); + goto xerror; + } + + + break; + + case EXCP_VLUN_REDUCE : //7.1.233 + // taking all the vlun + nlba = p_ctx->last_phys_lba + 1; + + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + + debug("---------- Waiting at poll().. 
Please decrease Disk size in texan box -----\n"); + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + if ( p_ctx->exceptions != DK_CE_VLUN_TRUNCATED) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_PLUN_VERIFY:bad excp"); + goto xerror; + } + + break; + + case EXCP_VLUN_UATTENTION : // going to manual 7.1.234 + + nlba = p_ctx->last_phys_lba + 1; + + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + + CHECK_RC(rc, "Create resource failed \n"); + p_ctx_backup=p_ctx; + + + debug(" ------ Let the I/O start and then do UA stuff at texan--------\n"); + + do + { + rc = do_io(p_ctx, stride); + if (rc !=0) + { + debug("rc=%d,IO failed..... bye from loop\n",rc); + break; + } + else + { + debug("rc=%d,IO succeeded \n",rc); + } + p_ctx=p_ctx_backup; + + }while ( rc ==0); + + g_error=0; + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + rc = ioctl_dk_capi_verify(p_ctx); + debug("rc = %d , g_error =%d\n",rc,g_error); + CHECK_RC(rc, "dk_capi_verify FAILED\n"); + + debug(" -------- I am waiting at poll() for POLLPRI ---------- \n"); + + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + pthread_mutex_unlock(&excpPrt->mutex); + + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + if ( p_ctx->exceptions != (DK_CE_UA_RECEIVED|DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE) ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_VLUN_ATTENTION:bad excp"); + goto xerror; + } + + break; + + case EXCP_PLUN_UATTENTION : + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, 
"create LUN_DIRECT failed"); + p_ctx_backup=p_ctx; + + + debug(" ------ Let the I/O start and then do UA stuff at texan--------\n"); + + do + { + rc = do_io(p_ctx, stride); + if (rc !=0) + { + debug("rc=%d,IO failed..... bye from loop\n",rc); + break; + } + else + { + debug("rc=%d,IO succeeded \n",rc); + } + p_ctx=p_ctx_backup; + + }while ( rc ==0); + + g_error=0; + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; + rc = ioctl_dk_capi_verify(p_ctx); + debug("rc = %d , g_error =%d\n",rc,g_error); + CHECK_RC(rc, "dk_capi_verify FAILED\n"); + + debug(" -------- I am waiting at poll() for POLLPRI ---------- \n"); + + + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + if ( p_ctx->exceptions != (DK_CE_UA_RECEIVED|DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE) ) + { + rc=255; /* Non zero rc value */ + strcpy(errorMsg, "Fail:EXCP_PLUN_UATTENTION:bad excp"); + goto xerror; + } + + break; + + case EXCP_EEH_SIMULATION : // 7.1.229 + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "Create resource failed \n"); + + rc = do_eeh(p_ctx); + CHECK_RC(rc, "do_eeh() failed"); + + p_ctx->flags = DK_VF_HC_TUR; + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "failed : ioctl_dk_capi_verify()"); + + + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + pthread_mutex_unlock(&excpPrt->mutex); + + + p_ctx->flags=DK_QEF_ADAPTER; + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + verify_exception=DK_CE_ADAPTER_EXCEPTION|DK_CE_VERIFY_IN_PROGRESS | DK_CE_VERIFY_SUCCEEDED ; + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + 
strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp"); + goto xerror; + } + + // EEH code is still not tested + + if ( p_ctx->adap_except_count != 0 ) + { + rc=255; // Non zero rc value + strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp"); + goto xerror; + } + + if ( p_ctx->adap_except_type != DK_AET_EEH_EVENT|DK_AET_BAD_PF|DK_AET_AFU_ERROR ) + { + rc=255; + strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp"); + goto xerror; + } + + + break; + case EXCP_DISK_INCREASE : //7.1.226 + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + debug("---------- Please increase Disk size in texan box -----\n"); + debug("---------- You have 15 secs to do that -----\n"); + sleep(15); + debug("---------- Sleep over. Moving on... -----\n"); + + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = 0; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "dk_capi_verify FAILED\n"); + + pthread_mutex_lock( &excpPrt->mutex ); + while ( exceptionDoneFlag!=1) + { + pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex); + } + + pthread_mutex_unlock(&excpPrt->mutex); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + rc = ioctl_dk_capi_query_exception(p_ctx); + CHECK_RC(rc, "dk_capi_query FAILED\n"); + + verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE; + if ( p_ctx->exceptions != verify_exception ) + { + rc=255; /* Non zero rc value */ + debug("%d: expected : 0x%llx and recieved : 0x%llx\n", pid, verify_exception, p_ctx->exceptions); + strcpy(errorMsg, "Fail:EXCP_DISK_INCREASE:bad excp"); + goto xerror; + } + + break; + + default: + rc = -1; + break; + } + +xerror: + + pthread_mutexattr_destroy(&mattrVar); + pthread_condattr_destroy(&cattrVar); + pthread_cancel(thread); + pthread_cancel(thread_intr); + close_res(p_ctx); + ctx_close(p_ctx); + CHECK_RC(rc, errorMsg); +#endif + return rc; + +} + +int test_dcqexp_invalid(int cnum) +{ + int imFailed=0; +#ifdef _AIX + int rc=0; + struct ctx u_ctx; + struct ctx *p_ctx = 
&u_ctx; + char errorMsg[MSG_LENGTH]; + struct dk_capi_exceptions exceptions; + + __u64 chunk = 10; + __u64 nlba = chunk * NUM_BLOCKS; + + memset(p_ctx, 0, sizeof(struct ctx)); + memset(errorMsg, 0, MSG_LENGTH+1); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + switch (cnum) + { + case EXCP_INVAL_DEVNO : + + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + + exceptions.version = p_ctx->version; + exceptions.ctx_token = p_ctx->ctx_hndl; + exceptions.rsrc_handle = p_ctx->res_hndl; + exceptions.flags =p_ctx->flags; + + exceptions.devno=0x0000FFFF ; // invalid dev no + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions); + + if (rc == 0 ) + { + imFailed = 1 ; + strcpy(errorMsg, "Fail:EXCP_INVAL_DEVNO "); + goto xerror ; + } + + break; + + case EXCP_INVAL_CTXTKN : + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "Create resource failed \n"); + + p_ctx->flags=DK_QEF_ALL_RESOURCE; + exceptions.version = p_ctx->version; + exceptions.ctx_token = 0x0000FFFF; // invalid context no + exceptions.rsrc_handle = p_ctx->res_hndl; + exceptions.flags =p_ctx->flags; + + exceptions.devno=p_ctx->devno ; + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions); + + if (rc == 0 ) + { + imFailed = 1 ; + strcpy(errorMsg, "Fail:EXCP_INVAL_CTXTKN "); + goto xerror ; + } + + + break; + + case EXCP_INVAL_RSCHNDL : + + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "Create resource failed \n"); + + exceptions.version = p_ctx->version; + exceptions.ctx_token = p_ctx->ctx_hndl; + exceptions.rsrc_handle = p_ctx->res_hndl; // this no more valid + exceptions.flags =p_ctx->flags; + exceptions.devno=p_ctx->devno ; + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions); + + if (rc == 0 ) + { + imFailed = 1 ; + strcpy(errorMsg, "Fail:EXCP_INVAL_CTXTKN "); + goto xerror ; + } + + break ; + + 
default: + rc = -1; + break; + } + + + +xerror : + close_res(p_ctx); + ctx_close(p_ctx); + CHECK_RC(imFailed , errorMsg); +#endif + return imFailed; + +} diff --git a/src/cflash/test/cflash_test_io.c b/src/cflash/test/cflash_test_io.c new file mode 100644 index 00000000..a252256c --- /dev/null +++ b/src/cflash/test/cflash_test_io.c @@ -0,0 +1,1215 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_io.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" +#include +#include +// We wish to use pthread_tryjoin_np() in linux +#ifndef _AIX +#define __USE_GNU +#endif +#include + +extern char master_dev_path[MC_PATHLEN]; +extern char cflash_path[MC_PATHLEN]; + +extern pid_t pid; +extern int dont_displa_err_msg; + +static pthread_mutex_t mutex; +static pthread_cond_t condv1; +static pthread_cond_t condv2; + +extern int g_error; +static __u32 W_READ = 0; +static __u32 W_WRITE = 0; +static __u32 W_PLAY = 0; +static __u32 W_RW = 0; +static __u32 hitZero = 0; +static int count=50; + +//global allocation of read/write buffer +static struct ctx gs_ctx; + +int N_done; +__u64 clba; + +void *write_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 nlba; + __u64 stride; + __u64 st_lba; + __u64 chunk; + int rc=0; + + pid = getpid(); + nlba = p_ctx->lun_size; + actual_size = (p_ctx->lun_size/p_ctx->chunk_size); + stride = p_ctx->chunk_size; + st_lba = nlba/2; + while (count > 0) + { + if (count%2) + { + chunk = actual_size - 1; + } + else + { + chunk =actual_size +1; + } + + pthread_mutex_lock(&mutex); + + debug("%d: vLUN chunk 0x%"PRIX64", nlba 0x%"PRIX64"\n",pid,actual_size,nlba); + debug("%d: write st_lba= 0x%"PRIX64",nlba=0x%"PRIX64"\n",pid,st_lba,nlba); + rc |= send_write(p_ctx, st_lba, stride, pid); + clba = st_lba; + W_READ = 1; + count--; + rc |= mc_size1(p_ctx, chunk, &actual_size); + pthread_cond_signal(&condv1); + + while ( W_WRITE != 1) + { + pthread_cond_wait(&condv2,&mutex); + } + W_WRITE = 0; + + pthread_mutex_unlock(&mutex); + nlba = actual_size * p_ctx->chunk_size; + st_lba = nlba/2; + debug("%d: More %d Loop remaining............\n\n",pid,count); + } + if (rc != 0) + { + g_error = rc; + } + return 0; +} + + +void *play_size(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 nlba; + __u64 st_lba; + __u64 w_chunk; + int rc=0; + + pid = getpid(); + + w_chunk = 
(p_ctx->last_phys_lba+1)/(2*p_ctx->chunk_size) - 2; + + while (count > 0) + { + pthread_mutex_lock(&mutex); + while (W_PLAY != 1) + { + pthread_cond_wait(&condv2,&mutex); + } + W_PLAY = 0; + + mc_size1(p_ctx, w_chunk, &actual_size); + if (actual_size == 0) + { + hitZero = 1; + w_chunk = (p_ctx->last_phys_lba+1)/(2*p_ctx->chunk_size) - 2; + rc |= mc_size1(p_ctx, w_chunk, &actual_size); + debug("%d: chunk size reduced to 0, now increase to 0X%"PRIX64"\n", pid, actual_size); + } + + debug("%d: mc_size done 0X%"PRIX64"\n",pid, actual_size); + + nlba = p_ctx->lun_size; + + if (count % 2) + { + st_lba = nlba/2; + w_chunk = actual_size/2+1; + } + else + { + st_lba = nlba -1; + w_chunk = actual_size-1; + } + clba = st_lba; + count--; + debug("%d: clba 0X%"PRIX64": lba range:0X%"PRIX64"\n",pid, clba, nlba-1); + + W_RW = 1; + pthread_cond_signal(&condv1); + pthread_mutex_unlock(&mutex); + + debug("%d: chunk 0x%"PRIX64", nlba 0x%"PRIX64", st_lba 0x%"PRIX64"\n",pid, actual_size,nlba,st_lba); + debug("%d: More %d Loop remaining ............\n",pid, count); + + if (rc != 0) + { + g_error = rc; + fprintf(stderr,"%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + pthread_exit(0); + } + } + return 0; +} + + +void *rw_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + int rc = 0; + __u64 llba; + + pid = getpid(); + + while (count > 0) + { + pthread_mutex_lock(&mutex); + + llba = clba; + if ( hitZero !=1 ) + { + debug("\n%d: ------------------------------------------------\n",pid); + debug("%d: Writing @lba 0X%"PRIX64"",pid,llba); + debug(", Max lba range is upto 0X%"PRIX64"\n", + p_ctx->lun_size-1 ); + rc |= send_single_write(p_ctx, llba, pid); + } + + W_PLAY = 1; + pthread_cond_signal(&condv2); + pthread_mutex_unlock(&mutex); + + if (rc && (llba >= p_ctx->lun_size-1)) + { + debug("%d: Resetting RC, write(0X%"PRIX64"), max range(0X%"PRIX64")\n", + pid, llba, p_ctx->lun_size-1); + rc = 0; + } + if (rc != 0) + { + g_error = rc; + fprintf(stderr, "%d : failed 
here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + + pthread_mutex_lock(&mutex); + while ( W_RW != 1 ) + { + pthread_cond_wait(&condv1,&mutex); + } + W_RW = 0; + + if ( hitZero !=1 ) + { + debug("%d: Reading from 0X%"PRIX64"",pid, llba); + debug(", Max lba range is upto 0X%"PRIX64"\n", + p_ctx->lun_size-1); + debug("%d: ------------------------------------------------\n",pid); + rc |= send_single_read(p_ctx, llba); + } + + if (rc && llba >= p_ctx->lun_size-1) + { + debug("%d: Resetting RC, read (0X%"PRIX64"), max range(0X%"PRIX64")\n", + pid, llba, p_ctx->lun_size-1); + rc = 0; + } + else + { + if ( hitZero !=1 ) rc |= rw_cmp_single_buf(p_ctx, llba); + } + + hitZero = 0; // Reset at end of read cycle. + W_PLAY = 1; + pthread_cond_signal(&condv2); + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + g_error = rc; + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + } + return 0; +} + +void *read_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 stride; + int rc=0; + + stride = p_ctx->chunk_size; + while (count > 0) + { + pthread_mutex_lock(&mutex); + while ( W_READ != 1) + { + pthread_cond_wait(&condv1,&mutex); + } + W_READ = 0; + debug("%d: read clba= 0x%"PRIX64"\n",pid,clba); + rc |= send_read(p_ctx, clba, stride); + rc |= rw_cmp_buf(p_ctx, clba); + + W_WRITE = 1; + pthread_cond_signal(&condv2); + pthread_mutex_unlock(&mutex); + if (rc != 0) + { + g_error = rc; + return NULL; + } + } + return 0; +} + +int test_onectx_twothrd(int cmd) +{ + int rc=0; + struct timespec ts; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + char *fn1=NULL; + char *fn2=NULL; + pthread_t thread; + pthread_t rhthread[2]; + + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + __u64 chunks; + __u64 actual_size; + + pid=getpid(); + + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + pthread_condattr_init(&cattr); + pthread_cond_init(&condv1, &cattr); + 
pthread_cond_init(&condv2, &cattr); + if (test_init(p_ctx) != 0) + { + return -1; + } + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "create_res failed"); + + chunks = (p_ctx->last_phys_lba+1)/(p_ctx->chunk_size) - 2; + + rc = mc_size1(p_ctx,chunks, &actual_size); + CHECK_RC(rc, "mc_size1"); + + if (1 == cmd) + { + //a thread write_io & another read_io + pthread_create(&rhthread[0], NULL, write_io, p_ctx); + pthread_create(&rhthread[1], NULL, read_io, p_ctx); + fn1="write_io"; + fn2="read_io"; + } + else if (2 == cmd) + { + //a thread rw & another mc size + clba = (actual_size * p_ctx->chunk_size)/2; + pthread_create(&rhthread[0], NULL, rw_io, p_ctx); + pthread_create(&rhthread[1], NULL, play_size, p_ctx); + fn1="rw_io"; + fn2="play_size"; + } + //pthread_create(&rhthread[1], NULL, inc_dec_size, p_ctx); + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + +#ifndef _AIX + if (clock_gettime(CLOCK_REALTIME, &ts) == -1) + { + CHECK_RC(1, "clock_gettime() failed"); + } + + ts.tv_sec += 10; + if ( ETIMEDOUT == pthread_timedjoin_np(rhthread[0], NULL, &ts)) + debug("%d: thread fn. %s() timed out.. Exceeded 10 secs, so terminated !\n", pid, fn1); + + ts.tv_sec += 2; + if ( ETIMEDOUT == pthread_timedjoin_np(rhthread[1], NULL, &ts)) + debug("%d: thread fn. %s() timed out.. 
Exceeded 12 secs, so terminated !\n", pid, fn2); +#else + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); +#endif + + pthread_cancel(thread); + + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + g_error = 0; + return rc; +} + +int test_two_ctx_two_thrd(int cmd) +{ + int i, rc=0; + char *fn1=NULL; + char *fn2=NULL; + struct timespec ts; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread[2]; + + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + __u64 chunks; + __u64 actual_size; + + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + pthread_condattr_init(&cattr); + pthread_cond_init(&condv1, &cattr); + pthread_cond_init(&condv2, &cattr); + for (i = 0;i < 2;i++) + { + if (fork() == 0) + { + //child process + + pid = getpid(); + + if (test_init(p_ctx) != 0) + { + exit(-1); + } + chunks = (p_ctx->last_phys_lba+1)/(p_ctx->chunk_size*2) - 2; + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_size1(p_ctx,chunks, &actual_size); + CHECK_RC(rc, "mc_size1"); + + if (1 == cmd) + { + //a thread write_io & another read_io + pthread_create(&rhthread[0], NULL, write_io, p_ctx); + pthread_create(&rhthread[1], NULL, read_io, p_ctx); + fn1="write_io"; + fn2="read_io"; + } + else if (2 == cmd) + { + //a thread rw & another mc size + clba = (actual_size * p_ctx->chunk_size)/2; + pthread_create(&rhthread[0], NULL, rw_io, p_ctx); + pthread_create(&rhthread[1], NULL, play_size, p_ctx); + fn1="rw_io"; + fn2="play_size"; + } + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + +#ifndef _AIX + if (clock_gettime(CLOCK_REALTIME, &ts) == -1) + { + CHECK_RC(1, "clock_gettime() failed"); + } + + ts.tv_sec += 10; + if ( ETIMEDOUT == pthread_timedjoin_np(rhthread[0], NULL, &ts)) + debug("%d: thread fn. 
%s() timed out.. Exceeded 10 secs, so terminated !\n", pid, fn1); + + ts.tv_sec += 2; + if ( ETIMEDOUT == pthread_timedjoin_np(rhthread[1], NULL, &ts)) + debug("%d: thread fn. %s() timed out.. Exceeded 12 secs, so terminated !\n", pid, fn2); +#else + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); +#endif + pthread_cancel(thread); + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + debug("%d: I am exiting from here .....rc = %d\n", pid, rc); + exit(rc); + } + } + while ((pid = waitpid(-1,&rc,0))) + { + if (pid == -1) + { + break; + } + + rc = WEXITSTATUS(rc); + debug("%d: wait is over for me............rc = %d\n", pid, rc); + if (rc != 0) + { + g_error = -1; + } + } + rc = g_error; + g_error=0; + return rc; +} + +int test_lun_discovery(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + int fc_port =0; + __u64 *lun_ids; + __u32 n_luns; + int port=2; + int i; + __u64 lun_cap,blk_len; + + if (test_init(p_ctx) != 0) + { + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + // Send Report LUNs to get the list of LUNs and LUN ID + for (i =1; i <= port;i++) + { + rc = send_report_luns(p_ctx, i, &lun_ids,&n_luns); + if (rc) + { + fprintf(stderr, "Report LUNs failed on FC Port %d\n", i); + } + else + { + fc_port = i; + break; + } + } + if (rc || n_luns == 0) + { + ctx_close(p_ctx); + return rc; + } + debug("Report Lun success, num luns= 0x%x\n",n_luns); + for (i = 0; i< n_luns;i++) + { + rc = send_read_capacity(p_ctx,fc_port,lun_ids[i],&lun_cap, &blk_len); + if (rc != 0) + { + fprintf(stderr,"Read capacity failed,lun id =0x%"PRIX64", rc = %d\n",lun_ids[i],rc); + break; + } + debug("LUN id = 0x%"PRIX64" Capacity = 0x%"PRIX64" Blk len = 0x%"PRIX64"\n", + lun_ids[i],lun_cap,blk_len); + } + free(lun_ids); + pthread_cancel(thread); + ctx_close(p_ctx); + return rc; + +} + +int test_vdisk_io() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = 
&myctx; + pthread_t thread; + + __u64 chunk = 256; + __u64 nlba; + __u64 actual_size; + __u64 st_lba =0; + __u64 stride; + mc_stat_t l_mc_stat; + + + if (mc_init() != 0) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if (ctx_init(p_ctx) != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + /*rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering(%s) ctx_hndl %d, rc %d\n", + master_dev_path, p_ctx->ctx_hndl, rc); + return -1; + }*/ + + rc = create_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size1(p_ctx, chunk, &actual_size); + l_mc_stat.size = actual_size; + if (rc != 0 || actual_size < 1) //might be chunk want to allocate whole lun + { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat1"); + + pid = getpid(); + + stride = (1 << l_mc_stat.nmask); + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + debug("%d: st_lba = 0X0 and range lba = 0X%"PRIX64"\n", pid, nlba-1); + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) + { + send_write(p_ctx, st_lba, stride, pid); + send_read(p_ctx, st_lba, stride); + rc = rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + fprintf(stderr,"buf cmp failed for vlba 0x%"PRIX64",rc =%d\n", + st_lba,rc); + break; + } + } + pthread_cancel(thread); + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + return rc; +} + +void *only_rw_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 stride = 0x1000; //4K + int rc; + __u64 nlba = clba; + __u64 st_lba; + + pid = getpid(); + while (1) + { + for (st_lba =0; st_lba < nlba; st_lba += (NUM_CMDS * stride)) + { + //rc = send_single_write(p_ctx,st_lba,pid); + rc = 
send_write(p_ctx,st_lba,stride,pid); + if (rc != 0) + { + g_error = rc; + if (!dont_displa_err_msg) + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + //rc = send_single_read(p_ctx, st_lba); + rc = send_read(p_ctx, st_lba, stride); + if (rc != 0) + { + g_error = rc; + if (!dont_displa_err_msg) + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + //rc = rw_cmp_single_buf(p_ctx, st_lba); + rc = rw_cmp_buf(p_ctx, st_lba); + if (rc != 0) + { + g_error = rc; + if (!dont_displa_err_msg) + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + } + if (N_done) + { + break; + } + } + return 0; +} + +void *cancel_thread(void *arg) +{ + sleep(2); + pthread_t *thread=arg; + pthread_cancel(*thread); + return NULL; +} +int test_rw_close_hndl(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread; +#ifdef _AIX + pthread_t thread1; +#endif + mc_stat_t l_mc_stat; + __u64 chunks=128; + __u64 actual_size; + + pid = getpid(); + signal(SIGSEGV, sig_handle); + signal(SIGABRT, sig_handle); + if (test_init(p_ctx) != 0) + { + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "create_res"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size1"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat1"); + + clba = actual_size * (1 << l_mc_stat.nmask); + pthread_create(&rhthread, NULL, only_rw_io, p_ctx); + sleep(1); + + if (1 == cmd) + { + //while IO close RHT + debug("%d:closing res handler:0X%"PRIX64"\n",pid,p_ctx->rsrc_handle); + close_res(p_ctx); + } + else if (2 == cmd) + { + //close disk only +#ifdef _AIX //close sys call wil wait until poll thred not killed + pthread_create(&thread1,NULL,cancel_thread,&rhthread); +#endif + debug("%d:closing disk only.... 
\n",pid); + close(p_ctx->fd); + } + else if (3 == cmd) + { + //While IO ctx close + debug("%d:detach context:0X%"PRIX64"\n",pid,p_ctx->context_id); + ioctl_dk_capi_detach(p_ctx); + } + else if (4 == cmd) + { + //only for Linux + debug("%d:do unmap mmio during IO....\n",pid); + munmap((void *)p_ctx->p_host_map, p_ctx->mmio_size); + } + + N_done = 1; //tell pthread that -ve test performed + debug("%d:sleeping for 2 secs before context detach & disk close\n",pid); + sleep(2); + pthread_cancel(rhthread); + //pthread_join(rhthread, NULL); + N_done = 0; + + pthread_cancel(thread); + // do proper closing + if (cmd == 1) + { + ctx_close(p_ctx); + } + mc_term(); + rc = g_error; + g_error = 0; + return rc; +} +int test_good_ctx_err_ctx(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread; + mc_stat_t l_mc_stat; + + __u64 chunks=128; + __u64 actual_size; + + pid_t mypid = fork(); + //let both process do basic things + if (test_init(p_ctx) != 0) + { + exit(-1); + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + if (rc != 0) + { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + clba = actual_size * (1 << l_mc_stat.nmask); + pthread_create(&rhthread, NULL, only_rw_io, p_ctx); + + if (mypid == 0) + { + //child process do err ctx + debug("child pid is :%d\n",getpid()); + sleep(1); //let thrd do some io + printf("error ctx pid is %d\n",getpid()); + if (1 == cmd) + { + //while IO close RHT + close_res(p_ctx); + /*}else if(2 == cmd) { //while IO unreg MC HNDL + mc_unregister(p_ctx->mc_hndl);*/ + } + else if (3 == cmd) + { + //While IO ctx close + //munmap((void*)p_ctx->p_host_map, 0x10000); + close(p_ctx->fd); + //ctx_close(p_ctx); + } + sleep(1); + N_done = 1; //tell pthread that -ve test 
performed + pthread_cancel(rhthread); + //pthread_join(rhthread, NULL); + N_done = 0; + debug("%d: exiting with rc = %d\n", pid, g_error); + pthread_cancel(thread); + exit(g_error); + } + else + { + debug("Good ctx pid is : %d\n",getpid()); + sleep(2); //main process sleep but thrd keep running + N_done = 1; // + pthread_join(rhthread, NULL); + N_done = 0; + + // do proper closing + pthread_cancel(thread); + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + debug("%d: waiting for child process %d\n", pid, mypid); + wait(&rc); + } + rc = g_error; + g_error = 0; + mc_term(); + return rc; +} + +int test_mc_ioarcb_ea_alignment(int cmd) +{ + int rc; + int a; + struct rwbuf *p_rwb; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=128; + __u64 actual_size=0; + __u64 st_lba, nlba; + __u64 stride = 0x1000; + int offset; + pthread_t thread; + int max; + mc_stat_t l_mc_stat; + + + if (1 == cmd) //16 byte ea alignment + offset = 16; + else if (2 == cmd) //128 byte ea alignment + offset = 128; + else //invalid ea alignment + offset = 5; + + max = offset * 10; //try for next 10 offset + pid = getpid(); + rc = posix_memalign((void **)&p_rwb, 0x1000, sizeof( struct rwbuf )); + CHECK_RC(rc, "rwbuf allocation failed"); + debug("initial buf address : %p\n",p_rwb); + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + /*rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + */ + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + if (chunks != actual_size) + { + CHECK_RC(1, "doesn't have enough chunk space"); + } + st_lba = 0; + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << 
l_mc_stat.nmask); + debug("EA alignment from begining of 4K\n"); + for (a=offset; a <= max; a+=offset) + { + debug("send alignment offset : %u\n",a); + rc = send_rw_rcb(p_ctx, p_rwb, st_lba, stride, a, 0); + if (rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba/2, stride, a, 0); + if (rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba-(NUM_CMDS * stride), stride, a, 0); + if (rc) break; + } + //CHECK_RC(rc, "send_rw_rcb"); + debug("EA alignment from end of a 4K\n"); + for (a=offset; a <= max; a+=offset) + { + debug("send alignment offset from last : %u\n", a); + rc = send_rw_rcb(p_ctx, p_rwb, st_lba, stride, a, 1); + if (rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba/2, stride, a, 1); + if (rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba-(NUM_CMDS * stride), stride, a, 1); + if (rc) break; + } + pthread_cancel(thread); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + free(p_rwb); + mc_term(); + if (rc!=0 && cmd == 3) + return 3; + return rc; +} + +int mc_test_rwbuff_global(int cmd) +{ + int rc; + struct ctx *p_ctx = &gs_ctx; + __u64 chunks=64; + __u64 actual_size=0; + __u64 st_lba; + __u64 stride; + __u64 nlba; + pthread_t thread; + mc_stat_t l_mc_stat; + + if (2 == cmd) + { + //allocate from heap + //p_ctx = (struct ctx *)malloc(sizeof(struct ctx)); + //IOARCB req 16 Byte Allignment + p_ctx = (struct ctx *)aligned_alloc(16,sizeof(struct ctx)); + if (NULL == p_ctx) + { + fprintf(stderr,"Mem allocation failed\n"); + return -1; + } + } + pid = getpid(); + + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx, chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = (1 << l_mc_stat.nmask); + for (st_lba = 0; st_lba < 
nlba; st_lba += (NUM_CMDS * stride)) + { + rc = send_write(p_ctx, st_lba, stride, pid); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, st_lba, stride); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, st_lba); + CHECK_RC(rc, "rw_cmp_buf"); + } + pthread_cancel(thread); + ctx_close(p_ctx); + if (2 == cmd) + { + //deallocate from heap + free(p_ctx); + } + mc_term(); + return rc; +} + +void *only_play_size(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 w_chunk; + int rc; + int myloop = count * 10; + mc_stat_t l_mc_stat; + + l_mc_stat.size = p_ctx->lun_size/p_ctx->chunk_size; + rc =mc_stat1(p_ctx, &l_mc_stat); + w_chunk = l_mc_stat.size; + while (myloop-- > 0) + { + w_chunk +=128; + debug("%d: doing mc size from 0X%"PRIX64" to 0X%"PRIX64"\n", pid, actual_size, w_chunk); + rc = mc_size1(p_ctx, w_chunk, &actual_size); + if (rc != 0) + { + g_error = rc; + N_done = 1; + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + w_chunk -=128; + debug("%d: doing mc size from 0X%"PRIX64" to 0X%"PRIX64"\n", pid, actual_size, w_chunk); + rc = mc_size1(p_ctx, w_chunk, &actual_size); + if (rc != 0) + { + g_error = rc; + N_done = 1; + fprintf(stderr, "%d: failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + } + N_done = 1; //now tell other thread, i m done + return 0; +} + +int test_mc_rw_size_parallel() +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread[2]; + mc_stat_t l_mc_stat; + int i; + + __u64 chunks=64; + __u64 actual_size; + + for (i =0 ;i < 4; i++) + { + if (fork() == 0) + { + sleep(1); //lets all process get created + rc = test_init(p_ctx); + CHECK_RC(rc, "test init"); + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + /*rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "mc_register"); + */ + rc = create_res(p_ctx); + 
CHECK_RC(rc, "create_res"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + clba = (actual_size * (1 << l_mc_stat.nmask)); + pthread_create(&rhthread[0], NULL, only_rw_io, p_ctx); + pthread_create(&rhthread[1], NULL, only_play_size, p_ctx); + + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); + + pthread_cancel(thread); + close_res(p_ctx); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + exit(rc); + } + } + wait4all(); + rc = g_error; + g_error = 0; + return rc; +} + +int test_mc_rwbuff_shm() +{ + int rc = 0; + struct rwshmbuf l_rwb; + struct rwshmbuf *p_rwb; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=64; + __u64 actual_size=0; + __u64 st_lba; + __u64 nlba; + __u64 stride = 0x100; + pthread_t thread; + pid_t cpid; + mc_stat_t l_mc_stat; + + int shmid; + key_t key=2345; + char *shm; + pid = getpid(); + + if ((shmid = shmget(key,sizeof(struct rwshmbuf), IPC_CREAT | 0666)) < 0) + { + fprintf(stderr, "shmget failed\n"); + return -1; + } + + if ((shm = shmat(shmid, NULL, 0)) == (char *)-1) + { + fprintf(stderr, "shmat failed\n"); + return -1; + } + debug("%d : shared region created\n",pid); + //lets create a child process to keep reading shared area + cpid = fork(); + if (cpid == 0) + { + pid = getpid(); + if ((shmid = shmget(key,sizeof(struct rwshmbuf), IPC_CREAT | 0666)) < 0) + { + fprintf(stderr, "shmget failed\n"); + exit(-1); + } + + if ((shm = shmat(shmid, NULL, 0)) == (char *)-1) + { + fprintf(stderr, "shmat failed\n"); + exit(-1); + } + debug("%d: child started accessing shared memory...\n",pid); + while (1) + { + memcpy(&l_rwb, shm, sizeof(struct rwshmbuf)); + } + } + + p_rwb = (struct rwshmbuf *)shm; + + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, 
p_ctx); + /*rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + */ + rc = create_res(p_ctx); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size1(p_ctx,chunks, &actual_size); + l_mc_stat.size = actual_size; + CHECK_RC(rc, "mc_size"); + + rc = mc_stat1(p_ctx, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("%d: started IO where rwbuf in shared memory lba range(0X%"PRIX64")\n", pid, nlba-1); + for (st_lba =0; st_lba < nlba; st_lba += stride) + { + rc = send_rw_shm_rcb(p_ctx, p_rwb, st_lba); + CHECK_RC(rc, "send_rw_rcb"); + } + debug("%d: IO is done now \n", pid); + + debug("%d: now time to kill child %d \n", pid, cpid); + kill(cpid, SIGKILL); + + shmdt(shm); + pthread_cancel(thread); + //mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + return 0; +} diff --git a/src/cflash/test/cflash_test_ioctl.c b/src/cflash/test/cflash_test_ioctl.c new file mode 100644 index 00000000..0d2d17cb --- /dev/null +++ b/src/cflash/test/cflash_test_ioctl.c @@ -0,0 +1,3208 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_ioctl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +#define SLEEP_FOR_RACE 2 +extern int MAX_RES_HANDLE; +extern int g_error; +extern int g_errno; +extern pid_t pid; +extern char cflash_path[MC_PATHLEN]; +static int threadRC; + +char *diskList[MC_PATHLEN]; +int diskCount = 0 ; + +int call_attach_diff_devno() +{ + int rc=0,i,j=2; + struct ctx myctx[10]; + struct ctx *p_ctx[10]; + dev64_t devno[10]={ '0' }; + struct flash_disk disks[MAX_FDISK]; + //int cfdisk = MAX_FDISK; + pid = getpid(); + rc = get_flash_disks(disks, FDISKS_SHARED); + if ( rc == 0 ) + CHECK_RC(1, "shared disk not found\n"); + for ( i=0;i<10;i++) + { + p_ctx[i]=&myctx[i]; + } + + strcpy(p_ctx[0]->dev,disks[0].dev); +#ifdef _AIX + j=ioctl_dk_capi_query_path_get_path(p_ctx[0],devno); +#endif + if ( j < 2 ) + return 1; + + for ( i=0; idev, cflash_path); + sub = strtok(cflash_path,"/"); + sub = strtok(NULL,"/"); + memset(&capi_paths,0,sizeof(capi_paths)); + memset(&capi_paths_struct,0,sizeof(capi_paths_struct)); + pthread_t thread; + int i; + pid = getpid(); + switch ( flag ) + { + case 1: // TCN@7.1.1 + //TEST_DCQP_VALID_PATH_COUNT + p_ctx->fd = open_dev(p_ctx->dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n",p_ctx->dev, errno); + g_error = -1; + return -1; + } + capi_paths.version = p_ctx->version; + capi_paths.path_count = 1; + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + debug("%d:flags=0X%"PRIX64" devno=0x%"PRIX64" path_id=%"PRIu16", ret_path_count=%d\n", + pid,capi_paths.path_info[0].flags, capi_paths.path_info[0].devno, + capi_paths.path_info[0].path_id,capi_paths.returned_path_count); + if (capi_paths.returned_path_count < 1) + CHECK_RC(1,"returned_path_count expected was 1+\n"); + break; + case 2: // TCN@7.1.4 + p_ctx->fd = open_dev(p_ctx->dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() 
failed: device %s, errno %d\n",p_ctx->dev, errno); + g_error = -1; + return -1; + } + //TEST_DCQP_DUAL_PATH_COUNT + capi_paths_struct.path.path_count = MAX_PATH; + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths_struct); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + debug("%d:returned_path_count=%d\n",pid,capi_paths.returned_path_count); + if (capi_paths_struct.path.returned_path_count > MAX_PATH) + capi_paths_struct.path.returned_path_count=MAX_PATH; + path_info = capi_paths_struct.path.path_info; + for (i=0;ifd = open_dev(p_ctx->dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n",p_ctx->dev, errno); + g_error = -1; + return -1; + } +#ifdef _AIX + rc= ioctl_dk_capi_query_path_check_flag(p_ctx,0,0); +#else + return 1; + // need to handle for Linux later +#endif + break; + + case 4: // TCN@7.1.6 + sprintf(cmd, "chdev -l %s -a reserve_policy=single_path",sub); + debug(":%d cmd=%s\n",pid, cmd); + rc = system(cmd); + CHECK_RC(rc, "Setting reserve_policy failed"); + p_ctx->fd = open_dev(p_ctx->dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n",p_ctx->dev, errno); + g_error = -1; + return -1; + } +#ifdef _AIX + rc= ioctl_dk_capi_query_path_check_flag(p_ctx,0,DK_CPIF_RESERVED ); +#else + return 1; + // need to handle for Linux later +#endif + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc =99; + } + close(p_ctx->fd); + return rc; +} +//7.1.3 +int test_dcqp_error_ioctl(int flag) // func@DK_CAPI_QUERY_PATH error path +{ + int rc = 0; + struct ctx u_ctx; + struct ctx *p_ctx = &u_ctx; + struct cflash_paths + { + struct dk_capi_paths path; + struct dk_capi_path_info paths[255-1]; + }capi_paths; + + pthread_t thread; + memset(p_ctx, 0, sizeof(struct ctx)); + memset(&capi_paths, 0, sizeof(capi_paths)); + pid = getpid(); + //open CAPI Flash disk device + p_ctx->fd = open_dev(cflash_path, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() 
failed: device %s, errno %d\n", cflash_path, errno); + g_error = -1; + return -1; + } + capi_paths.path.path_count = 0; + debug("%d:DK_CAPI_QUERY_PATHS with 0 path_count\n"); + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + debug("%d: rc =%d of path count 0\n",pid, rc); + if ( rc == 0 ) + flag=1; + capi_paths.path.path_count = 255; + debug("%d:DK_CAPI_QUERY_PATHS with 255 path_count"); + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + debug("%d: rc =%d of path count 255\n",pid, rc); + if ( rc !=0 ) + flag=1; + close(p_ctx->fd); + if ( flag == 1 ) + return 0; + else + return 1; +} +#endif + +int test_dca_ioctl(int flag) // func@DK_CAPI_ATTACH +{ + int rc=0,i; +#ifdef _AIX + struct dk_capi_attach capi_attach; +#else + struct dk_cxlflash_attach capi_attach; +#endif + struct ctx u_ctx, u_ctx_1, u_ctx_bkp; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_1= &u_ctx_1; + struct ctx *p_ctx_bkp = &u_ctx_bkp; + struct flash_disk disks[MAX_FDISK]; + //pthread_t thread; + get_flash_disks(disks, FDISKS_ALL); + memset(p_ctx, 0, sizeof(struct ctx)); + memset(p_ctx_1, 0, sizeof(struct ctx)); + //memset(p_ctx_bkp, 0, sizeof(struct ctx)); + //open CAPI Flash disk device + pid = getpid(); + p_ctx->fd = open_dev(cflash_path, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", cflash_path, errno); + g_error = -1; + return -1; + } + //thread to handle AFU interrupt & events + // pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#else + //TBD for linux +#endif + *p_ctx_bkp = *p_ctx; + switch ( flag ) + { + case 1: //TEST_DCA_VALID_ALL_VALUES TCN@7.1.10 + p_ctx->flags = p_ctx->work.flags; + p_ctx->work.num_interrupts = 4; + rc = ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + break; + case 2: //TEST_DCA_CALL_DIFF_DEVNO_MULTIPLE TCN@7.1.13 + // this test need to check for dual adapter and then proceed + 
//Applicabe for LPAR having mutlipe corsa adapter + rc = call_attach_diff_devno(); + return rc; + break; + case 3: // TCN@7.1.17 + //TEST_DCA_REUSE_CONTEXT_ALL_CAPI_DISK + rc = ioctl_dk_capi_attach(p_ctx); //first attach + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + for (i=1; i<=MAX_FDISK; i++) //Mutliple disk testing + { + p_ctx_1->fd = open( disks[i].dev, O_RDWR); + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[i].dev, errno); + g_error = -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + p_ctx_1->devno = p_ctx->devno; + p_ctx_1->flags = DK_AF_REUSE_CTX; +#else + p_ctx_1->flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + p_ctx_1->context_id = p_ctx->context_id; //context id of first call attach + rc = ioctl_dk_capi_attach(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + ioctl_dk_capi_detach(p_ctx_1); + close(p_ctx_1->fd); + } + ioctl_dk_capi_detach(p_ctx); + close(p_ctx->fd); + return rc; + break; + case 4: //TCN@7.1.19 + //TEST_DCA_REUSE_CTX_OF_RELASED_CTX +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#else + //TBD for linux +#endif + rc = ioctl_dk_capi_attach(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + + rc = ioctl_dk_capi_udirect(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + +#ifdef _AIX + capi_attach.flags = DK_AF_REUSE_CTX; + capi_attach.version = p_ctx_1->version; + capi_attach.devno = p_ctx_1->devno; //devno of first attach + capi_attach.ctx_token= p_ctx_1->context_id; //context id of first call attach +#else + capi_attach.hdr.flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; + capi_attach.hdr.version = p_ctx_1->version; + capi_attach.context_id = p_ctx_1->context_id; //context id of first call attach +#endif + capi_attach.num_interrupts = p_ctx_1->work.num_interrupts; + rc = ioctl_dk_capi_release(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + rc = ioctl_dk_capi_detach(p_ctx_1); 
+#ifdef _AIX + rc = ioctl(p_ctx->fd, DK_CAPI_ATTACH, &capi_attach); +#else + rc = ioctl(p_ctx->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + ioctl_dk_capi_detach(p_ctx); + close(p_ctx->fd); + return rc; + } + //pthread_cancel(thread); + ioctl_dk_capi_detach(p_ctx_bkp); + close(p_ctx->fd); + return rc; +} + +int test_dca_error_ioctl(int flag) // func@DK_CAPI_ATTACH error path +{ + int rc; + struct ctx u_ctx, u_ctx_1; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_1=&u_ctx_1; + struct flash_disk disks[MAX_FDISK]; // flash disk struct + memset(p_ctx, 0, sizeof(struct ctx)); + memset(p_ctx_1, 0, sizeof(struct ctx)); + int count=get_flash_disks(disks, FDISKS_SAME_ADPTR ); + pid = getpid(); + if (count < 2) + { + fprintf(stderr,"%d:Attention:System doesn't fullfil test req,Need 2 disks from a same adapter\n",pid); + return 100; + } + //open CAPI Flash disk device + strcpy(p_ctx->dev,disks[0].dev); + strcpy(p_ctx_1->dev,disks[1].dev); + p_ctx->fd = open_dev(disks[0].dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); + g_error = -1; + return -1; + } + p_ctx_1->fd = open_dev(disks[1].dev, O_RDWR); //Hoping to open second disk + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); + g_error = -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#else + //TBD for linux +#endif + p_ctx->work.num_interrupts = p_ctx_1->work.num_interrupts = 4; + + switch ( flag ) + { + case 1: // TCN@7.1.7 + //TEST_DCA_OTHER_DEVNO + p_ctx->devno = p_ctx->devno + 999 ; //TBD Other than devno of DKPQP + rc = ioctl_dk_capi_attach(p_ctx); + if (rc != 0) //Handling for negative test case as pass for invalid + rc = 0; + return rc; + break; + case 2: //TEST_DCA_Invalid_devno TCN@7.1.8 + p_ctx->devno = 0x999; // Invalid valid devno TBD + rc = ioctl_dk_capi_attach(p_ctx); + if (rc == 22) 
//Handling for negative test case as pass for invalid + rc = 0; + else rc = 1; + return rc; + break; + + case 3: //TEST_DCA_Invalid_Intrpt_Num TCN@7.1.9 +#ifdef _AIX + p_ctx->work.num_interrupts = 0x1; //TBD interrupt value which is invalid +#else + p_ctx->work.num_interrupts = 0x099; // tested on linux +#endif + rc = ioctl_dk_capi_attach(p_ctx); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0; + else rc =1; + return rc; + break; + case 4: //TEST_DCA_Invalid_Flags TCN@7.1.11 + p_ctx->flags = 0x0999; //TBD invalid flags other than //DK_AF_REUSE_CTX + rc = ioctl_dk_capi_attach(p_ctx); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0; + else rc = 1; + return rc; + break; + case 5: //TEST_DCA_CALL_TWICE TCN 7.1.12 + rc = ioctl_dk_capi_attach(p_ctx); //first time calling + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + rc = ioctl_dk_capi_attach(p_ctx); //second call may pass + if (rc != 0) //Handling for negative test case as pass for invalid + rc = 0 ; + break; + case 6: // TCN@7.1.14 + //TEST_DCA_REUSE_CONTEXT_FLAG + // using lun_type flag as 4 for ignoring all if conditions + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,4); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + break; + case 7: // TCN@7.1.15 + //TEST_DCA_REUSE_CONTEXT_FLAG_ON_NEW_PLUN_DISK + rc = ioctl_dk_capi_attach(p_ctx); // first attach + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#endif + rc = ioctl_dk_capi_attach(p_ctx_1); //attach of plun + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + rc = ioctl_dk_capi_udirect(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + +#ifdef _AIX + p_ctx_1->devno = p_ctx->devno; //devno of first attach + p_ctx_1->flags = DK_AF_REUSE_CTX; +#else + p_ctx_1->flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + p_ctx_1->context_id = p_ctx->context_id; //context id of first call attach + rc = 
ioctl_dk_capi_attach(p_ctx_1); + if (rc != 0) //Handling for negative test case as pass for invalid + rc = 0 ; + close(p_ctx_1->fd); + break; + case 8: // TCN@7.1.16 + //TEST_DCA_REUSE_CONTEXT_FLAG_ON_NEW_VLUN_DISK + //TEST_DCA_REUSE_CONTEXT_FLAG_ON_NEW_VLUN_DISK + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,LUN_VIRTUAL); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + close(p_ctx_1->fd); + break; + + case 9: // TCN@7.1.18 + //TEST_DCA_REUSE_CONTEXT_OF_DETACH_CTX + + rc = ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + p_ctx_1->devno = p_ctx->devno; //devno of first attach +#else + p_ctx_1->flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + p_ctx_1->context_id = p_ctx->context_id;//context id of first call attach + + rc = ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc, "DK_CAPI_DETACH failed"); + rc = ioctl_dk_capi_attach(p_ctx_1); + if (rc != 0) //Handling for negative test case as pass for invalid + rc = 0 ; + close(p_ctx_1->fd); + break; + + case 10: // TCN@7.1.20 + //TEST_DCA_REUSE_CONTEXT_ON_NEW_DISK_AFTER_EEH + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,10); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + + break; + case 11: + //TCN@7.1.19 + //reuse flag on released context. 
+ rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,3); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + break; + case 12 : + // TCN @ 7.1.17 + rc=ioctl_dk_capi_attach_reuse_all_disk(); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + return rc; + case 13: + // TCN @ 7.1.215 + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,6); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + break; + + case 14: + // TCN @ 7.1.216 + rc=ioctl_dk_capi_attach_reuse_loop(p_ctx,p_ctx_1); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + break; + + case 15: // TCN@7.1.24 - TEST_DCRC_EEH_VLUN_RESUSE_CTX + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,11); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + + break; + + case 16: // TCN = 7.1.25 -TEST_DCRC_EEH_PLUN_RESUSE_CTX + + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,12); + CHECK_RC(rc, "DK_CAPI_ATTACH with reuse flag failed"); + break; + case 17: + rc=ioctl_dk_capi_attach_reuse(p_ctx,p_ctx_1,13); + if ( rc == 0 ) + { + CHECK_RC(1, "DK_CAPI_ATTACH should have failed with reuse flag "); + } + rc=0; + break; + + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + + } + ioctl_dk_capi_detach(p_ctx); + close(p_ctx->fd); + return rc; +} + +int test_dcrc_ioctl(int flag) // func@DK_CAPI_RECOVER_CTX +{ + int rc,i; + struct ctx u_ctx, u_ctx_1,u_ctx_bkp, array_ctx[3];; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_1 = &u_ctx_1; + struct ctx *P_ctx_bkp = &u_ctx_bkp; + struct ctx *p_array_ctx[3]; + struct flash_disk disks[MAX_FDISK]; + pthread_t thread, thread2; + pthread_t thread_new[3]; + pthread_t ioThreadId; + pthread_t ioThreadId_new[3]; + int threadCleanup=0; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t ioThreadData_new[3]; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + do_io_thread_arg_t * p_ioThreadData_new[3]; + __u64 nlba = NUM_BLOCKS; + __u64 stride= 0x1000; + + pid = getpid(); + + if ( flag == 4 ) + { + prepDiskList(cflash_path); + + if ( diskCount == 0 
) + CHECK_RC(1,"WARNING : need to export FVT_DEV=/dev/d1,/dev/d2 \n"); + + memset(p_ctx_1, 0, sizeof(struct ctx)); + memset(p_ctx, 0, sizeof(struct ctx)); + + if ((p_ctx->fd = open_dev(diskList[0], O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno); + g_error = -1; + return -1; + } + + strcpy(p_ctx->dev,diskList[0]); + +#ifdef _AIX + + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query path failed"); + +#endif /*_AIX */ + + rc = ctx_init_internal(p_ctx, DK_AF_ASSIGN_AFU, p_ctx->devno); + CHECK_RC(rc, "ctx_init_internal failed"); + } + + else + { + get_flash_disks(disks, FDISKS_ALL); + memset(p_ctx_1, 0, sizeof(struct ctx)); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + } + + *P_ctx_bkp = *p_ctx; + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + switch ( flag) + { + case 1: //TCN@7.1.22 + //TEST_DCRC_WITH_NO_EEH + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifndef _AIX + p_ctx->flags=DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; //clear any oustanding flag +#else + p_ctx->flags=0; +#endif + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + break; + case 2: //TCN@7.1.28 + //TEST_DCRC_DETACHED_CTX + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + pthread_cancel(thread); + + rc = ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc, "DK_CAPI_DETACH failed"); + p_ctx->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx); + if (rc != 0) //Handling for negative test + rc = 0 ; + // pthread_cancel(thread); + close(p_ctx->fd); + return rc; + break; + case 3: // TCN@ 7.1.21 - TEST_DCRC_EEH_VLUN + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); +#ifdef _AIX + if 
(p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED"); + p_ctx->context_id = p_ctx->new_ctx_token; + break; + case 4: // TCN@7.1.23 - TEST_DCRC_EEH_PLUN_MULTI_VLUN + + for ( i =0; i < 3; i ++ ) + { + p_array_ctx[i] = &array_ctx[ i ]; + } + + rc =ioctl_dk_capi_udirect(p_ctx); //plun + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + + p_ctx_1->fd = open_dev(diskList[1], O_RDWR); //Hoping to open second disk + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); + g_error = -1; + } + + for (i=0; i<3; i++) // multiple vluns creation on second disk + { + + memset(p_array_ctx[i], 0, sizeof(struct ctx)); + strcpy(p_array_ctx[i]->dev,diskList[1]); + p_array_ctx[i]->fd = p_ctx_1->fd; +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_array_ctx[i]); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#else + //TBD for linux +#endif + rc = ctx_init_internal(p_array_ctx[i], DK_AF_ASSIGN_AFU, p_array_ctx[i]->devno); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + rc = create_resource(p_array_ctx[i], (p_array_ctx[i]->last_phys_lba+1)/10, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + } + + for (i=0; i<3; i++) + { + pthread_create(&thread_new[i],NULL, ctx_rrq_rx,p_array_ctx[i]); + } + + + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=1000; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + for (i=0; i<3; i++) + { + p_ioThreadData_new[i]=&ioThreadData_new[i]; + p_ioThreadData_new[i]->p_ctx=p_array_ctx[i]; + p_ioThreadData_new[i]->stride=stride; + p_ioThreadData_new[i]->loopCount=1000; + rc = pthread_create(&ioThreadId_new[i],NULL, do_io_thread, (void *)p_ioThreadData_new[i]); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + } + + rc = do_eeh(p_ctx); + 
CHECK_RC(rc, "Failed to do EEH injection"); + + pthread_cancel(ioThreadId); + + for (i=0; i<3; i++) + { + pthread_cancel(ioThreadId_new[i]); + } + +#ifndef _AIX + + pthread_cancel(thread); + threadCleanup = 1; + + for (i=0; i<3; i++) + { + pthread_cancel(thread_new[i]); + } +#endif + +#ifndef _AIX + p_ctx->flags = DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET ; +#endif + rc =ioctl_dk_capi_recover_ctx(p_ctx); //recovering plun + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED"); + + + for (i=0; i<3; i++) // multiple vluns recover with reattach + { + +#ifndef _AIX + p_array_ctx[i]->flags = DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET ; +#endif + rc =ioctl_dk_capi_recover_ctx(p_array_ctx[i]); //recovering vlun + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + +#ifdef _AIX + if (p_array_ctx[i]->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_array_ctx[i]->return_flags) +#endif + CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED"); + + } + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); + + for (i=0; i<3; i++) + { + rc = ctx_reinit(p_array_ctx[i]); + CHECK_RC(rc, "ctx_reinit() failed"); + } + +#ifndef _AIX + + debug("------------------ intr hndl1 started -------------------------\n"); + + pthread_create(&thread2, NULL, ctx_rrq_rx, p_ctx); + + /* to avoid race condition for handler and IO thread */ + sleep(SLEEP_FOR_RACE); + +#endif + rc = do_io(p_ctx, stride); + if ( rc == 2) rc=0; + else CHECK_RC(rc, "1st IO attempt didn't fail"); + +#ifdef _AIX + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; +#else + p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); + +#ifndef _AIX + 
pthread_cancel(thread2); + debug("------------------ intr hndl1 cancelled -------------------------\n"); +#endif + +#ifndef _AIX + + debug("------------------ intr hndl2 started -------------------------\n"); + pthread_create(&thread_new[0],NULL, ctx_rrq_rx,p_array_ctx[0]); + + /* to avoid race condition for handler and IO thread */ + sleep(SLEEP_FOR_RACE); + +#endif + + rc=do_io(p_array_ctx[0],stride); + if ( rc == 2) rc=0; + else CHECK_RC(rc, "1st IO attempt didn't fail"); + +#ifdef _AIX + p_array_ctx[0]->flags = DK_VF_HC_TUR; + p_array_ctx[0]->hint = DK_HINT_SENSE; +#else + p_array_ctx[0]->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + rc = ioctl_dk_capi_verify(p_array_ctx[0]); + CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); + + rc=do_io(p_array_ctx[0],stride); + CHECK_RC(rc, "IO attempt fail"); + +#ifndef _AIX + pthread_cancel(thread_new[0]); + debug("------------------ intr hndl2 cancelled -------------------------\n"); +#endif + + for (i=1; i<3; i++) + { +#ifndef _AIX + debug("------------------ intr hndl%d started -----------------------\n",(i+2)); + pthread_create(&thread_new[i],NULL, ctx_rrq_rx,p_array_ctx[i]); + + /* to avoid race condition for handler and IO thread */ + sleep(SLEEP_FOR_RACE); +#endif + + rc=do_io(p_array_ctx[i],stride); + CHECK_RC(rc, "IO failed "); +#ifndef _AIX + + pthread_cancel(thread_new[i]); + debug("------------------ intr hndl%d cancelled -----------------------\n",(i+2)); +#endif + + } + + +#ifdef _AIX + + for (i=0; i<3; i++) + { + pthread_cancel(thread_new[i]); + } +#endif + break; + case 5: // TCN@7.1.24 - TEST_DCRC_EEH_VLUN_RESUSE_CTX + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + p_ctx_1->fd = open_dev(disks[1].dev, O_RDWR); //Hoping to open second disk + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); + g_error = -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + 
p_ctx_1->devno = p_ctx->devno; //devno of first attach +#else + p_ctx_1->flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + p_ctx_1->context_id = p_ctx->context_id; //context id of first call attach + rc = ioctl_dk_capi_attach(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + p_ctx_1->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + break; + case 6: // TCN = 7.1.25 -TEST_DCRC_EEH_PLUN_RESUSE_CTX + + rc =ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + p_ctx_1->fd = open(disks[1].dev, O_RDWR); //Hoping to open second disk + if (p_ctx_1->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); + g_error = -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + p_ctx_1->devno = p_ctx->devno; //devno of first attach + p_ctx_1->flags = DK_AF_REUSE_CTX; +#else + p_ctx_1->flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; +#endif + p_ctx_1->context_id = p_ctx->context_id; //context id of first call attach + rc = ioctl_dk_capi_attach(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + p_ctx_1->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx_1); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + break; + case 7: // TCN = 7.1.26 - TEST_DCRC_EEH_VLUN_RESIZE + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + nlba = p_ctx->last_phys_lba + 1; + rc = vlun_resize(p_ctx, nlba); // vlun size in terms of blocks + CHECK_RC(rc, "vlun_resize failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + p_ctx->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( 
DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); + + break; + case 8: // TCN 7.1.27 - TEST_DCRC_EEH_VLUN_RELEASE + rc =ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + p_ctx->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); + pthread_cancel(thread); + rc|=ctx_close(p_ctx); + return rc; + + break; + case 9: //TCN@7.1.112 + rc =ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + p_ctx->devno = 0x9999; //TBD invalid dev no. + p_ctx->flags = DK_RF_REATTACHED ; + rc =ioctl_dk_capi_recover_ctx(p_ctx); + if (rc == 22) + rc = 0; + else + rc = 1; + + break; + case 10: //TCN@7.1.113 + // invalid flag will succeed, as flag field is not defined for recover + rc =ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + p_ctx->flags = 0x09999 ; //TBD invalid flags + rc =ioctl_dk_capi_recover_ctx(p_ctx); + if (rc == 0) + rc = 0; + else + rc = 1; + break; + case 11: //TCN@7.1.114 + // reason field of recover is not implemented. 
thus will not fail for invalid data + rc =ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + p_ctx->reason = 0x09999 ; //TBD invalid reason + rc =ioctl_dk_capi_recover_ctx(p_ctx); + if (rc == 0) + rc = 0; + else + rc = 1; + + break; + case 12: //TCN@7.1.115 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + stride=0x10; + + // We wish to do IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=1000; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + + // Wait for IO thread to complete + pthread_join(ioThreadId, NULL); +#ifndef _AIX + pthread_cancel(thread); +#endif + + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); +#ifndef _AIX + pthread_create(&thread2, NULL, ctx_rrq_rx, p_ctx); +#endif + rc=do_io(p_ctx, stride); + if ( rc == 2 ) rc=0; + else CHECK_RC(1, "1st IO attempt didn't fail"); + + rc=do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); +#ifndef _AIX + pthread_cancel(thread2); +#endif + + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + if ( threadCleanup == 0 ) + { + pthread_cancel(thread); + } + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_dcud_ioctl(int flag) // func@DK_CAPI_USER_DIRECT +{ + int rc,i; + struct ctx u_ctx; + struct ctx *p_ctx = &u_ctx; + struct validatePckt valVar; + struct validatePckt *nPtr=&valVar; + pthread_t thread; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + switch ( flag ) + { + case 1: // TCN@7.1.32 + //TEST_DCUD_VALID_CTX_VALID_DEVNO + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + // expected input + nPtr->ctxPtr=p_ctx; + nPtr->obCase=CASE_PLUN; +#ifdef _AIX + 
+ p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=1000; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + + // Wait for IO thread to complete + pthread_join(ioThreadId, NULL); +#ifndef _AIX + pthread_cancel(thread); +#endif + + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + CHECK_RC(rc, "ctx_reinit() failed"); +#ifndef _AIX + pthread_create(&thread2, NULL, ctx_rrq_rx, p_ctx); +#endif + rc=do_io(p_ctx, stride); + if ( rc == 2 ) rc=0; + else CHECK_RC(1, "1st IO attempt didn't fail"); + + rc=do_io(p_ctx, stride); + CHECK_RC(rc, "do_io() failed"); +#ifndef _AIX + pthread_cancel(thread2); +#endif + + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + if ( threadCleanup == 0 ) + { + pthread_cancel(thread); + } + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_dcud_ioctl(int flag) // func@DK_CAPI_USER_DIRECT +{ + int rc,i; + struct ctx u_ctx; + struct ctx *p_ctx = &u_ctx; + struct validatePckt valVar; + struct validatePckt *nPtr=&valVar; + pthread_t thread; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + switch ( flag ) + { + case 1: // TCN@7.1.32 + //TEST_DCUD_VALID_CTX_VALID_DEVNO + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + // expected input + nPtr->ctxPtr=p_ctx; + nPtr->obCase=CASE_PLUN; +#ifdef _AIX + 
nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; +#else + nPtr->expt_return_flags=0; +#endif + nPtr->expt_last_lba=p_ctx->last_phys_lba; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + break; + + case 2: // TCN@7.1.37 + //TEST_DCUD_WITH_VLUN_CREATED_DESTROYED_ON_SAME_DISK + + rc = create_resource(p_ctx, 0,DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "DK_CAPI UVIRTUAL failed"); + + // expected input + nPtr->ctxPtr=p_ctx; +#ifdef _AIX + nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; + nPtr->expt_last_lba=0; +#else + nPtr->expt_last_lba=-1; + nPtr->expt_return_flags=0; +#endif + nPtr->obCase=CASE_VLUN; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + // expected input + nPtr->ctxPtr=p_ctx; +#ifdef _AIX + nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; +#else + nPtr->expt_return_flags=0; +#endif + nPtr->expt_last_lba=p_ctx->last_phys_lba; + nPtr->obCase=CASE_PLUN; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + break; + + case 3: // TCN@7.1.38 + //TEST_DCUD_IN_LOOP + for (i=0; i<100; i++) + { + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + nPtr->ctxPtr=p_ctx; + nPtr->obCase=CASE_PLUN; +#ifdef _AIX + nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; +#else + nPtr->expt_return_flags=0; +#endif + nPtr->expt_last_lba=p_ctx->last_phys_lba; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + } + pthread_cancel(thread); + ctx_close(p_ctx); + return rc; + break; + + case 4: // TCN@7.1.39 + //TEST_DCUD_PATH_ID_MASK_VALUES + + p_ctx->path_id_mask = 0; //bit 0 to path 0, + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, 
"DK_CAPI_UDIRECT failed"); + nPtr->ctxPtr=p_ctx; + nPtr->obCase=CASE_PLUN; +#ifdef _AIX + nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; +#else + nPtr->expt_return_flags=0; +#endif + nPtr->expt_last_lba=p_ctx->last_phys_lba; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + + p_ctx->path_id_mask = 1; // bit 1 to path 1 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + nPtr->ctxPtr=p_ctx; + nPtr->obCase=CASE_PLUN; +#ifdef _AIX + nPtr->expt_return_flags=DK_RF_PATH_ASSIGNED; +#else + nPtr->expt_return_flags=0; +#endif + nPtr->expt_last_lba=p_ctx->last_phys_lba; + rc = validateFunction(nPtr); + CHECK_RC(rc, "ValidateFunction failed"); + + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + return rc; + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + pthread_cancel(thread); + rc = ioctl_dk_capi_release(p_ctx); + ctx_close(p_ctx); + return rc; + +} + +int test_dcud_error_ioctl(int flag) // DK_CAPI_USER_DIRECT error path +{ + int rc; + struct ctx u_ctx, u_ctx_bkp ; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_bkp = &u_ctx_bkp; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + *p_ctx_bkp=*p_ctx; + + switch ( flag) + { + case 1: // TCN@7.1.29 + //TEST_DCUD_INVALID_DEVNO_VALID_CONTX + + p_ctx->devno = 0x999; //TBD invalid devno + rc=ioctl_dk_capi_udirect(p_ctx); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0 ; + else rc = 1; + break; + case 2: // TCN@7.1.30 + //TEST_DCUD_INVALID_CONTX_VALID_DEVNO + p_ctx->context_id = 0x3fff7b29; //invalid context ID + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0 ; + else rc = 1; + // moving to correct context id + p_ctx->context_id = 
p_ctx_bkp->context_id; + rc=ctx_close(p_ctx); + return rc; + break; + case 3: // TCN@7.1.33 + //TEST_DCUD_FLAGS + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "compare failed"); + //TBD some more validation will be added later + rc=close_res(p_ctx); + CHECK_RC(rc, "close res failed"); + rc|=ctx_close(p_ctx); + return rc; + break; + case 4: // TCN@7.1.34 + //TEST_DCUD_TWICE_SAME_CONTX_DEVNO + //OK as per MATT,We only restrict exclusivity between pLUN/vLUN + //on the same disk. You are free to create multiple pLUNs over the same + //disk but are responsible for serializing access and enforcing security + //as each context will have a window into the same data. + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "compare failed"); + rc |= create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + rc |= compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "compare failed"); + //TBD some more validation will be added later + rc|=ctx_close(p_ctx); + return rc; + break; + case 5: // TCN@7.1.35 + //TEST_DCUD_WITH_VLUN_ALREADY_CREATED_ON_SAME_DISK + rc = create_resource(p_ctx, 0,DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "compare failed"); + + // TBD more validation will be added later + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0 ; + // TBD more validation will be added later + rc |= close_res(p_ctx); + CHECK_RC(rc, "close res failed"); + rc |= ctx_close(p_ctx); + return rc; + break; + case 6: // TCN@ 7.1.36 + 
//TEST_DCUD_WITH_PLUN_ALREADY_CREATED_ON_SAME_DISK + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "compare failed"); + + // TBD more validation will be added later + // second call + rc = create_resource(p_ctx, 0,DK_UVF_ALL_PATHS, LUN_VIRTUAL); + if (rc == 22) + rc =0; + //TBD more validation will be added later;IN_USE_DIRECT flag check + rc = close_res(p_ctx); + CHECK_RC(rc, "close res failed"); + rc = ctx_close(p_ctx); + CHECK_RC(rc, "p_ctx failed"); + return rc; + break; + case 7: //7.1.40 + //TEST_DCUD_PATH_ID_MASK_VALUES + p_ctx->path_id_mask = 0xff; //invalid path_id_mask + rc = create_resource(p_ctx, 0, 0, LUN_DIRECT); + if (rc == 22) //Handling for negative test case as pass for invalid + rc = 0 ; + else rc = 1; + ctx_close(p_ctx); + return rc; + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + ctx_close(p_ctx_bkp); + return rc; +} +int test_dcuv_ioctl(int flag) // func@DK_CAPI_USER_VIRTUAL +{ + int rc,i,j; + //int max_lun_size; + struct ctx u_ctx; + struct ctx *p_ctx = &u_ctx; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + +#ifdef _AIX + // Most of the tests are vLUN related + // so, default flag is set to it. 
+ p_ctx->flags = DK_UVF_ALL_PATHS; +#endif + + switch ( flag ) + { + //TEST_DCUV_LUN_VLUN_SIZE_ZERO + case 1: // TCN @7.1.44 + p_ctx->lun_size = 0; + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "ERROR: last_lba check failed"); + break; + case 2: // TCN@7.1.46 + //TEST_DCUV_WITH_VLUN_ALREADY_CREATED_ON_SAME_DISK + rc= ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "ERROR: p_ctx->last_lba check failed"); + rc= ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "ERROR: p_ctx_1->last_lba check failed"); + break; + case 3: // TCN@ 7.1.48 + //TEST_DCUV_WITH_MULTIPLE_VLUNS_ON_SAME_CAPACITY_SAME_DISK + + //begin with failure rc for case we can't enter loop + rc = -1; + for ( i = 2*NUM_BLOCKS, j = 0; + i <= p_ctx->last_phys_lba+1 && j < 16; + i += 2*NUM_BLOCKS, j++) + { + p_ctx->lun_size = NUM_BLOCKS; // no. 
of blocks + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->lun_size-1); + CHECK_RC(rc, "ERROR: p_ctx->last_lba check failed"); + + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + + rc = compare_size(p_ctx->last_lba, p_ctx->lun_size-1); + CHECK_RC(rc, "ERROR: p_ctx->last_lba check failed"); + } + break; + case 4: // TCN@ 7.1.49 + //TEST_DCUV_TWICE_SAME_CONTX_ID_DEVNO + p_ctx->lun_size = NUM_BLOCKS; + rc= ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->lun_size-1); + CHECK_RC(rc, "ERROR: last_lba check failed"); + + p_ctx->lun_size = p_ctx->last_phys_lba + 1 - NUM_BLOCKS; + rc= ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->lun_size-1); + CHECK_RC(rc, "ERROR: last_lba check failed"); + + break; + case 5: // TCN@7.1.50 + //TEST_DCUV_VLUN_MAX_SIZE + + p_ctx->lun_size = p_ctx->last_phys_lba + 1;// max. no. 
block in hdisk + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "ERROR: last_lba check failed"); + break; + case 6: // TCN@7.1.52 + //TEST_DCUV_PLUN_CREATED_DESTROYED_SAME_DISK + p_ctx->flags = DK_UDF_ASSIGN_PATH; + rc=ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_UDIRECT failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + + p_ctx->lun_size = NUM_BLOCKS; + p_ctx->flags = DK_UVF_ALL_PATHS; + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI UVIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->lun_size-1); + CHECK_RC(rc, "ERROR: last_lba check failed"); + break; + case 7: // TCN@7.1.55 + //TEST_DCUV_PATH_ID_MASK_VALUES + p_ctx->flags = 0; //let path_id_mask decide + p_ctx->path_id_mask = 1; //bit 1 to path 0, + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + p_ctx->path_id_mask = 2; // bit 2 to path 1 + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + if ( p_ctx->return_path_count < 2 && rc == 0 ) rc = 1; + CHECK_RC(rc, "path_count < 2 & vLUN creations didn't fail"); + break; + case 8: //TCN@7.1.56 + //TEST_DCUV_INVALID_PATH_ID_MASK_VALUES + p_ctx->path_id_mask = 0xA; + p_ctx->flags = 0x0; //let path_id_mask mask should use + rc=ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; + CHECK_RC(rc, "ioctl didn't fail with invalid path_id_mask"); + break; + + case 9: // TCN@7.1.57 + //TEST_DCUV_IN_LOOP + for (i=0; i<100; i++) // Calling ioctl uvirtual in loop + { + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "ERROR: last_lba check failed"); + rc = 
ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + + p_ctx->lun_size = p_ctx->last_phys_lba + 1; + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "ERROR: last_lba check failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + + p_ctx->lun_size = 0; + } + break; + } + close_res(p_ctx); + ctx_close(p_ctx); + + return rc; +} + +int test_dcuv_error_ioctl(int flag) // func@DK_CAPI_USER_VIRTUAL error path +{ + int rc=0, itr=0; + uint64_t blks_consumed=0; + struct ctx u_ctx, u_ctx_bkp; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_bkp = &u_ctx_bkp; + pthread_t thread; + pid = getpid(); + //struct dk_capi_uvirtual uvirtual; + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + // Most of the tests are vLUN related + // so, default flag is set to it. + p_ctx->flags = DK_UVF_ALL_PATHS; + + *p_ctx_bkp=*p_ctx; + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + printf("--------------------- case number %d -----------------------\n",flag); + debug("entering switch case:rc =%d , errno = %d\n",rc,errno); + + switch ( flag ) + { + case 1: // TCN@7.1.41 + //TEST_DCUV_INVALID_DEVNO_VALID_CONTX + p_ctx->devno = 0x9999;//TBD wrong dev number + rc=ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_LUN_NOT_FOUND); + break; + + case 2: // TCN@7.1.42 + //TEST_DCUV_INVALID_CONTX_INVALID_DEVNO + + p_ctx->devno = 0x9999;//TBD wrong dev number + p_ctx->context_id = 0x9999; //Invalid contx ID + rc=ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_ATTACH_NOT_FOUND); + break; + + case 3: // TCN@7.1.43 + //TEST_DCUV_VALID_DEVNO_INVALID_CONTX_ID + p_ctx->context_id = 0x99; //Invalid contx ID + 
rc=ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_ATTACH_NOT_FOUND); + break; + + case 4: // TCN@7.1.45 + //TEST_DCUV_PLUN_ALREADY_CREATED_SAME_DISK + p_ctx->flags = DK_UDF_ASSIGN_PATH; + rc= ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + + p_ctx->flags = DK_UVF_ALL_PATHS; + rc= ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + // TBD: harden flags while testing + rc |= compare_flags(p_ctx->return_flags, DK_RF_DUPLICATE); + + close_res(p_ctx); + break; + + case 5: // TCN@7.1.47 + //TEST_DCUV_WITH_NO_FURTHER_VLUN_CAPACITY + + // Max vLUN is 16 per ctx, so adjust size to be + // big enough to start failing before last limit + p_ctx->lun_size = (p_ctx->last_phys_lba )/0xA; + // calling multiple times vlun virtual to exhaust hdisk + while ( rc == 0 && itr < 16 ) + { + rc = ioctl_dk_capi_uvirtual(p_ctx); + if ( rc == 0 ) blks_consumed += (p_ctx->last_lba + 1); + p_ctx->last_lba = 0; // clear it off for next iteration ! + itr++; // Just to keep track we didn't hit max vLUN/ctx limit. 
+ } + + debug("%d:blks_consumed=0X%"PRIX64"\n", pid, blks_consumed); + if ( blks_consumed < (p_ctx->last_phys_lba - p_ctx->chunk_size )) + { + p_ctx->lun_size = ( p_ctx->last_phys_lba - blks_consumed ) - 0x1000 ; + rc = ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 0 ) + CHECK_RC(rc, "Failed to exhaust hdisk using vLUNs"); + } + + p_ctx->lun_size = p_ctx->chunk_size; + rc = ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 28 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_CHUNK_ALLOC); + + close_res(p_ctx); + break; + + case 6: // TCN@7.1.51 + //TEST_DCUV_VLUN_SIZE_MORE_THAN_DISK_SIZE + p_ctx->lun_size = p_ctx->last_phys_lba + 2; // more than disk size +#ifndef _AIX + rc= ioctl_dk_capi_uvirtual(p_ctx); + debug("ioctl_dk_capi_uvirtual:rc =%d , errno = %d\n\n",rc,errno); + CHECK_RC(rc, "ioctl_dk_capi_uvirtual failed"); + rc |= compare_flags(p_ctx->last_lba, p_ctx->last_phys_lba); +#else + rc= ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 28 ) + { + rc = 1; + CHECK_RC(rc, "unexpected result : ioctl_dk_capi_uvirtual failed"); + } + else + { + rc = 0; // Reset RC + } + rc |= compare_flags(p_ctx->return_flags, DK_RF_CHUNK_ALLOC); +#endif + debug("compare_flags:rc =%d , errno = %d\n",rc,errno); + break; + + case 7: // TCN@7.1.53 + //TEST_DCUV_WITH_CTX_OF_PLUN + p_ctx->flags = DK_UDF_ASSIGN_PATH; + rc= ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); + + p_ctx->flags = DK_UVF_ALL_PATHS; + rc= ioctl_dk_capi_uvirtual(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_PATH_ASSIGNED); + + close_res(p_ctx); + break; + + case 8: // TCN@7.1.54 + //TEST_DCUD_WITH_CTX_OF_VLUN + rc= ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + + p_ctx->flags = DK_UDF_ASSIGN_PATH; + rc= ioctl_dk_capi_udirect(p_ctx); + if ( rc != 22 ) rc = 1; + else rc = 0; // Reset RC + rc |= compare_flags(p_ctx->return_flags, DK_RF_PATH_ASSIGNED); + + close_res(p_ctx); 
+ break; + + default: + fprintf(stderr," Nothing to do with default call\n"); + rc =99; + } + + pthread_cancel(thread); + ctx_close(p_ctx_bkp); + return rc; +} + +//7.1.2 +int test_all_ioctl_invalid_version(void) //TEST_IOCTL_INVALID_VERSIONS +{ + int rc=0,i,final_rc=22; + for (i=1; i<=11; i++) + { + rc = test_invalid_version_ioctl(i); + if (rc != 22) + final_rc=-1; + + } + return final_rc; + +} + +int test_invalid_version_ioctl(int flag) +{ + int rc,i; + struct ctx u_ctx, u_ctx_bkp; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_bkp = &u_ctx_bkp; + uint16_t invalid_version[3] = { 1,99, 1000 }; + char sense_data[10]; + pid = getpid(); + memset(p_ctx, 0, sizeof(struct ctx)); + //open CAPI Flash disk device + p_ctx->fd = open_dev(cflash_path, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", cflash_path, errno); + g_error = -1; + return -1; + } + strcpy(p_ctx->dev, cflash_path); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "DK_CAPI_QUERY_PATHH failed"); +#endif + p_ctx->work.num_interrupts = 4; + switch ( flag ) + { +#ifdef _AIX + case 1: //TEST_DCUV_INVALID_VERSION + for ( i=0; i<3; i++) + { + debug("%d: ioctl query path with %d version\n",pid, invalid_version[i]); + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_query_path(p_ctx); + if (rc == 0) //Handling for negative test + break; + } + break; +#endif + case 2 : //TEST_DCA_Invalid_Version + p_ctx->flags = DK_AF_ASSIGN_AFU; + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_attach(p_ctx); + if (rc == 0) //Handling for negative test + break; + } + break; + case 3: //TEST_DCUD_INVALID_VERSION + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + p_ctx->flags = DK_UDF_ASSIGN_PATH; + *p_ctx_bkp = *p_ctx; + for (i=0; i<3; i++) + { + p_ctx->version= invalid_version[i]; + rc = ioctl_dk_capi_udirect(p_ctx); + if (rc == 0) //Handling for negative test + 
break; + } + + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 4: //TEST_DCUV_INVALID_VERSION + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + p_ctx->flags = DK_UVF_ALL_PATHS; + *p_ctx_bkp = *p_ctx; + for (i=0; i<3; i++) + { + p_ctx->version= invalid_version[i]; + rc = ioctl_dk_capi_uvirtual(p_ctx); + if (rc == 0) //Handling for negative test + break; + } + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 5: //TEST_DCVR_INVALID_VERSIONS -DK_CAPI_VLUN_RESIZE + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + p_ctx->flags = DK_UVF_ALL_PATHS; + rc = ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_VIRTUAL failed"); + *p_ctx_bkp = *p_ctx; + for ( i=0; i<3; i++) + { + p_ctx->version= invalid_version[i]; + rc = ioctl_dk_capi_vlun_resize(p_ctx); + if (!rc) break; + } + ioctl_dk_capi_release(p_ctx_bkp); + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 6: // TEST_DCR_INVALID_VERSION + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH Failed"); + p_ctx->flags = DK_UDF_ASSIGN_PATH; + *p_ctx_bkp = *p_ctx; + for (i=0; i<3; i++) + { + rc = ioctl_dk_capi_udirect(p_ctx); + p_ctx->version= invalid_version[i]; + rc = ioctl_dk_capi_release(p_ctx); + if (rc == 0) //Handling for negative test + break; + *p_ctx = *p_ctx_bkp;//preserv valid version for udirect + } + ioctl_dk_capi_detach(p_ctx); + break; + case 7: //TEST_DCD_INVALID _VERSION + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH Failed"); + *p_ctx_bkp = *p_ctx; + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_detach(p_ctx); + if (rc == 0) //Handling for negative test + break; + } + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 8: // TEST_DCL_INVALID + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH 
Failed"); +#ifdef _AIX + p_ctx->flags = DK_LF_TEMP; +#endif + //p_ctx->reason = ??//TBD + *p_ctx_bkp = *p_ctx; + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_log(p_ctx, sense_data); + if (rc == 0) //Handling for negative test + break; + } + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 9: + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH Failed"); + *p_ctx_bkp = *p_ctx; + //generate_unexpected_error(); //TBD +#ifdef _AIX + p_ctx->flags = DK_VF_HC_INQ; + p_ctx->hint = DK_HINT_SENSE; +#endif + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_verify(p_ctx); + if (rc == 0) //Handling for negative test + break; + + } + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 10: //TEST_DCRC_INVALID_VERSIONS, need to TBD more in fuctionality + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH Failed"); + *p_ctx_bkp = *p_ctx; + //rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_recover_ctx(p_ctx); + if (rc == 0) //Handling for negative test + break; + } + ioctl_dk_capi_detach(p_ctx_bkp); + break; + case 11: //TEST_DCQE_INVALID_VERSIONS + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc=ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH Failed"); + p_ctx->flags = DK_UDF_ASSIGN_PATH; + rc = ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(rc, "DK_CAPI_USER_DIRECT failed"); +#ifdef _AIX + p_ctx->flags = DK_QEF_ADAPTER; +#endif + *p_ctx_bkp = *p_ctx; + generate_unexpected_error(); //TBD + for (i=0; i<3; i++) + { + p_ctx->version = invalid_version[i]; + rc = ioctl_dk_capi_query_exception(p_ctx); + if (rc == 0) break; + } + ioctl_dk_capi_release(p_ctx_bkp); + ioctl_dk_capi_detach(p_ctx_bkp); + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + return 99; + } + close(p_ctx->fd); + return rc; +} + 
+int test_dcd_ioctl( int flag ) // func@DK_CAPI_DETACH +{ + int rc; + struct ctx u_ctx, u_ctx_bkp; + struct ctx *p_ctx = &u_ctx; + struct ctx *p_ctx_bkp = &u_ctx_bkp; + struct flash_disk disks[MAX_FDISK]; + __u64 nlba; + get_flash_disks(disks, FDISKS_ALL); + memset(p_ctx, 0, sizeof(struct ctx)); + pid = getpid(); + p_ctx->fd = open_dev(cflash_path, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", cflash_path, errno); + g_error = -1; + return -1; + } +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); + p_ctx->work.num_interrupts = 5; +#else + p_ctx->work.num_interrupts = 4; +#endif /*_AIX*/ + + p_ctx->flags = DK_AF_ASSIGN_AFU; + rc = ioctl_dk_capi_attach(p_ctx); + CHECK_RC(rc, "DK_CAPI_ATTACH failed"); + nlba = p_ctx->chunk_size; + *p_ctx_bkp = *p_ctx; + switch ( flag ) + { + case 1: // Invalid TC + p_ctx->context_id = 0x000; //TBD invalid ctx id + p_ctx->devno = 0x000; //TBD invalid devno + rc = ioctl_dk_capi_detach(p_ctx); + if (rc != 0) //Handling for negative test case as pass for + rc = 0; + break; + + case 2: // TCN@7.1.83 + p_ctx->devno=0xdead; + rc = ioctl_dk_capi_detach(p_ctx); + if (rc != 0) //Handling for negative test case as pass for + rc = 0; + break; + case 3: // TCN@7.1.84 + p_ctx->context_id = 0xdeaddead; + rc = ioctl_dk_capi_detach(p_ctx); + if (rc != 0) //Handling for negative test case as pass + rc = 0; + break; + case 4: // TCN@7.1.85 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + rc = ioctl_dk_capi_detach(p_ctx); //first detach + CHECK_RC(rc,"DK_CAPI_DETACH failed"); + rc = ioctl_dk_capi_detach(p_ctx); //second detach fail + if (rc != 0) //Handling for negative test case as pass + rc = 0; + return rc; + break; + //new case of calling detach when no lun is created on context + case 5: //TCN@7.1.86 + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create 
LUN_VIRTUAL failed"); + rc = ioctl_dk_capi_detach(p_ctx); //first detach + CHECK_RC(rc,"DK_CAPI_DETACH failed"); + rc = ioctl_dk_capi_detach(p_ctx); //second detach fail + if (rc != 0) //Handling for negative test case as pass + rc = 0; + return rc; + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + ioctl_dk_capi_detach(p_ctx_bkp); // clean up + close(p_ctx->fd); + return rc; +} + + +int test_dcr_ioctl( int flag ) // func@DK_CAPI_RELEASE +{ + int rc,i,rc1; + struct ctx myctx, myctx_bkp; + struct ctx *p_ctx = &myctx; + struct ctx *p_ctx_bkp = &myctx_bkp; + uint64_t resources[MAX_RES_HANDLE]; + __u64 nlba; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + memset(p_ctx_bkp, 0, sizeof(struct ctx)); + nlba = p_ctx->chunk_size; + switch ( flag ) + { + case 1: //TCN@7.1.75 + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + *p_ctx_bkp = *p_ctx; //backuping the defaults contents + p_ctx->devno=0xdeaddead; + rc = ioctl_dk_capi_release(p_ctx); + break; + + case 2: //TCN@7.1.76 + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + *p_ctx_bkp = *p_ctx; //backuping the defaults contents + p_ctx->devno=0x1000; + p_ctx->context_id= 0x9999; + rc = ioctl_dk_capi_release(p_ctx); + break; + + case 3: //TCN@7.1.77 + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + *p_ctx_bkp = *p_ctx; //backuping the defaults contents + p_ctx->context_id= 0x1239999; + rc = ioctl_dk_capi_release(p_ctx); + break; + + case 4://TCN@7.1.78 + //No vlun or plun creation calling directly release ioctl + rc = ioctl_dk_capi_release(p_ctx); + break; + + case 5: //TCN@7.1.79 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE 
failed"); + rc = ioctl_dk_capi_release(p_ctx); // second release, should fail + ctx_close(p_ctx); + return rc; + break; + + case 6: //TCN@7.1.80 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + rc = ioctl_dk_capi_release(p_ctx); // second release, should fail + ctx_close(p_ctx); + return rc; + break; + + case 7: //TCN@7.1.81 + for (i=0; ichunk_size; + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc =compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "size compare failed"); + resources[i]=p_ctx->rsrc_handle; + } + for (i=0; irsrc_handle = resources[i]; + rc = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc, "DK_CAPI_RELEASE failed"); + } + rc |= g_error; + ctx_close(p_ctx); + return rc; + break; + case 8://7.1.82 + //invalid resource handler + for (i=0; ichunk_size; + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_DIRECT failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "size compare failed"); + resources[i]=p_ctx->rsrc_handle; + } + for (i=0; irsrc_handle = 0xdead + i; + rc = ioctl_dk_capi_release(p_ctx); + if (!rc) + { + ctx_close(p_ctx_bkp); + return -1; + } + } + //now release actual one + for (i=0; irsrc_handle = resources[i]; + rc1 = ioctl_dk_capi_release(p_ctx); + CHECK_RC(rc1, "DK_CAPI_RELEASE failed"); + } + ctx_close(p_ctx_bkp); + return rc; + default : + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + close_res(p_ctx_bkp); + ctx_close(p_ctx_bkp); + return(rc); + +} + +int test_dcvr_ioctl( int flag ) // func@DK_CAPI_VLUN_RESIZE +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 nlba; + __u64 new_nlba = 0x100; + pthread_t thread; + int i; + uint64_t chunk; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + 
//thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + nlba = p_ctx->chunk_size; + switch ( flag ) + { + case 1: //TCN@7.1.64 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + // new size id more than disk size + new_nlba= nlba+15; //vlun size is not a factor of 256MB chunk + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc = compare_size(p_ctx->last_lba, (2*nlba-1)); + break; + + case 2: //TCN@7.1.65 + nlba = 15 * p_ctx->chunk_size; + rc=create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "size compare failed"); + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + break; + case 3: //TCN@7.1.66 + nlba = 10 * p_ctx->chunk_size; + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + new_nlba=nlba - p_ctx->chunk_size; //shrink the vlun + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, new_nlba-1); + break; + + case 4: //TCN@7.1.67 + nlba = 7 * p_ctx->chunk_size; + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + new_nlba= nlba + 2*(p_ctx->lun_size); //increase the vlun size + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, new_nlba-1); + break; + + case 5: //TCN@7.1.68 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + new_nlba = p_ctx->last_phys_lba; //total no. 
of blocks in hdisk + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + break; + + case 6: //TCN@7.1.70 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + // incrementing in mutliples of 256MB + for (new_nlba=nlba;new_nlba <=(p_ctx->last_phys_lba + 1);new_nlba+=nlba) + { + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, new_nlba-1); + if (rc)break; + } + break; + + case 7: //TCN@7.1.71 + new_nlba = p_ctx->last_phys_lba + 1; //total size of disk + rc=create_resource(p_ctx, new_nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc=compare_size(p_ctx->last_lba, new_nlba-1); + CHECK_RC(rc, "size compare failed"); + while ( new_nlba >= p_ctx->chunk_size) + { + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, new_nlba-1); + new_nlba -= p_ctx->chunk_size; //decreasing in mutliples of 256 Mb + if (rc)break; + } + break; + + case 8: //TCN@7.1.72 + nlba = 17 * p_ctx->chunk_size; + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + new_nlba = nlba + 100; // nlba< new_nlba < nlba+NUM_BLOCKS + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc =compare_size(p_ctx->last_lba, nlba+p_ctx->chunk_size-1); + break; + + case 9: //TCN@7.1.73 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + new_nlba = nlba - 100; // nlba > new_nlba < NUM_BLOCKS + rc = vlun_resize(p_ctx, new_nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, nlba-1); + break; + + case 10: //TCN@7.1.74 + rc=create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, 
"create LUN_VIRTUAL failed"); + rc = vlun_resize(p_ctx, p_ctx->last_phys_lba+1); + CHECK_RC(rc, "vlun_resize failed"); + rc=compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "size compare failed"); + chunk = p_ctx->last_lba/p_ctx->chunk_size; + for (i=0; i<100; i++) + { + nlba = (rand()%chunk)*(p_ctx->chunk_size); + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + if (nlba)rc=compare_size(p_ctx->last_lba, nlba-1); + if (rc)break; + } + break; + case 11: //new test case G_MC_test_DCVR_ZERO_Vlun_size + nlba = p_ctx->last_phys_lba+1; + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc=compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "size compare failed"); + rc = vlun_resize(p_ctx,0); + CHECK_RC(rc, "vlun_resize failed"); + break; + default : + rc=99; + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + rc |= g_error; + return(rc); +} + +int test_dcvr_error_ioctl( int flag ) // func@DK_CAPI_VLUN_RESIZE error path +{ + int rc,i; + struct ctx myctx, array_ctx[3]; + struct ctx *p_ctx = &myctx; + struct ctx *p_array_ctx = array_ctx; + __u64 nlba; + __u64 new_nlba = 0x100 ; //TBD define the appropriate value + pthread_t thread; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + nlba = p_ctx->chunk_size; + array_ctx[0] = *p_ctx; //backup + switch ( flag ) + { + case 1: //TCN@7.1.58 + rc=create_resource(p_ctx, nlba, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + p_ctx->devno=0x1000; + nlba = 2 * nlba; + rc = vlun_resize(p_ctx, nlba); + if (rc == 22) rc = 0; + else rc = 1; + break; + case 2: //TCN@7.1.59 + rc=create_resource(p_ctx, nlba, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + p_ctx->devno = 1000; + p_ctx->context_id = 9999; + nlba = 2 * 
nlba;
            rc = vlun_resize(p_ctx, nlba);
            if (rc == 22) rc = 0; // expect EINVAL for the bad devno/context_id
            else rc = 1;
            break;
        case 3: //TCN@7.1.60
            rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");
            p_ctx->context_id= 10010; // invalid context id
            nlba = 2 * nlba;
            rc = vlun_resize(p_ctx, nlba);
            break;
        case 4: //TCN@7.1.61
            // Resize with no vLUN/pLUN created on the context.
            rc = vlun_resize(p_ctx, nlba);
            break;
        case 5: //TCN@7.1.62
            // Resize against a physical (direct) LUN must fail.
            rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
            CHECK_RC(rc, "create LUN_DIRECT failed");
            rc = vlun_resize(p_ctx, nlba);
            break;

        case 6: //TCN@7.1.63
            rc=create_resource(p_ctx, nlba, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");
            // Request more blocks than the disk has.
            new_nlba=(p_ctx->last_phys_lba + 3);
            rc = vlun_resize(p_ctx, new_nlba);
#ifdef _AIX
            // AIX rejects an oversized resize.
            if ( rc != 0 )
                rc=0;
            else
                CHECK_RC(1, "vlun_resize did not fail");
#else
            // Linux clamps the size to the whole disk instead of failing.
            CHECK_RC(rc, "vlun_resize failed");
            rc=compare_size(p_ctx->last_lba, p_ctx->last_phys_lba);
            CHECK_RC(rc, "size compare failed");
#endif
            break;
        case 7: //TCN@7.1.69
            rc = create_resource(p_ctx, nlba,DK_UVF_ASSIGN_PATH, LUN_VIRTUAL);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");
            for (i=1; i<3; i++) //create 2 more ctx additional
            {
                rc = ctx_init(&p_array_ctx[i]);
                CHECK_RC(rc, "Context init failed");
                rc = create_resource(&p_array_ctx[i], nlba,DK_UVF_ASSIGN_PATH, LUN_VIRTUAL);
                CHECK_RC(rc, "create LUN_VIRTUAL failed");
            }
#ifdef _AIX
            //new_nlba is equal to size of CAPI flash disk
            new_nlba = p_ctx->last_phys_lba + 1; //total no. of blocks in hdisk
            //last_block * block size = hdisk size
            rc = vlun_resize(p_ctx, new_nlba);
            if ( rc == 0 )
                CHECK_RC(1, "create LUN_VIRTUAL failed");
#endif
            // 4*nlba is used up already in 4 vLuns
            new_nlba = p_ctx->last_phys_lba + 1 - 4*nlba ;
            rc = vlun_resize(p_ctx, new_nlba);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");

            // existing vLun can increase by new_nlba
            new_nlba = new_nlba - 1; // i.e. last block no.

            rc=compare_size(p_ctx->last_lba, new_nlba);
            CHECK_RC(rc, "size compare failed");
            break;
        default:
            rc= 99;
    }
    pthread_cancel(thread);
    // NOTE(review): only array_ctx[0] (the pre-switch backup of p_ctx) is
    // closed here; the extra contexts array_ctx[1..2] opened in case 7 are
    // never closed — possible resource leak, TODO confirm.
    close_res(&p_array_ctx[0]);
    ctx_close(&p_array_ctx[0]);
    return(rc);
}

/*
 * test_dcv_ioctl() - DK_CAPI_VERIFY tests (continues past this chunk).
 * @flag selects one sub-test (TCN 7.1.92 ..); each case sets up a LUN,
 * optionally injects an EEH error and recovers, then issues the verify
 * ioctl and checks verify_last_lba.
 */
int test_dcv_ioctl( int flag ) // func@DK_CAPI_VERIFY

{
    int rc;
    int i;
    struct ctx uctx;
    struct ctx uctx1;
    struct ctx *p_ctx = &uctx;
    struct ctx *p_ctx1 = &uctx1;
    pthread_t thread, ioThreadId, verifyThreadId;
    do_io_thread_arg_t ioThreadData;
    do_io_thread_arg_t * p_ioThreadData=&ioThreadData;
    __u64 nlba = NUM_BLOCKS;
    uint64_t exp_last_lba;
    //__u64 stride= 0x1000;

    pid = getpid();
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");

#ifdef _AIX
    // On AIX the expected size is fixed up-front; on Linux it is taken from
    // last_lba after each verify call.
    exp_last_lba = p_ctx->last_phys_lba;
#endif

    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);

    debug("-------------------- Starting case num : %d @test_dcv_ioctl()--------------------------\n", flag);

    switch ( flag )
    {
    case 1:// TCN@7.1.92
        //After unexpected error,call the ioctl DK_CAPI_VERIFY
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "create LUN_DIRECT failed");//TBD for unexpected error creation

        rc = do_eeh(p_ctx);
        CHECK_RC(rc, "Failed to do EEH injection");
        rc =ioctl_dk_capi_recover_ctx(p_ctx);
        CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed");

#ifdef _AIX
        if ( DK_RF_REATTACHED != p_ctx->return_flags )
#else
        if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags )
#endif
            CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed");

        rc = ctx_reinit(p_ctx);

        CHECK_RC(rc, "ctx_reinit() failed");

        p_ctx->flags=DK_VF_LUN_RESET;
        rc = ioctl_dk_capi_verify(p_ctx);
        // TBD: may also need to return flag checks !!
+ if ( p_ctx->verify_last_lba != p_ctx->last_phys_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + break; + case 2:// When no error is present, call the ioctl DK_CAPI_VERIFY. + //TCN@7.1.93 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + break; + case 3: //TCN@7.1.94 + // After an unexpected error, call the ioctl DK_CAPI_VERIFY. While ioctl is still verifying the disk, call for VLUN create, detach etc. should fail + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + + CHECK_RC(rc, "ctx_reinit() failed"); + + // A new resorce will be created soon + // copying the p_ctx image + memcpy((void *)p_ctx1,(const void *)p_ctx,sizeof(struct ctx)); + + p_ctx->flags=DK_VF_LUN_RESET; + + // Let verify ioctl execute in different thread. 
+ pthread_create(&verifyThreadId, NULL, verify_ioctl_thread, p_ctx); + + //Need to see the feasiblity + rc=create_resource(p_ctx1, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + if (rc != 0) + { + debug("%d: LUN_VIRTUAL FAILED\n",pid); + } + rc |= ioctl_dk_capi_vlun_resize(p_ctx); + if (rc != 0) + { + debug("%d:vlun_resize FAILED \n", pid); + } + + rc |=ioctl_dk_capi_release(p_ctx); + if (rc != 0) + { + debug("%d:dk_capi_release1 FAILED \n",pid); + } + rc |=ioctl_dk_capi_release(p_ctx1); + if (rc != 0) + { + debug("%d:dk_capi_release2 FAILED \n",pid); + } + + // Wait for verify thread to complete + pthread_join(verifyThreadId, NULL); + // We don't expext verify ioctl to return failure. + if (threadRC != 0 ) + { + rc|=threadRC; + debug("%d:verify ioctl failed \n",pid); + } + + pthread_cancel(thread); + rc|=ctx_close(p_ctx); + + return rc; + + break; + + case 4: // To call ioctl DK_CAPI_VERIFY with DK_VF_LUN_RESET flag + //TCN@7.1.95 + for (i=0; i<3; i++) // multiple vluns creation + { + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. 
+ usleep(5); + + p_ctx->flags=DK_VF_LUN_RESET; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + + case 5: //TCN@7.1.96 + // To call ioctl DK_CAPI_VERIFY with DK_VF_HC_TUR flag ( a test unit ready ) + for (i=0; i<3; i++) // multiple vluns creation + { + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + } + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + + p_ctx->flags=DK_VF_HC_TUR ; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 6: // TCN@7.1.97 + // To call ioctl DK_CAPI_VERIFY with DK_VF_HC_INQ flag ( standard enquiry ) + for (i=0; i<3; i++) // multiple vluns creation + { + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + + // We wish to do backgroud IO in a different thread... Setting up for that ! 
+ p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + + p_ctx->flags=DK_VF_HC_INQ ; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 7: // TCN@7.1.98 + // To call ioctl DK_CAPI_VERIFY with DK_HINT_SENSE flag and valid sense date + for (i=0; i<3; i++) // multiple vluns creation + { + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. 
+ usleep(5); + +#ifdef _AIX + p_ctx->hint=DK_HINT_SENSE; +#else + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 8: // TCN@7.1.99 + // To call ioctl DK_CAPI_VERIFY with DK_VF_LUN_RESET flag + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + + p_ctx->flags=DK_VF_LUN_RESET; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 9: // TCN@7.1.100 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "LUN DIRECT creation failed"); + + // We wish to do backgroud IO in a different thread... Setting up for that ! 
+ p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + + p_ctx->flags=DK_VF_HC_TUR; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + + case 10 : //TCN@7.1.101 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "LUN DIRECT creation failed"); + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + + p_ctx->flags=DK_VF_HC_INQ; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 11: //TCN@7.1.102 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // We wish to do backgroud IO in a different thread... Setting up for that ! 
+ p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. + usleep(5); + +#ifdef _AIX + p_ctx->hint=DK_HINT_SENSE; +#else + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX + exp_last_lba = p_ctx->last_lba; +#endif + if ( p_ctx->verify_last_lba != exp_last_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + pthread_cancel(ioThreadId); + break; + case 12: // TCN@7.1.103 + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + // We wish to do backgroud IO in a different thread... Setting up for that ! + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=0x10; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + + // Sleep for a while before issuing verify ioctl. 
+ usleep(5); + + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + + // waiting I/O complete + pthread_join(ioThreadId, NULL); + + + rc =ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "DK_CAPI_RECOVER_CTX failed"); + +#ifdef _AIX + if ( DK_RF_REATTACHED != p_ctx->return_flags ) +#else + if ( DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags ) +#endif + CHECK_RC(1, "ioctl_dk_capi_recover_ctx flag verification failed"); + + rc = ctx_reinit(p_ctx); + + CHECK_RC(rc, "ctx_reinit() failed"); + + rc = do_io(p_ctx, 0x5); + if ( rc == 0 ) + { + CHECK_RC(1, "IO did not fail\n"); + } + + // Sleep for a while before issuing verify ioctl. + sleep(2); + +#ifdef _AIX + p_ctx->hint=DK_HINT_SENSE; +#else + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; +#endif + p_ctx->flags=DK_VF_LUN_RESET; + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + // TBD: may also need to return flag checks !! + if ( p_ctx->verify_last_lba != p_ctx->last_phys_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } + + break; + default: + return 99; + } + pthread_cancel(thread); + rc|=close_res(p_ctx); + rc|=ctx_close(p_ctx); + return rc; +} + +int test_dcv_error_ioctl( int flag ) // func@DK_CAPI_VERIFY error path +{ + int rc; + __u64 nlba = NUM_BLOCKS; + struct ctx uctx, uctx_bkp; + struct ctx *p_ctx = &uctx; + struct ctx *p_ctx_bkp = &uctx_bkp; + pthread_t thread; + + pid = getpid(); + rc = ctx_init(p_ctx); + + *p_ctx_bkp = *p_ctx; + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + switch ( flag ) + { + case 1: //TCN@7.1.87 + // To test invalid path_id for ioctl DK_CAPI_VERIFY + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + p_ctx->path_id=1000; + rc = ioctl_dk_capi_verify(p_ctx); + if (rc == 22) //Handling for negative test case as pass for 
invalid + rc = 0; + else rc=1; + break; + + case 2: //TCN@7.1.88 + // To test invalid flags for ioctl DK_CAPI_VERIFY + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + p_ctx->flags= 0x8; + // 0x4 is max value for supported flag. + // And, any bit higher than it is ignored + // So, the flag 0x8 is treated as 0 + // and verify ioctl route to default behavior. + rc = ioctl_dk_capi_verify(p_ctx); + break; + case 3: //TCN@7.1.89 + // To test invalid rsrc_handle for ioctl DK_CAPI_VERIFY + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc=close_res(p_ctx); + CHECK_RC(rc, "failed to release resource"); + rc = ioctl_dk_capi_verify(p_ctx); +#ifndef _AIX + if (rc == g_errno && g_errno == 22) //Handling for negative test case as pass for invalid + rc = 0; + else rc=1; + g_errno = 0; +#else + // AIX wouldn't take ctx/res info for this ioctl + if ( p_ctx->verify_last_lba != p_ctx->last_phys_lba ) + { + debug("%d: last_lba returned by verify ioctl() isn't correct: ERROR\n", pid); + rc=1; + } +#endif + break; + default: + rc=99; + } + pthread_cancel(thread); + close_res(p_ctx_bkp); + rc|=ctx_close(p_ctx_bkp); + return rc; +} +#ifdef _AIX +int test_dcle_ioctl( int flag ) // func@DK_CAPI_LOG_EVENT +{ + int rc,i; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 nlba = NUM_BLOCKS; + __u64 stride= 0x1000; + //__u64 new_nlba = 0x10000 ; + pthread_t thread; + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + switch (flag) + { + // Call ioctl with valid field, one errpt report should be generated. 
+ case 1: //TCN@7.1.104 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + rc = ioctl_dk_capi_log(p_ctx,"DISK OPERATION ERROR"); + CHECK_RC(rc, "ioctl_dk_capi_log failed"); + debug("%d\n", p_ctx->return_flags); + if ( p_ctx->return_flags == 0 ) //TBD for return errors + rc=0; + else + rc=1; + break; + + case 2: //TCN@7.1.108 + // Call ioctl DK_CAPI_LOG_EVENT with DK_LF_TEMP flag + for (i=0; i<3; i++) + { + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + } + do_io(p_ctx, stride); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + 
+ p_ctx->flags=DK_LF_TEMP; + rc = ioctl_dk_capi_log(p_ctx,"DISK OPERATION ERROR"); //TBD + CHECK_RC(rc, "ioctl_dk_capi_log failed"); + if ( p_ctx->return_flags == 0 ) //TBD for return errors + rc=0; + else + rc=1; + break; + + case 3: //TCN@7.1.109 + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + do_io(p_ctx, stride); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + p_ctx->flags=DK_LF_PERM; + rc = ioctl_dk_capi_log(p_ctx,"DISK OPERATION ERROR"); + CHECK_RC(rc, "ioctl_dk_capi_log failed"); + debug("%d\n", p_ctx->return_flags); + if ( p_ctx->return_flags == 0 ) //TBD for return errors + rc=0; + else + rc=1; + break; + + case 4: // TCN@7.1.110 DK_FL_HW_ERR + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + do_io(p_ctx, stride); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a 
dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + p_ctx->flags=DK_LF_HW_ERR; + rc = ioctl_dk_capi_log(p_ctx,"DISK OPERATION ERROR"); + CHECK_RC(rc, "ioctl_dk_capi_log failed"); + debug("%d\n", p_ctx->return_flags); + if ( p_ctx->return_flags == 0 ) //TBD for return errors + rc=0; + else + rc=1; + break; + + case 5: // TCN@7.1.110 DK_FL_SW_ERR + // Call ioctl DK_CAPI_LOG_EVENT with DK_FL_HW_ERR flag + rc=create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + do_io(p_ctx, stride); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + p_ctx->flags=DK_LF_SW_ERR; + rc = ioctl_dk_capi_log(p_ctx,"DISK OPERATION ERROR"); + CHECK_RC(rc, "ioctl_dk_capi_log failed"); + debug("%d\n", p_ctx->return_flags); + if ( p_ctx->return_flags == 0 ) //TBD for return errors + rc=0; + else + rc=1; + break; + default: + fprintf(stderr," Nothing to do with default call\n"); + rc = 99; + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} +#endif +int generate_unexpected_error(void) +{ + fprintf(stderr,"Yet to define the unxpected func"); + return 99; +} + diff --git a/src/cflash/test/cflash_test_ioctl_io.c 
b/src/cflash/test/cflash_test_ioctl_io.c new file mode 100644 index 00000000..51125b81 --- /dev/null +++ b/src/cflash/test/cflash_test_ioctl_io.c @@ -0,0 +1,1354 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_ioctl_io.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" +#include +#include +#include +extern int MAX_RES_HANDLE; +extern int g_error; +extern pid_t pid; +extern pid_t ppid; +extern bool afu_reset; +extern bool long_run_enable; +extern char cflash_path[MC_PATHLEN]; +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +static __u64 chunks[]={ 4,8,16,32,24,16,20,2,12,1 }; +static int imLastContext = 0; +static __u64 chunkRemain = 0; + +key_t key = 0x1234; +struct mymsgbuf +{ + long mtype; + char mtext[2]; +}; + +int test_spio_vlun(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + + __u64 chunk = 0x10; + __u64 nlba; + __u64 stride=0x10000; + + pid = getpid(); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + if (3 == cmd) + { + //IO ON NO RES expect AFURC + p_ctx->last_lba = chunk * p_ctx->chunk_size -1; + rc = do_io(p_ctx, stride); + pthread_cancel(thread); + ctx_close(p_ctx); + return rc; + } + + //create 0 vlun size & later call resize ioctl + if (1 == cmd) + { + //0 size + debug("%d: create VLUN with 0 size\n", pid); + rc = create_resource(p_ctx, 0, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); +#ifdef _AIX + rc = compare_size(p_ctx->last_lba, 0); +#else + rc = compare_size(p_ctx->last_lba, -1); +#endif + CHECK_RC(rc, "failed compare_size"); + p_ctx->last_lba=0xFFFF; + rc = do_io(p_ctx,stride); + if (rc != 0x13 ) + { + CHECK_RC(1,"IO should fail with afu_rc=0x13\n"); + } + else + { + fprintf(stderr, "IO failed as expected, don't worry....\n"); + g_error=0; + rc=0; + } + } + else + { + nlba = 1 * (p_ctx->chunk_size); + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "failed compare_size"); + } + nlba = chunk * (p_ctx->chunk_size); + rc = 
vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + rc = compare_size(p_ctx->last_lba, nlba-1); + CHECK_RC(rc, "failed compare_size"); + + //i would like to write/read all lbas + //stride = p_ctx->blk_len; + + rc |= do_io(p_ctx, stride); + rc |= vlun_resize(p_ctx, 0); + rc |= vlun_resize(p_ctx, nlba); + rc |= do_io(p_ctx, stride); + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + rc |= g_error; + return rc; +} + +int test_spio_plun() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + __u64 stride= 0x10000; + + pid = getpid(); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + //for PLUN 2nd argument(lba_size) would be ignored + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); + CHECK_RC(rc, "failed compare_size"); + + rc = do_io(p_ctx, stride); + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_spio_lun(char *dev, dev64_t devno, + __u16 lun_type, __u64 chunk) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + int loop=5; + int i=0; + + __u64 nlba = 0; + __u64 stride= 0x1000; + + pid = getpid(); + + rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + if ( LUN_DIRECT == lun_type) + { + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + if (long_run_enable) stride=0x100; + rc = do_io(p_ctx, stride); + } + else + { + rc = create_resource(p_ctx, nlba, + DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + nlba = chunk * p_ctx->chunk_size; + rc = vlun_resize(p_ctx, nlba); + 
if (rc == 28) + { + fprintf(stderr, "%d:Requested was more..try with half now...\n",pid); + nlba = nlba/2; + rc = vlun_resize(p_ctx, nlba); + if (rc == 28) + { + fprintf(stderr, "%d: No space left.. terminate this context..\n",pid); + return 0; + } + } + CHECK_RC(rc, "vlun_resize failed"); + if (long_run_enable) + { + stride=0x1; + //loop=20; + } + while (i++ 4) + cfdisk=4; + } + for (i = 0; i 3) + cfdisk=3; + } + for (i = 0; i< cfdisk; i++) + { + //create atleast 10 chunks on each on PLUN + for (j=0; j < 10; j++) + { + if (0 == fork()) //child process + { + rc = test_spio_lun(fldisks[i].dev, fldisks[i].devno[0], + LUN_VIRTUAL,chunks[j%10]); + exit(rc); + } + } + } + rc = wait4all(); + return rc; +} + +int test_spio_direct_virtual() +{ + int rc=0; + int j; + int cfdisk = 0; + struct flash_disk fldisks[MAX_FDISK]; + //__u64 chunk; + int count = 1; + int procces=10; + int index = 0; //to switch flash disks for VLUN & PLUN IO + + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + printf("LONG_RUN enabled...\n"); + count = 10; + } + cfdisk = get_flash_disks(fldisks, FDISKS_ALL); + if (cfdisk < 2) + { + fprintf(stderr,"Must have 2 flash disks..\n"); + return -1; + } + + while (count-- >0) + { + if (0 == fork()) + { + rc = test_spio_lun(fldisks[index].dev, + fldisks[index].devno[0],LUN_DIRECT,0); + exit(rc); + } + //create atleast 10 chunks on each on PLUN + index = (index+1)%2; + for (j=0; j < procces; j++) + { + if (0 == fork()) //child process + { + //here you can change the path ids later + rc = test_spio_lun(fldisks[index].dev, fldisks[index].devno[0], + LUN_VIRTUAL,chunks[j]); + exit(rc); + } + } + rc = wait4all(); + CHECK_RC(rc, "wait4all failed"); + printf("%d loop remain................\n",count); + } + return rc; +} + +void* res_thread(void *arg) +{ + int rc; + struct ctx *p_ctx = (struct ctx *)arg; + res_hndl_t res_hndl; + __u64 rsrc_handle; + __u64 stride = 0x1000; + + pthread_mutex_lock(&mutex); + rc = create_resource(p_ctx, p_ctx->lun_size, 
DK_UVF_ALL_PATHS, LUN_VIRTUAL); + res_hndl = p_ctx->res_hndl; + rsrc_handle = p_ctx->rsrc_handle; + if (rc) + { + g_error = -1; + pthread_mutex_unlock(&mutex); + return NULL; + } + p_ctx->res_hndl = res_hndl; + rc = do_io(p_ctx, stride); + if (rc) + { + g_error = -1; + pthread_mutex_unlock(&mutex); + return NULL; + } + pthread_mutex_unlock(&mutex); + sleep(1); + + // Closing the resource after IO done + sleep(2); + pthread_mutex_lock(&mutex); + p_ctx->rsrc_handle = rsrc_handle; + rc = close_res(p_ctx); + pthread_mutex_unlock(&mutex); + return 0; +} + +int create_ctx_process(char *dev, dev64_t devno, __u64 chunk) +{ + int rc; + struct ctx my_ctx; + struct ctx *p_ctx = &my_ctx; + g_error=0; + pid = getpid(); + pthread_t threads[MAX_RES_HANDLE]; + pthread_t intr_thread; + //__u64 flags; + int i; + + rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno); + CHECK_RC(rc, "Context init failed"); + // interrupt handler per context + rc = pthread_create(&intr_thread, NULL, ctx_rrq_rx, p_ctx); + + p_ctx ->lun_size = chunk * p_ctx->chunk_size; + for (i = 0; i %d of last contxt \n", pid,i+1); + p_ctx ->lun_size = ( chunk + chunkRemain )*p_ctx->chunk_size; + } + rc = pthread_create(&threads[i], NULL, res_thread, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + } + //wait all threads to get complete + for (i = 0; i dev, cflash_path); +#ifdef _AIX + p_ctx->fd =open_dev(p_ctx->dev, O_RDWR); + if (p_ctx->fd < 0) + { + fprintf(stderr,"%s open failed...\n",p_ctx->dev); + return -1; + } + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query ioctl failed"); + close(p_ctx->fd); +#endif + uint64_t chunk_size; + if (2 == cmd) + { + lba = get_disk_last_lba(p_ctx->dev, p_ctx->devno, &chunk_size); + if (lba == 0) + { + fprintf(stderr, "Failed to get last_lba of %s\n", p_ctx->dev); + return -1; + } + chunk = (lba+1)/chunk_size; + //chunks per context( each context will 16 VLUNS same size) + chunk = chunk/(max_p * MAX_RES_HANDLE); + if ( chunk == 0 ) + { + fprintf(stderr,"chunk-> 
0X%"PRIX64" should >= max_p * MAX_RES_HANDLE=>%d\n", + chunk,(max_p * MAX_RES_HANDLE)); + return -1; + } + chunkRemain = chunk%(max_p * MAX_RES_HANDLE); + } + + for (i = 0; i< max_p; i++) + { + if (0 == fork()) + { + //child process + usleep(10000); + if (2 == cmd) + { + if (i == max_p-1 ) + { + // this is the last context + imLastContext=1; + } + rc = create_ctx_process(p_ctx->dev, p_ctx->devno,chunk); + } + else + { + rc = create_ctx_process(p_ctx->dev, p_ctx->devno,1); + } + debug("%d: exiting with rc=%d\n",getpid(), rc); + exit(rc); + } + } + rc = wait4all(); + return rc; +} + +int do_attach_detach(char *dev, dev64_t devno, __u16 lun_type) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunk = 20; + __u64 nlba; + int count = 20; + char *str = getenv("LONG_RUN"); + if (str != NULL) + { + count = 100000; + printf("LONG_RUN enabled...loop=%d\n",count); + fflush(stdout); + } + pid = getpid(); + + while (count-- >0) + { + rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno); + CHECK_RC(rc, "Context init failed"); + if (LUN_VIRTUAL == lun_type) + { + chunk = rand()%16; + //create 0 vlun size & later call resize ioctl + rc = create_resource(p_ctx, chunk, DK_UVF_ALL_PATHS, lun_type); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + chunk = rand()%32; + nlba = chunk * p_ctx->chunk_size; + rc = vlun_resize(p_ctx, nlba); + CHECK_RC(rc, "vlun_resize failed"); + } + else + { + rc = create_resource(p_ctx,0, DK_UDF_ASSIGN_PATH, lun_type); + CHECK_RC(rc, "create LUN_DIRECT failed"); + } + close_res(p_ctx); + ctx_close(p_ctx); + if (count%500 == 0) + printf("%d: loop remains....\n",count); + fflush(stdout); + } + return 0; +} + +int test_spio_attach_detach(int cmd) +{ + int rc; + int i; + int cfdisk = 0; + struct flash_disk disks[MAX_FDISK]; + __u64 chunk = 32; + + cfdisk = get_flash_disks(disks, FDISKS_ALL); + if (cfdisk < 2) + { + fprintf(stderr,"Must have 2 flash disks..\n"); + return -1; + } + if (fork() == 0) + { + //child process + if (cmd == 1) + { + 
//virtual IO + for (i=0;ist_lba= p_ctx->last_lba +1 -(chunk*p_ctx->chunk_size); + if (long_run_enable) p_ctx->st_lba=0; //let do IO on complete plun + for (i=0;i< sizeof(buf_size)/sizeof(__u64);i++) + { + rc = allocate_buf(&rwbuf, buf_size[i]); + CHECK_RC(rc, "memory allocation failed"); + printf("%d: do large io size=0X%"PRIX64"\n",pid, buf_size[i]); + rc = do_large_io(p_ctx, &rwbuf, buf_size[i]); + deallocate_buf(&rwbuf); + if (rc) break; //get out from here + } + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +int test_large_trnsfr_boundary() +{ + int rc; + struct ctx my_ctx; + struct ctx *p_ctx = &my_ctx; + pthread_t thread; + struct rwlargebuf rwbuf; + __u64 buf_size = 0x1000000; //16MB + __u64 chunk = 10; + + pid = getpid(); +#ifdef _AIX + system("ulimit -d unlimited"); + system("ulimit -s unlimited"); + system("ulimit -m unlimited"); +#endif + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + //do RW last cmd with crossed LBA boundary + //i.e. 
last_lba size is 0x100; + //do send rw with 0x10 & cross limit of 0x100 + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + rc = allocate_buf(&rwbuf, buf_size); + CHECK_RC(rc, "memory allocation failed"); + //to make sure last cmd rw beyond boundary + p_ctx->st_lba = p_ctx->last_lba - (chunk * p_ctx->chunk_size); + p_ctx->st_lba = p_ctx->st_lba +20 ; + + rc = do_large_io(p_ctx, &rwbuf, buf_size); + deallocate_buf(&rwbuf); + + pthread_cancel(thread); + close_res(p_ctx); + ctx_close(p_ctx); + return rc; +} + +//int create_res_hndl_afu_reset(char *dev, dev64_t devno, __u64 chunk) +int create_res_hndl_afu_reset(bool do_recover, bool last) +{ + int rc; + struct ctx my_ctx; + struct ctx *p_ctx = &my_ctx; + //int i; + pthread_t thread; + __u64 chunk = 0x1; + __u64 stride= 0x1; + int msgid; + struct mymsgbuf msg_buf; + pthread_t ioThreadId; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + // we have to export "NO_IO; if we want to avoid IO + char * noIOP = getenv("NO_IO"); + + pid = getpid(); +#ifdef _AIX + memset(p_ctx,0,sizeof(my_ctx)); + strcpy(p_ctx->dev,cflash_path); + if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",p_ctx->dev, errno); + return -1; + } + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_query_path failed...\n"); + rc = ctx_init_internal(p_ctx, 0, p_ctx->devno); +#else + rc = ctx_init(p_ctx); +#endif + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + if ( noIOP == NULL ) + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + //create 0 vlun size & later call resize ioctl + rc = create_resource(p_ctx, chunk * (p_ctx->chunk_size), + 0, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + //last new process send message to waiting process + //that new ctx created now you can try to reattach + msgid = msgget(key, IPC_CREAT | 0666); + if 
(msgid < 0 ) + { + fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid); + return -1; + } + memset(&msg_buf, 0, sizeof(struct mymsgbuf)); + if (last) + { + goto end; + } + if ( noIOP == NULL ) + { + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=0x100000; // Need this to go on 10 secs + debug("%d: things look good, doing IO...\n",pid); + rc =pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + } +#ifdef _AIX + rc = do_eeh(p_ctx); +#else + rc = do_poll_eeh(p_ctx); +#endif + g_error=0; //reset any prev error might caught while EEH + if ( noIOP == NULL ) + { + pthread_join(ioThreadId, NULL); + } +#ifndef _AIX //for linux + if ( noIOP == NULL ) + pthread_cancel(thread); +#endif + + //We here after EEH done + if (do_recover) + { + //do if recover true + debug("%d: woow EEH is done recovering...\n",pid); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); + msg_buf.mtype =2; + strcpy(msg_buf.mtext, "K"); + if (msgsnd(msgid, &msg_buf, 2, IPC_NOWAIT) < 0) + { + fprintf(stderr, "%d: msgsnd failed\n", pid); + return -1; + } +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) + CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED"); + p_ctx->flags = DK_VF_HC_TUR; + p_ctx->hint = DK_HINT_SENSE; +#endif + fflush(stdout); + ctx_reinit(p_ctx); +#ifdef _AIX + p_ctx->hint=DK_HINT_SENSE; +#else + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); +#ifndef _AIX //for linux + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); +#endif + } + else + { + //last one is + /*msgid = msgget(key, IPC_CREAT | 0666); + if(msgid < 0 ){ + fprintf(stderr, "%d: msgget() failed before msgrcv()\n", pid); + 
return -1; + } + debug("%d: Going to wait at msgrcv()..\n", pid); + fflush(stdout); + if(msgrcv(msgid, &msg_buf, 2, 1, 0) < 0) { + fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno); + return -1; + } + + debug("%d: Got out of msgrcv()..EEH is done, Try to recover....\n",pid); + */ + //as per today(9/28/2015) discussion with Sanket that + //new attach will fail until holding context not exited + //hope same apply for Linux as well + return 100; + /*rc = ioctl_dk_capi_recover_ctx(p_ctx); + if(rc) return 100; //this to make sure recover failed + else { + fprintf(stderr,"%d:com'on recover should fail here...\n",pid); + return 1; // we don't want to try IO anyway + }*/ + } + +end: + if ( noIOP == NULL ) + { + stride=0x1; + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "IO failed after EEH/recover"); + } + if ( noIOP == NULL ) + pthread_cancel(thread); + sleep(1); + fflush(stdout); + sleep(5); // additional time to be safe ! + rc=close_res(p_ctx); + + sleep(5); // Don't let child exit to keep max ctx alive + rc |= ctx_close(p_ctx); + CHECK_RC(rc,"ctx close or close_res failed\n"); + return rc; +} + +int max_ctx_rcvr_except_last_one() +{ + int max_p = MAX_OPENS; + int i; + int rc; + bool do_recover = true; + int msgid; + struct mymsgbuf msg_buf; +#ifndef _AIX + char tmpBuff[MAXBUFF]; + char cmdToRun[MAXBUFF]; + const char *configCmdP = "echo 10000000 > /sys/kernel/debug/powerpc/eeh_max_freezes"; +#endif + system("ipcrm -Q 0x1234 >/dev/null 2>&1"); + + for (i = 0; i < max_p-1; i++) + { + if (0 == fork()) + { + //child process + rc = create_res_hndl_afu_reset(do_recover, false); + exit(rc); + } + } + if (0 == fork()) + { + rc = create_res_hndl_afu_reset(false, false); + if (100 == rc) rc = 0; //it was expected to failed with 100 + else rc=1; + exit(rc); + } + + //Wait for all all child to reach do_eeh() call. 
+ sleep(10); +#ifdef _AIX + printf("%d:.....MAIN: BOSS do EEH now manually.......\n",getpid()); +#else + char * autoEehP = getenv("AUTO_EEH"); + if ( NULL != autoEehP ) + { + printf("%d:.....MAIN: doing EEH now .......\n",getpid()); + + rc = diskToPCIslotConv(cflash_path, tmpBuff ); + CHECK_RC(rc, "diskToPCIslotConv failed \n"); + + rc = prepEEHcmd( tmpBuff, cmdToRun); + CHECK_RC(rc, "prepEEHcmd failed \n"); + + rc = system(configCmdP); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,configCmdP); + } + + debug("%d ---------- Command : %s----------------\n",pid,cmdToRun); + rc = system(cmdToRun); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,cmdToRun); + } + + + } + else + { + printf("%d:.....MAIN: BOSS do EEH now manually.......\n",getpid()); + } +#endif + + sleep(10); // Let the max ctx recover before we proceed. + + printf("%d:.....MAIN: Hope EEH is done by now..\n",getpid()); + //now create new ctx + //send true argument means, do create a msg queue + //and inform waiting last process to perform reattach + if (0 == fork()) + { + pid=getpid(); + printf("%d: process created to attach after eeh...\n",pid); + msgid = msgget(key, IPC_CREAT | 0666); + if (msgid < 0 ) + { + fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid); + exit(-1); + } + if (msgrcv(msgid, &msg_buf, 2, 2, 0) < 0) + { + fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno); + exit(-1); + } + printf("%d:OK Recover done now start new attach.......\n",pid); + rc = create_res_hndl_afu_reset(true, true); + exit(rc); + } + rc = wait4all(); + printf("%d: rc for wait4all(): %d\n", getpid(), rc); + system("ipcrm -Q 0x1234"); + return rc; +} + +int no_recover_and_ioctl() +{ + int rc; + struct ctx my_ctx; + struct ctx *p_ctx = &my_ctx; + //__u64 flags; + pthread_t thread; + __u64 chunk = 0x1; + __u64 stride= 0x1; + pthread_t ioThreadId; + +#ifdef _AIX + //these are unused on Linux + int msgid; + struct mymsgbuf msg_buf; +#endif + 
do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + + char * noIOP = getenv("NO_IO"); + + pid = getpid(); + printf("%d:no_recover_and_ioctl process created...\n",pid); + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + + rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), + DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_VIRTUAL failed"); + + if ( noIOP == NULL ) + { + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=100; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + } + +#ifdef _AIX + rc = do_eeh(p_ctx); +#else + rc = do_poll_eeh(p_ctx); +#endif + if ( noIOP == NULL ) + { + pthread_join(ioThreadId, NULL); + } + if ( noIOP == NULL ) + pthread_cancel(thread); +#ifdef _AIX + msgid = msgget(key, IPC_CREAT | 0666); + if (msgid < 0 ) + { + fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid); + return -1; + } + if (msgrcv(msgid, &msg_buf, 2, 2, 0) < 0) + { + fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno); + return -1; + } + sleep(1); + rc = create_resource(p_ctx, p_ctx->chunk_size, + DK_UVF_ALL_PATHS, LUN_VIRTUAL); + rc |= vlun_resize(p_ctx, 2*p_ctx->chunk_size); + rc |= close_res(p_ctx); + rc |= ctx_close(p_ctx); +#else + // For the lost context, we will create another new. 
+ rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), + DK_UVF_ALL_PATHS, LUN_VIRTUAL); + + pthread_cancel(thread); +#endif + return rc; +} + +int max_ctx_rcvr_last_one_no_rcvr() +{ + int max_p = MAX_OPENS; + int i; + int rc; + int pid=getpid(); + bool do_recover = true; +#ifndef _AIX + char tmpBuff[MAXBUFF]; + char cmdToRun[MAXBUFF]; + const char *configCmdP = "echo 10000000 > /sys/kernel/debug/powerpc/eeh_max_freezes"; +#endif + + for (i = 0; i < max_p-1; i++) + { + if (0 == fork()) + { + //child process + rc = create_res_hndl_afu_reset(do_recover, false); + exit(rc); + } + } + + if (0 == fork()) + { + rc = no_recover_and_ioctl(); +#ifdef _AIX + if (rc == 46) + rc = 0; + else + rc = 1; +#endif + exit(rc); + } + //now do eeh +#ifdef _AIX + sleep(10); // Let all child reach do_eeh() call. +#else + sleep(4); +#endif + +#ifdef _AIX + printf("%d:.....MAIN: BOSS do EEH now manually.......\n",getpid()); +#else + char * autoEehP = getenv("AUTO_EEH"); + if ( NULL != autoEehP ) + { + printf("%d:.....MAIN: doing EEH now .......\n",getpid()); + rc = diskToPCIslotConv(cflash_path, tmpBuff ); + CHECK_RC(rc, "diskToPCIslotConv failed \n"); + + rc = prepEEHcmd( tmpBuff, cmdToRun); + CHECK_RC(rc, "prepEEHcmd failed \n"); + + rc = system(configCmdP); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,configCmdP); + } + + printf("%d ---------- Command : %s----------------\n",pid,cmdToRun); + + rc = system(cmdToRun); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,cmdToRun); + } + + } + else + { + printf("%d:.....MAIN: BOSS do EEH now manually.......\n",getpid()); + } +#endif + + rc = wait4all(); + printf("%d: rc for wait4all(): %d\n", pid, rc); + return rc; +} + +int test_clone_ioctl(int cmd) +{ + struct ctx myctx; + int i; + pid_t cpid; + struct ctx *p_ctx=&myctx; + 
uint64_t nlba; + uint64_t st_lba; + uint64_t stride=0x1000; + int rc=0; + uint64_t src_ctx_id; + uint64_t src_adap_fd; + pthread_t thread; + uint64_t resource[MAX_RES_HANDLE]; + uint64_t RES_CLOSED=-1; + int cl_index[5]={ 1,7,10,12,15 }; + pid = getpid(); + rc =ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + p_ctx->flags = DK_UVF_ALL_PATHS; + for (i=0;ilun_size = (i+1)*p_ctx->chunk_size; + rc = create_res(p_ctx); + CHECK_RC(rc, "create res failed"); + resource[i]=p_ctx->rsrc_handle; + } + for (i=0;i<5;i++) + { + p_ctx->rsrc_handle= resource[cl_index[i]]; + close_res(p_ctx); + resource[cl_index[i]]= RES_CLOSED; + } + for (i=0; ichunk_size; + p_ctx->rsrc_handle = resource[i]; + p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK; + for (st_lba=0;st_lbacontext_id; + src_adap_fd = p_ctx->adap_fd; + //do unmap parent mmio 1st + rc =munmap((void *)p_ctx->p_host_map, p_ctx->mmio_size); + CHECK_RC_EXIT(rc, "munmap failed\n"); + //do fresh attach for child + rc = ctx_init_internal(p_ctx,DK_AF_ASSIGN_AFU,p_ctx->devno); + CHECK_RC_EXIT(rc, "ctx_init_internal failed"); + pthread_create(&thread, NULL,ctx_rrq_rx,p_ctx); + //do clone + rc = ioctl_dk_capi_clone(p_ctx, src_ctx_id,src_adap_fd); + CHECK_RC_EXIT(rc, "clone ioctl failed"); + //do read data + for (i=0; i< MAX_RES_HANDLE;i++) + { + if (RES_CLOSED == resource[i]) + continue; + p_ctx->rsrc_handle = resource[i]; + p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK; + nlba = (i+1)*p_ctx->chunk_size; + for (st_lba=0;st_lbaflags = DK_UVF_ALL_PATHS; + for (i=0; i < 5;i++) + { + p_ctx->lun_size = (cl_index[i]+1)*p_ctx->chunk_size; + rc = create_res(p_ctx); + CHECK_RC_EXIT(rc,"res_create failed\n"); + resource[cl_index[i]] = p_ctx->rsrc_handle; + } + //do io on new resources + p_ctx->st_lba = 0; + for (i=0;i<5;i++) + { + p_ctx->last_lba = ((cl_index[i]+1)*p_ctx->chunk_size) -1; + p_ctx->res_hndl = resource[cl_index[i]] & RES_HNDLR_MASK; + rc = do_io(p_ctx, 
stride); + CHECK_RC_EXIT(rc, "do_io failed\n"); + } + pthread_cancel(thread); + ctx_close(p_ctx); + exit(0); + } //child process end + else + { + //create pthread + sleep(1); //let child process do clone & read written data + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + //do open closed res + //now create closed resources + p_ctx->flags = DK_UVF_ALL_PATHS; + for (i=0; i < 5;i++) + { + p_ctx->lun_size = (cl_index[i]+1)*p_ctx->chunk_size; + rc = create_res(p_ctx); + CHECK_RC_EXIT(rc,"res_create failed\n"); + resource[cl_index[i]] = p_ctx->rsrc_handle; + } + //do resize all resources & IO + for (i=0;ireq_size = (rand()%MAX_RES_HANDLE +1) * p_ctx->chunk_size; + p_ctx->rsrc_handle = resource[i]; + p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK; + rc = ioctl_dk_capi_vlun_resize(p_ctx); + CHECK_RC(rc, "dk_capi_resize_ioctl failed\n"); + rc = do_io(p_ctx, stride); + CHECK_RC(rc, "do_io failed\n"); + } + //close res + for (i=0;irsrc_handle = resource[i]; + rc = close_res(p_ctx); + CHECK_RC(rc, "cose_res failed\n"); + } + + pthread_cancel(thread); + ctx_close(p_ctx); + rc = wait4all(); + } + return rc; +} + +int max_ctx_on_plun(int cmd) +{ + int i; + int rc = 0; + struct ctx myctx; + struct ctx *p_ctx=&myctx; + pid = getpid(); + pthread_t thread; + int max_p = MAX_OPENS; + for (i=0; idev, cflash_path); + if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno); + exit(rc); + } +#ifdef _AIX + rc |= ioctl_dk_capi_query_path(p_ctx); + rc|=ctx_init_internal(p_ctx, 0, p_ctx->devno); +#else + rc|=ctx_init_internal(p_ctx, 0x2, p_ctx->devno); +#endif + if (2 == cmd) + rc |=create_resource(p_ctx,0,0,LUN_VIRTUAL); + if (3 == cmd) + rc |=create_resource(p_ctx,0,0,LUN_DIRECT); + if (4 == cmd) + { + //do io all vluns created on path_id_mask + pthread_create(&thread, NULL,ctx_rrq_rx,p_ctx); + rc |= create_resource(p_ctx,p_ctx->chunk_size,0,LUN_VIRTUAL); + rc |= do_io(p_ctx,0x10); + + pthread_cancel(thread); + } 
+ sleep(10); //lets all context get created + if ( 1 != cmd ) + rc|=close_res(p_ctx); + rc|=ctx_close(p_ctx); + debug("%d:.exiting with rc=%d\n",pid,rc); + exit(rc); + } + } + rc=wait4all(); + return rc; +} + +int max_vlun_on_a_ctx() +{ + int i; + int rc; + struct ctx myctx; + struct ctx *p_ctx=&myctx; + pid = getpid(); + rc=ctx_init(p_ctx); + __u64 vluns[MAX_VLUNS]; + for (i=0;ichunk_size,0,LUN_VIRTUAL); + CHECK_RC(rc, "create_resource Failed\n"); + vluns[i]=p_ctx->rsrc_handle; + } + for (i=0;irsrc_handle=vluns[i]; + rc=close_res(p_ctx); + CHECK_RC(rc, "close_res failed\n"); + } + rc = ctx_close(p_ctx); + return rc; +} + diff --git a/src/cflash/test/cflash_test_util.c b/src/cflash/test/cflash_test_util.c new file mode 100644 index 00000000..15d04ca9 --- /dev/null +++ b/src/cflash/test/cflash_test_util.c @@ -0,0 +1,4744 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/cflash_test_util.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cflash_test.h" +#include +#include +#include +#include +#include +#include +#ifdef _AIX +#include +#endif + +int MAX_RES_HANDLE=16; /*default 16 MAX_RHT_CONTEXT */ + +char cflash_path[MC_PATHLEN]; +sigjmp_buf sBuf; +bool afu_reset = false; +bool bad_address = false; +bool err_afu_intrpt = false; +int dont_displa_err_msg = 0; //0 means display error msg +pid_t pid; +pid_t ppid; +int long_run = 0; +__u8 rrq_c_null = 0; //if 1, then hrrq_current = NULL + +#define DATA_SEED 0xdead000000000000ull +#define MAX_TRY_CTX_RESET 0xF + +__u64 lun_id = 0x0; +__u32 fc_port = 0x1; //will find the actual one + +int g_error=0; +int g_errno=0; +uint8_t rc_flags; +bool long_run_enable=false; +int static rw_cmd_loaded=0; +int static force_dump = 0; +//bool static cmd_cleared=false; +//set afu device path from env +int get_fvt_dev_env() +{ + char *fvt_dev = getenv("FVT_DEV"); + char *LONG_RN = getenv("LONG_RUN"); + if (NULL == fvt_dev) + { + fprintf(stderr, "FVT_DEV ENV var NOT set, Please set...\n"); + return -1; + } + +#ifndef _AIX + int rc; + rc=system("/opt/ibm/capikv/bin/cxlfstatus | grep -q superpipe"); + CHECK_RC(rc, "Test Setup doesn't have any Flash disk in spio mode"); +#endif + + strcpy(cflash_path, fvt_dev); + if ( NULL == LONG_RN ) + { + long_run=10; + } + else + { + long_run_enable=true; + long_run=atoi(LONG_RN); + } + return 0; +} + +int ctx_init_thread(void *args) +{ + return ctx_init((ctx_p)args); +} + +void ctx_close_thread(void *args) +{ + ctx_close((ctx_p)args); +} + +int ctx_reinit(struct ctx *p_ctx) +{ + int i; + +#ifndef _AIX + + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + +#endif + + + debug("%d:------------- Start ctx_reinit() -------------\n", pid); + p_ctx->context_id = p_ctx->new_ctx_token; + p_ctx->ctx_hndl = CTX_HNDLR_MASK & p_ctx->context_id; + +#ifndef _AIX + + /* Re-init all mutex to avoid any pending unlock after recovery */ 
+ + for (i = 0; i < NUM_CMDS; i++) + { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } + +#endif + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } + + debug("%d: context_id=0X%"PRIX64" ctx_hndl=%d\n", + pid, p_ctx->context_id, p_ctx->ctx_hndl); + + bzero(p_ctx->p_hrrq_start, NUM_RRQ_ENTRY*sizeof(__u64)); + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + write_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); +#ifdef _AIX + write_64(&p_ctx->p_host_map->endian_ctrl,(__u64)SISL_ENDIAN_CTRL_BE); +#endif + write_64(&p_ctx->p_host_map->ctx_ctrl, SISL_MSI_SYNC_ERROR); + write_64(&p_ctx->p_host_map->intr_mask, SISL_ISTATUS_MASK); + + p_ctx->toggle = 1; + + afu_reset = false; + +#ifndef _AIX + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + +#endif + + debug("%d:------------- End ctx_reinit() -------------\n", pid); + return 0; +} + +int ctx_init_internal(struct ctx *p_ctx, + __u64 flags, dev64_t devno) +{ + int rc=0; + //void *map; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int i; + pid_t mypid; + + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + for (i = 0; i < NUM_CMDS; i++) + { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } + + p_ctx->flags = flags; + p_ctx->work.num_interrupts = 4; // use num_interrupts from AFU desc + p_ctx->devno = devno; + + //do context attach + rc = ioctl_dk_capi_attach(p_ctx); + if (rc) + { + fprintf(stderr, "ioctl_dk_capi_attach failed\n"); + g_error = -1; + return -1; + } + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + + p_ctx->toggle = 1; + + // initialize cmd fields that 
never change + for (i = 0; i < NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } +#ifdef _AIX + write_64(&p_ctx->p_host_map->endian_ctrl,(__u64)SISL_ENDIAN_CTRL_BE); +#endif + + // set up RRQ in AFU + if (rrq_c_null) + { + write_64(&p_ctx->p_host_map->rrq_start, (__u64)NULL); + write_64(&p_ctx->p_host_map->rrq_end, (__u64)NULL); + } + else + { + write_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + } + + mypid = getpid(); + debug("%d: ctx_init() success: p_host_map %p, ctx_hndl %d, rrq_start %p\n", + mypid, p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + return 0; +} +int open_dev(char *dev, int reqflg) +{ + int fd; + int flag; + //open CAPI Flash disk device +#ifdef _AIX + int ext_flag = SC_CAPI_USER_IO; + //ext_flag |= SC_NO_RESERVE; //check later this flag + flag = (reqflg & O_ACCMODE) | O_NONBLOCK; + fd = openx(dev,flag,0,ext_flag); +#else + flag = reqflg | O_NONBLOCK; + fd = open(dev,flag); +#endif + return fd; +} +int ctx_init(struct ctx *p_ctx) +{ +#ifdef _AIX + int rc = 0; +#endif /*_AIX */ + memset(p_ctx, 0, sizeof(struct ctx)); + if ((p_ctx->fd = open_dev(cflash_path, O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno); + g_error = -1; + return -1; + } + strcpy(p_ctx->dev, cflash_path); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(p_ctx); + CHECK_RC(rc, "query path failed") +#endif /*_AIX */ + //note: for Linux devno will be ignored + return ctx_init_internal(p_ctx, DK_AF_ASSIGN_AFU, p_ctx->devno); +} + +int ctx_init2(struct ctx *p_ctx, char *dev, + __u64 flags, dev64_t devno) +{ + memset(p_ctx, 0, sizeof(struct ctx)); + if ((p_ctx->fd = open_dev(dev, O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",dev, errno); + g_error = -1; + return -1; + 
} + strcpy(p_ctx->dev, dev); + return ctx_init_internal(p_ctx, flags, devno); +} + +int ctx_close(struct ctx *p_ctx) +{ + int rc = 0; +#ifndef _AIX + if (p_ctx->mmio_size != 0) + { + rc = munmap((void *)p_ctx->p_host_map, p_ctx->mmio_size); + if (rc) + fprintf(stderr, "munmap failed with errno = %d", errno); + } +#endif + rc |= ioctl_dk_capi_detach(p_ctx); + rc |= close(p_ctx->fd); + debug("%d: close(%d) done.\n", pid, p_ctx->fd); + p_ctx->mmio_size = 0; + p_ctx->p_host_map = 0; + return rc; +} + +void clear_waiting_cmds(struct ctx *p_ctx) +{ + int i; + struct afu_cmd *p_cmd; + if (!rw_cmd_loaded) return; //rw cmd not loaded + if (DEBUG) + { + fprintf(stderr, + "%d: Clearing all waiting cmds, some error occurred\n", + getpid()); + } + //TBD do context reset + for (i = 0 ; i < NUM_CMDS; i++) + { + p_cmd = &p_ctx->cmd[i]; + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + p_ctx->cmd[i].sa.ioasc = 1; + p_ctx->cmd[i].sa.rc.afu_rc =0xFF; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + } + //cmd_cleared = true; +} +void ctx_rrq_intr(struct ctx *p_ctx) +{ + struct afu_cmd *p_cmd; + // process however many RRQ entries that are read + debug_2("%d: In ctx_rrq_intr(): toggle_bit= 0X%llx p_ctx->toggle=%x\n", + pid, (*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT), p_ctx->toggle); + + while ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) + { + + debug_2("------------------------------------------------\n"); + debug_2("%d: Got inside loop within ctx_rrq_intr()\n", pid); + debug_2("%d: address in p_hrrq_curr=0X%"PRIX64"\n", pid, *p_ctx->p_hrrq_curr); + debug_2("%d: toggle_bit = 0X%llx p_ctx->toggle=0X%x\n", + pid, (*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT), p_ctx->toggle); + + p_cmd = (struct afu_cmd*) + ((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + debug_2("%d:cmd_address(IOARCB)=%p\n",pid,p_cmd); + + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + 
pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) + { + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else + { + /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + debug_2("%d:wrapping up p_hrrq_curr to p_hrrq_start...\n",pid); + } + } +} + +void ctx_sync_intr(struct ctx *p_ctx) +{ + __u64 reg; + __u64 reg_unmasked; + + reg = read_64(&p_ctx->p_host_map->intr_status); + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + if (reg_unmasked == 0) + { + if (!dont_displa_err_msg) + { + fprintf(stderr, + "%d: spurious interrupt, intr_status 0x%016lx, ctx %d\n", + pid, reg, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return; + } + + if (reg_unmasked == SISL_ISTATUS_PERM_ERR_RCB_READ) + { + if (!dont_displa_err_msg) + { + fprintf(stderr, "exiting on SISL_ISTATUS_PERM_ERR_RCB_READ\n"); + } + clear_waiting_cmds(p_ctx); + //pthread_exit(NULL); + // ok - this is a signal to stop this thread + } + else + { + if (!dont_displa_err_msg) + { + fprintf(stderr, + "%d: unexpected interrupt, intr_status 0x%016lx, ctx %d, exiting test...\n", + pid, reg, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return ; + } + + write_64(&p_ctx->p_host_map->intr_clear, reg_unmasked); + + return; +} + +#ifdef _AIX +void *ctx_rrq_rx(void *arg) +{ + int rc = 0; + struct ctx *p_ctx = (struct ctx*) arg; + struct pollfd poll_list[2]; + memset(&poll_list[0], 0, sizeof(poll_list)); + + poll_list[CFLASH_ADAP_POLL_INDX].fd = p_ctx->adap_fd; + poll_list[CFLASH_ADAP_POLL_INDX].events = POLLIN|POLLSYNC|POLLPRI; + + poll_list[CFLASH_DISK_POLL_INDX].fd = p_ctx->fd; + poll_list[CFLASH_DISK_POLL_INDX].events = POLLIN; + int max_try_ctx_reset =0; + while (1) + { + rc = poll(poll_list, 2, -1);//no timeout + debug_2("%d:rc=%d poll_list[CFLASH_ADAP_POLL_INDX].revents=%d\n", + pid,rc,poll_list[CFLASH_ADAP_POLL_INDX].revents); + if 
(poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLPRI) + { + if (afu_reset) + continue; //no need for any action now + //bad page fault + if (bad_address) + err_afu_intrpt=true; //set to check process + else + fprintf(stderr, "%d: adapt poll POLLPRI occured\n",pid); + p_ctx->flags = DK_QEF_ADAPTER; + ioctl_dk_capi_query_exception(p_ctx); + if (DK_AET_EEH_EVENT == p_ctx->adap_except_type) + { + afu_reset = true; + printf("%d: EEH is done.. receieved DK_AET_EEH_EVENT event......\n",pid); + max_try_ctx_reset=0; + continue; + } + else if (DK_AET_BAD_PF == p_ctx->adap_except_type) + { + debug("%d: DK_AET_BAD_PF adap_except received.....\n",pid); + } + //Take action based on execption TBD + if (++max_try_ctx_reset == MAX_TRY_CTX_RESET) + { + clear_waiting_cmds(p_ctx); + max_try_ctx_reset = 0; + } + //return NULL; + } + if (poll_list[CFLASH_DISK_POLL_INDX].revents & POLLPRI) + { + //disk exception + fprintf(stderr, "%d: disk poll POLLPRI occured\n",pid); + p_ctx->flags = DK_QEF_ALL_RESOURCE; + ioctl_dk_capi_query_exception(p_ctx); + //TBD dump exception detail or forward above + //Take action based on execption TBD + if (++max_try_ctx_reset == MAX_TRY_CTX_RESET) + { + clear_waiting_cmds(p_ctx); + max_try_ctx_reset = 0; + } + //return NULL; + } + if (poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLMSG) + { + //adapter error interrupt + //this is SISL_MSI_SYNC_ERROR + fprintf(stderr, "%d: adopt poll POLLMSG SISL_MSI_SYNC_ERROR occured\n",pid); + ctx_sync_intr(p_ctx); + //return NULL; + } + if (poll_list[CFLASH_ADAP_POLL_INDX].revents & POLLIN) + { + max_try_ctx_reset =0;//reset + //adapter interrupt for cmd completion + //this is SISL_MSI_RRQ_UPDATED + debug_2("%d:adapter POLLIN, SISL_MSI_RRQ_UPDATED\n",pid); + ctx_rrq_intr(p_ctx); + debug_2("%d:Returned from ctx_rrq_intr()\n",pid); + } + } + return NULL; +} + +#else +void *ctx_rrq_rx(void *arg) +{ + struct cxl_event *p_event; + int len; + struct ctx *p_ctx = (struct ctx*) arg; + + while (1) + { + // + // read adapt fd & block 
on any interrupt + len = read(p_ctx->adap_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + + if ((len < 0) && (errno == EIO)) + { + if (!dont_displa_err_msg) + fprintf(stderr, "afu has been reset ...\n"); + + clear_waiting_cmds(p_ctx); + afu_reset = true; + //sleep some time to retry again + sleep(10); + continue; + } + + p_event = (struct cxl_event *)&p_ctx->event_buf[0]; + while (len >= sizeof(p_event->header)) + { + if (p_event->header.type == CXL_EVENT_AFU_INTERRUPT) + { + switch (p_event->irq.irq) + { + case SISL_MSI_RRQ_UPDATED: + ctx_rrq_intr(p_ctx); + break; + + case SISL_MSI_SYNC_ERROR: + ctx_sync_intr(p_ctx); + break; + + default: + if (!dont_displa_err_msg) + { + fprintf(stderr, "%d: unexpected irq %d, ctx %d, exiting test...\n", + pid, p_event->irq.irq, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + //return NULL; + break; + } + + } + else + { + switch (p_event->header.type) + { + case CXL_EVENT_RESERVED: + if (!dont_displa_err_msg) + fprintf(stderr, "%d: CXL_EVENT_RESERVED = size = 0x%x\n", + pid,p_event->header.size); + break; + case CXL_EVENT_DATA_STORAGE: + if (!dont_displa_err_msg) + fprintf(stderr, "%d:CAPI_EVENT_DATA_STOARAGE addr = 0x%lx, dsisr = 0x%lx\n", + pid,p_event->fault.addr,p_event->fault.dsisr); + + //TBD get_intr_status & print + break; + case CXL_EVENT_AFU_ERROR: + if (!dont_displa_err_msg) + fprintf(stderr,"%d:CXL_EVENT_AFU_ERROR error = 0x%lx, flags = 0x%x\n", + pid, p_event->afu_error.error,p_event->afu_error.flags); + break; + default: + if (!dont_displa_err_msg) + { + fprintf(stderr, "%d: Unknown CAPI EVENT type = %d, process_element = 0x%x\n", + pid, p_event->header.type,p_event->header.process_element); + } + } + g_error =1; + clear_waiting_cmds(p_ctx); + err_afu_intrpt = true; + //return NULL; + //let user process terminate thread + } + len -= p_event->header.size; + p_event = (struct cxl_event *) + (((char*)p_event) + p_event->header.size); + } + } + + return NULL; +} +#endif + +// len in __u64 +void 
fill_buf(__u64* p_buf, unsigned int len,__u64 value) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) + { + p_buf[i] = value; + p_buf[i + 1] = data++; + } +} + +// Send Report LUNs SCSI Cmd to CAPI Adapter +int send_report_luns(struct ctx *p_ctx, __u32 port_sel, + __u64 **lun_ids, __u32 *nluns) +{ + __u32 *p_u32; + __u64 *p_u64, *lun_id; + int len; + int rc; + + memset(&p_ctx->rbuf[0], 0, sizeof(p_ctx->rbuf)); + memset((void *)&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[0].rcb.port_sel = port_sel; + p_ctx->cmd[0].rcb.lun_id = 0x0; + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + p_ctx->cmd[0].rcb.timeout = 10; /* 10 Secs */ + + p_ctx->cmd[0].rcb.cdb[0] = 0xA0; /* report lun */ + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[6]; + write_32(p_u32, sizeof(p_ctx->rbuf[0])); /* allocation length */ + + p_ctx->cmd[0].sa.host_use_b[1] = 0; /* reset retry cnt */ + do + { + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if (rc) return rc; + } while (check_status(&p_ctx->cmd[0].sa)); + + + if (p_ctx->cmd[0].sa.host_use_b[0] & B_ERROR) + { + return -1; + } + + // report luns success + len = read_32((__u32*)&p_ctx->rbuf[0][0]); + p_u64 = (__u64*)&p_ctx->rbuf[0][8]; /* start of lun list */ + + *nluns = len/8; + lun_id = (__u64 *)malloc((*nluns * sizeof(__u64))); + + if (lun_id == NULL) + { + fprintf(stderr, "Report LUNs: ENOMEM\n"); + } + else + { + *lun_ids = lun_id; + + while (len) + { + *lun_id = read_64(p_u64++); + lun_id++; + len -= 8; + } + } + + return 0; +} +// Send Read Capacity SCSI Cmd to the LUN +int send_read_capacity(struct ctx *p_ctx, __u32 port_sel, + __u64 lun_id, __u64 *lun_capacity, __u64 *blk_len) +{ + __u32 *p_u32; + __u64 *p_u64; + int rc; + + memset(&p_ctx->rbuf[0], 0, sizeof(p_ctx->rbuf)); + memset((void *)&p_ctx->cmd[0].rcb.cdb[0], 
0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[0].rcb.port_sel = port_sel; + p_ctx->cmd[0].rcb.lun_id = lun_id; + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0]; + p_ctx->cmd[0].rcb.timeout = 10; /* 10 Secs */ + + p_ctx->cmd[0].rcb.cdb[0] = 0x9E; /* read cap(16) */ + p_ctx->cmd[0].rcb.cdb[1] = 0x10; /* service action */ + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, sizeof(p_ctx->rbuf[0])); /* allocation length */ + + send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if (rc) + { + return rc; + } + + p_u64 = (__u64*)&p_ctx->rbuf[0][0]; + *lun_capacity = read_64(p_u64); + + p_u32 = (__u32*)&p_ctx->rbuf[0][8]; + *blk_len = read_32(p_u32); + + return 0; +} + +// len in __u64 +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len) +{ + return memcmp(p_buf1, p_buf2, len*sizeof(__u64)); +} + +int rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba) +{ + int i; + //char buf[32]; + //int read_fd, write_fd; + for (i = 0; i < NUM_CMDS; i++) + { + if (cmp_buf((__u64*)&p_ctx->rbuf[i][0], (__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) + { + printf("%d: miscompare at start_lba 0X%"PRIX64"\n", + pid, start_lba); + + hexdump(&p_ctx->rbuf[i][0],0x20,"Read buf"); + hexdump(&p_ctx->wbuf[i][0],0x20,"Write buf"); + + return -1; + } + } + return 0; +} + +int check_if_can_retry(struct ctx *p_ctx, int old_rc) +{ + int i; + int rc; + bool yes_try=false; + for (i=0;icmd[i].sa.ioasc && p_ctx->cmd[i].sa.rc.scsi_rc) + { + yes_try=true; + break; + } + } + if (!yes_try) return old_rc; + debug("%d: Trying once more to send IO......\n", pid); + for (i=0;icmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + rc = send_cmd(p_ctx); + if (rc) return rc; + return wait_resp(p_ctx); +} +int send_write(struct ctx *p_ctx, __u64 start_lba, + __u64 stride,__u64 data) +{ + int i; + //__u64 
*p_u64; //unused + __u32 *p_u32; + __u64 lba; + int rc; + + for (i = 0; i< NUM_CMDS; i++) + { + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64),data); + + memset((void *)&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + lba = start_lba + i*stride; + write_lba((__u64*)&p_ctx->cmd[i].rcb.cdb[2],lba); + + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + + debug_2("%d : send write lba..: 0X%"PRIX64"\n",(int)data,lba); + + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[i][0]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[i]); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + p_ctx->cmd[i].rcb.timeout = 60; //secs + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + hexdump(&p_ctx->wbuf[i][0],0x20,"Writing"); + } + //send_single_cmd(p_ctx); + rc = send_cmd(p_ctx); + if (rc) return rc; + rc = wait_resp(p_ctx); + return rc; +} + +void send_single_cmd(struct ctx *p_ctx) +{ + + p_ctx->cmd[0].sa.host_use_b[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + /* make memory updates visible to AFU before MMIO */ + asm volatile( "lwsync" : : ); + + // write IOARRIN + write_64(&p_ctx->p_host_map->ioarrin, (__u64)&p_ctx->cmd[0].rcb); + rw_cmd_loaded=1; +} + +int send_cmd(struct ctx *p_ctx) +{ + int cnt = NUM_CMDS; + int wait_try=MAX_TRY_WAIT; + int p_cmd = 0; + int i; + __u64 room; + + /* make memory updates visible to AFU before MMIO */ + asm volatile( "lwsync" : : ); + while (cnt) + { + room = read_64(&p_ctx->p_host_map->cmd_room); + debug_2("%d:room =0X%"PRIX64"\n",pid,room); +#ifdef _AIX //not to break anything on Linux +#ifndef __64BIT__ + if (0XFFFFFFFF == room) +#else + if ( -1 == room) +#endif + { + fprintf(stderr,"%d:Failed:cmd_room=-1 afu_reset done/not recovered...\n",pid); + 
usleep(1000); + return -1; + } +#endif + if (0 == room) + { + usleep(MC_BLOCK_DELAY_ROOM); + wait_try--; + debug("%d:still pending cmd:%d\n",pid,cnt); + } + if (0 == wait_try) + { + fprintf(stderr, "%d : send cmd wait over %d cmd remain\n", + pid, cnt); + return -1; + } + for (i = 0; i < room; i++) + { + // add a usleep here if room=0 ? + // write IOARRIN + debug_2("%d:Placing IOARCB =0X%"PRIX64" into ioarrin\n", + pid,(__u64)&p_ctx->cmd[p_cmd].rcb); + hexdump((void *)(&p_ctx->cmd[p_cmd].rcb), sizeof(p_ctx->cmd[p_cmd].rcb), + "RCB data writing in ioarrin........"); + write_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[p_cmd++].rcb); + wait_try = MAX_TRY_WAIT; //each cmd give try max time + if (cnt-- == 1) break; + } + } + rw_cmd_loaded=1; + return 0; +} + +int wait_resp(struct ctx *p_ctx) +{ + int i; + int rc =0; + int p_rc = 0; + __u64 *p_u64; + + for (i = 0; i < NUM_CMDS; i++) + { + pthread_mutex_lock(&p_ctx->cmd[i].mutex); + while (p_ctx->cmd[i].sa.host_use_b[0] != B_DONE) + { + pthread_cond_wait(&p_ctx->cmd[i].cv, &p_ctx->cmd[i].mutex); + } + pthread_mutex_unlock(&p_ctx->cmd[i].mutex); + //force_dump=1; + hexdump((void *)(&p_ctx->cmd[i].rcb), sizeof(p_ctx->cmd[i].rcb), "RCB data........"); + //force_dump=0; + if (p_ctx->cmd[i].sa.ioasc) + { + rc_flags = p_ctx->cmd[i].sa.rc.flags; + rc = p_ctx->cmd[i].sa.rc.afu_rc | + p_ctx->cmd[i].sa.rc.scsi_rc | + p_ctx->cmd[i].sa.rc.fc_rc; + if (!dont_displa_err_msg) + { + + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + if (p_rc != rc) + { + hexdump(&p_ctx->cmd[i].sa.sense_data,0x20,"Sense data Writing"); + + // copied sense_data for further use + memcpy((void *)p_ctx->verify_sense_data,(const void *)p_ctx->cmd[i].sa.sense_data,20); + + fprintf(stderr,"%d: Req failed @lba=0X%"PRIX64" IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid,read_64(p_u64), + p_ctx->cmd[i].sa.rc.flags,p_ctx->cmd[i].sa.rc.afu_rc, + p_ctx->cmd[i].sa.rc.scsi_rc, p_ctx->cmd[i].sa.rc.fc_rc); + p_rc = rc; + if 
(p_ctx->cmd[i].sa.rc.scsi_rc || p_ctx->cmd[i].sa.rc.fc_rc) + { + force_dump=1; + hexdump(&p_ctx->cmd[i].sa.sense_data,0x20,"Sense data Writing"); + force_dump=0; + } + } + } + } + } + rw_cmd_loaded=0; + return rc; +} + +int wait_single_resp(struct ctx *p_ctx) +{ + int rc =0; + + pthread_mutex_lock(&p_ctx->cmd[0].mutex); + while (p_ctx->cmd[0].sa.host_use_b[0] != B_DONE) + { + debug_2("%d: Sleeping in wait_single_resp(), host_use_b[0]=%d\n", pid, p_ctx->cmd[0].sa.host_use_b[0]); + pthread_cond_wait(&p_ctx->cmd[0].cv, &p_ctx->cmd[0].mutex); + debug_2("%d: Wokeup in wait_single_resp(), host_use_b[0]=%d\n", pid, p_ctx->cmd[0].sa.host_use_b[0]); + } + pthread_mutex_unlock(&p_ctx->cmd[0].mutex); + debug_2("%d: Got out of wait_single_resp(), host_use_b[0]=%d\n", pid, p_ctx->cmd[0].sa.host_use_b[0]); + + rw_cmd_loaded =0; + if (p_ctx->cmd[0].sa.ioasc) + { + if (!dont_displa_err_msg) + { + if (p_ctx->cmd[0].sa.rc.scsi_rc || p_ctx->cmd[0].sa.rc.fc_rc) + { + force_dump=1; + hexdump(&p_ctx->cmd[0].sa.sense_data,0x20,"Sense data Writing"); + force_dump=0; + } + printf("%d:IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid, + p_ctx->cmd[0].sa.rc.flags,p_ctx->cmd[0].sa.rc.afu_rc, + p_ctx->cmd[0].sa.rc.scsi_rc, p_ctx->cmd[0].sa.rc.fc_rc); + } + rc_flags = p_ctx->cmd[0].sa.rc.flags; + rc = p_ctx->cmd[0].sa.rc.afu_rc | + p_ctx->cmd[0].sa.rc.scsi_rc | + p_ctx->cmd[0].sa.rc.fc_rc; + return rc; + } + return rc; +} + +int send_read(struct ctx *p_ctx, __u64 start_lba, + __u64 stride) +{ + int i; + //__u64 *p_u64; //unused + __u32 *p_u32; + __u64 lba; + int rc; + + for (i = 0; i < NUM_CMDS; i++) + { + memset(&p_ctx->rbuf[i][0], 0, sizeof(p_ctx->rbuf[i])); + + lba = start_lba + i*stride; + + memset((void *)&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + + write_lba((__u64*)&p_ctx->cmd[i].rcb.cdb[2],lba); + + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= 
SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + + debug_2("%d: send read for lba : 0X%"PRIX64"\n",pid,lba); + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbuf[i]); + p_ctx->cmd[i].rcb.timeout = 60; + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbuf[i][0]; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + + //send_single_cmd(p_ctx); + rc = send_cmd(p_ctx); + CHECK_RC(rc,"send_cmd failed...\n"); + rc = wait_resp(p_ctx); + return rc; +} + +// returns 1 if the cmd should be retried, 0 otherwise +// sets B_ERROR flag based on IOASA +int check_status(volatile sisl_ioasa_t *p_ioasa) +{ + // delete urun !!! + if (p_ioasa->ioasc == 0 || + (p_ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN)) + { + return 0; + } + else + { + p_ioasa->host_use_b[0] |= B_ERROR; + } + + if (p_ioasa->host_use_b[1]++ < 5) + { + if (p_ioasa->rc.afu_rc == 0x30) + { + // out of data buf + // #define all, to add the 2nd case!!! + // do we delay ? + return 1; + } + + if (p_ioasa->rc.scsi_rc) + { + // retry all SCSI errors + // but if busy, add a delay + return 1; + } + } + + return 0; +} + +int test_init(struct ctx *p_ctx) +{ + if (mc_init() != 0) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if (ctx_init(p_ctx) != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + return 0; +} + +/* + * NAME: hexdump + * + * FUNCTION: Display an array of type char in ASCII, and HEX. This function + * adds a caller definable header to the output rather than the fixed one + * provided by the hexdump function. 
+ * + * EXECUTION ENVIRONMENT: + * + * This routine is ONLY AVAILABLE IF COMPILED WITH DEBUG DEFINED + * + * RETURNS: NONE + */ +void +hexdump(void *data, long len, const char *hdr) +{ + + int i,j,k; + char str[18]; + char *p = (char *)data; + if (!force_dump) + { + //ignore env varible for force dump + if (!ENABLE_HEXDUMP) + return; + } + i=j=k=0; + fprintf(stderr, "%s: length=%ld (PID: %d)\n", hdr?hdr:"hexdump()", len, pid); + + /* Print each 16 byte line of data */ + while (i < len) + { + if (!(i%16)) /* Print offset at 16 byte bndry */ + fprintf(stderr,"%03x ",i); + + /* Get next data byte, save ascii, print hex */ + j=(int) p[i++]; + if (j>=32 && j<=126) + str[k++] = (char) j; + else + str[k++] = '.'; + fprintf(stderr,"%02x ",j); + + /* Add an extra space at 8 byte bndry */ + if (!(i%8)) + { + fprintf(stderr," "); + str[k++] = ' '; + } + + /* Print the ascii at 16 byte bndry */ + if (!(i%16)) + { + str[k] = '\0'; + fprintf(stderr," %s\n",str); + k = 0; + } + } + + /* If we didn't end on an even 16 byte bndry, print ascii for partial + * line. 
*/ + if ((j = i%16)) + { + /* First, space over to ascii region */ + while (i%16) + { + /* Extra space at 8 byte bndry--but not if we + * started there (was already inserted) */ + if (!(i%8) && j != 8) + fprintf(stderr," "); + fprintf(stderr," "); + i++; + } + /* Terminate the ascii and print it */ + str[k]='\0'; + fprintf(stderr," %s\n",str); + } + fflush(stderr); + + return; +} + +int rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba) +{ + int i; + for (i = 0; i < NUM_CMDS; i++) + { + if (cmp_buf_cloned((__u64*)&p_ctx->rbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) + { + printf("%d: clone miscompare at start_lba 0X%"PRIX64"\n", + pid, start_lba); + return -1; + } + } + return 0; +} + +// len in __u64 +int cmp_buf_cloned(__u64* p_buf, unsigned int len) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) + { + if (!(p_buf[i] == ppid && p_buf[i + 1] == data++)) + { + return -1; + } + } + return 0; +} + +int send_rw_rcb(struct ctx *p_ctx, struct rwbuf *p_rwb, + __u64 start_lba, __u64 stride, + int align, int where) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + int rc; + int i; + __u32 ea; + pid = getpid(); + if (0 == where)//begining + ea = align; + else //from end of the block + ea = 0x1000 - align; + for (i = 0; i< NUM_CMDS; i++) + { + debug("%d: EA = %p with 0X%X alignment\n",pid, &p_rwb->wbuf[i][ea], ea); + fill_buf((__u64*)&p_rwb->wbuf[i][ea], + sizeof(p_rwb->wbuf[i])/(2*sizeof(__u64)),pid); + memset((void *)&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + vlba = start_lba + i*stride; + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, vlba); + debug_2("%d: send write for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_rwb->wbuf[i][ea]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_rwb->wbuf[i])/2; + 
p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + hexdump(&p_rwb->wbuf[i][ea],0x20,"Write buf"); + } + + rc = send_cmd(p_ctx); + if (rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if (rc) return rc; + //fill send read + + for (i = 0; i< NUM_CMDS; i++) + { + memset(&p_rwb->rbuf[i][ea], 0, sizeof(p_rwb->rbuf[i])/2); + + vlba = start_lba + i*stride; + memset((void *)&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + write_lba(p_u64, vlba); + debug_2("%d: send read for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[i].rcb.data_len = sizeof(p_rwb->rbuf[i])/2; + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_rwb->rbuf[i][ea]; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + rc = send_cmd(p_ctx); + if (rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if (rc) return rc; + //do cmp r/w buf + for (i = 0; i < NUM_CMDS; i++) + { + vlba = start_lba + i*stride; + if (cmp_buf((__u64*)&p_rwb->rbuf[i][ea], (__u64*)&p_rwb->wbuf[i][ea], + sizeof(p_ctx->rbuf[i])/(2 * sizeof(__u64)))) + { + printf("%d: miscompare at start_lba 0X%"PRIX64"\n", + pid, vlba); + force_dump=1; + hexdump(&p_rwb->rbuf[i][ea],0x20,"Read buf"); + hexdump(&p_rwb->wbuf[i][ea],0x20,"Write buf"); + force_dump=0; + return -1; + } + } + return 0; +} + +int send_rw_shm_rcb(struct ctx *p_ctx, struct rwshmbuf *p_rwb, + __u64 vlba) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + fill_buf((__u64*)&p_rwb->wbuf[0][0], + 
sizeof(p_rwb->wbuf[0])/(sizeof(__u64)),pid); + memset((void *)&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, vlba); + debug_2("%d: send write for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_rwb->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_rwb->wbuf[0]); + p_ctx->cmd[0].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if (rc) return rc; + + memset(&p_rwb->rbuf[0][0], 0, sizeof(p_rwb->rbuf[0])); + + memset((void *)&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_lba(p_u64, vlba); + debug_2("%d: send read for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_len = sizeof(p_rwb->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_rwb->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if (rc) return rc; + //do cmp r/w buf + + if (cmp_buf((__u64*)&p_rwb->rbuf[0][0], (__u64*)&p_rwb->wbuf[0][0], + sizeof(p_rwb->rbuf[0])/(sizeof(__u64)))) + { + printf("%d: miscompare at start_lba 0X%"PRIX64"\n", + pid, vlba); + hexdump(&p_rwb->rbuf[0][0],0x20,"Read buf"); + hexdump(&p_rwb->wbuf[0][0],0x20,"Write buf"); + return -1; + } 
+ return 0; +} + +int do_large_io(struct ctx *p_ctx, struct rwlargebuf *rwbuf, __u64 size) +{ + __u64 st_lba = p_ctx->st_lba;; + int rc = 0; + pid = getpid(); + __u32 blocks = size/(p_ctx->block_size); + debug("%d: st_lba=0X%"PRIX64" last_lba=0X%"PRIX64" blocks=0X%"PRIX32" transfer_size=0X%"PRIX64"\n", + pid,st_lba,p_ctx->last_lba,blocks,size); + for (;st_lba <= p_ctx->last_lba; st_lba += (NUM_CMDS*blocks)) + { + rc = send_rw_lsize(p_ctx, rwbuf, st_lba, blocks); + if (rc) break; + } + debug("%d:-------transfer_size=0X%"PRIX64" done with rc=%d lba=0X%"PRIX64"-----\n", + pid,size,rc,st_lba); + return rc; +} +int send_rw_lsize(struct ctx *p_ctx, struct rwlargebuf *p_rwb, + __u64 start_lba, __u32 blocks) +{ + //__u64 *p_u64; //unused + __u32 *p_u32; + __u64 vlba; + int rc; + int i; + __u32 buf_len = blocks * p_ctx->block_size; + for (i = 0; i< NUM_CMDS; i++) + { + fill_buf((__u64*)p_rwb->wbuf[i],buf_len/sizeof(__u64),pid); + debug_2("%d: write buf address=%p\n",pid,p_rwb->wbuf[i]); + memset((void *)&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + vlba = start_lba + i*blocks; + write_lba((__u64*)&p_ctx->cmd[i].rcb.cdb[2],vlba); + + debug_2("%d: send write for 0X%"PRIX64"\n", pid, vlba); + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + + p_ctx->cmd[i].rcb.data_ea = (__u64)p_rwb->wbuf[i]; + + p_ctx->cmd[i].rcb.data_len = buf_len; + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, blocks); //how many blocks + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + hexdump(p_rwb->wbuf[i],0x20,"Write buf"); + } + + rc = send_cmd(p_ctx); + if (rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if (rc) return rc; + //fill send read + + for (i = 0; i< NUM_CMDS; i++) + { + memset(p_rwb->rbuf[i], 0, buf_len/sizeof(__u64)); + 
//bzero(p_rwb->rbuf[i], buf_len/sizeof(__u64)); + //fill_buf((__u64*)p_rwb->rbuf[i],buf_len/sizeof(__u64),pid); + debug_2("%d: Read buf address=%p\n",pid,p_rwb->rbuf[i]); + + vlba = start_lba + i*blocks; + memset((void *)&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + write_lba((__u64*)&p_ctx->cmd[i].rcb.cdb[2],vlba); + debug_2("%d: send read for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + + p_ctx->cmd[i].rcb.data_len = buf_len; + p_ctx->cmd[i].rcb.data_ea = (__u64)p_rwb->rbuf[i]; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + + write_32(p_u32, blocks); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + rc = send_cmd(p_ctx); + if (rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if (rc) return rc; + char buf[20]; + int rfd, wfd; + //do cmp r/w buf + for (i = 0; i < NUM_CMDS; i++) + { + vlba = start_lba + i*blocks; + //if (cmp_buf((__u64*)p_rwb->rbuf[i], (__u64*)p_rwb->wbuf[i], + // buf_len/sizeof(__u64))); + if (memcmp(p_rwb->rbuf[i], p_rwb->wbuf[i], + buf_len)) + { + sprintf(buf,"read.%d",pid); + rfd = open(buf,O_RDWR|O_CREAT); + sprintf(buf,"write.%d",pid); + wfd = open(buf,O_RDWR|O_CREAT); + write(rfd,p_rwb->rbuf[i],buf_len); + write(wfd,p_rwb->rbuf[i],buf_len); + close(rfd); + close(wfd); + printf("%d: miscompare at start_lba 0X%"PRIX64"\n", + pid, vlba); + force_dump=1; + hexdump(p_rwb->rbuf[i],0x20,"Read buf"); + hexdump(p_rwb->wbuf[i],0x20,"Write buf"); + force_dump=0; + return -1; + } + } + return 0; +} + +int send_single_write(struct ctx *p_ctx, __u64 vlba, __u64 data) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + fill_buf((__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->wbuf[0])/(sizeof(__u64)), data); + memset((void *)&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + 
p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_lba(p_u64, vlba); + debug_2("%d: send write for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_ctx->cmd[0].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +int send_single_read(struct ctx *p_ctx, __u64 vlba) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + memset(&p_ctx->rbuf[0][0], 0, sizeof(p_ctx->rbuf[0])); + + memset((void *)&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_lba(p_u64, vlba); + debug_2("%d: send read for 0X%"PRIX64"\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, p_ctx->blk_len); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +int rw_cmp_single_buf(struct ctx *p_ctx, __u64 vlba) +{ + if (cmp_buf((__u64*)&p_ctx->rbuf[0][0], (__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->rbuf[0])/(sizeof(__u64)))) + { + printf("%d: miscompare at start_lba 0X%"PRIX64"\n", + pid, vlba); + hexdump(&p_ctx->rbuf[0][0],0x20,"Read buf"); + hexdump(&p_ctx->wbuf[0][0],0x20,"Write buf"); + return -1; + 
} + return 0; +} + +#ifdef _AIX +//IOCTL from here +int ioctl_dk_capi_query_path(struct ctx *p_ctx) +{ + int rc; + int i; + int first_path = 0; + struct cflash_paths + { + struct dk_capi_paths path; + struct dk_capi_path_info paths[MAX_PATH-1]; + }capi_paths; + + struct dk_capi_path_info *path_info = NULL; + memset(&capi_paths, 0, sizeof(capi_paths)); + + capi_paths.path.version = p_ctx->version; + capi_paths.path.path_count = MAX_PATH; + + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + } + if (capi_paths.path.returned_path_count == 0) + { + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + } + if (capi_paths.path.returned_path_count > MAX_PATH) + { + fprintf(stderr,"got more paths than provided issued=%d ret=%d\n", + MAX_PATH,capi_paths.path.returned_path_count); + capi_paths.path.returned_path_count = MAX_PATH; + } + + path_info = capi_paths.path.path_info; + //get 1st reserved path + for (i = 0; i < capi_paths.path.returned_path_count; i++) + { + if ((path_info[i].flags & DK_CPIF_RESERVED) && + !(path_info[i].flags & DK_CPIF_FAILED)) + { + first_path = i; + break; + } + } + p_ctx->devno = path_info[first_path].devno; + p_ctx->path_id = path_info[first_path].path_id; + p_ctx->path_id_mask = 1 << path_info[first_path].path_id; + p_ctx->return_flags = capi_paths.path.return_flags; + p_ctx->return_path_count = capi_paths.path.returned_path_count; + debug("%d:----------- DK_CAPI_QUERY_PATHS Info -----------\n", pid); + debug("%d:dev=%s return_path_count=%d return_flags=0X%"PRIX64"\n", + pid, p_ctx->dev, p_ctx->return_path_count,p_ctx->return_flags); + debug("%d:1st Path info: path_id=%d path_id_mask=%d\n", + pid, p_ctx->path_id, p_ctx->path_id_mask); + debug("%d:----------- End DK_CAPI_QUERY_PATHS ---------------\n",pid); + return rc; +} +#endif /* _AIX */ + +int ioctl_dk_capi_attach(struct ctx *p_ctx) +{ + int rc; + +#ifdef _AIX + struct dk_capi_attach capi_attach; + struct devinfo iocinfo; + 
memset(&iocinfo, 0, sizeof(iocinfo)); +#else + struct dk_cxlflash_attach capi_attach; +#endif + memset(&capi_attach, 0, sizeof(capi_attach)); + + debug("%d:----------- Start DK_CAPI_ATTACH ----------\n", pid); +#ifdef _AIX + capi_attach.version = p_ctx->version; + capi_attach.flags = p_ctx->flags; + capi_attach.devno = p_ctx->devno; + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" num_interrupts=%d devno=0X%"PRIX64" context_id=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->work.num_interrupts,p_ctx->devno,p_ctx->context_id); +#else + capi_attach.hdr.version = p_ctx->version; + capi_attach.hdr.flags = p_ctx->flags; + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" num_interrupts=%d context_id=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->work.num_interrupts,p_ctx->context_id); +#endif + capi_attach.num_interrupts = p_ctx->work.num_interrupts; + +#ifdef _AIX + rc = ioctl(p_ctx->fd, DK_CAPI_ATTACH, &capi_attach); +#else + rc = ioctl(p_ctx->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:... 
DK_CAPI_ATTACH called ...\n", pid); + if (rc) + { + g_errno=errno; + CHECK_RC(errno, "DK_CAPI_ATTACH failed"); + } + + p_ctx->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + p_ctx->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + p_ctx->context_id = capi_attach.ctx_token; + p_ctx->last_phys_lba = capi_attach.last_phys_lba; + p_ctx->chunk_size = capi_attach.chunk_size; + p_ctx->devno = capi_attach.devno; + //get max_xfer + rc = ioctl(p_ctx->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, "Iocinfo failed with errno\n"); + } + //p_ctx->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + p_ctx->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + p_ctx->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + p_ctx->return_flags = capi_attach.return_flags; + p_ctx->block_size = capi_attach.block_size; +#else + if (p_ctx->flags != DK_CXLFLASH_ATTACH_REUSE_CONTEXT) + { + // no need for REUSE flag + p_ctx->p_host_map = mmap(NULL,p_ctx->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, + capi_attach.adap_fd,0); + if (p_ctx->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + } + p_ctx->context_id = capi_attach.context_id; + p_ctx->last_phys_lba = capi_attach.last_lba; + p_ctx->max_xfer = capi_attach.max_xfer; + p_ctx->chunk_size = NUM_BLOCKS; + p_ctx->return_flags = capi_attach.hdr.return_flags; + p_ctx->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Large trasnfer handled exclusive + p_ctx->blk_len = BLOCK_SIZE/p_ctx->block_size; + p_ctx->adap_fd = capi_attach.adap_fd; + + p_ctx->ctx_hndl = CTX_HNDLR_MASK & p_ctx->context_id; + p_ctx->unused_lba = p_ctx->last_phys_lba +1; + +#ifdef _AIX + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64" ret_devno=0X%"PRIX64"\n", + 
pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer,p_ctx->devno); +#else + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer); +#endif + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx->adap_fd,p_ctx->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH -------------\n", pid); + + // Surelock GA2 supports only 4K block disks + // Can be removed later on new release. + if ( p_ctx->block_size != 0x1000 ) + { + printf("%d: Only 4K block size disk supported. Exiting.", pid); + exit(1); + } + return rc; +} + +int ioctl_dk_capi_detach(struct ctx *p_ctx) +{ + int rc; +#ifdef _AIX + struct dk_capi_detach capi_detach; +#else + struct dk_cxlflash_detach capi_detach; +#endif + p_ctx->flags = 0; //not yet defined + memset(&capi_detach, 0, sizeof(capi_detach)); + debug("%d:--------------- Start DK_CAPI_DETACH -------------\n",pid); + +#ifdef _AIX + capi_detach.version = p_ctx->version; + capi_detach.flags = p_ctx->flags; +#else + capi_detach.hdr.version = p_ctx->version; + capi_detach.hdr.flags = p_ctx->flags; +#endif + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" ctx_id=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->context_id); +#ifdef _AIX + capi_detach.ctx_token = p_ctx->context_id; + capi_detach.devno = p_ctx->devno; + debug("%d:devno=0X%"PRIX64"\n",pid,p_ctx->devno); + rc = ioctl(p_ctx->fd, DK_CAPI_DETACH, &capi_detach); +#else + capi_detach.context_id = p_ctx->context_id; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_DETACH, &capi_detach); +#endif + debug("%d:... 
DK_CAPI_DETACH called ...\n",pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_DETACH failed"); + } +#ifdef _AIX + p_ctx->return_flags = capi_detach.return_flags; +#else + p_ctx->return_flags = capi_detach.hdr.return_flags; +#endif + debug("%d:return_flag=0X%"PRIX64"\n",pid,p_ctx->return_flags); + + debug("%d:--------------- End DK_CAPI_DETACH -------------\n",pid); + return rc; +} + +int ioctl_dk_capi_udirect(struct ctx *p_ctx) +{ + int rc; +#ifdef _AIX + struct dk_capi_udirect udirect; +#else + struct dk_cxlflash_udirect udirect; +#endif + memset(&udirect, 0, sizeof(udirect)); + + debug("%d:----------- Start DK_CAPI_USER_DIRECT ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" ctx_id:0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->context_id); +#ifdef _AIX + udirect.version = p_ctx->version; + udirect.flags = p_ctx->flags; + udirect.devno = p_ctx->devno; + udirect.ctx_token = p_ctx->context_id; + udirect.path_id_mask = p_ctx->path_id_mask; + debug("%d:devno=0X%"PRIX64" path_id_mask=%d\n",pid,p_ctx->devno,p_ctx->path_id_mask); + rc = ioctl(p_ctx->fd, DK_CAPI_USER_DIRECT, &udirect); +#else + udirect.hdr.version = p_ctx->version; + + // TBD : flag is not defined in Linux + //udirect.hdr.flags = p_ctx->flags; + udirect.context_id = p_ctx->context_id; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_USER_DIRECT, &udirect); +#endif + + debug("%d:... 
DK_CAPI_USER_DIRECT called ...\n",pid); + if (rc) + { + p_ctx->last_lba = udirect.last_lba; + p_ctx->rsrc_handle = udirect.rsrc_handle; +#ifdef _AIX + p_ctx->return_flags = udirect.return_flags; +#else + p_ctx->return_flags = udirect.hdr.return_flags; +#endif + + p_ctx->res_hndl = RES_HNDLR_MASK & p_ctx->rsrc_handle; + debug("%d:res_hndl=0X%"PRIX64" last_lba=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->rsrc_handle,p_ctx->last_lba,p_ctx->return_flags); + + CHECK_RC(errno, "DK_CAPI_USER_DIRECT failed"); + } + + p_ctx->last_lba = udirect.last_lba; + p_ctx->rsrc_handle = udirect.rsrc_handle; +#ifdef _AIX + p_ctx->return_flags = udirect.return_flags; +#else + p_ctx->return_flags = udirect.hdr.return_flags; +#endif + + p_ctx->res_hndl = RES_HNDLR_MASK & p_ctx->rsrc_handle; + debug("%d:res_hndl=0X%"PRIX64" last_lba=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->rsrc_handle,p_ctx->last_lba,p_ctx->return_flags); + + debug("%d:-------- End DK_CAPI_USER_DIRECT ----------\n",pid); + return rc; +} + +int ioctl_dk_capi_uvirtual(struct ctx *p_ctx) +{ + int rc; +#ifdef _AIX + struct dk_capi_uvirtual uvirtual; +#else + struct dk_cxlflash_uvirtual uvirtual; +#endif + memset(&uvirtual, 0, sizeof(uvirtual)); + + debug("%d:----------- Start DK_CAPI_USER_VIRTUAL -------------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" ctx_id:0X%"PRIX64" lun_size=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->context_id,p_ctx->lun_size); +#ifdef _AIX + uvirtual.version = p_ctx->version; + uvirtual.devno = p_ctx->devno; + uvirtual.vlun_size = p_ctx->lun_size; + uvirtual.ctx_token = p_ctx->context_id; + uvirtual.path_id_mask = p_ctx->path_id_mask; + uvirtual.flags = p_ctx->flags; + debug("%d:devno=0X%"PRIX64" path_id_mask=%d\n",pid,p_ctx->devno,p_ctx->path_id_mask); + rc = ioctl(p_ctx->fd, DK_CAPI_USER_VIRTUAL, &uvirtual); +#else + uvirtual.hdr.version = p_ctx->version; + //TBD enabled flag once defined + //uvirtual.hdr.flags = p_ctx->flags; 
+ uvirtual.context_id = p_ctx->context_id; + uvirtual.lun_size = p_ctx->lun_size; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_USER_VIRTUAL, &uvirtual); +#endif + + debug("%d:... DK_CAPI_USER_VIRTUAL called ...\n",pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_USER_VIRTUAL failed"); + } + + p_ctx->rsrc_handle = uvirtual.rsrc_handle; + p_ctx->last_lba = uvirtual.last_lba; +#ifdef _AIX + p_ctx->return_flags = uvirtual.return_flags; +#else + p_ctx->return_flags = uvirtual.hdr.return_flags; +#endif + + p_ctx->res_hndl = RES_HNDLR_MASK & p_ctx->rsrc_handle; + + debug("%d:res_hndl=0X%"PRIX64" last_lba=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->rsrc_handle,p_ctx->last_lba,p_ctx->return_flags); + + debug("%d:--------------- End DK_CAPI_USER_VIRTUAL -------------\n",pid); + + return rc; +} + +int ioctl_dk_capi_release(struct ctx *p_ctx) +{ + int rc; +#ifdef _AIX + struct dk_capi_release release; +#else + struct dk_cxlflash_release release; +#endif + p_ctx->flags = 0; //not yet defined + memset(&release, 0, sizeof(release)); + + release.rsrc_handle = p_ctx->rsrc_handle; + debug("%d:----------- Start DK_CAPI_RELEASE -------------\n",pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" ctx_id:0X%"PRIX64" rsrc_handle=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->context_id,p_ctx->rsrc_handle); +#ifdef _AIX + release.version = p_ctx->version; + release.flags = p_ctx->flags; + release.devno = p_ctx->devno; + release.ctx_token = p_ctx->context_id; + debug("%d:devno=0X%"PRIX64"\n",pid,p_ctx->devno); + rc = ioctl(p_ctx->fd, DK_CAPI_RELEASE, &release); +#else + release.hdr.version = p_ctx->version; + release.hdr.flags = p_ctx->flags; + release.context_id = p_ctx->context_id; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_RELEASE, &release); +#endif + debug("%d:... 
DK_CAPI_RELEASE called ...\n",pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_RELEASE failed"); + } + +#ifdef _AIX + p_ctx->return_flags = release.return_flags; +#else + p_ctx->return_flags = release.hdr.return_flags; +#endif + debug("%d:return_flag=0X%"PRIX64"\n",pid,p_ctx->return_flags); + + debug("%d:--------- End DK_CAPI_RELEASE -------------\n",pid); + return rc; +} + +int ioctl_dk_capi_vlun_resize(struct ctx *p_ctx) +{ + int rc; +#ifdef _AIX + struct dk_capi_resize resize; +#else + struct dk_cxlflash_resize resize; +#endif + p_ctx->flags = 0; //not yet defined + memset(&resize, 0, sizeof(resize)); + resize.rsrc_handle = p_ctx->rsrc_handle; + + debug("%d:------------- Start DK_CAPI_VLUN_RESIZE -------------\n",pid); + debug("%d:dev=%s fd=%d flags=0X%"PRIX64" ctx_id:0X%"PRIX64" rsrc_handle=0X%"PRIX64" req_size=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->flags,p_ctx->context_id, + p_ctx->rsrc_handle, p_ctx->req_size); +#ifdef _AIX + resize.version = p_ctx->version; + resize.flags = p_ctx->flags; + resize.ctx_token = p_ctx->context_id; + resize.devno = p_ctx->devno; + resize.vlun_size = p_ctx->req_size; + debug("%d:devno=0X%"PRIX64"\n",pid,p_ctx->devno); + rc = ioctl(p_ctx->fd, DK_CAPI_VLUN_RESIZE, &resize); +#else + resize.hdr.version = p_ctx->version; + resize.context_id = p_ctx->context_id; + resize.req_size = p_ctx->req_size; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_VLUN_RESIZE, &resize); +#endif + debug("%d:... 
DK_CAPI_VLUN_RESIZE called ...\n",pid); + if (rc) + { + debug("%d:lun_size=0X%"PRIX64" last_lba=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->lun_size,p_ctx->last_lba,p_ctx->return_flags); + + CHECK_RC(errno, "DK_CAPI_VLUN_RESIZE failed"); + } + +#ifdef _AIX + p_ctx->return_flags = resize.return_flags; +#else + p_ctx->return_flags = resize.hdr.return_flags; +#endif + p_ctx->last_lba = resize.last_lba; + p_ctx->lun_size = resize.last_lba + 1; + if ( p_ctx->req_size == 0 ) p_ctx->lun_size = 0; + + debug("%d:lun_size=0X%"PRIX64" last_lba=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->lun_size,p_ctx->last_lba,p_ctx->return_flags); + + debug("%d:--------- End DK_CAPI_RESIZE ----------\n",pid); + return rc; +} + +int ioctl_dk_capi_verify(struct ctx *p_ctx) +{ + int rc; + g_errno=0; // Reset errno before using. +#ifdef _AIX + struct dk_capi_verify verify; +#else + struct dk_cxlflash_verify verify; + + /* dummy_sense_data is using Unit attention sense data; + dummy_sense_data will be used when HINT is set and user + dont have such valid sense_data */ + + char * dummy_sense_data = "p\000\006\000\000\000\000\n\000\000\000\000)\000\000\000\000"; + +#endif + memset(&verify, 0, sizeof(verify)); + verify.hint = p_ctx->hint; + + debug("%d:--------- Start DK_CAPI_VERIFY ----------\n",pid); +#ifdef _AIX + memcpy(verify.sense_data,p_ctx->verify_sense_data,DK_VERIFY_SENSE_LEN); + + verify.version = p_ctx->version; + verify.path_id = p_ctx->path_id; + verify.flags = p_ctx->flags; + rc = ioctl(p_ctx->fd, DK_CAPI_VERIFY, &verify); +#else + // Copying sense_data + if ( p_ctx->dummy_sense_flag == 1 ) + { + memcpy((void *)verify.sense_data,(const void *)dummy_sense_data, + DK_CXLFLASH_VERIFY_SENSE_LEN); + memcpy((void *)p_ctx->verify_sense_data,(const void *)dummy_sense_data, + DK_CXLFLASH_VERIFY_SENSE_LEN); + } + else + { + memcpy((void *)verify.sense_data,(const void *)p_ctx->verify_sense_data, + DK_CXLFLASH_VERIFY_SENSE_LEN); + } + + verify.context_id = p_ctx->context_id; + 
verify.rsrc_handle = p_ctx->rsrc_handle; + verify.hdr.version = p_ctx->version; + verify.hdr.flags = p_ctx->flags; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_VERIFY, &verify); +#endif + + if (rc) + { + g_errno=errno; + debug("%d: ioctl failed with errno=%d & rc=%d\n", pid, g_errno, rc); + } + +#ifdef _AIX + p_ctx->return_flags = verify.return_flags; +#else + p_ctx->return_flags = verify.hdr.return_flags; +#endif + + p_ctx->verify_last_lba = verify.last_lba; + + debug("%d: dev=%s fd=%d ",pid,p_ctx->dev,p_ctx->fd); + +#ifndef _AIX + debug("\n%d: ctx_id=0X%"PRIX64" res_hndl=0X%"PRIX64" \n", + pid,p_ctx->context_id,p_ctx->rsrc_handle); +#else + debug(" path_id=%d\n", p_ctx->path_id); +#endif + + debug("%d: verify_last_lba=0X%"PRIX64" hint=0X%"PRIX64" input_flag=0X%"PRIX64" return_flag=0X%"PRIX64"\n", + pid,p_ctx->verify_last_lba,p_ctx->hint,p_ctx->flags,p_ctx->return_flags); + if (DEBUG) + { + force_dump=1; + hexdump(&p_ctx->verify_sense_data,0x20,"Sense data for verify ioctl"); + force_dump=0; + } + + debug("%d:--------- End DK_CAPI_VERIFY ----------\n",pid); + return g_errno; +} + +#ifdef _AIX +int ioctl_dk_capi_log(struct ctx *p_ctx, char *s_data) +{ + int rc; +#ifdef _AIX + struct dk_capi_log log; +#else + struct dk_cxlflash_log log; +#endif + memset(&log, 0, sizeof(log)); + + log.rsrc_handle = p_ctx->rsrc_handle; + log.reason = p_ctx->reason; +#ifdef _AIX + log.version = p_ctx->version; + log.flags = p_ctx->flags; + log.path_id = p_ctx->path_id; + log.devno = p_ctx->devno; + log.ctx_token = p_ctx->context_id; + rc = ioctl(p_ctx->fd, DK_CAPI_LOG_EVENT, &log); +#else + //log.context_id = p_ctx->context_id; + log.hdr.version = p_ctx->version; + log.hdr.flags = p_ctx->flags; + rc = ioctl(p_ctx->fd, DK_CXLFLASH_LOG_EVENT, &log); +#endif + if (rc) + { + CHECK_RC(errno, "DK_CAPI_LOG_EVENT failed"); + } +#ifdef _AIX + p_ctx->return_flags = log.return_flags; +#else + p_ctx->return_flags = log.hdr.return_flags; +#endif + strncpy(s_data, (char *)&(log.sense_data[0]), 512); + 
s_data[512]='\0'; + return rc; +} +#else +//dummy function +int ioctl_dk_capi_log(struct ctx *p_ctx, char *s_data) +{ + return -1; +} +#endif +int ioctl_dk_capi_recover_ctx(struct ctx *p_ctx) +{ + int rc; + debug("%d:--------Start DK_CAPI_RECOVER_CTX ---------\n",pid); +#ifdef _AIX + struct dk_capi_recover_context recv_ctx; + memset(&recv_ctx, 0,sizeof(struct dk_capi_recover_context)); + + debug("%d:dev=%s fd=%d flags=0X%"PRIX64" ctx_id:0X%"PRIX64" devno=0X%"PRIX64" reason=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->flags, + p_ctx->context_id,p_ctx->devno,p_ctx->reason); + recv_ctx.version = p_ctx->version; + recv_ctx.devno = p_ctx->devno; + recv_ctx.flags = p_ctx->flags; + recv_ctx.ctx_token = p_ctx->context_id; + recv_ctx.reason = p_ctx->reason; + + rc = ioctl(p_ctx->fd, DK_CAPI_RECOVER_CTX, &recv_ctx); + debug("%d:... DK_CAPI_RECOVER_CTX called ...\n", pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_RECOVER_CTX failed"); + } + + p_ctx->new_ctx_token = recv_ctx.new_ctx_token; + p_ctx->p_host_map =(volatile struct sisl_host_map *)recv_ctx.mmio_start; + p_ctx->mmio_size = recv_ctx.mmio_size; + p_ctx->return_flags = recv_ctx.return_flags; +#else + struct dk_cxlflash_recover_afu recv_ctx; + memset(&recv_ctx, 0, sizeof(recv_ctx)); + + recv_ctx.hdr.version = p_ctx->version; + recv_ctx.hdr.flags = p_ctx->flags; + recv_ctx.context_id = p_ctx->context_id; + recv_ctx.reason = p_ctx->reason; + debug("%d:dev=%s fd=%d flags=0X%"PRIX64" ctx_id:0X%"PRIX64" adap_fd=%d reason=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->flags, + p_ctx->context_id,p_ctx->adap_fd,p_ctx->reason); + rc = ioctl(p_ctx->fd, DK_CXLFLASH_RECOVER_AFU, &recv_ctx); + debug("%d:... 
DK_CAPI_RECOVER_CTX called ...\n", pid); + if (rc) + CHECK_RC(errno, "DK_CXLFLASH_RECOVER_AFU failed"); + + p_ctx->return_flags = recv_ctx.hdr.return_flags; + p_ctx->adap_fd = recv_ctx.adap_fd; + p_ctx->mmio_size = recv_ctx.mmio_size; + p_ctx->new_ctx_token = recv_ctx.context_id; + + if (p_ctx->return_flags == DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET) + { + debug("%d: 1st do munmap then mmap fresh mmio size with new adap_fd\n",pid); + rc = munmap((void *)p_ctx->p_host_map, p_ctx->mmio_size); + if (rc) + fprintf(stderr, "munmap failed with errno = %d", errno); + else debug("%d: munmap() succeeded for older mmio space..\n", pid); + + p_ctx->p_host_map = mmap(NULL,p_ctx->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, + p_ctx->adap_fd,0); + if (p_ctx->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + else debug("%d: New mmap() returned success..\n", pid); + } + else + { + debug("%d:Recovery Action NOT Needed.. 
\n", pid); + } + +#endif /* _AIX */ + debug("%d:new_ctx_token=0X%"PRIX64" adap_fd=%d mmio=%p mmio_size=0X%"PRIX64" return_flags=0X%"PRIX64"\n", + pid,p_ctx->new_ctx_token,p_ctx->adap_fd,p_ctx->p_host_map,p_ctx->mmio_size, + p_ctx->return_flags); + debug("%d:------------- End DK_CAPI_RECOVER_CTX -------------\n", pid); + return rc; +} + +int ioctl_dk_capi_query_exception(struct ctx *p_ctx) +{ + int rc = 0; + debug("--------started DK_CAPI_QUERY_EXCEPTIONS ---------\n"); +#ifdef _AIX + struct dk_capi_exceptions exceptions; + memset(&exceptions, 0, sizeof(struct dk_capi_exceptions)); + + exceptions.version = p_ctx->version; + exceptions.ctx_token = p_ctx->context_id; + exceptions.devno = p_ctx->devno; + exceptions.rsrc_handle = p_ctx->rsrc_handle; + exceptions.flags = p_ctx->flags; + + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions); + + debug("-------- O/p of DK_CAPI_QUERY_EXCEPTIONS ----------\n"); + // I will keep this for debug + if (DEBUG) + { + printf(" int rc=%d\n",rc); + printf(" uint16_t version = %d\n",exceptions.version); + printf(" uint64_t flags = 0x%llx\n",exceptions.flags); + printf(" uint64_t return_flags = 0x%llx\n",exceptions.return_flags); + printf(" dev64_t devno = 0x%llx\n",exceptions.devno); + printf(" uint64_t ctx_token = 0x%llx\n",exceptions.ctx_token); + printf(" uint64_t rsrc_handle = 0x%llx\n",exceptions.rsrc_handle); + printf(" uint64_t exceptions = 0x%llx\n",exceptions.exceptions); + printf(" uint64_t adap_except_type = 0x%llx\n",exceptions.adap_except_type); + printf(" uint64_t adap_except_time = 0x%llx\n",exceptions.adap_except_time); + printf(" uint64_t adap_except_data = 0x%llx\n",exceptions.adap_except_data); + printf(" uint64_t adap_except_count = 0x%llx\n",exceptions.adap_except_count); + printf(" uint64_t last_lba = 0x%llx\n",exceptions.last_lba); + } + + if (rc) + { + CHECK_RC(errno, "DK_CAPI_QUERY_EXCEPTIONS failed"); + } + + p_ctx->return_flags = exceptions.return_flags; + p_ctx->exceptions = exceptions.exceptions; 
+ + p_ctx->adap_except_type = exceptions.adap_except_type; + p_ctx->adap_except_data = exceptions.adap_except_data; +#endif /* _AIX */ + + debug("-------- End of DK_CAPI_QUERY_EXCEPTIONS ---------\n"); + + return rc; +} + +int create_res(struct ctx *p_ctx) +{ + return ioctl_dk_capi_uvirtual(p_ctx); +} +int close_res(struct ctx *p_ctx) +{ + return ioctl_dk_capi_release(p_ctx); +} + +int mc_stat1(struct ctx *p_ctx, mc_stat_t *stat) +{ + __u64 size; + stat->nmask = 0; + size = p_ctx->chunk_size; + stat->blk_len = p_ctx->block_size; + while (size) + { + size = size>>1; + stat->nmask++; + } + stat->nmask--; + debug("%d:mc_stat1 chunk=0X%"PRIX64" nmask=0X%X\n",pid,stat->size,stat->nmask); + return 0; +} + +int mc_size1(struct ctx *p_ctx, __u64 chunk, __u64 *actual_size) +{ + int rc; + p_ctx->req_size = (chunk * (p_ctx->chunk_size)); + debug("%d mc_size1 chunk=0X%"PRIX64" lun_size=0X%"PRIX64" req_size=0X%"PRIX64"\n", + pid,chunk,p_ctx->lun_size,p_ctx->req_size); + rc = ioctl_dk_capi_vlun_resize(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_vlun_resize failed\n"); + *actual_size = p_ctx->lun_size/p_ctx->chunk_size; + return rc; +} + +int create_resource(struct ctx *p_ctx, __u64 nlba, + __u64 flags, __u16 lun_type) +{ + p_ctx->flags = flags; + p_ctx->lun_size = nlba; + + if (LUN_VIRTUAL == lun_type) + return ioctl_dk_capi_uvirtual(p_ctx); + else + return ioctl_dk_capi_udirect(p_ctx); +} + +int vlun_resize(struct ctx *p_ctx, __u64 nlba) +{ + p_ctx->req_size = nlba; + return ioctl_dk_capi_vlun_resize(p_ctx); +} + +//dummy function +int mc_init() +{ + return 0; +} + +//dummy function +int mc_term() +{ + return 0; +} +#ifdef _AIX +//IOCTL from here +int set_cflash_paths(struct flash_disk *disk) +{ + int rc=0; + int i; + int fd; + int index=0; + struct cflash_paths + { + struct dk_capi_paths path; + struct dk_capi_path_info paths[MAX_PATH-1]; + }capi_paths; + + struct dk_capi_path_info *path_info = NULL; + memset(&capi_paths, 0, sizeof(capi_paths)); + + capi_paths.path.path_count = 
MAX_PATH; + fd = open_dev(disk->dev, O_RDWR); + if (fd < 0) + { + fprintf(stderr,"open %s failed\n",disk->dev); + return -1; + } + rc = ioctl(fd, DK_CAPI_QUERY_PATHS, &capi_paths); + close(fd); + if (rc) + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + + if (capi_paths.path.returned_path_count == 0) + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + + if (capi_paths.path.returned_path_count > MAX_PATH) + { + fprintf(stderr,"got more paths than provided issued=%d ret=%d\n", + MAX_PATH,capi_paths.path.returned_path_count); + capi_paths.path.returned_path_count = MAX_PATH; + } + + path_info = capi_paths.path.path_info; + for (i = 0; i < capi_paths.path.returned_path_count; i++) + { + /*if ((path_info[i].flags & DK_CPIF_RESERVED) && + !(path_info[i].flags & DK_CPIF_FAILED))*/ + { + debug("%d:dev=%s devno=0X%"PRIX64" path_id=%"PRIu16" flags=0X%"PRIX64"\n", + pid,disk->dev,path_info[i].devno, + path_info[i].path_id,path_info[i].flags); + disk->devno[index] = path_info[i].devno; + disk->path_id[index] = path_info[i].path_id; + disk->path_id_mask[index] = 1<path_count = index; + return rc; +} +#endif /* _AIX */ + +int get_flash_disks(struct flash_disk disks[], int type) +{ + int count =0; + int rc=0; + int i=0; + FILE *fptr; + char *p_file; + char buf[10]; +#ifdef _AIX + const char *cmd ="lsdev -c disk -s capidev -t extflash |awk '{print $1}'>/tmp/flist"; +#else + const char *cmd ="/opt/ibm/capikv/bin/cxlfstatus | \ + grep superpipe | sort -u -k5 | \ + awk '{print $1}' | tr -d ':' >/tmp/flist"; +#endif + + rc = system(cmd); + if (rc) return 0; + + if ( type == FDISKS_ALL ) + { + debug("%d:cmd=%s rc=%d\n",pid, cmd, rc); + p_file="/tmp/flist"; + } + else if ( type == FDISKS_SAME_ADPTR ) + { + p_file="/tmp/flist_sameAdap"; + debug("%d: List of capi disks from same adapter present in %s file\n", pid, p_file); + } + else if ( type == FDISKS_SHARED ) + { + p_file="/tmp/flist_disk_shared"; + debug("%d: List of capi disks shared with different adapter in %s file\n", pid, 
p_file); + } + else + { + p_file="/tmp/flist_diffAdap"; + debug("%d: List of capi disks from diff adapter present in %s file\n", pid, p_file); + } + + debug("%d: List of all unique capi disks present in /tmp/flist\n", pid); + debug("%d: Refer /tmp/flist file for sample format\n", pid); + +#ifndef _AIX + system("rm /tmp/flist_sameAdap /tmp/flist_diffAdap /tmp/flist_disk_shared >/dev/null 2>&1"); +#endif + fptr = fopen(p_file, "r"); + if (NULL == fptr) + { +#ifdef _AIX + fprintf(stderr,"%d: --------------------------------------------------------\n", pid); + fprintf(stderr,"%d: Error opening file %s\n", pid, p_file); + fprintf(stderr,"%d: Retry after fixing/verifying the file: %s\n", pid, p_file); + fprintf(stderr,"%d: --------------------------------------------------------\n", pid); + return 0; +#else + debug("%d: ---------- Automatically populating the disk in adapter -------\n", pid); + if ( type == FDISKS_SAME_ADPTR ) + { + rc = diskInSameAdapater(p_file); + if (rc) + { + fprintf(stderr,"%d: failed in diskInSameAdapater()--\n", pid); + return 0; + } + + fptr = fopen(p_file, "r"); + if (NULL == fptr) + { + fprintf(stderr,"%d: Error opening file %s\n", pid, p_file); + fprintf(stderr,"%d: test aborted \n",pid); + return 0; + } + // reseting errno + if (errno) + errno = 0; + + } + + else if ( type == FDISKS_DIFF_ADPTR ) + { + rc = diskInDiffAdapater(p_file); + if (rc) + { + fprintf(stderr,"%d: failed in diskInDiffAdapater()--\n", pid); + return 0; + } + + fptr = fopen(p_file, "r"); + if (NULL == fptr) + { + fprintf(stderr,"%d: Error opening file %s\n", pid, p_file); + fprintf(stderr,"%d: test aborted \n",pid); + return 0; + } + // reseting errno + if (errno) + errno = 0; + } +#endif + } + + while (fgets(buf,10, fptr) != NULL) + { + i=0; + while (i < 10) + { + if (buf[i] =='\n') + { + buf[i]='\0'; + break; + } + i++; + } + sprintf(disks[count].dev,"/dev/%s",buf); +#ifdef _AIX + //for LINUX devno & path_id have no meaning + //will be ignored + 
set_cflash_paths(&disks[count]); +#endif + count++; + if (MAX_FDISK == count) + break; + } + fclose(fptr); + return count; +} + + +//wait all child processes to finish their task +int wait4all() +{ + int rc; + pid_t mypid; + + while ((mypid = waitpid(-1, &rc, 0))) + { + if (mypid == -1) + { + break; + } + if (WIFEXITED(rc)) + { + rc = WEXITSTATUS(rc); + if (rc) + g_error = -1; + } + else + { + fprintf(stderr, "%d : abnormally terminated\n", mypid); + g_error =-1; + } + debug("pid %d exited with rc=%d\n", mypid,rc); + fflush(stdout); + + + } + rc = g_error; + g_error = 0; + return rc; +} + +int do_internal_io(struct ctx *p_ctx, __u64 stride, bool iocompare) +{ + __u64 st_lba= p_ctx->st_lba; + __u64 remain; + int rc = 0; + debug("%d: IO st_lba = 0X%"PRIX64" and last_lba = 0X%"PRIX64"\n", + pid, st_lba,p_ctx->last_lba); + if (st_lba >= p_ctx->last_lba) + { + fprintf(stderr, "%d: Failed st_lba should be less than last_lba\n", pid); + return -1; + } + //adjust lbas to rw boundary LBAs & within range + remain = (p_ctx->last_lba+1-st_lba)%(NUM_CMDS*stride); + if (remain) + { + rc = send_write(p_ctx, st_lba, stride, pid); + CHECK_RC(rc, "send_write failed"); + rc = send_read(p_ctx, st_lba, stride); + CHECK_RC(rc, "send_read failed"); + if (iocompare) + { + rc = rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + fprintf(stderr,"buf cmp failed for lba 0x%"PRIX64",rc =%d\n", + st_lba,rc); + return rc; + } + } + st_lba+= remain; + debug("%d: adjusting 0X%"PRIX64" lba with st_lba=0X%"PRIX64"\n",pid,remain,st_lba); + } + + for (; st_lba <= p_ctx->last_lba; st_lba += (NUM_CMDS*stride)) + { + rc = send_write(p_ctx, st_lba, stride, pid); + CHECK_RC(rc, "send_write failed"); + rc = send_read(p_ctx, st_lba, stride); + CHECK_RC(rc, "send_read failed"); + if (iocompare) + { + rc = rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + fprintf(stderr,"buf cmp failed for lba 0X%"PRIX64",rc =%d\n", + st_lba,rc); + break; + } + } + } + return rc; +} + +int do_io(struct ctx *p_ctx, __u64 stride) +{ + int rc; + 
rc = do_internal_io(p_ctx, stride, true); //true means do IO compare + debug("%d:IO done with rc=%d\n",pid,rc); + return rc; +} +int do_io_nocompare(struct ctx *p_ctx, __u64 stride) +{ + return do_internal_io(p_ctx, stride, false); //0 NO IO compare +} + +int get_max_res_hndl_by_capacity(char *dev) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx=&myctx; + __u64 chunks; + int m_res_pr_ctx=0; + + pid=getpid(); + memset(p_ctx, 0, sizeof(myctx)); + strcpy(p_ctx->dev, dev); + if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno); + return -1; + } +#ifdef _AIX + rc=ioctl_dk_capi_query_path(p_ctx); +#endif + p_ctx->work.num_interrupts =4; + rc = ioctl_dk_capi_attach(p_ctx); + if (rc) + { + close(p_ctx->fd); + return -1; + } + chunks = (p_ctx->last_phys_lba)/(p_ctx->chunk_size); + m_res_pr_ctx=chunks/MAX_OPENS; + debug("%d:chunks=0X%"PRIX64" m_res_pr_ctx=%d\n",pid,chunks,m_res_pr_ctx); + ctx_close(p_ctx); + if (m_res_pr_ctx == 0) + { + printf("%d:Minimum disk capacity required=%luGB\n", + pid,(MAX_OPENS * p_ctx->block_size * p_ctx->chunk_size)/(1024*1024*1024)); + return m_res_pr_ctx; + } + debug("%d:Max res handle per ctx=%d\n",pid,m_res_pr_ctx); + return m_res_pr_ctx; +} + + +__u64 get_disk_last_lba(char *dev, dev64_t devno, uint64_t *chunk_size) +{ + __u64 last_lba = 0; + int rc; + struct ctx myctx; + + memset(&myctx, 0, sizeof(struct ctx)); + myctx.fd = open_dev(dev, O_RDWR); + if (myctx.fd < 0) + { + fprintf(stderr,"%d: %s opened failed\n",pid, dev); + return 0; + } + strcpy(myctx.dev, dev); + myctx.devno = devno; + myctx.flags = DK_AF_ASSIGN_AFU; + myctx.work.num_interrupts =4; + rc = ioctl_dk_capi_attach(&myctx); + last_lba = myctx.last_phys_lba; + *chunk_size = myctx.chunk_size; + rc |= ctx_close(&myctx); + if (rc) return 0; + return last_lba; +} + +#ifdef _AIX +int ioctl_dk_capi_query_path_check_flag(struct ctx *p_ctx, + int flag1, int flag2) +{ + int rc; + struct cflash_paths + { + struct 
dk_capi_paths path; + struct dk_capi_path_info paths[MAX_PATH-1]; + }capi_paths; + + struct dk_capi_path_info *path_info = NULL; + memset(&capi_paths, 0, sizeof(capi_paths)); + + capi_paths.path.version = p_ctx->version; + capi_paths.path.path_count = MAX_PATH; + + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + //TBD, handled for one path id, handle for all + p_ctx->devno = path_info[0].devno; + CHECK_RC(rc, "DK_CAPI_QUERY_PATHS failed"); + path_info = capi_paths.path.path_info; + // check for flag if ioctl passed + if ( path_info[0].flags == flag1 && path_info[1].flags == flag2 ) + { + rc=0; + } + else if ( path_info[0].flags == flag2 && path_info[1].flags == flag1 ) + { + rc=0; + } + else + rc=1; + return rc; +} +#endif + + +int do_eeh(struct ctx *p_ctx) +{ + int len; +#ifdef _AIX + if (!DEBUG) + printf("%d: ....... Waiting EEH/AFU_RESET should be done Manually.........\n",pid); + while (1) + { + if (DEBUG) + printf("%d: ....... Waiting EEH/AFU_RESET should be done Manually.........\n",pid); + //1st option + /*len = read(p_ctx->adap_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + if((len < 0) && (errno == EIO)) + { + afu_reset = true; + }*/ + //2nd option + __u64 room = read_64(&p_ctx->p_host_map->cmd_room); +#ifndef __64BIT__ + if (room == 0XFFFFFFFF) +#else + if (room == -1) +#endif + { + usleep(1000);//just give chance to call exception query + afu_reset = true; + } + if (afu_reset) + { + printf("%d: EEH/AFU_RESET is done.......\n",pid); + //usleep(1000) //just give chance to call exception query + clear_waiting_cmds(p_ctx); + break; + } + sleep(1); + } + +#else + //linux, we read adapter register in ctx_rrq_rx thread + //it will tell us if afu_rest done, + //but some cases if no ctx_rrq_rx started + //better read adapter register here + + int rc = 0; + eehCmd_t eehCmdVar; + eehCmd_t *eehCmdP = &eehCmdVar; + pthread_t thread_eeh; + pthread_mutexattr_t mattrVar; + pthread_condattr_t cattrVar; + char tmpBuff[MAXBUFF]; + + int iautoEeh = 1; + 
char * autoEehP = getenv("AUTO_EEH"); + if ( NULL == autoEehP ) + { + while (1) + { + printf("%d: ....... Waiting EEH should be done Manually.........\n",pid); + len = read(p_ctx->adap_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + if ((len < 0) && (errno == EIO)) + { + printf("%d: EEH is done.......\n",pid); + afu_reset=true; + break; + } + sleep(1); + } + } + else + { + iautoEeh = atoi(autoEehP); + eehCmdP->ieehLoop = iautoEeh; + pthread_mutexattr_init(&mattrVar); + pthread_condattr_init(&cattrVar); + pthread_mutex_init(&eehCmdP->eeh_mutex , &mattrVar); + pthread_cond_init(&eehCmdP->eeh_cv , &cattrVar); + + + rc = diskToPCIslotConv(p_ctx->dev , tmpBuff ); + CHECK_RC(rc, "diskToPCIslotConv failed \n"); + + rc = prepEEHcmd( tmpBuff, eehCmdP->cmdToRun); + CHECK_RC(rc, " prepEEHcmd failed \n"); + + eehCmdP->eehSync = 0; + + debug("%d:---------------- Going to trigger EEH --------------------\n",pid); + + pthread_create(&thread_eeh,NULL,do_trigger_eeh_cmd, eehCmdP); + + pthread_mutex_lock( &eehCmdP->eeh_mutex ); + + while ( eehCmdP->eehSync != 1) + { + pthread_cond_wait(&eehCmdP->eeh_cv, &eehCmdP->eeh_mutex); + } + + pthread_cancel(thread_eeh); + + pthread_mutex_unlock( &eehCmdP->eeh_mutex); + + pthread_mutexattr_destroy(&mattrVar); + pthread_condattr_destroy(&cattrVar); + + + while (1) + { + printf("%d: ....... 
Reading the adap_fd for EEH event.........\n",pid); + len = read(p_ctx->adap_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + if ((len < 0) && (errno == EIO)) + { + printf("%d: EEH is done.......\n",pid); + afu_reset=true; + break; + } + sleep(1); + } + + + } +#endif + return 0; +} + +void * do_trigger_eeh_cmd( void * arg ) +{ + +#ifndef _AIX + int iCnt = 0; + int rc = 0; + eehCmd_t * eehCmdP = arg; + + const char *configCmdP = "echo 10000000 > /sys/kernel/debug/powerpc/eeh_max_freezes"; + + if ( eehCmdP->ieehLoop <= 0 ) + eehCmdP->ieehLoop = 1 ; + + + pthread_mutex_lock( &eehCmdP->eeh_mutex ); + + rc = system(configCmdP); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,configCmdP); + } + + for (iCnt =0 ; iCnt < eehCmdP->ieehLoop ; iCnt++) + { + sleep(2); + + rc = system(eehCmdP->cmdToRun); + if (rc) + { + g_error = -1; + fprintf(stderr,"%d: Failed in %s \n",pid,eehCmdP->cmdToRun); + } + } + + eehCmdP->eehSync = 1; + pthread_cond_signal( &eehCmdP->eeh_cv); + pthread_mutex_unlock( &eehCmdP->eeh_mutex ); + + return 0; +#endif + +} + +int do_poll_eeh(struct ctx *p_ctx) +{ +#ifndef _AIX + int len; + printf("%d: ....... 
Waiting EEH should be done........\n",pid); + + while (1) + { + len = read(p_ctx->adap_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + if ((len < 0) && (errno == EIO)) + { + printf("%d: EEH is done.......\n",pid); + afu_reset=true; + break; + } + sleep(1); + } +#endif + + return 0; +} + + +bool check_afu_reset(struct ctx *p_ctx) +{ +#ifdef _AIX + __u64 room = read_64(&p_ctx->p_host_map->cmd_room); + if (room == -1) + { + afu_reset = true; + printf("%d: EEH/AFU_RESET is done.......\n",pid); + } +#endif + return afu_reset; +} + +int compare_size(uint64_t act, uint64_t exp) +{ + int rc = 0; + if (act != exp) + { + fprintf(stderr,"%d: Failed in compare_size():\ + act=0X%"PRIX64" exp=0X%"PRIX64"\n", + pid, act, exp); + g_error=-1; + rc = -1; + } + return rc; +} + +int compare_flags(uint64_t act, uint64_t exp) +{ + int rc = 0; + return rc; + // TBD: disable this code if return flags testing is to be skipped + if (act != exp) + { + fprintf(stderr,"%d: Failed in compare_flags():\ + act=0X%"PRIX64" exp=0X%"PRIX64"\n", + pid, act, exp); + g_error=-1; + rc = -1; + } + //return rc; +} + +int validateFunction(struct validatePckt * newVar) +{ + int rc=0; + + switch (newVar->obCase) + { + case CASE_PLUN : + rc = compare_size(newVar->ctxPtr->last_lba, newVar->expt_last_lba); + //TBD - flag check will be enable it later + //if ( newVar->ctxPtr->return_flags != newVar->expt_return_flags ) + //{ + // rc=-1; + //} + break; + case CASE_VLUN : + rc = compare_size(newVar->ctxPtr->last_lba, newVar->expt_last_lba); + //TBD - flag check will be enable it later + //if ( newVar->ctxPtr->return_flags != newVar->expt_return_flags ) + //{ + // rc=-1; + //} + break; + default : + rc=-1; + + } + return rc; +} + +int diskToPCIslotConv( char * diskName , char * pciSlotP) +{ + + int iCount =0; + int rc =0; + int iTer =0; + int iKey =0; + + FILE *fileP; + char tmpBuff[MAXBUFF]; + char npBuff[MAXNP][MAXBUFF]; + char blockCheckP[MAXBUFF]; + + const char *initCmdP = "lspci -v | grep \"Processing 
accelerators\" | awk '{print $1}' > /tmp/trashFile"; + + rc = system(initCmdP); + if ( rc != 0) + { + fprintf(stderr,"%d: Failed in lspci \n",pid); + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + while (fgets(tmpBuff,MAXBUFF, fileP) != NULL) + { + while (iTer < MAXBUFF) + { + if (tmpBuff[iTer] =='\n') + { + tmpBuff[iTer]='\0'; + break; + } + iTer++; + } + + // only supporting for scsi_generic device + + sprintf(blockCheckP,"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/ | grep -w \"scsi_generic\" >/dev/null 2>&1",tmpBuff); + rc = system(blockCheckP); + + if ( rc == 0 ) + { + + iKey = strlen(diskName)-3 ; + + sprintf(npBuff[iCount],"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/scsi_generic | grep %s >/dev/null 2>&1",tmpBuff,&diskName[iKey]); + + rc = system(npBuff[iCount]); + if ( rc == 0 ) + { + fclose(fileP); + break; + } + + iCount++; + } + } + + sprintf(npBuff[iCount],"cat /sys/bus/pci/devices" + "/%s/devspec > /tmp/trashFile",tmpBuff); + + rc = system(npBuff[iCount]); + if ( rc != 0) + { + fprintf(stderr,"%d: failed to find PCI devspec \n",pid); + rc = EINVAL ; + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + if ( NULL == fgets(tmpBuff,MAXBUFF, fileP) ) + { + fprintf(stderr,"%d: Error in file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + if ( fclose(fileP) == EOF ) + { + fprintf(stderr,"%d: Error closin the file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + + sprintf(npBuff[iCount],"cat /proc/device-tree" + "%s/ibm,loc-code > /tmp/trashFile", tmpBuff); + + rc = system(npBuff[iCount]); + if ( rc != 0) + { + fprintf(stderr,"%d: failed to find PCI 
devspec \n",pid); + rc = EINVAL ; + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + if ( NULL == fgets(tmpBuff,MAXBUFF, fileP) ) + { + fprintf(stderr,"%d: Error in file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + iTer=0; + + while (iTer < MAXBUFF) + { + if (tmpBuff[iTer] ==' ') + { + tmpBuff[iTer]='\0'; + break; + } + iTer++; + } + + if ( fclose(fileP) == EOF ) + { + fprintf(stderr,"%d: Error closin the file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + + // Need to do error handling stuff + strncpy( pciSlotP, tmpBuff,strlen(tmpBuff)+1); + +xerror: + return rc; +} + +int prepEEHcmd( char * pciSlotP, char * cmd ) +{ + int rc =0; + int ikeyFind =0; + + ikeyFind = strlen(pciSlotP) - KEYLEN; + + if ( !strcmp(&pciSlotP[ikeyFind] , "P1-C7")) + { + + sprintf(cmd,"echo 1 > /sys/kernel/debug/powerpc/" + "PCI0000/err_injct_outbound"); + } + else if ( !strcmp(&pciSlotP[ikeyFind] , "P1-C6")) + { + + sprintf(cmd,"echo 1 > /sys/kernel/debug/powerpc/" + "PCI0002/err_injct_outbound"); + + } + else if ( !strcmp(&pciSlotP[ikeyFind] , "P1-C5")) + { + + sprintf(cmd,"echo 1 > /sys/kernel/debug/powerpc/" + "PCI0004/err_injct_outbound"); + + } + else if ( !strcmp(&pciSlotP[ikeyFind] , "P1-C3")) + { + + sprintf(cmd,"echo 1 > /sys/kernel/debug/powerpc/" + "PCI0006/err_injct_outbound"); + + + } + else if ( !strcmp(&pciSlotP[ikeyFind] , "P1-C2")) + { + + sprintf(cmd,"echo 1 > /sys/kernel/debug/powerpc/" + "PCI0007/err_injct_outbound"); + + } + else + { + + fprintf(stderr,"%d: fail in prepEEHcommad() \n", pid); + rc = EINVAL; + + } + + + return rc; +} + + +int ioctl_dk_capi_clone(struct ctx *p_ctx,uint64_t src_ctx_id,int src_adap_fd) +{ + int rc = 0; +#ifndef _AIX + struct dk_cxlflash_clone clone; + memset(&clone, 0, sizeof(clone)); + clone.context_id_src = src_ctx_id; + clone.context_id_dst = 
p_ctx->context_id; + clone.adap_fd_src = src_adap_fd; + clone.hdr.version = p_ctx->version; + debug("%d:----------- Start DK_CXLFLASH_CLONE ----------\n", pid); + debug("%d:src_ctx_id=0X%"PRIX64" dst_ctx_id=0X%"PRIX64" src_adap_fd=%d\n", + pid,src_ctx_id,p_ctx->context_id,src_adap_fd); + rc =ioctl(p_ctx->fd,DK_CXLFLASH_CLONE, &clone); + if (rc) + CHECK_RC(errno, "DK_CXLFLASH_CLONE failed with errno\n"); + debug("%d:----------- Done DK_CXLFLASH_CLONE ----------\n", pid); +#endif + return rc; +} + +int capi_open_close( struct ctx *p_ctx, char *dev ) +{ + int rc=0; + //open CAPI Flash disk device +#ifdef _AIX + //p_ctx->fd = open_dev(dev, O_RDWR); + p_ctx->fd = open(dev, O_RDWR); +#else + p_ctx->fd = open(dev, O_RDWR); +#endif + if (p_ctx->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", dev, errno); + return -1; + } + // close CAPI Flash disk device + rc=close(p_ctx->fd); + return rc; +} + +// to create LUN_DIRECT +int create_direct_lun( struct ctx *p_ctx ) +{ + int rc,i=0; + + pthread_t thread; + __u64 stride= 0x10000, nlba=0; + rc = ctx_init(p_ctx); + + CHECK_RC(rc, "Context init failed"); + //thread to handle AFU interrupt & events + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + rc = create_resource(p_ctx, nlba, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + // do io on context + for ( i=0; i< long_run; i++) + { + rc=do_io(p_ctx, stride); + if ( rc != 0 ) + return rc; + } + return rc; +} + +// to create multiple context + +// create VLUN +int create_vluns(char *dev, dev64_t devno, + __u16 lun_type, __u64 chunk,struct ctx *p_ctx) +{ + int rc,i=0,flag=0; + pthread_t thread; + __u64 stride= 0x10; + __u64 nlba=0; + + + pthread_t ioThreadId; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t *p_ioThreadData; + + + + p_ioThreadData=&ioThreadData; + pid = getpid(); + rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + 
pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + // create VLUN + nlba = chunk * (p_ctx->chunk_size); + rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); + CHECK_RC(rc, "create LUN_DIRECT failed"); + for ( i=0; ip_ctx=p_ctx; + p_ioThreadData->stride=stride; + p_ioThreadData->loopCount=5; + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + if ( rc != 0 ) + flag=1; + } + pthread_cancel(thread); + sleep(10); + close_res(p_ctx); + ctx_close(p_ctx); + debug(" FLAG = %d\n", flag ); + return flag; +} + +int create_multiple_vluns(struct ctx *p_ctx) +{ + int rc,j; + int cfdisk = 0; + struct flash_disk fldisks[MAX_FDISK]; + __u64 chunks[10] ={ 1,3,9,15,26,20,8,10,7,4 }; + cfdisk = get_flash_disks(fldisks, FDISKS_ALL); + if (cfdisk < 1) + { + fprintf(stderr,"NO flash disks found\n"); + return -1; + } + // use the first disk + //create atleast 10 chunks on each on PLUN + for (j=0; j < 10; j++) + { + if (0 == fork()) //child process + { + rc = create_vluns(fldisks[0].dev, fldisks[0].devno[0], + LUN_VIRTUAL,chunks[j],p_ctx); + + exit(rc); + } + } + + rc=wait4all(); + return rc; +} + +#ifdef _AIX +int ioctl_dk_capi_query_path_get_path(struct ctx *p_ctx, dev64_t devno1[]) +{ + int rc=0; + int i; + + struct cflash_paths + { + struct dk_capi_paths path; + struct dk_capi_path_info paths[MAX_PATH]; + }capi_paths; + + struct dk_capi_path_info *path_info = NULL; + memset(&capi_paths, 0, sizeof(capi_paths)); + + if ((p_ctx->fd = open_dev(p_ctx->dev,O_RDWR)) < 0) + { + fprintf(stderr,"open failed %s, errno %d\n",p_ctx->dev, errno); + g_error = -1; + return -1; + } + + capi_paths.path.version = p_ctx->version; + capi_paths.path.path_count = MAX_PATH; + + rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_PATHS, &capi_paths); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + } + + printf("capi_paths.path.returned_path_count=%d\n", capi_paths.path.returned_path_count); + if 
(capi_paths.path.returned_path_count == 0) + { + CHECK_RC(errno, "DK_CAPI_QUERY_PATHS failed"); + } + if (capi_paths.path.returned_path_count > MAX_PATH) + { + fprintf(stderr,"got more paths than provided issued=%d ret=%d\n", + MAX_PATH,capi_paths.path.returned_path_count); + capi_paths.path.returned_path_count = MAX_PATH; + } + + path_info = capi_paths.path.path_info; + //get 1st reserved path + for (i = 0; i < capi_paths.path.returned_path_count; i++) + { + devno1[i] = path_info[i].devno; + printf("devno[%d]=%x", i,devno1[i]); + } + /* if (capi_paths.path.return_flags) + { + CHECK_RC(1, "DK_CAPI_QUERY_PATHS failed unexpected return_flags"); + } */ + close(p_ctx->fd); + return capi_paths.path.returned_path_count; +} +#endif + +int get_nonflash_disk(char * dev, dev64_t * devno) +{ + // TBD : cleanup later. + // Till then workaround like this while compile. + strcpy(dev, "/dev/sda"); + return 1; +} + +void * do_io_thread(void * p_arg) +{ + do_io_thread_arg_t * thData; + int counter; + + thData=(do_io_thread_arg_t *)p_arg; + counter=thData->loopCount; + + while (counter--) + { + // do super pipe write. We just don't care about IO in this test. 
+ do_io(thData->p_ctx, thData->stride); + if (afu_reset) break; + } + return NULL; +} + +int traditional_io(int disk_num) +{ + int rc; + + char *disk_name, *str=NULL; + struct flash_disk disks[MAX_FDISK]; // flash disk struct + get_flash_disks(disks, FDISKS_ALL); + pid = getpid(); + str = (char *) malloc(100); + if ( disk_num == 1 ) + { + disk_name = strtok(disks[0].dev,"/"); + disk_name = strtok(NULL,"/"); + } + else + { disk_name = strtok(disks[1].dev,"/"); + disk_name = strtok(NULL,"/"); + } + sprintf(str, "dd if=/usr/lib/boot/unix_64 of=/dev/%s >/tmp/read_write.log 2>&1 &", disk_name); + debug("%s\n", str ); + rc=system(str); + sleep(60); + rc=system("cat /tmp/read_write.log | grep -i \"do not allow\" "); + if ( rc == 0 ) + return 1; + return 0; +} + + +int ioctl_dk_capi_attach_reuse(struct ctx *p_ctx,struct ctx *p_ctx_1, __u16 lun_type ) +{ + int rc,io=0; + pthread_t thread; +#ifdef _AIX + struct dk_capi_attach capi_attach; + struct devinfo iocinfo; + memset(&iocinfo, 0, sizeof(iocinfo)); +#else + struct dk_cxlflash_attach capi_attach; +#endif + memset(&capi_attach, 0, sizeof(capi_attach)); + + p_ctx->flags = DK_AF_ASSIGN_AFU; + debug("%d:----------- Start First DK_CAPI_ATTACH ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags); + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer); + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx->adap_fd,p_ctx->return_flags); +#ifdef _AIX + capi_attach.version = p_ctx->version; + capi_attach.flags = p_ctx->flags; + capi_attach.devno = p_ctx->devno; + debug("%d:devno=0X%"PRIX64"\n",pid,p_ctx->devno); +#else + capi_attach.hdr.version = p_ctx->version; + capi_attach.hdr.flags = p_ctx->flags; +#endif + 
capi_attach.num_interrupts = p_ctx->work.num_interrupts; + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64" num_interrupts=%d context_id=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags, + p_ctx->work.num_interrupts,p_ctx->context_id); + +#ifdef _AIX + rc = ioctl(p_ctx->fd, DK_CAPI_ATTACH, &capi_attach); +#else + rc = ioctl(p_ctx->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:...First DK_CAPI_ATTACH called ...\n", pid); + if (rc) + { + CHECK_RC(errno, "FIRST DK_CAPI_ATTACH failed"); + } + + p_ctx->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + p_ctx->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + p_ctx->context_id = capi_attach.ctx_token; + p_ctx->last_phys_lba = capi_attach.last_phys_lba; + p_ctx->chunk_size = capi_attach.chunk_size; + //get max_xfer + rc = ioctl(p_ctx->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, "Iocinfo failed with errno\n"); + } + //p_ctx->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + p_ctx->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + p_ctx->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + p_ctx->return_flags = capi_attach.return_flags; + p_ctx->block_size = capi_attach.block_size; +#else + p_ctx->p_host_map = mmap(NULL,p_ctx->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, + capi_attach.adap_fd,0); + if (p_ctx->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + p_ctx->context_id = capi_attach.context_id; + p_ctx->last_phys_lba = capi_attach.last_lba; + p_ctx->max_xfer = capi_attach.max_xfer; + p_ctx->chunk_size = NUM_BLOCKS; + p_ctx->return_flags = capi_attach.hdr.return_flags; + p_ctx->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Lorge trasnfer handled exclusive + p_ctx->blk_len = BLOCK_SIZE/p_ctx->block_size; + p_ctx->adap_fd = capi_attach.adap_fd; + + p_ctx->ctx_hndl = 
CTX_HNDLR_MASK & p_ctx->context_id; + p_ctx->unused_lba = p_ctx->last_phys_lba +1; + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx->adap_fd,p_ctx->return_flags); + debug("%d:------------- End FIRST DK_CAPI_ATTACH -------------\n", pid); + + ctx_init_reuse(p_ctx); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + + // will create a LUN if required + p_ctx->lun_size=p_ctx->chunk_size; + if ( LUN_VIRTUAL == lun_type || 11 == lun_type ) + { + io=1; + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(errno, "ioctl_dk_capi_uvirtual failed"); + } + else if ( LUN_DIRECT == lun_type || 12 == lun_type ) + { + io=1; + rc=ioctl_dk_capi_udirect(p_ctx); + CHECK_RC(errno, "ioctl_dk_capi_udirect failed"); + } + else if ( 3 == lun_type ) + { + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(errno, "ioctl_dk_capi_uvirtual failed"); + rc=ioctl_dk_capi_release(p_ctx); + CHECK_RC(errno, "ioctl_dk_capi_release failed"); + } + else if ( 10 == lun_type ) + { + io=0; + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(errno, "ioctl_dk_capi_uvirtual failed"); + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) +#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + 
CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + + } + + debug("%d:----------- Start DK_CAPI_ATTACH with REUSE flag ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64"\n", + pid,p_ctx_1->dev,p_ctx_1->fd,p_ctx_1->version,p_ctx_1->flags); + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); +#ifdef _AIX + p_ctx_1->context_id=p_ctx->context_id; + capi_attach.ctx_token=p_ctx->context_id; + capi_attach.flags = DK_AF_REUSE_CTX; + rc = ioctl(p_ctx_1->fd, DK_CAPI_ATTACH, &capi_attach); +#else + p_ctx_1->context_id=p_ctx->context_id; + capi_attach.context_id=p_ctx->context_id; + capi_attach.hdr.flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; + rc = ioctl(p_ctx_1->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:... DK_CAPI_ATTACH called with REUSE flag...\n", pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_ATTACH with REUSE flag failed"); + } + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx_1->adap_fd,p_ctx_1->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH with REUSE flag -------------\n", pid); + + + p_ctx_1->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + p_ctx_1->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + p_ctx_1->context_id = capi_attach.ctx_token; + p_ctx_1->last_phys_lba = capi_attach.last_phys_lba; + p_ctx_1->chunk_size = capi_attach.chunk_size; + //get max_xfer + rc = ioctl(p_ctx_1->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, 
"Iocinfo failed with errno\n"); + } + //p_ctx_1->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + p_ctx_1->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + p_ctx_1->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + p_ctx_1->return_flags = capi_attach.return_flags; + p_ctx_1->block_size = capi_attach.block_size; +#else + p_ctx_1->p_host_map = mmap(NULL,p_ctx_1->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, //p_ctx->adap_fd,0); + capi_attach.adap_fd,0); + if (p_ctx_1->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx_1->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + p_ctx_1->context_id = capi_attach.context_id; + p_ctx_1->last_phys_lba = capi_attach.last_lba; + p_ctx_1->max_xfer = capi_attach.max_xfer; + p_ctx_1->chunk_size = NUM_BLOCKS; + p_ctx_1->return_flags = capi_attach.hdr.return_flags; + p_ctx_1->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Lorge trasnfer handled exclusive + p_ctx_1->blk_len = BLOCK_SIZE/p_ctx_1->block_size; + p_ctx_1->adap_fd = p_ctx->adap_fd;//capi_attach.adap_fd; + + p_ctx_1->ctx_hndl = CTX_HNDLR_MASK & p_ctx_1->context_id; + p_ctx_1->unused_lba = p_ctx_1->last_phys_lba +1; + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx_1->adap_fd,p_ctx_1->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH -------------\n", pid); + if ( 11 == lun_type || 12 == lun_type ) + { + io=0; + rc = do_eeh(p_ctx); + CHECK_RC(rc, "Failed to do EEH injection"); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifdef _AIX + if (p_ctx->return_flags != DK_RF_REATTACHED) 
+#else + if (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET != p_ctx->return_flags) +#endif + + debug("-----------ctx_reinit called -------------------------\n"); + + ctx_reinit(p_ctx); + +#ifndef _AIX + + p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE; + // if dummy_sense_flag is set; + // a dummy sense data will be copied into ioctl input + p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set; + +#endif + + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + + } + // doing I/O + if ( io == 1 ) + rc = do_io(p_ctx, 0x1000); + pthread_cancel(thread); + return rc; +} +void handleSignal(int sigNo) +{ + debug("\n%d:****** Signal Handler called ******\n",pid); + + switch (sigNo) + { + case SIGSEGV : + debug("\n%d:------- caught SIGSEGV --------\n\n",pid); + siglongjmp(sBuf, 1); + break; + case SIGABRT: + debug("\n%d:------- caught SIGABRT --------\n\n",pid); + siglongjmp(sBuf, 1); + break; + default : + debug("\n\n%d:FAIL------- No a expected Signal -----\n",pid); + g_error=255; + break; + } + + return ; +} +void sig_handle(int sig) +{ + printf("-------- Sig %d recieved ............\n", sig); + printf("exiting from process %d gracefully... 
\n",getpid()); + exit(10); +} + +int do_write_or_read(struct ctx *p_ctx, __u64 stride, int do_W_R) +{ + __u64 st_lba=p_ctx->st_lba; + int i; + int rc = 0; + + if ( do_W_R == 1 ) + { + debug("%d: WRITE Ops\n", pid); + } + else + { + debug("%d: READ & DATA COMPARE Ops\n", pid); + } + + if ( do_W_R == 1 ) + { + rc = send_write(p_ctx, st_lba, stride, pid); + CHECK_RC(rc, "send_write failed"); + force_dump=1; + for (i=0;iwbuf[i][0],0x20,"Write buf"); + force_dump=0; + + } + else + { + rc = send_read(p_ctx, st_lba, stride); + CHECK_RC(rc, "send_read failed"); + force_dump=1; + for (i=0;irbuf[i][0],0x20,"Read buf"); + + rc = rw_cmp_buf(p_ctx, st_lba); + if (rc) + { + fprintf(stderr,"buf cmp failed for lba 0X%"PRIX64",rc =%d\n", + st_lba,rc); + } + } + + force_dump=0; + return rc; +} + +int allocate_buf(struct rwlargebuf *rwbuf, __u64 size) +{ + int i; + for (i=0; i wbuf[i] = (char *)malloc(size); + rwbuf->rbuf[i] = (char *)malloc(size); + if ( rwbuf->wbuf[i] == NULL || rwbuf->rbuf[i] == NULL) + return -1; + //strncpy(rwbuf->rbuf[i], rwbuf->wbuf[i], size); + } + return 0; +} + +void deallocate_buf(struct rwlargebuf *rwbuf) +{ + int i; + for (i=0; i wbuf[i]); + free(rwbuf->rbuf[i]); + } +} + +int ioctl_dk_capi_attach_reuse_all_disk( ) +{ + int rc=0,i; + struct ctx *new_ctx[10],myctx[10] ; + int cfdisk; + struct flash_disk disks[MAX_FDISK]; // flash disk struct + + cfdisk = get_flash_disks(disks, FDISKS_SAME_ADPTR ); + pid = getpid(); + +#ifdef _AIX + struct dk_capi_attach capi_attach; + struct devinfo iocinfo; + memset(&iocinfo, 0, sizeof(iocinfo)); +#else + struct dk_cxlflash_attach capi_attach; +#endif + memset(&capi_attach, 0, sizeof(capi_attach)); + for ( i=0; idev,disks[i].dev); + new_ctx[i]->fd = open_dev(disks[i].dev, O_RDWR); //Hoping to open second disk + if (new_ctx[i]->fd < 0) + { + fprintf(stderr, "open() failed: device %s, errno %d\n", disks[i].dev, errno); + g_error = -1; + } + + if ( i == 0 ) + { + //thread to handle AFU interrupt & events + 
//pthread_create(&thread, NULL, ctx_rrq_rx, new_ctx[i]); +#ifdef _AIX + rc = ioctl_dk_capi_query_path(new_ctx[i]); + CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); +#endif + } + +#ifdef _AIX + new_ctx[i]->work.num_interrupts = 5; +#else + new_ctx[i]->work.num_interrupts = 4; + //TBD for linux +#endif + + if ( i == 0 ) + { +#ifdef _AIX + capi_attach.version = new_ctx[i]->version; + capi_attach.flags = DK_AF_ASSIGN_AFU; + capi_attach.devno = new_ctx[i]->devno; + debug("%d:devno=0X%"PRIX64"\n",pid,new_ctx[i]->devno); +#else + capi_attach.hdr.version = new_ctx[i]->version; + capi_attach.hdr.flags = DK_AF_ASSIGN_AFU; +#endif + capi_attach.num_interrupts = new_ctx[i]->work.num_interrupts; + +#ifdef _AIX + rc = ioctl(new_ctx[i]->fd, DK_CAPI_ATTACH, &capi_attach); +#else + rc = ioctl(new_ctx[i]->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:...First DK_CAPI_ATTACH called ...\n", pid); + if (rc) + { + CHECK_RC(errno, "FIRST DK_CAPI_ATTACH failed"); + } + } + else + { +#ifdef _AIX + new_ctx[i]->context_id=new_ctx[0]->context_id; + new_ctx[i]->flags = capi_attach.flags = DK_AF_REUSE_CTX; + debug("%d:----------- Start DK_CAPI_ATTACH with REUSE flag ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64"\n", + pid,new_ctx[i]->dev,new_ctx[i]->fd,new_ctx[i]->version,new_ctx[i]->flags); + rc = ioctl(new_ctx[i]->fd, DK_CAPI_ATTACH, &capi_attach); +#else + new_ctx[i]->context_id=new_ctx[0]->context_id; + capi_attach.hdr.flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; + rc = ioctl(new_ctx[i]->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:... 
DK_CAPI_ATTACH called with REUSE flag...\n", pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_ATTACH with REUSE flag failed"); + } + } + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,new_ctx[i]->p_host_map,new_ctx[i]->mmio_size,new_ctx[i]->context_id, + new_ctx[i]->last_phys_lba,new_ctx[i]->block_size,new_ctx[i]->chunk_size,new_ctx[i]->max_xfer); + + debug("%d:------------- End DK_CAPI_ATTACH with REUSE flag -------------\n", pid); + + + new_ctx[i]->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + new_ctx[i]->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + new_ctx[i]->context_id = capi_attach.ctx_token; + new_ctx[i]->last_phys_lba = capi_attach.last_phys_lba; + new_ctx[i]->chunk_size = capi_attach.chunk_size; + //get max_xfer + rc = ioctl(new_ctx[i]->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, "Iocinfo failed with errno\n"); + } + //new_ctx[i]->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + new_ctx[i]->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + new_ctx[i]->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + new_ctx[i]->return_flags = capi_attach.return_flags; + new_ctx[i]->block_size = capi_attach.block_size; +#else + new_ctx[i]->p_host_map = mmap(NULL,new_ctx[i]->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, + capi_attach.adap_fd,0); + if (new_ctx[i]->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + new_ctx[i]->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + new_ctx[i]->context_id = capi_attach.context_id; + new_ctx[i]->last_phys_lba = capi_attach.last_lba; + new_ctx[i]->max_xfer = capi_attach.max_xfer; + new_ctx[i]->chunk_size = NUM_BLOCKS; + new_ctx[i]->return_flags = capi_attach.hdr.return_flags; + new_ctx[i]->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Lorge trasnfer 
handled exclusive + new_ctx[i]->blk_len = BLOCK_SIZE/new_ctx[i]->block_size; + new_ctx[i]->adap_fd = new_ctx[i]->adap_fd;//capi_attach.adap_fd; + + new_ctx[i]->ctx_hndl = CTX_HNDLR_MASK & new_ctx[i]->context_id; + new_ctx[i]->unused_lba = new_ctx[i]->last_phys_lba +1; + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,new_ctx[i]->p_host_map,new_ctx[i]->mmio_size,new_ctx[i]->context_id, + new_ctx[i]->last_phys_lba,new_ctx[i]->block_size,new_ctx[i]->chunk_size,new_ctx[i]->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,new_ctx[i]->adap_fd,new_ctx[i]->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH -------------\n", pid); + + } + //ctx_close(new_ctx[0]); crash @ fp_ioctl+000080c + ioctl_dk_capi_detach(new_ctx[0]); + for ( i=0;idev,new_ctx[i]->fd); + close(new_ctx[i]->fd); + // printf("closing done %s and fd=%d\n",new_ctx[i]->dev,new_ctx[i]->fd); + } + return rc; +} + +int ioctl_dk_capi_attach_reuse_loop(struct ctx *p_ctx,struct ctx *p_ctx_1 ) +{ + int rc,i; + struct ctx u_ctx,u_ctx_1; + struct ctx *temp=&u_ctx,*temp1=&u_ctx_1 ; + +#ifdef _AIX + struct dk_capi_attach capi_attach; + struct devinfo iocinfo; + memset(&iocinfo, 0, sizeof(iocinfo)); +#else + struct dk_cxlflash_attach capi_attach; +#endif + memset(&capi_attach, 0, sizeof(capi_attach)); + // taking temp + temp->fd=p_ctx->fd; + strcpy(temp->dev,p_ctx->dev); + temp1->fd=p_ctx_1->fd; + strcpy(temp1->dev,p_ctx_1->dev); + p_ctx->flags = DK_AF_ASSIGN_AFU; + debug("%d:----------- Start First DK_CAPI_ATTACH ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64"\n", + pid,p_ctx->dev,p_ctx->fd,p_ctx->version,p_ctx->flags); + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + 
p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer); + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx->adap_fd,p_ctx->return_flags); +#ifdef _AIX + capi_attach.version = p_ctx->version; + capi_attach.flags = p_ctx->flags; + capi_attach.devno = p_ctx->devno; + debug("%d:devno=0X%"PRIX64"\n",pid,p_ctx->devno); +#else + capi_attach.hdr.version = p_ctx->version; + capi_attach.hdr.flags = p_ctx->flags; +#endif + capi_attach.num_interrupts = p_ctx->work.num_interrupts; + +#ifdef _AIX + rc = ioctl(p_ctx->fd, DK_CAPI_ATTACH, &capi_attach); +#else + rc = ioctl(p_ctx->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:...First DK_CAPI_ATTACH called ...\n", pid); + if (rc) + { + CHECK_RC(errno, "FIRST DK_CAPI_ATTACH failed"); + } + + p_ctx->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + p_ctx->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + p_ctx->context_id = capi_attach.ctx_token; + p_ctx->last_phys_lba = capi_attach.last_phys_lba; + p_ctx->chunk_size = capi_attach.chunk_size; + //get max_xfer + rc = ioctl(p_ctx->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, "Iocinfo failed with errno\n"); + } + //p_ctx->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + p_ctx->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + p_ctx->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + p_ctx->return_flags = capi_attach.return_flags; + p_ctx->block_size = capi_attach.block_size; +#else + p_ctx->p_host_map = mmap(NULL,p_ctx->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, + capi_attach.adap_fd,0); + if (p_ctx->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + p_ctx->context_id = capi_attach.context_id; + p_ctx->last_phys_lba = capi_attach.last_lba; + p_ctx->max_xfer = capi_attach.max_xfer; + p_ctx->chunk_size = NUM_BLOCKS; + p_ctx->return_flags = 
capi_attach.hdr.return_flags; + p_ctx->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Lorge trasnfer handled exclusive + p_ctx->blk_len = BLOCK_SIZE/p_ctx->block_size; + p_ctx->adap_fd = capi_attach.adap_fd; + + p_ctx->ctx_hndl = CTX_HNDLR_MASK & p_ctx->context_id; + p_ctx->unused_lba = p_ctx->last_phys_lba +1; + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx->p_host_map,p_ctx->mmio_size,p_ctx->context_id, + p_ctx->last_phys_lba,p_ctx->block_size,p_ctx->chunk_size,p_ctx->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx->adap_fd,p_ctx->return_flags); + debug("%d:------------- End FIRST DK_CAPI_ATTACH -------------\n", pid); + + + for ( i = 1; i < 10 ; i++ ) + { + debug("************************************%d**************************\n",i); + // detach the REUSED CONTEXT + if ( i%2==0 ) + { + strcpy(p_ctx->dev,temp->dev); + p_ctx->fd=temp->fd; + rc = ioctl_dk_capi_detach(p_ctx); + CHECK_RC(rc,"dk_capi_detach on reuse flag failed\n"); + if ( rc == 0 ) + { + // try creating VLUN on another fd + p_ctx_1->flags=DK_UVF_ALL_PATHS; + p_ctx_1->lun_size=p_ctx_1->chunk_size; + rc=ioctl_dk_capi_uvirtual(p_ctx_1); + CHECK_RC(rc,"dk_capi_detach on reuse flag failed\n"); + //do_io(); + rc=ioctl_dk_capi_release(p_ctx_1); + CHECK_RC(rc,"dk_capi_release on reuse ctx failed\n"); + } + strcpy(p_ctx_1->dev,temp->dev); + p_ctx_1->fd=temp->fd; + } + if ( i>2 && i%2==1 ) + { + strcpy(p_ctx_1->dev,temp1->dev); + p_ctx_1->fd=temp1->fd; + rc = ioctl_dk_capi_detach(p_ctx_1); + CHECK_RC(rc,"dk_capi_detach on reuse flag failed\n"); + if ( rc == 0 ) + { + p_ctx->flags=DK_UVF_ALL_PATHS; + // try creating VLUN on another fd + p_ctx->lun_size=p_ctx->chunk_size; + rc=ioctl_dk_capi_uvirtual(p_ctx); + CHECK_RC(rc,"dk_capi_detach on reuse flag failed\n"); + //do_io(); + rc=ioctl_dk_capi_release(p_ctx); + 
CHECK_RC(rc,"dk_capi_release on reuse ctx failed\n"); + } + } + + + + + debug("%d:----------- Start DK_CAPI_ATTACH with REUSE flag ----------\n", pid); + debug("%d:dev=%s fd=%d Ver=%u flags=0X%"PRIX64"\n", + pid,p_ctx_1->dev,p_ctx_1->fd,p_ctx_1->version,p_ctx_1->flags); + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); + +#ifdef _AIX + p_ctx_1->devno=p_ctx->devno; + p_ctx_1->context_id=p_ctx->context_id; + capi_attach.flags = DK_AF_REUSE_CTX; + rc = ioctl(p_ctx_1->fd, DK_CAPI_ATTACH, &capi_attach); +#else + p_ctx_1->context_id=p_ctx->context_id; + capi_attach.hdr.flags = DK_CXLFLASH_ATTACH_REUSE_CONTEXT; + rc = ioctl(p_ctx_1->fd, DK_CXLFLASH_ATTACH, &capi_attach); +#endif + debug("%d:... DK_CAPI_ATTACH called with REUSE flag...\n", pid); + if (rc) + { + CHECK_RC(errno, "DK_CAPI_ATTACH with REUSE flag failed"); + } + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx_1->adap_fd,p_ctx_1->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH with REUSE flag -------------\n", pid); + + + p_ctx_1->mmio_size = capi_attach.mmio_size; +#ifdef _AIX + p_ctx_1->p_host_map =(volatile struct sisl_host_map *)capi_attach.mmio_start; + p_ctx_1->context_id = capi_attach.ctx_token; + p_ctx_1->last_phys_lba = capi_attach.last_phys_lba; + p_ctx_1->chunk_size = capi_attach.chunk_size; + //get max_xfer + rc = ioctl(p_ctx_1->fd, IOCINFO, &iocinfo); + if (rc) + { + CHECK_RC(errno, "Iocinfo failed with errno\n"); + } + 
//p_ctx_1->max_xfer = iocinfo.un.capi_io.max_transfer; + //TBD + p_ctx_1->max_xfer = iocinfo.un.scdk64.lo_max_request; + if (iocinfo.flags & DF_LGDSK) + { + p_ctx_1->max_xfer |= (uint64_t)(iocinfo.un.scdk64.hi_max_request << 32); + } + p_ctx_1->return_flags = capi_attach.return_flags; + p_ctx_1->block_size = capi_attach.block_size; +#else + p_ctx_1->p_host_map = mmap(NULL,p_ctx_1->mmio_size,PROT_READ|PROT_WRITE, MAP_SHARED, //p_ctx->adap_fd,0); + capi_attach.adap_fd,0); + if (p_ctx_1->p_host_map == MAP_FAILED) + { + fprintf(stderr,"map failed for 0x%lx mmio_size %d errno\n", + p_ctx_1->mmio_size, errno); + CHECK_RC(1,"mmap failed"); + } + p_ctx_1->context_id = capi_attach.context_id; + p_ctx_1->last_phys_lba = capi_attach.last_lba; + p_ctx_1->max_xfer = capi_attach.max_xfer; + p_ctx_1->chunk_size = NUM_BLOCKS; + p_ctx_1->return_flags = capi_attach.hdr.return_flags; + p_ctx_1->block_size = capi_attach.block_size; +#endif + + //default rwbuff handling 4K, Lorge trasnfer handled exclusive + p_ctx_1->blk_len = BLOCK_SIZE/p_ctx_1->block_size; + p_ctx_1->adap_fd = p_ctx->adap_fd;//capi_attach.adap_fd; + + p_ctx_1->ctx_hndl = CTX_HNDLR_MASK & p_ctx_1->context_id; + p_ctx_1->unused_lba = p_ctx_1->last_phys_lba +1; + + debug("%d:mmio=%p mmio_size=0X%"PRIX64" ctx_id=0X%"PRIX64" last_lba=0X%"PRIX64" block_size=0X%"PRIX64" chunk_size=0X%"PRIX64" max_xfer=0X%"PRIX64"\n", + pid,p_ctx_1->p_host_map,p_ctx_1->mmio_size,p_ctx_1->context_id, + p_ctx_1->last_phys_lba,p_ctx_1->block_size,p_ctx_1->chunk_size,p_ctx_1->max_xfer); + + debug("%d:adap_fd=%d return_flag=0X%"PRIX64"\n",pid,p_ctx_1->adap_fd,p_ctx_1->return_flags); + debug("%d:------------- End DK_CAPI_ATTACH -------------\n", pid); + + } + + return rc; +} + +int set_spio_mode() +{ + int count =0; + int rc=0; + int i=0; + FILE *fptr; + char *p_file; + char buf[64]; + char cmdstr[1024]; + +#ifndef _AIX + const char *cmd ="/opt/ibm/capikv/bin/cxlfstatus | grep legacy \ + | awk '{print $NF}' | sort -u > /tmp/idlist"; +#else + const 
char *cmd=NULL; + return 0; +#endif + + rc = system(cmd); + if (rc) return 0; + + debug("%d: List of all capi disks ids present in /tmp/idlist\n", pid); + + p_file="/tmp/idlist"; + fptr = fopen(p_file, "r"); + if (NULL == fptr) + { + fprintf(stderr,"%d: --------------------------------------------------------\n", pid); + fprintf(stderr,"%d: Error opening file %s\n", pid, p_file); + fprintf(stderr,"%d: --------------------------------------------------------\n", pid); + return 0; + } + while (fgets(buf,64, fptr) != NULL) + { + while (i < 64) + { + if (buf[i] =='\n') + { + buf[i]='\0'; + break; + } + i++; + } + + sprintf(cmdstr,"/opt/ibm/capikv/bin/cxlfsetlunmode %s 1",buf); + rc |= system(cmdstr); + + sprintf(cmdstr,"grep -q %s /opt/ibm/capikv/etc/sioluntable || echo %s >> /opt/ibm/capikv/etc/sioluntable", buf, buf); + rc |= system(cmdstr); + + rc |= system("/opt/ibm/capikv/bin/cxlfrefreshluns"); + + count++; + } + + if (rc) fprintf(stderr,"%d: Error while setting spio mode\n", pid); + + fclose(fptr); + return count; +} + +int keep_doing_eeh_test(struct ctx *p_ctx) +{ + int rc; + pthread_t ioThreadId; + pthread_t thread; + do_io_thread_arg_t ioThreadData; + do_io_thread_arg_t * p_ioThreadData=&ioThreadData; + p_ioThreadData->p_ctx=p_ctx; + p_ioThreadData->stride=1; + p_ioThreadData->loopCount=0x7fffffff; //unlimited + while (1) + { +#ifndef _AIX + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); +#endif + debug("%d: Things look good, start IO & wait next EEH event\n",pid); + rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData); + CHECK_RC(rc, "do_io_thread() pthread_create failed"); + //Trigger EEH + do_eeh(p_ctx); + rc = ioctl_dk_capi_recover_ctx(p_ctx); + CHECK_RC(rc, "ctx reattached failed"); +#ifndef _AIX + pthread_cancel(thread); +#endif +#ifdef _AIX + if (!(p_ctx->return_flags & DK_RF_REATTACHED)) + CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED"); + p_ctx->flags = DK_VF_HC_INQ; + p_ctx->hint = DK_HINT_SENSE; +#endif + 
ctx_reinit(p_ctx); + usleep(1000); +#ifdef _AIX + //better to use io(get failed with UA) rather than verify + //otherwise do call verify ioctl on all paths + rc=do_io(p_ctx,0x10000); + if (rc == 0x2) + { + fprintf(stderr,"%d:expected to fail for UA, dont worry....\n",pid); + } +#endif + rc = ioctl_dk_capi_verify(p_ctx); + CHECK_RC(rc, "ioctl_dk_capi_verify failed"); + usleep(1000); + } + return 0; +} + +int ctx_init_reuse(struct ctx *p_ctx) +{ + //void *map; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int i; + pid_t mypid; + + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + for (i = 0; i < NUM_CMDS; i++) + { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + + p_ctx->toggle = 1; + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) + { + p_ctx->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } +#ifdef _AIX + write_64(&p_ctx->p_host_map->endian_ctrl,(__u64)SISL_ENDIAN_CTRL_BE); +#endif + + // set up RRQ in AFU + if (rrq_c_null) + { + write_64(&p_ctx->p_host_map->rrq_start, (__u64)NULL); + write_64(&p_ctx->p_host_map->rrq_end, (__u64)NULL); + } + else + { + write_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + } + + mypid = getpid(); + debug("%d: ctx_init() success: p_host_map %p, ctx_hndl %d, rrq_start %p\n", + mypid, p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + return 0; +} + +void displayBuildinfo() +{ + int i=0; + static int entrycounter=0; + FILE *fptr; + char buf[1024]; + char cmd[1024]; + + if ( entrycounter > 0 ) return; + 
entrycounter++; + + printf("Kernel Level:\n"); + printf("-------------------------------------------\n"); + fflush(stdout); + system("uname -a"); + system("dpkg -l | grep -w `uname -a | awk '{print $3}'|cut -f2 -d-` | grep -i linux"); + printf("\ncat /opt/ibm/capikv/version.txt:\n"); + printf("-------------------------------------------\n"); + fflush(stdout); + system("cat /opt/ibm/capikv/version.txt"); + printf("\nAFU level:\n"); + printf("-------------------------------------------\n"); + fflush(stdout); + system("ls /dev/cxl/afu[0-9]*.0m > /tmp/afuF"); + + fptr = fopen("/tmp/afuF", "r"); + while (fgets(buf,1024, fptr) != NULL) + { + i=0; + while (i < 1024) + { + if (buf[i] =='\n') + { + buf[i]='\0'; + break; + } + i++; + } + + sprintf(cmd, "/opt/ibm/capikv/afu/cxl_afu_dump %s | grep Version", buf); + system(cmd); + } + fclose(fptr); + printf("-------------------------------------------\n"); + fflush(stdout); + system("update_flash -d"); + printf("-------------------------------------------\n\n"); + fflush(stdout); +} + +int allDiskToArray( char ** allDiskArrayP, int * diskCountP) +{ + + int i=0; + FILE *fptr; + char *p_file="/tmp/flist"; + char buf[MAXBUFF]; + + fptr = fopen(p_file, "r"); + if (NULL == fptr) + { + fprintf(stderr,"%d: --------------------------------------------------------\n", pid); + fprintf(stderr,"%d: Error opening file %s\n", pid, p_file); + + return -1; + } + + *diskCountP = 0; + while (fgets(buf,MAXBUFF, fptr) != NULL) + { + i=0; + while (i < MAXBUFF) + { + if (buf[i] =='\n') + { + buf[i]='\0'; + break; + } + i++; + } + allDiskArrayP[*diskCountP] = malloc(strlen(buf)+1); + + strcpy(allDiskArrayP[*diskCountP],buf); + + *diskCountP=*diskCountP+1; + } + + return 0; +} + +int diskInSameAdapater( char * p_file ) +{ + + int rc =0; +#ifndef _AIX + int iCount =0; + int iTer =0; + + FILE *fileP; + + char tmpBuff[MAXBUFF]; + char npBuff[MAXNP][MAXBUFF]; + char blockCheckP[MAXBUFF]; + char * allDiskArray [MAXBUFF]; + char 
sameAdapDisk[MAXNP][MAXBUFF]; + + int diskCount = 0; + int smCnt = 0; + int allCnt = 0; + + const char *initCmdP = "lspci -v | grep \"Processing accelerators\" | awk '{print $1}' > /tmp/trashFile"; + + rc = system(initCmdP); + if ( rc != 0) + { + fprintf(stderr,"%d: Failed in lspci \n",pid); + goto xerror ; + } + + rc = allDiskToArray(allDiskArray, &diskCount ); + + if ( rc != 0) + { + fprintf(stderr,"%d: Failed in allDiskToArray \n",pid); + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + while (fgets(tmpBuff,MAXBUFF, fileP) != NULL && smCnt < 2 ) + { + iTer=0; + while (iTer < MAXBUFF) + { + if (tmpBuff[iTer] =='\n') + { + tmpBuff[iTer]='\0'; + } + iTer++; + } + + // reset the smCnt value + + smCnt = 0; + + // only supporting for scsi_generic device now + + sprintf(blockCheckP,"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/ | grep -w \"scsi_generic\" >/dev/null 2>&1",tmpBuff); + rc = system(blockCheckP); + + if ( rc == 0 ) + { + + for ( allCnt=0; allCnt < diskCount ;allCnt++) + { + + sprintf(npBuff[iCount],"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/scsi_generic | grep %s >/dev/null 2>&1",tmpBuff,allDiskArray[allCnt]); + + rc = system(npBuff[iCount]); + if ( rc == 0 ) + { + strcpy(sameAdapDisk[smCnt], allDiskArray[allCnt] ); + smCnt++; + } + } + + iCount++; + } + } + + if ( fclose(fileP) == EOF ) + { + fprintf(stderr,"%d: Error closin the file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + if ( smCnt < 2 ) + { + rc = -1; + goto xerror; + } + + for ( iCount = 0; iCount < smCnt ; iCount++ ) + { + sprintf(tmpBuff,"echo %s >> %s", sameAdapDisk[iCount],p_file); + rc = system(tmpBuff); + if ( rc != 0 ) + { + rc = EINVAL; + goto xerror; + } + + } + +xerror: +#endif + return rc; + +} + +int diskInDiffAdapater( char * p_file ) 
+{ + + int rc =0; +#ifndef _AIX + int iCount =0; + int iTer =0; + + FILE *fileP; + + char tmpBuff[MAXBUFF]; + char npBuff[MAXNP][MAXBUFF]; + char blockCheckP[MAXBUFF]; + char * allDiskArray [MAXBUFF]; + char diffAdapDisk[MAXNP][MAXBUFF]; + + int diskCount = 0; + int diffCnt = 0; + int allCnt = 0; + + const char *initCmdP = "lspci -v | grep \"Processing accelerators\" | awk '{print $1}' > /tmp/trashFile"; + + rc = system(initCmdP); + if ( rc != 0) + { + fprintf(stderr,"%d: Failed in lspci \n",pid); + goto xerror ; + } + + rc = allDiskToArray(allDiskArray, &diskCount ); + + if ( rc != 0) + { + fprintf(stderr,"%d: Failed in allDiskToArray \n",pid); + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + fileP = fopen("/tmp/trashFile", "r"); + + if (NULL == fileP) + { + fprintf(stderr,"%d: Error opening file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror ; + } + + while (fgets(tmpBuff,MAXBUFF, fileP) != NULL && diffCnt < 2 ) + { + iTer=0; + while (iTer < MAXBUFF) + { + if (tmpBuff[iTer] =='\n') + { + tmpBuff[iTer]='\0'; + } + iTer++; + } + + // only supporting for scsi_generic device now + sprintf(blockCheckP,"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/ | grep -w \"scsi_generic\" >/dev/null 2>&1",tmpBuff); + rc = system(blockCheckP); + if ( rc == 0 ) + { + for ( allCnt=0; allCnt < diskCount ;allCnt++) + { + sprintf(npBuff[iCount],"ls -l /sys/bus/pci/devices/" + "%s/pci***:**/***:**:**.*/host*/" + "target*:*:*/*:*:*:*/scsi_generic | grep %s >/dev/null 2>&1",tmpBuff,allDiskArray[allCnt]); + + rc = system(npBuff[iCount]); + if ( rc == 0 ) + { + strcpy(diffAdapDisk[diffCnt], allDiskArray[allCnt] ); + diffCnt++; + break; + } + } + + iCount++; + } + } + + if ( fclose(fileP) == EOF ) + { + fprintf(stderr,"%d: Error closin the file /tmp/trashFile \n", pid); + rc = EINVAL ; + goto xerror 
; + } + + if ( diffCnt < 2 ) + { + rc = -1; + goto xerror; + } + + for ( iCount = 0; iCount < diffCnt ; iCount++ ) + { + sprintf(tmpBuff,"echo %s >> %s", diffAdapDisk[iCount],p_file); + rc = system(tmpBuff); + if ( rc != 0 ) + { + rc = EINVAL; + goto xerror; + } + + } +xerror: +#endif + return rc; +} + diff --git a/src/cflash/test/fvt_cflash.C b/src/cflash/test/fvt_cflash.C new file mode 100644 index 00000000..fe42cfdc --- /dev/null +++ b/src/cflash/test/fvt_cflash.C @@ -0,0 +1,1303 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/fvt_cflash.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include "cflash_test.h" +} + +TEST(Cflash_FVT_Suite, E_MC_test_Ioctl_Invalid_Versions) +{ +#ifdef _AIX + ASSERT_EQ(22,mc_test_engine(TEST_IOCTL_INVALID_VERSIONS)); +#else + ASSERT_NE(0,mc_test_engine(TEST_IOCTL_INVALID_VERSIONS)); +#endif +} + +#ifdef _AIX +/*** DK_CAPI_QUERY_PATH ****/ +TEST(Cflash_FVT_Suite, G_MC_test_DCQP_Path_Count_Valid) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCQP_VALID_PATH_COUNT)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCQP_Path_Count_Invalid) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCQP_INVALID_PATH_COUNT)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCQP_Dual_Path_Count) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCQP_DUAL_PATH_COUNT)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCQP_Dk_Cpif_Reserved) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCQP_DK_CPIF_RESERVED)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCQP_Dk_Cpif_Failed) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCQP_DK_CPIF_FAILED)); +} +/*** DK_CAPI_ATTACH ****/ + +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Other_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_OTHER_DEVNO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Invalid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_INVALID_DEVNO)); +} +#endif + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Invalid_Intrpt_Num) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_INVALID_INTRPT_NUM)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Valid_Values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_VALID_VALUES)); +} +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Invalid_Flags) +{ +#ifdef _AIX + ASSERT_EQ(0,mc_test_engine(TEST_DCA_INVALID_FLAGS)); +#else + ASSERT_EQ(1,mc_test_engine(TEST_DCA_INVALID_FLAGS)); +#endif +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Twice) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_CALL_TWICE)); +} + 
+TEST(Cflash_FVT_Suite, G_MC_test_DCA_Reuse_Context_Flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_FLAG)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Reuse_Context_Flag_On_New_Plun_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_FLAG_NEW_PLUN_DISK)); +} + + +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Reuse_Context_Flag_On_New_Vlun_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_NEW_VLUN_DISK)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Reuse_Context_All_Capi_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_ALL_CAPI_DISK)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_Reuse_Context_Of_Detach_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_OF_DETACH_CTX)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Reuse_Context_Of_Relased_Ioctl) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_OF_RELASED_IOCTL)); +} + +/*** DK_CAPI_RECOVER_CTX ***/ +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_No_EEH) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_NO_EEH)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCRC_Reattach_Detach_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_DETACHED_CTX)); +} + +TEST(Cflash_FVT_Suite, G_TEST_MAX_VLUNS_ONE_CTX) +{ + ASSERT_EQ(0,mc_test_engine(G_TEST_MAX_VLUNS)); +} + +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCRC_Invalid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_INVALID_DEVNO)); +} +#endif + +TEST(Cflash_FVT_Suite, E_MC_test_DCRC_Invalid_Flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_INVALID_FLAG)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCRC_Invalid_Reason) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_INVALID_REASON)); +} + +/*** DK_CAPI_USER_DIRECT ***/ +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_Invalid_Devno_Valid_Contx_ID) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_INVALID_DEVNO_VALID_CTX)); +} +#endif +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_Invalid_Contx_Valid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_INVALID_CTX_VALID_DEVNO)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUD_Valid_Contx_Valid_Devno) +{ + 
ASSERT_EQ(0,mc_test_engine(TEST_DCUD_VALID_CTX_VALID_DEVNO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_Flags) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_FLAGS)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_Twice_Same_Contx_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_TWICE_SAME_CTX_DEVNO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_With_Vlun_created_On_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_VLUN_ALREADY_CREATED_SAME_DISK)); +} +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_With_Plun_created_On_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_PLUN_ALREADY_CREATED_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUD_With_Vlun_Created_destroyed_On_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_VLUN_CREATED_DESTROYED_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUD_In_Loop) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_IN_LOOP)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUD_Path_Id_Mask_Values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_PATH_ID_MASK_VALUES)); +} + +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_Invalid_Path_Id_Mask_Values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_BAD_PATH_ID_MASK_VALUES)); +} +/*** DK_CAPI_USER_VIRTUAL ***/ + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_Invalid_Devno_Valid_Contx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_INVALID_DEVNO_VALID_CTX)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_Invalid_Contx_Invalid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_INVALID_CTX_INVALID_DEVNO)); +} +#endif + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_Valid_Devno_Invalid_Contx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_VALID_DEVNO_INVALID_CTX)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_Vlun_Size_Zero) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_LUN_VLUN_SIZE_ZERO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_With_Plun_Already_Created_On_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_PLUN_ALREADY_CREATED_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_With_Vlun_Already_Created_On_Same_Disk) +{ + 
ASSERT_EQ(0,mc_test_engine(TEST_DCUV_VLUN_ALREADY_CREATED_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_With_No_Further_Vlun_Capacity) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_NO_FURTHER_VLUN_CAPACITY)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_With_Multiple_Vluns_On_Same_Capacity_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_MTPLE_VLUNS_SAME_CAPACITY_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_With_Twice_Same_Contx_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_TWICE_SAME_CTX_DEVNO)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_Vlun_Max_Size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_VLUN_MAX)); +} +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_Vlun_size_More_Than_Disk_Size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_VLUN_SIZE_MORE_THAN_DISK_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_With_Plun_Created_Destroyed_On_Same_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_PLUN_CREATED_DESTROYED_SAME_DISK)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_With_Contx_Of_PLUN) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_WITH_CTX_OF_PLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUD_With_Contx_Of_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUD_WITH_CTX_OF_VLUN)); +} + +#ifdef _AIX +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_Path_ID_Mask_Values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_PATH_ID_MASK_VALUES)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCUV_Invalid_Path_ID_Mask_Values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_INVALID_PATH_ID_MASK_VALUES)); +} +#endif + +TEST(Cflash_FVT_Suite, G_MC_test_DCUV_In_Loop) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCUV_IN_LOOP)); +} +/*** DK_CAPI_VLUN_RESIZE ***/ +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_Invalid_devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_INVALID_DEVNO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_Invalid_devno_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_INVALID_CTX_DEVNO)); +} +#endif + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_Invalid_Ctx) +{ + 
ASSERT_EQ(22,mc_test_engine(TEST_DCVR_INVALID_CTX)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_No_Vlun) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCVR_NO_VLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_On_Plun) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCVR_ON_PLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_Gt_Disk_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_GT_DISK_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_Not_factor_256MB) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_NOT_FCT_256MB)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_EQ_CT_Vlun_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_EQ_CT_VLUN_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_LT_CT_Vlun_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_LT_CT_VLUN_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_GT_CT_Vlun_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_GT_CT_VLUN_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_EG_Disk_size_None_Vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_EQ_DISK_SIZE_NONE_VLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCVR_EQ_Disk_size_other_Vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_EQ_DISK_SIZE_OTHER_VLUN)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_INC_256MB) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_INC_256MB)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_DEC_256MB) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_DEC_256MB)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_GT_CT_Vlun_LT_256MB) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_GT_CT_VLUN_LT_256MB)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_LT_CT_Vlun_LT_256MB) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_LT_CT_VLUN_LT_256MB)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_Inc_Dec_Loop) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCVR_INC_DEC_LOOP)); +} +TEST(Cflash_FVT_Suite, G_MC_test_DCVR_zero_vlun_size) +{ + ASSERT_EQ(0,mc_test_engine(G_MC_test_DCVR_ZERO_Vlun_size)); +} + +/*** DK_CAPI_RELEASE ***/ +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCR_Invalid_devno) +{ + 
ASSERT_EQ(22,mc_test_engine(TEST_DCR_INVALID_DEVNO)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCR_Invalid_devno_Ctx) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_INVALID_DEVNO_CTX)); +} +#endif +TEST(Cflash_FVT_Suite, E_MC_test_DCR_Invalid_Ctx) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_INVALID_CTX)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCR_No_Vlun) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_NO_VLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCR_Plun_again) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_PLUN_AGIAN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCR_Vlun_again) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_VLUN_AGIAN)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCR_Multiple_Vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCR_MULTP_VLUN)); +} +TEST(Cflash_FVT_Suite, E_MC_test_DCR_inv_res) +{ + ASSERT_EQ(22,mc_test_engine(TEST_DCR_VLUN_INV_REL)); +} + +/*** DK_CAPI_DETACH ***/ +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCD_Invalid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCD_INVALID_DEVNO)); +} +#endif +TEST(Cflash_FVT_Suite, E_MC_test_DCD_Invalid_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCD_INVALID_CTX)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCD_Twice_on_Plun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCD_TWICE_ON_PLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCD_Twice_on_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCD_TWICE_ON_VLUN)); +} + +/*** DK_CAPI_VERIFY ***/ +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_MC_test_DCV_Invalid_Devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_INVALID_DEVNO)); +} +#endif + +TEST(Cflash_FVT_Suite, E_MC_test_DCV_Invalid_flags) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_INVALID_FLAGS)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCV_Invalid_Res_handle) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_INVALID_RES_HANDLE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_No_error) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_NO_ERR)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Vlun_Rst_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_VLUN_RST_FlAG)); +} + 
+TEST(Cflash_FVT_Suite, G_MC_test_DCV_Vlun_Tur_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_VLUN_TUR_FLAG)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Vlun_Inq_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_VLUN_INQ_FLAG)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Vlun_Hint_Sense) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_VLUN_HINT_SENSE)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Plun_Rst_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_PLUN_RST_FlAG)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Plun_Tur_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_PLUN_TUR_FLAG)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Plun_Inq_flag) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_PLUN_INQ_FLAG)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Plun_Hint_Sense) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_PLUN_HINT_SENSE)); +} + +TEST(Cflash_FVT_Suite, G_spio_single_0_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_0_VLUN)); +} +TEST(Cflash_FVT_Suite, G_spio_single_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_VLUN)); +} +TEST(Cflash_FVT_Suite, G_spio_single_plun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_A_PLUN)); +} + +TEST(Cflash_FVT_Suite, E_spio_no_res_afurc) +{ +#ifdef _AIX + ASSERT_EQ(0x1,mc_test_engine(TEST_SPIO_NORES_AFURC)); +#else + ASSERT_EQ(0x5,mc_test_engine(TEST_SPIO_NORES_AFURC)); +#endif +} +TEST(Cflash_FVT_Suite, G_sp_io_all_pluns) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_ALL_PLUN)); +} + +TEST(Cflash_FVT_Suite, G_sp_io_all_vluns) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_ALL_VLUN)); +} + +TEST(Cflash_FVT_Suite, G_sp_io_vlun_plun_altr) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_VLUN_PLUN)); +} + +TEST(Cflash_FVT_Suite, G_sp_io_size_regress) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_SIZE_REGRESS)); +} + +TEST(Cflash_FVT_Suite, G_mc_ctx_crt_dstr_rgrs) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_CTX_CRT_DSTR)); +} +TEST(Cflash_FVT_Suite, G_mc_ctx_crt_dstr_rgrs_io) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_CTX_CRT_DSTR_IO)); +} +TEST(Cflash_FVT_Suite, 
G_mc_regress_resource) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_RESOURCE)); +} + +TEST(Cflash_FVT_Suite, G_mc_two_ctx_rd_wrthrd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_TWO_CTX_RD_WRTHRD)); +} + +TEST(Cflash_FVT_Suite, G_mc_two_ctx_rdwr_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_TWO_CTX_RDWR_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_mc_wr_thrd_rd_thrd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_ONE_CTX_TWO_THRD)); +} + +TEST(Cflash_FVT_Suite, G_mc_rdwr_thrd_size_thrd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_ONE_CTX_RD_WRSIZE)); +} + +TEST(Cflash_FVT_Suite, G_max_res_handl_one_ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_RES_HNDL)); +} + +TEST(Cflash_FVT_Suite, G_one_unit_chunk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_ONE_UNIT_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_max_ctx_res_unit_lun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MAX_CTX_RES_UNIT)); +} + +TEST(Cflash_FVT_Suite, G_max_ctx_res_lun_capacity) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MAX_CTX_RES_LUN_CAP)); +} + +TEST(Cflash_FVT_Suite, G_mc_lun_cap_incremnt) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_SIZE)); +} + +TEST(Cflash_FVT_Suite, G_spio_vlun_attach_detach) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_SPIO_VLUN_ATCH_DTCH)); +} + +TEST(Cflash_FVT_Suite, G_spio_plun_attach_detach) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_SPIO_PLUN_ATCH_DTCH)); +} + +//---------------------- + +TEST(Cflash_FVT_Suite, E_Test_mc_afu_rc_invalid_opcode) +{ + ASSERT_EQ(0x21, test_mc_invalid_ioarcb(1)); +} + +TEST(Cflash_FVT_Suite, E_Test_mc_ioarcb_ea_null) +{ +#ifdef _AIX + ASSERT_EQ(7, test_mc_invalid_ioarcb(2)); +#else + int res = test_mc_invalid_ioarcb(2); + ASSERT_EQ(0xff,res); +#endif +} + +TEST(Cflash_FVT_Suite, E_Test_mc_ioarcb_invalid_flags) +{ + ASSERT_EQ(0x58, test_mc_invalid_ioarcb(3)); +} + +TEST(Cflash_FVT_Suite, E_Test_mc_ioarcb_invalid_lun_fc_port) +{ + int res = test_mc_invalid_ioarcb(4); + if (res == 0x21 || res == 0x5 || res == 0x3) + res =1; + ASSERT_EQ(1, res); +} + +TEST(Cflash_FVT_Suite, 
E_Test_mc_afu_rc_rht_invalid) +{ + ASSERT_EQ(0x5, test_mc_invalid_ioarcb(5)); +} + +TEST(Cflash_FVT_Suite, E_Test_mc_afu_rc_rht_out_of_bounds) +{ + int res = test_mc_invalid_ioarcb(6); + if (res == 0x3 || res == 0x5) + res=1; + ASSERT_EQ(1, res); +} + +TEST(Cflash_FVT_Suite, E_Test_mc_error_page_fault) +{ +#ifdef _AIX + ASSERT_EQ(7, test_mc_invalid_ioarcb(7)); +#else + ASSERT_EQ(0xff, test_mc_invalid_ioarcb(7)); +#endif + +} +TEST(Cflash_FVT_Suite, E_Test_mc_ioarcb_invalid_ctx_id) +{ + ASSERT_EQ(0x21, test_mc_invalid_ioarcb(8)); +} + +TEST(Cflash_FVT_Suite, E_Test_rc_flags_underrun) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(9)); +} + +TEST(Cflash_FVT_Suite, E_Test_rc_flags_overrun) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(10)); +} + +TEST(Cflash_FVT_Suite, E_Test_scsi_rc_check) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(11)); +} +TEST(Cflash_FVT_Suite, E_Test_ioarcb_d_len_0) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(12)); +} +TEST(Cflash_FVT_Suite, E_Test_ioarcb_blk_len_0) +{ + ASSERT_EQ(0x0, test_mc_invalid_ioarcb(13)); +} + +TEST(Cflash_FVT_Suite, E_Test_ioarcb_vlba_out_range) +{ + ASSERT_EQ(0x13, test_mc_invalid_ioarcb(14)); +} + +TEST(Cflash_FVT_Suite, E_Test_ioarcb_plba_out_range) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(15)); +} + +TEST(Cflash_FVT_Suite, E_bad_ioarcb_address) +{ + ASSERT_EQ(100, test_mc_invalid_ioarcb(100)); +} + +TEST(Cflash_FVT_Suite, E_bad_ioasa_address) +{ +#ifdef _AIX + ASSERT_EQ(255, test_mc_invalid_ioarcb(101)); +#else + ASSERT_EQ(10, test_mc_invalid_ioarcb(101)); +#endif +} + +TEST(Cflash_FVT_Suite, E_cmd_room_violation) +{ + int rc = test_mc_invalid_ioarcb(102); +#ifdef _AIX + if(NUM_CMDS <= 4) + ASSERT_EQ(255,rc); + else + ASSERT_EQ(102,rc); +#else + if(NUM_CMDS <= 16) + ASSERT_EQ(0,rc); + else + ASSERT_EQ(102,rc); +#endif +} + +TEST(Cflash_FVT_Suite, E_bad_hrrq_address) +{ + ASSERT_EQ(103, test_mc_invalid_ioarcb(103)); +} + +TEST(Cflash_FVT_Suite, E_Test_ioarcb_intr_prcs_ctx) +{ + ASSERT_EQ(0x21, test_mc_inter_prcs_ctx(1)); 
+} +TEST(Cflash_FVT_Suite, E_Test_ioarcb_intr_prcs_ctx_rsh_clsd) +{ + ASSERT_EQ(0x21, test_mc_inter_prcs_ctx(2)); +} +TEST(Cflash_FVT_Suite, G_Test_mc_ioarcb_ea_alignmnt_16) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_IOARCB_EA_ALGNMNT_16)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_ioarcb_ea_alignmnt_128) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_IOARCB_EA_ALGNMNT_128)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_ioarcb_ea_mix_alignmnt) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_IOARCB_EA_INVLD_ALGNMNT)); +} +TEST(Cflash_FVT_Suite, E_Test_scsi_cmds) +{ + ASSERT_EQ(0x21,mc_test_engine(E_test_SCSI_CMDS)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_rwbuff_in_global) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RWBUFF_GLOBAL)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_rwbuff_in_shared_memory) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RWBUFF_SHM)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_rwbuff_in_heap) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RWBUFF_HEAP)); +} + +TEST(Cflash_FVT_Suite, G_Test_mc_rw_size_parallel_sync) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RW_SIZE_PARALLEL)); +} +TEST(Cflash_FVT_Suite, E_spio_good_path_disk_err_path_disk) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_GOOD_ERR_AFU_DEV)); +} + +TEST(Cflash_FVT_Suite, E_Test_mc_rw_close_res_hndl) +{ + int res = mc_test_engine(TEST_MC_RW_CLS_RSH); + if (res == 0x13 || res == 0x05) + res =1; + ASSERT_EQ(1, res); +} +TEST(Cflash_FVT_Suite, E_Test_mc_rw_close_ctx) +{ +#ifdef _AIX + int res=mc_test_engine(TEST_MC_RW_CLOSE_CTX); + if(res == 10 || res == 0x5 || res == 0x13) + res=1; + ASSERT_EQ(1,res); +#else + ASSERT_EQ(0x13,mc_test_engine(TEST_MC_RW_CLOSE_CTX)); +#endif +} +TEST(Cflash_FVT_Suite, E_Test_mc_rw_close_disk_fd) +{ + int res=mc_test_engine(TEST_MC_RW_CLOSE_DISK_FD); + if(res == 0x5 || res == 0x0|| res == 10||res == 0x19) + res=1; + ASSERT_EQ(1,res); +} +#ifndef _AIX +TEST(Cflash_FVT_Suite, E_Test_mc_rw_unmap_mmio) +{ + ASSERT_EQ(10,mc_test_engine(TEST_MC_RW_UNMAP_MMIO)); +} +#endif +TEST(Cflash_FVT_Suite, G_Test_large_transfer_io) 
+{ + ASSERT_EQ(0,mc_test_engine(TEST_LARGE_TRANSFER_IO)); +} + +TEST(Cflash_FVT_Suite, E_Test_large_trsnfr_boundary) +{ + int res = mc_test_engine(TEST_LARGE_TRNSFR_BOUNDARY); + if(res == 0x2 || res == 255) + res=1; + ASSERT_EQ(1, res); +} + + +TEST(Cflash_FVT_Suite, E_test_mmio_errcase1) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MMIO_ERRCASE1)); +} + +TEST(Cflash_FVT_Suite, E_test_mmio_errcase2) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MMIO_ERRCASE2)); +} + +TEST(Cflash_FVT_Suite, E_test_mmio_errcase3) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MMIO_ERRCASE3)); +} + + +TEST(Cflash_FVT_Suite, E_test_spio_killprocess) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_KILLPROCESS)); +} + +TEST(Cflash_FVT_Suite, E_test_spio_exit) +{ + ASSERT_EQ(0,mc_test_engine(TEST_SPIO_EXIT)); +} + +TEST(Cflash_FVT_Suite, E_test_ioctl_spio_errcase) +{ + ASSERT_EQ(0,mc_test_engine(TEST_IOCTL_SPIO_ERRCASE)); +} +TEST(Cflash_FVT_Suite, E_test_attach_reuse_diff_proc) +{ + ASSERT_EQ(0,mc_test_engine(TEST_ATTACH_REUSE_DIFF_PROC)); +} + +TEST(Cflash_FVT_Suite, E_test_detach_diff_proc) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DETACH_DIFF_PROC)); +} + +TEST(Cflash_FVT_Suite, E_test_vlun_verify) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_VLUN_VERIFY)); +} + +TEST(Cflash_FVT_Suite, E_test_plun_verify) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_PLUN_VERIFY)); +} + + +TEST(Cflash_FVT_Suite,E_test_inval_devno) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_INVAL_DEVNO)); +} +TEST(Cflash_FVT_Suite,E_test_inval_ctxtkn) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_INVAL_CTXTKN)); +} + +TEST(Cflash_FVT_Suite,E_test_inval_rschndl) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_INVAL_RSCHNDL)); +} + +#ifndef _AIX +TEST(Cflash_FVT_Suite,G_dk_capi_clone) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DK_CAPI_CLONE)); +} +#endif +#ifdef _AIX +TEST(Cflash_FVT_Suite, G_MC_test_MRC_MC_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_119)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_MRC_MC_PLUN) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_120)); +} + +TEST(Cflash_FVT_Suite, 
E_test_SPIO_RMDEV) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_180)); +} +TEST(Cflash_FVT_Suite, E_test_SPIO_RMDEV_1) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_1801)); +} +TEST(Cflash_FVT_Suite, E_test_SPIO_LSM) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_181)); +} +TEST(Cflash_FVT_Suite, E_test_CFL_OPEN_SPIO) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_182)); +} +TEST(Cflash_FVT_Suite, G_test_SPIO_CFG_UNCFG) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_187)); +} +TEST(Cflash_FVT_Suite, E_test_QRY_NO_RES) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_212)); +} +TEST(Cflash_FVT_Suite, E_test_QRY_PATH_DIS) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_213)); +} +#endif +/* +TEST(Cflash_FVT_Suite, G_7_1_214) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_214)); +} +*/ +TEST(Cflash_FVT_Suite, E_test_REUSE_DTCH) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_215)); +} +TEST(Cflash_FVT_Suite, E_test_REUSE_DTCH_LOOP) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_216)); +} + +TEST(Cflash_FVT_Suite, E_test_SPIO_RLS_DET) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_190)); +} +#ifdef _AIX +TEST(Cflash_FVT_Suite, G_test_CFG_UNCFG_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_191)); +} +TEST(Cflash_FVT_Suite, G_test_ATH_DTCH_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_192)); +} +TEST(Cflash_FVT_Suite, G_test_SPIO_NCHN) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_193_1)); +} + +TEST(Cflash_FVT_Suite, G_test_SPIO_NCHN_2) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_193_2)); +} +#endif + +TEST(Cflash_FVT_Suite, G_test_MUL_PLUN_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_196)); +} + +#ifndef _AIX +TEST(Cflash_FVT_Suite, E_test_CHN_CTX_ID) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_197)); +} +#endif +TEST(Cflash_FVT_Suite, E_test_CHN_CTX_ID_DIF_PRC) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_198)); +} + +TEST(Cflash_FVT_Suite, G_7_1_203) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_203)); +} +#ifdef _AIX +TEST(Cflash_FVT_Suite, E_test_VG_V_PLUN) +{ + 
ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_209)); +} +#endif +TEST(Cflash_FVT_Suite, E_test_RD_PRM_WRITE) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_210)); +} + +TEST(Cflash_FVT_Suite, E_test_SPIO_VLUN_PLUN_SIM) +{ + ASSERT_EQ(255,mc_test_engine(E_ioctl_7_1_211)); +} +TEST(Cflash_FVT_Suite, E_test_context_reset) +{ + ASSERT_EQ(0,mc_test_engine(E_TEST_CTX_RESET)); +} +TEST(Cflash_FVT_Suite, G_test_max_ctx_plun) +{ + ASSERT_EQ(0,mc_test_engine(G_TEST_MAX_CTX_PLUN)); +} +TEST(Cflash_FVT_Suite, G_test_max_ctx_0_vlun) +{ + ASSERT_EQ(0,mc_test_engine(G_TEST_MAX_CTX_0_VLUN)); +} + +TEST(Cflash_FVT_Suite, G_test_max_ctx_only) +{ + ASSERT_EQ(0,mc_test_engine(G_TEST_MAX_CTX_ONLY)); +} +TEST(Cflash_FVT_Suite, G_test_max_ctx_io_noflg) +{ + ASSERT_EQ(0,mc_test_engine(G_TEST_MAX_CTX_IO_NOFLG)); +} + +#ifdef MANUAL + +TEST(Cflash_FVT_Suite, M_7_5_13_1) +{ + ASSERT_EQ(0,mc_test_engine(M_TEST_7_5_13_1)); +} + +TEST(Cflash_FVT_Suite, M_7_5_13_2) +{ + ASSERT_EQ(0,mc_test_engine(M_TEST_7_5_13_2)); +} + +TEST(Cflash_FVT_Suite, E_test_fc_reset_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_FC_PR_RESET_VLUN)); +} +TEST(Cflash_FVT_Suite, E_test_fc_reset_plun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_FC_PR_RESET_PLUN)); +} + +TEST(Cflash_FVT_Suite, E_MC_test_DCA_EEH_Flag_Set_Reuse_CTX_On_New_Disk) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_REUSE_CTX_NEW_DISK_AFTER_EEH)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_of_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_VLUN)); +} +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_Vlun_Resuse_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_VLUN_RESUSE_CTX)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_Plun_Resuse_Ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_PLUN_RESUSE_CTX)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_Vlun_Resize) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_VLUN_RESIZE)); +} +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_Vlun_Release) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_VLUN_RELEASE)); +} + +TEST(Cflash_FVT_Suite, 
G_MC_test_DCRC_IO_Eeh_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_IO_EEH_VLUN)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_IO_Eeh_Plun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_IO_EEH_PLUN)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Unexpected_error) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_UNEXPECTED_ERR)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Unexpected_error_vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_UNEXPECTED_ERR_VLUN)); +} +TEST(Cflash_FVT_Suite, G_MC_test_DCV_Plun_Rst_flag_EEH) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCV_PLUN_RST_FlAG_EEH)); +} +/*** DK_CAPI_LOG_EVENT ****/ + +#ifdef _AIX +TEST(Cflash_FVT_Suite, G_MC_test_DCLE_Valid_values) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCLE_VALID_VALUES)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCLE_Dk_Lf_Temp) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCLE_DK_LF_TEMP)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCLE_DK_Lf_Perm) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCLE_DK_LF_PERM)); +} +TEST(Cflash_FVT_Suite, G_MC_test_DCLE_Dk_Fl_Hw_Err) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCLE_DK_FL_HW_ERR)); +} + +TEST(Cflash_FVT_Suite, G_MC_test_DCLE_Dk_Fl_Sw_Err) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCLE_DK_FL_SW_ERR)); +} + +TEST(Cflash_FVT_Suite, E_test_ioctl_fcp) +{ + ASSERT_EQ(0,mc_test_engine(TEST_IOCTL_FCP)); +} +#endif + +TEST(Cflash_FVT_Suite, E_test_vSpio_eehRecovery) +{ + ASSERT_EQ(0,mc_test_engine(TEST_VSPIO_EEHRECOVERY)); +} + +TEST(Cflash_FVT_Suite, E_test_dSpio_eehRecovery) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DSPIO_EEHRECOVERY)); +} + +TEST(Cflash_FVT_Suite, E_test_vSpio_eehRecovery_long) +{ + int rc=get_fvt_dev_env(); + if(rc) + ASSERT_EQ(0,rc); + ASSERT_EQ(0,test_vSpio_eehRecovery(2)); +} +TEST(Cflash_FVT_Suite, E_test_dSpio_eehRecovery_long) +{ + int rc=get_fvt_dev_env(); + if(rc) + ASSERT_EQ(0,rc); + ASSERT_EQ(0,test_dSpio_eehRecovery(2)); +} +TEST(Cflash_FVT_Suite, E_test_vlun_disbale) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_VLUN_DISABLE)); +} + +TEST(Cflash_FVT_Suite, E_test_plun_disbale) +{ + 
ASSERT_EQ(0,mc_test_engine(EXCP_PLUN_DISABLE)); +} +/* +TEST(Cflash_FVT_Suite, E_test_vlun_increase) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_VLUN_INCREASE )); +} + +TEST(Cflash_FVT_Suite,E_test_vlun_reduce) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_VLUN_REDUCE)); +} +*/ +TEST(Cflash_FVT_Suite, E_max_ctx_rcvr_except_last_one) +{ + ASSERT_EQ(0,mc_test_engine(MAX_CTX_RCVR_EXCEPT_LAST_ONE)); +} + +TEST(Cflash_FVT_Suite, E_max_ctx_rcvr_last_one_no_rcvr) +{ + ASSERT_EQ(0,mc_test_engine(MAX_CTX_RCVR_LAST_ONE_NO_RCVR)); +} + +TEST(Cflash_FVT_Suite,E_test_eeh_simulation) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_EEH_SIMULATION)); +} + +TEST(Cflash_FVT_Suite,E_test_disk_incrs_excp) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_DISK_INCREASE)); +} + +TEST(Cflash_FVT_Suite, E_test_VLUN_TDIO) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_174)); +} + +TEST(Cflash_FVT_Suite, E_test_TDIO_VLUN) +{ + ASSERT_EQ(0,mc_test_engine(E_ioctl_7_1_175)); +} + +TEST(Cflash_FVT_Suite, E_test_LUN_RESET ) +{ + printf("\n================== Manual ========================\n"); + printf("Test: G_7_1_203 need to be run from one session &\n"); + printf("Error injection from another session using \n"); + printf("'sg_reset -H /dev/sg#' And 'sg_reset -d /dev/sg#'.\n"); + printf("Ex: While true; do sg_reset -H /dev/sg#; sleep 5;\n"); + printf("sg_reset -d /dev/sg#; sleep 5; done"); + printf("\n================== Manual ========================\n"); + ASSERT_EQ(1,0); +} + +#ifndef _AIX +TEST(Cflash_FVT_Suite, E_CAPI_LINK_DOWN) +{ + ASSERT_EQ(0,mc_test_engine(E_CAPI_LINK_DOWN)); +} +#endif + +#ifdef _AIX +TEST(Cflash_FVT_Suite, G_MC_test_DCA_Diff_Devno_Multiple) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCA_CALL_DIFF_DEVNO_MULTIPLE)); +} +TEST(Cflash_FVT_Suite, E_test_cfdisk_ctxs_diff_devno) +{ + ASSERT_EQ(0,mc_test_engine(TEST_CFDISK_CTXS_DIFF_DEVNO)); +} +#endif +TEST(Cflash_FVT_Suite, G_MC_test_DCRC_EEH_Plun_Mutli_Vlun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_DCRC_EEH_PLUN_MULTI_VLUN)); +} 
+TEST(Cflash_FVT_Suite,E_test_vlun_uattention) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_VLUN_UATTENTION)); +} + +TEST(Cflash_FVT_Suite,E_test_plun_uattention) +{ + ASSERT_EQ(0,mc_test_engine(EXCP_PLUN_UATTENTION)); +} +TEST(Cflash_FVT_Suite, G_test_SPIO_VLUN_CFG_UNCFG) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_188)); +} + +TEST(Cflash_FVT_Suite, G_test_SPIO_PLUN_CFG_UNCFG) +{ + ASSERT_EQ(0,mc_test_engine(G_ioctl_7_1_189)); +} +#endif + diff --git a/src/cflash/test/fvt_ini.C b/src/cflash/test/fvt_ini.C new file mode 100644 index 00000000..f85d27f3 --- /dev/null +++ b/src/cflash/test/fvt_ini.C @@ -0,0 +1,238 @@ +// IBM_PROLOG_BEGIN_TAG IBM_PROLOG_END +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include + +#include +extern "C" +{ + #include + + int32_t g_traceE = 1; /* error traces */ + int32_t g_traceI = 1; /* informative 'where we are in code' traces */ + int32_t g_traceF = 1; /* function exit/enter */ + int32_t g_traceV = 1; /* verbose trace...lots of information */ +} + +#define HOST_ID 1 +#define STR_HOST_ID "1" + + +TEST(Provisioning_FVT_Suite, IniSectionIsValid) +{ + EXPECT_FALSE(section_is_valid((char*)"invalid;semicolon")); + EXPECT_FALSE(section_is_valid((char*)"invalid inner\nspaces")); + EXPECT_FALSE(section_is_valid((char*)"invalidinner\nnewline")); + EXPECT_FALSE(section_is_valid((char*)"invalidtrailingspaces\r")); + EXPECT_FALSE(section_is_valid((char*)"\tinvalidleadingspaces")); + EXPECT_FALSE(section_is_valid((char*)"\t")); + EXPECT_FALSE(section_is_valid((char*)" ")); + EXPECT_FALSE(section_is_valid((char*)"")); + EXPECT_FALSE(section_is_valid((char*)";")); + EXPECT_FALSE(section_is_valid((char*)NULL)); +} + +TEST(Provisioning_FVT_Suite, IniTrimWhitespace) +{ + char buffer[80] = "middle space"; + EXPECT_STREQ("middle", trim_whitespace_inplace(buffer)); + EXPECT_STREQ("middle", buffer); + 
strcpy(buffer,"mid space"); + EXPECT_STREQ("mid", trim_whitespace_inplace(buffer)); + strcpy(buffer,"tab\tspace"); + EXPECT_STREQ("tab", trim_whitespace_inplace(buffer)); + strcpy(buffer," spacetab\tspace"); + EXPECT_STREQ("spacetab", trim_whitespace_inplace(buffer)); + strcpy(buffer," \t padded stuff"); + EXPECT_STREQ("padded", trim_whitespace_inplace(buffer)); + strcpy(buffer," \t "); + EXPECT_STREQ("", trim_whitespace_inplace(buffer)); + strcpy(buffer," "); + EXPECT_STREQ("", trim_whitespace_inplace(buffer)); + strcpy(buffer,""); + EXPECT_STREQ("", trim_whitespace_inplace(buffer)); + EXPECT_EQ(NULL, trim_whitespace_inplace(NULL)); +} + +TEST(Provisioning_FVT_Suite, IniTrimComments) +{ + char buffer[80] = "middle space"; + trim_comments(buffer); + EXPECT_STREQ("middle space", buffer); + strcpy(buffer,"mid space;"); + trim_comments(buffer); + EXPECT_STREQ("mid space", buffer); + strcpy(buffer,"mid ; space"); + trim_comments(buffer); + EXPECT_STREQ( "mid ", buffer); + strcpy(buffer,"mid; space"); + trim_comments(buffer); + EXPECT_STREQ( "mid", buffer); + strcpy(buffer,"mi; space"); + trim_comments(buffer); + EXPECT_STREQ( "mi", buffer); + strcpy(buffer,"m; space"); + trim_comments(buffer); + EXPECT_STREQ( "m", buffer); + strcpy(buffer,";test space"); + trim_comments(buffer); + EXPECT_STREQ( "", buffer); +} + + +TEST(Provisioning_FVT_Suite, IniCreateRootEntry) +{ + ini_dict_t* ini_entry = NULL; + ini_dict_t* cur_entry = NULL; + append_ini_entry(&ini_entry, (char*)"section", (char*)"key", (char*)"value"); + ASSERT_TRUE((ini_entry != NULL)); + EXPECT_STREQ("section",ini_entry->section); + EXPECT_STREQ("key",ini_entry->key); + EXPECT_STREQ("value",ini_entry->value); + ASSERT_TRUE((ini_entry->next == NULL)); + cur_entry = append_ini_entry(&ini_entry, (char*)"sec_section", (char*)"sec_key", (char*)"sec_value"); + ASSERT_TRUE((cur_entry != NULL)); + EXPECT_STREQ("sec_section",cur_entry->section); + EXPECT_STREQ("sec_key",cur_entry->key); + 
EXPECT_STREQ("sec_value",cur_entry->value); + ASSERT_TRUE((cur_entry->next == NULL)); + cur_entry = append_ini_entry(&ini_entry, (char*)"3rd_sec", (char*)"3rd_key", (char*)"3rd_val"); + ASSERT_TRUE((cur_entry != NULL)); + EXPECT_STREQ("3rd_sec",cur_entry->section); + EXPECT_STREQ("3rd_key",cur_entry->key); + EXPECT_STREQ("3rd_val",cur_entry->value); + ASSERT_TRUE((cur_entry->next == NULL)); +} + + +TEST(Provisioning_FVT_Suite, COMPARE_WWPNS_ARRAY1_NULL) +{ + prov_wwpn_info_t * wwpn_array_1 = NULL; + uint16_t num_wwpns_1 = 1; + prov_wwpn_info_t wwpn_array_2[MAX_WWPNS]; + uint16_t num_wwpns_2; + + provGetAllWWPNs(wwpn_array_2, &num_wwpns_2); + + int rv = compareWWPNs(wwpn_array_1, wwpn_array_2, num_wwpns_1, num_wwpns_2); + + ASSERT_EQ(rv, -1); +} + +TEST(Provisioning_FVT_Suite, COMPARE_WWPNS_ARRAY2_NULL) +{ + prov_wwpn_info_t wwpn_array_1[MAX_WWPNS]; + uint16_t num_wwpns_1; + prov_wwpn_info_t * wwpn_array_2 = NULL; + uint16_t num_wwpns_2 = 1; + + provGetAllWWPNs(wwpn_array_1, &num_wwpns_1); + provGetAllWWPNs(wwpn_array_2, &num_wwpns_2); + + int rv = compareWWPNs(wwpn_array_1, wwpn_array_2, num_wwpns_1, num_wwpns_2); + + ASSERT_EQ(rv, -1); +} + +TEST(Provisioning_FVT_Suite, COMPARE_WWPNS_0_NUM_1) +{ + prov_wwpn_info_t wwpn_array_1[MAX_WWPNS]; + uint16_t num_wwpns_1; + prov_wwpn_info_t wwpn_array_2[MAX_WWPNS]; + uint16_t num_wwpns_2; + + provGetAllWWPNs(wwpn_array_1, &num_wwpns_1); + num_wwpns_1 = 0; + provGetAllWWPNs(wwpn_array_2, &num_wwpns_2); + + int rv = compareWWPNs(wwpn_array_1, wwpn_array_2, num_wwpns_1, num_wwpns_2); + + ASSERT_EQ(rv, -1); +} + +TEST(Provisioning_FVT_Suite, COMPARE_WWPNS_0_NUM_2) +{ + prov_wwpn_info_t wwpn_array_1[MAX_WWPNS]; + uint16_t num_wwpns_1; + prov_wwpn_info_t wwpn_array_2[MAX_WWPNS]; + uint16_t num_wwpns_2; + + provGetAllWWPNs(wwpn_array_1, &num_wwpns_1); + provGetAllWWPNs(wwpn_array_2, &num_wwpns_2); + num_wwpns_2 = 0; + + int rv = compareWWPNs(wwpn_array_1, wwpn_array_2, num_wwpns_1, num_wwpns_2); + + ASSERT_EQ(rv, -1); +} + 
+TEST(Provisioning_FVT_Suite, CONCATENATE_WWPNS_VALID_INPUT) +{ + prov_wwpn_info_t wwpn_array[MAX_WWPNS]; + uint16_t num_wwpns; + + strcpy(wwpn_array[0].wwpn, "0000000000000000"); + strcpy(wwpn_array[1].wwpn, "AAAAAAAAAAAAAAAA"); + strcpy(wwpn_array[2].wwpn, "BBBBBBBBBBBBBBBB"); + num_wwpns = 3; + + char concat_wwpns[WWPN_STRING_SIZE]; + bzero(concat_wwpns, WWPN_STRING_SIZE); + + int rv = concatenateWWPNs(wwpn_array, num_wwpns, concat_wwpns); + + ASSERT_EQ(rv, 0); + ASSERT_STREQ("0000000000000000:AAAAAAAAAAAAAAAA:BBBBBBBBBBBBBBBB", concat_wwpns); +} + +TEST(Provisioning_FVT_Suite, CONCATENATE_WWPNS_NULL_ARRAY) +{ + prov_wwpn_info_t * wwpn_array = NULL; + uint16_t num_wwpns = 3; + + char concat_wwpns[WWPN_STRING_SIZE]; + bzero(concat_wwpns, WWPN_STRING_SIZE); + + int rv = concatenateWWPNs(wwpn_array, num_wwpns, concat_wwpns); + + ASSERT_EQ(rv, -1); +} + +TEST(Provisioning_FVT_Suite, CONCATENATE_WWPNS_0_NUM) +{ + prov_wwpn_info_t wwpn_array[MAX_WWPNS]; + uint16_t num_wwpns; + + strcpy(wwpn_array[0].wwpn, "0000000000000000"); + strcpy(wwpn_array[1].wwpn, "AAAAAAAAAAAAAAAA"); + strcpy(wwpn_array[2].wwpn, "BBBBBBBBBBBBBBBB"); + num_wwpns = 0; + + char concat_wwpns[WWPN_STRING_SIZE]; + bzero(concat_wwpns, WWPN_STRING_SIZE); + + int rv = concatenateWWPNs(wwpn_array, num_wwpns, concat_wwpns); + + ASSERT_EQ(rv, 0); +} + +TEST(Provisioning_FVT_Suite, CONCATENATE_WWPNS_VALID_NULL_CONCAT_WWPNS) +{ + prov_wwpn_info_t wwpn_array[MAX_WWPNS]; + uint16_t num_wwpns; + + strcpy(wwpn_array[0].wwpn, "0000000000000000"); + strcpy(wwpn_array[1].wwpn, "AAAAAAAAAAAAAAAA"); + strcpy(wwpn_array[2].wwpn, "BBBBBBBBBBBBBBBB"); + num_wwpns = 3; + + char * concat_wwpns = NULL; + + int rv = concatenateWWPNs(wwpn_array, num_wwpns, concat_wwpns); + + ASSERT_EQ(rv, -1); +} diff --git a/src/cflash/test/makefile b/src/cflash/test/makefile new file mode 100644 index 00000000..56a64b0e --- /dev/null +++ b/src/cflash/test/makefile @@ -0,0 +1,85 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically 
generated prolog. +# +# $Source: src/cflash/test/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +UNAME=$(shell uname) + +ROOTPATH = ../../.. +USER_DIR = . +SUBDIRS = +TESTDIR = ${ROOTPATH}/obj/tests + +#test code != production code, so allow warnings here. +ALLOW_WARNINGS = yes + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img +LIBPATHS = -L${ROOTPATH}/img + +BTESTS= +BIN_TESTS=$(addprefix ${TESTDIR}/, ${BTESTS}) + +# AIX only +ifeq ($(UNAME),AIX) +LINKLIBS+=-lpthreads + +#Linux only +else +LINKLIBS+=-lpthread +endif + +GTESTS = run_cflash_fvt +GTESTS_DIR = $(addprefix $(TESTDIR)/, $(GTESTS)) + +run_cflash_fvt_OFILES = fvt_cflash.o cflash_test_ioctl_io.o \ + cflash_test_engine.o cflash_test_error.o \ + cflash_test_io.o cflash_test_util.o cflash_test2.o \ + cflash_test_ioctl.o cflash_test_error2.o \ + cflash_test_excp.o manual.o \ + 119.o 174.o 174_175.o 188.o 191.o 192.o 196.o 197.o 198.o 209.o 210.o 211.o 215.o 216.o 203.o + +ifeq ($(UNAME),AIX) + +GTESTS64 = $(addsuffix 64, $(GTESTS)) +GTESTS64_DIR = $(addprefix $(TESTDIR)/, $(GTESTS64)) + +endif + +DEPS=$(addprefix $(TESTDIR)/, $(run_cflash_fvt_OFILES:.o=.dep)) + +CFLAGS += \ + -g \ + -D__FVT__\ + -I$(ROOTPATH)/src/test/framework/googletest/googletest/include +CXXFLAGS+=$(CFLAGS) + +include 
${ROOTPATH}/config.mk + +include $(ROOTPATH)/src/test/framework/gtest.objtests.mk + +unit: + -@if [[ -e /dev/cxl ]]; then \ + $(TESTDIR)/run_cflash_fvt --gtest_output=xml:$(TESTDIR)/cflash_fvt_results.xml; \ + else \ + echo "SKIPPING run_cflash_fvt"; \ + fi diff --git a/src/cflash/test/manual.c b/src/cflash/test/manual.c new file mode 100644 index 00000000..721dd04f --- /dev/null +++ b/src/cflash/test/manual.c @@ -0,0 +1,79 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/manual.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "cflash_test.h" +#include + +extern int g_error; +extern pid_t pid; + +// M_TEST_7_5_13_1 & M_TEST_7_5_13_2 cases +// i.e. M_7_5_13_1 & M_7_5_13_2 TC names +int ioctl_7_5_13(int ops) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t threadId; + __u64 stride; + + // pid used to create unique data patterns & logging from util ! 
+ pid = getpid(); + + //ctx_init with default flash disk & devno + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + //thread to handle AFU interrupt & events + rc = pthread_create(&threadId, NULL, ctx_rrq_rx, p_ctx); + CHECK_RC(rc, "pthread_create failed"); + + rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); + CHECK_RC(rc, "create LUN_DIRECT failed"); + + stride=0x16; + + // To be used before reboot, for writing data + if (ops == 1) + { + // Just perform io write now. + rc = do_write_or_read(p_ctx, stride, 1); + CHECK_RC(rc, "io write failed"); + + printf("Now, reboot the system..\n"); + sleep(1000); + } + // To be used after reboot, for verification of data + else + { + // Now perform io read & data compare test. + rc = do_write_or_read(p_ctx, stride, 2); + CHECK_RC(rc, "io read failed"); + } + + // Marking fail always for manual verification. + return -1; +} + diff --git a/src/cflash/test/run_cflash_fvt.C b/src/cflash/test/run_cflash_fvt.C new file mode 100644 index 00000000..66ac44f7 --- /dev/null +++ b/src/cflash/test/run_cflash_fvt.C @@ -0,0 +1,43 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/cflash/test/run_cflash_fvt.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include + +//DO NOT ADD TEST CASES IN THIS FILE + +class Cflash_FVT_Suite : public testing::Test +{ + void SetUp() + { + } + void TearDown() + { + } +}; diff --git a/src/common/cblk_mc.c b/src/common/cblk_mc.c new file mode 100644 index 00000000..57060f5f --- /dev/null +++ b/src/common/cblk_mc.c @@ -0,0 +1,646 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/common/cblk_mc.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "cblk.h" + +/***************************************************************************** + * Procedure: xfer_data + * + * Description: Perform a transfer operation for the given + * socket file descriptor. 
+ * + * Parameters: + * fd: Socket File Descriptor + * op: Read or Write Operation + * buf: Buffer to either read from or write to + * exp_size: Size of data transfer + * + * Return: 0, if successful + * non-zero otherwise + *****************************************************************************/ +int +xfer_data(int fd, int op, void *buf, ssize_t exp_size) +{ + int rc = 0; + ssize_t offset = 0; + ssize_t bytes_xfer = 0; + ssize_t target_size = exp_size; + struct iovec iov; + struct msghdr msg; + + while ( 1 ) + { + // Set up IO vector for IO operation. + memset(&msg, 0, sizeof(struct msghdr)); + iov.iov_base = buf + offset; + iov.iov_len = target_size; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + // Check to see if we are sending or receiving data + if ( op == XFER_OP_READ ) + { + bytes_xfer = recvmsg(fd, &msg, MSG_WAITALL); + } + else + { + bytes_xfer = sendmsg(fd, &msg, 0); + } + + if ( -1 == bytes_xfer ) + { + if ( EAGAIN == errno || EWOULDBLOCK == errno || EINTR == errno) + { + fprintf(stderr, "xfer_data: Operation(%d) fail errno %d\n", + op, errno); + + // just retry the whole request + continue; + } + else + { + fprintf(stderr, "xfer_data: Operation(%d) Connection closed %d(rc %d)\n", + op, errno, rc); + // connection closed by the other end + rc = 1; + break; + } + } + else if ( 0 == bytes_xfer ) + { + fprintf(stderr, "xfer_data: Operation(%d) 0 Bytes Transfered %d(rc %d)\n", + op, errno, rc); + // connection closed by the other end + rc = 1; + break; + } + else if ( bytes_xfer == target_size ) + { + // We have transfered all the bytes we wanted, we + // can stop now. 
+ rc = 0; + break; + } + else + { + fprintf(stderr, "xfer_data: Operation(%d) Partial Transfered %d(Total %d)\n", + op, bytes_xfer, target_size); + // less than target size - partial condition + // set up to transfer for the remainder of the request + offset += bytes_xfer; + target_size = (target_size - bytes_xfer); + } + } + + return rc; +} + +/***************************************************************************** + * Procedure: blk_connect + * + * Description: Connect to the server entity + * + * Parameters: + * + * Return: 0 or greater is the file descriptor for the connection + * -1 is error + *****************************************************************************/ +int +blk_connect() +{ + struct sockaddr_un svr_addr; + int conn_fd = -1; + int rc = 0; + + // Create a socket file descriptor + conn_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (conn_fd < 0) + { + fprintf(stderr, "blk_connect: socket failed: %d (%d)\n", + conn_fd, errno); + } + else + { + bzero(&svr_addr, sizeof(struct sockaddr_un)); + svr_addr.sun_family = AF_UNIX; + + // This can be changed to any file + strcpy(svr_addr.sun_path, "/tmp/surelock"); + + // Connect to the server entity. 
+ rc = connect(conn_fd, (struct sockaddr *)&svr_addr, sizeof(svr_addr)); + if ( rc ) + { + fprintf(stderr, "block_connect: Connect failed: %d (%d)\n", + rc, errno); + close(conn_fd); + conn_fd = -1; + } + } + + return conn_fd; +} + +/***************************************************************************** + * Procedure: blk_send_command + * + * Description: Send a command to the server entity + * + * Parameters: + * conn_fd: Socket File Descriptor + * cmd: Command to perform + * cmdblock: Command request block + * cmdresp: Command response block + * + * Return: 0, if successful + * non-zero otherwise + *****************************************************************************/ +int +blk_send_command(int conn_fd, void *cmdblock, void *cmdresp) +{ + CReqBlkHeader_p cmd_hdr; + CRespBlkHeader_p cmdresp_hdr; + int rc = 0; + + cmd_hdr = (CReqBlkHeader_t *)cmdblock; + cmd_hdr->creqh_pid = getpid(); + cmd_hdr->creqh_pthid = pthread_self(); + + cmdresp_hdr = (CRespBlkHeader_p)cmdresp; + + // Send the command block + rc = xfer_data(conn_fd, 0, (void *)cmdblock, cmd_hdr->creqh_cmdsize); + if (rc) + { + fprintf(stderr, "blk_send_command: Child %d failed send: %d\n", + getpid(), rc); + } + else + { + // Wait for the response + rc = xfer_data(conn_fd, 1, (void *)cmdresp, cmdresp_hdr->cresh_respsize); + if (rc) + { + fprintf(stderr, "blk_send_command: Child %d failed read: %d\n", + getpid(), rc); + } + } + + return rc; +} + + + +/************MC Block Allocator Command Functions*****************************/ + +/************************************************************************* + * NAME: blk_sendmcregister + * + * FUNCTION: + * This will send a MCREG cmd to the MC process + * + * PARAMETERS: None + * + * RETURNS: None + ************************************************************************/ +int +blk_sendmcregister(dev_t adap_devno, char *adap_name, + ctx_handle_t *ctx_handle) +{ + CReqBlk_MCRegister_t cmd_mcreg; + CRespBlk_MCRegister_t cmdresp_mcreg; + int rc; + 
+ memset(&cmd_mcreg, '\0', sizeof(CReqBlk_MCRegister_t)); + memset(&cmdresp_mcreg, '\0', sizeof(CRespBlk_MCRegister_t)); + + cmd_mcreg.header.creqh_command = CMDBLK_CMD_MCREG; + cmd_mcreg.header.creqh_cmdsize = sizeof(CReqBlk_MCRegister_t); + + cmdresp_mcreg.header.cresh_respsize = sizeof(CRespBlk_MCRegister_t); + + cmd_mcreg.adap_devno = adap_devno; + strcpy(cmd_mcreg.adap_name, adap_name); + + rc = blk_send_command(conn_fd, &cmd_mcreg, &cmdresp_mcreg); + + if (cmdresp_mcreg.header.cresh_status != 0 || + cmdresp_mcreg.ctx_handle < 0) { + printf("blk_sendmcreg: MCREG failed - status: %x \n", + cmdresp_mcreg.header.cresh_status); + return(cmdresp_mcreg.header.cresh_status); + } + + printf("blk_sendmcreg: MCREG Success - ctx_id %x\n", + CTX_ID(cmdresp_mcreg.ctx_handle)); + + *ctx_handle = cmdresp_mcreg.ctx_handle; + + return(0); +} + +/************************************************************************* + * NAME: blk_sendmcbainit + * + * FUNCTION: + * This will send a MCBAINIT cmd to the MC process + * + * PARAMETERS: None + * + * RETURNS: None + ************************************************************************/ +int +blk_sendmcbainit(size_t lunsize) +{ + CReqBlk_MCBaInit_t cmd_mcbainit; + CRespBlk_MCBaInit_t cmdresp_mcbainit; + lungrp_info_t lungrp; + lungrp_vec_p lunptr; + int rc; + + memset(&cmd_mcbainit, '\0', sizeof(CReqBlk_MCBaInit_t)); + memset(&cmdresp_mcbainit, '\0', sizeof(CRespBlk_MCBaInit_t)); + + cmd_mcbainit.header.creqh_command = CMDBLK_CMD_MCBAINIT; + cmd_mcbainit.header.creqh_cmdsize = sizeof(CReqBlk_MCBaInit_t); + + cmdresp_mcbainit.header.cresh_respsize = sizeof(CRespBlk_MCBaInit_t); + + lungrp.total_luns = 1; + + lunptr = (lungrp_vec_p) malloc(sizeof(lungrp_vec_t)); + + if (lunptr == NULL) { + printf("blk_sendmcbainit: malloc lunptr failed ENOMEM\n"); + return(ENOMEM); + } + + lunptr->lun_id = 0x10000; + lunptr->wwpn = 0x20000; + lunptr->lun_size = lunsize; + + lungrp.lunptr = lunptr; + + cmd_mcbainit.lungrp = &lungrp; + + rc = 
blk_send_command(conn_fd, &cmd_mcbainit, &cmdresp_mcbainit); + + if (cmdresp_mcbainit.header.cresh_status != 0 || + cmdresp_mcbainit.ba_handle == NULL) { + printf("blk_sendmcbainit: MCBAINIT failed - status: %x ba_handle %llx\n", + cmdresp_mcbainit.header.cresh_status, cmdresp_mcbainit.ba_handle); + } else { + printf("blk_sendmcbainit: MCBAINIT Success - ba_handle %llx\n", + cmdresp_mcbainit.ba_handle); + } + + return(0); +} + +/************************************************************************* + * NAME: blk_sendmcbaclose + * + * FUNCTION: + * This will send a MCBACLOSE cmd to the MC process + * + * PARAMETERS: None + * + * RETURNS: None + ************************************************************************/ +int +blk_sendmcbaclose(ba_handle_t ba_handle) +{ + CReqBlk_MCBaClose_t cmd_mcbaclose; + CRespBlk_MCBaClose_t cmdresp_mcbaclose; + int rc; + + memset(&cmd_mcbaclose, '\0', sizeof(CReqBlk_MCBaClose_t)); + memset(&cmdresp_mcbaclose, '\0', sizeof(CRespBlk_MCBaClose_t)); + + cmd_mcbaclose.header.creqh_command = CMDBLK_CMD_MCBACLOSE; + cmd_mcbaclose.header.creqh_cmdsize = sizeof(CReqBlk_MCBaClose_t); + + cmdresp_mcbaclose.header.cresh_respsize = sizeof(CRespBlk_MCBaClose_t); + + cmd_mcbaclose.ba_handle = ba_handle; + + rc = blk_send_command(conn_fd, &cmd_mcbaclose, &cmdresp_mcbaclose); + + if (cmdresp_mcbaclose.header.cresh_status != 0) { + printf("blk_sendmcbaclose: MCBACLOSE failed - status: %x ba_handle %llx\n", + cmdresp_mcbaclose.header.cresh_status, cmd_mcbaclose.ba_handle); + } else { + printf("blk_sendmcbaclose: MCBACLOSE Success - ba_handle %llx\n", + cmd_mcbaclose.ba_handle); + } +} + +/************************************************************************* + * NAME: blk_sendmcopen + * + * FUNCTION: + * This will send a MCOPEN cmd to the MC process + * + * PARAMETERS: None + * + * RETURNS: None + ************************************************************************/ +int +blk_sendmcopen(dev_t adap_devno, char *adap_name, + ctx_handle_t 
ctx_handle)
+{
+    CReqBlk_MCOpen_t cmd_mcopen;
+    CRespBlk_MCOpen_t cmdresp_mcopen;
+    ctx_id_t ctx_id = CTX_ID(ctx_handle);
+    int rc;
+
+    /* FIX(review): the upper bound used `>' -- if CTX_LIST_SIZE is the
+     * number of table entries, valid indices are 0..CTX_LIST_SIZE-1 and
+     * ctx_id == CTX_LIST_SIZE must be rejected too.  TODO confirm
+     * against the CTX_LIST_SIZE definition.
+     * (Also removed three locals -- ctx_ptr, res_info_ptr,
+     * tmp_res_info_ptr -- that were declared but never used.)
+     */
+    if (ctx_id < 0 || ctx_id >= CTX_LIST_SIZE) {
+        printf("blk_sendmcopen: ctx_id Invalid ctx_id %x- EINVAL \n",
+               ctx_id);
+        return(EINVAL);
+    }
+
+    memset(&cmd_mcopen, '\0', sizeof(CReqBlk_MCOpen_t));
+    memset(&cmdresp_mcopen, '\0', sizeof(CRespBlk_MCOpen_t));
+
+    cmd_mcopen.header.creqh_command = CMDBLK_CMD_MCOPEN;
+    cmd_mcopen.header.creqh_cmdsize = sizeof(CReqBlk_MCOpen_t);
+
+    cmdresp_mcopen.header.cresh_respsize = sizeof(CRespBlk_MCOpen_t);
+
+    cmd_mcopen.adap_devno = adap_devno;
+    cmd_mcopen.ctx_handle = ctx_handle;
+
+    /* NOTE(review): unbounded strcpy -- assumes adap_name fits in
+     * cmd_mcopen.adap_name; TODO confirm the field size or bound the copy.
+     */
+    strcpy(cmd_mcopen.adap_name, adap_name);
+
+    rc = blk_send_command(conn_fd, &cmd_mcopen, &cmdresp_mcopen);
+
+    if (cmdresp_mcopen.header.cresh_status != 0 ||
+        cmdresp_mcopen.r_handle < 0) {
+        printf("blk_sendmcopen: MCOPEN failed - status: %x \n",
+               cmdresp_mcopen.header.cresh_status);
+        return(cmdresp_mcopen.header.cresh_status);
+    }
+
+    printf("blk_sendmcopen: MCOPEN Success - r_handle %x\n",
+           cmdresp_mcopen.r_handle);
+
+    return(0);
+}
+
+/*************************************************************************
+ * NAME:  blk_sendmcclose
+ *
+ * FUNCTION:
+ *        This will send a MCCLOSE cmd to the MC process
+ *
+ * PARAMETERS: None
+ *
+ * RETURNS:    None
+ ************************************************************************/
+int
+blk_sendmcclose(dev_t adap_devno, ctx_handle_t ctx_handle,
+                res_handle_t r_handle)
+{
+    CReqBlk_MCClose_t cmd_mcclose;
+    CRespBlk_MCClose_t cmdresp_mcclose;
+    ctx_id_t ctx_id = CTX_ID(ctx_handle);
+    int rc;
+
+    memset(&cmd_mcclose, '\0', sizeof(CReqBlk_MCClose_t));
+    memset(&cmdresp_mcclose, '\0', sizeof(CRespBlk_MCClose_t));
+
+    cmd_mcclose.header.creqh_command = CMDBLK_CMD_MCCLOSE;
+    cmd_mcclose.header.creqh_cmdsize = sizeof(CReqBlk_MCClose_t);
+
+    cmdresp_mcclose.header.cresh_respsize =
sizeof(CRespBlk_MCClose_t);
+
+    /* NOTE(review): ctx_handle is only used (via ctx_id) in the success
+     * trace below; it is never copied into the request -- presumably
+     * the MC process resolves the context from the connection, or
+     * CReqBlk_MCClose_t has no such field.  TODO confirm.
+     */
+    cmd_mcclose.adap_devno = adap_devno;
+    cmd_mcclose.r_handle = r_handle;
+
+    rc = blk_send_command(conn_fd, &cmd_mcclose, &cmdresp_mcclose);
+
+    /* Failure is judged from the response block status, not rc. */
+    if (cmdresp_mcclose.header.cresh_status != 0) {
+        printf("blk_sendmcclose: MCCLOSE failed - status: %x \n",
+               cmdresp_mcclose.header.cresh_status);
+        return(cmdresp_mcclose.header.cresh_status);
+    }
+
+    printf("blk_sendmcclose: MCCLOSE Success ctx_id %x r_handle %x\n",
+           ctx_id, r_handle);
+
+    return(0);
+}
+
+/*************************************************************************
+ * NAME:  blk_sendmcresize
+ *
+ * FUNCTION:
+ *        This will send a MCRESIZE cmd to the MC process
+ *
+ * PARAMETERS: None
+ *
+ * RETURNS:    None
+ ************************************************************************/
+int
+blk_sendmcresize(dev_t adap_devno, ctx_handle_t ctx_handle,
+                 res_handle_t r_handle, size_t r_size)
+{
+    CReqBlk_MCResize_t cmd_mcresize;
+    CRespBlk_MCResize_t cmdresp_mcresize;
+    ctx_id_t ctx_id = CTX_ID(ctx_handle);   /* NOTE(review): unused */
+    int rc;
+
+    memset(&cmd_mcresize, '\0', sizeof(CReqBlk_MCResize_t));
+    memset(&cmdresp_mcresize, '\0', sizeof(CRespBlk_MCResize_t));
+
+    cmd_mcresize.header.creqh_command = CMDBLK_CMD_MCRESIZE;
+    cmd_mcresize.header.creqh_cmdsize = sizeof(CReqBlk_MCResize_t);
+
+    cmdresp_mcresize.header.cresh_respsize = sizeof(CRespBlk_MCResize_t);
+
+    cmd_mcresize.ctx_handle = ctx_handle;
+    cmd_mcresize.adap_devno = adap_devno;
+    cmd_mcresize.r_handle = r_handle;
+    cmd_mcresize.r_size = r_size;
+
+    rc = blk_send_command(conn_fd, &cmd_mcresize, &cmdresp_mcresize);
+
+    if (cmdresp_mcresize.header.cresh_status != 0) {
+        printf("blk_sendmcresize: MCRESIZE failed - status: %x \n",
+               cmdresp_mcresize.header.cresh_status);
+        return(cmdresp_mcresize.header.cresh_status);
+    }
+
+    printf("blk_sendmcresize: MCRESIZE Success status: %x\n",
+           cmdresp_mcresize.header.cresh_status);
+
+    /* NOTE(review): %x with a size_t argument is a printf format
+     * mismatch on 64-bit builds -- should be %zx; fix together with the
+     * argument list on the continuation line.
+     */
+    if (cmdresp_mcresize.new_r_size != r_size) {
+        printf("blk_sendmcresize: Partial Size Alloc Size: %x Req Size: %x\n",
+
cmdresp_mcresize.new_r_size, r_size);
+    }
+
+    return(0);
+}
+
+/*************************************************************************
+ * NAME:  blk_sendmcgetsize
+ *
+ * FUNCTION:
+ *        This will send a MCGETSIZE cmd to the MC process
+ *
+ * PARAMETERS: None
+ *
+ * RETURNS:    None
+ ************************************************************************/
+int
+blk_sendmcgetsize(dev_t adap_devno, ctx_handle_t ctx_handle,
+                  res_handle_t r_handle, size_t *size)
+{
+    CReqBlk_MCGetsize_t cmd_mcgetsize;
+    CRespBlk_MCGetsize_t cmdresp_mcgetsize;
+    ctx_id_t ctx_id = CTX_ID(ctx_handle);   /* NOTE(review): unused */
+    int rc;
+
+    memset(&cmd_mcgetsize, '\0', sizeof(CReqBlk_MCGetsize_t));
+    memset(&cmdresp_mcgetsize, '\0', sizeof(CRespBlk_MCGetsize_t));
+
+    cmd_mcgetsize.header.creqh_command = CMDBLK_CMD_MCGETSIZE;
+    cmd_mcgetsize.header.creqh_cmdsize = sizeof(CReqBlk_MCGetsize_t);
+
+    cmdresp_mcgetsize.header.cresh_respsize = sizeof(CRespBlk_MCGetsize_t);
+
+    cmd_mcgetsize.ctx_handle = ctx_handle;
+    cmd_mcgetsize.adap_devno = adap_devno;
+    cmd_mcgetsize.r_handle = r_handle;
+
+    rc = blk_send_command(conn_fd, &cmd_mcgetsize, &cmdresp_mcgetsize);
+
+    /* Failure is judged from the response block status, not rc. */
+    if (cmdresp_mcgetsize.header.cresh_status != 0) {
+        printf("blk_sendmcgetsize: MCGETSIZE failed - status: %x \n",
+               cmdresp_mcgetsize.header.cresh_status);
+        return(cmdresp_mcgetsize.header.cresh_status);
+    }
+
+    printf("blk_sendmcgetsize: MCGETSIZE Success status: %x\n",
+           cmdresp_mcgetsize.header.cresh_status);
+
+    /* Return the resource's current size to the caller. */
+    *size = cmdresp_mcgetsize.r_size;
+
+    return(0);
+}
+
+/*************************************************************************
+ * NAME:  blk_sendmcclone
+ *
+ * FUNCTION:
+ *        This will send a MCCLONE cmd to the MC process
+ *
+ * PARAMETERS: None
+ *
+ * RETURNS:    None
+ ************************************************************************/
+int
+blk_sendmcclone(dev_t adap_devno, ctx_handle_t ctx_handle,
+                res_handle_t r_handle, res_handle_t *new_r_handle)
+{
+    CReqBlk_MCClone_t cmd_mcclone;
+    CRespBlk_MCClone_t cmdresp_mcclone;
+    ctx_id_t
ctx_id = CTX_ID(ctx_handle);
+    int rc;
+
+    memset(&cmd_mcclone, '\0', sizeof(CReqBlk_MCClone_t));
+    memset(&cmdresp_mcclone, '\0', sizeof(CRespBlk_MCClone_t));
+
+    cmd_mcclone.header.creqh_command = CMDBLK_CMD_MCCLONE;
+    cmd_mcclone.header.creqh_cmdsize = sizeof(CReqBlk_MCClone_t);
+
+    cmdresp_mcclone.header.cresh_respsize = sizeof(CRespBlk_MCClone_t);
+
+    /* FIX(review): the request was sent with all identifier fields
+     * zeroed -- none of the parameters were copied in, so the MC
+     * process could never know which resource to clone.  Field names
+     * follow the pattern of the other CReqBlk_* request blocks --
+     * TODO confirm against the CReqBlk_MCClone_t definition.
+     */
+    cmd_mcclone.ctx_handle = ctx_handle;
+    cmd_mcclone.adap_devno = adap_devno;
+    cmd_mcclone.r_handle = r_handle;
+
+    rc = blk_send_command(conn_fd, &cmd_mcclone, &cmdresp_mcclone);
+
+    if (cmdresp_mcclone.header.cresh_status != 0) {
+        printf("blk_sendmcclone: MCCLONE failed - status: %x \n",
+               cmdresp_mcclone.header.cresh_status);
+        return(cmdresp_mcclone.header.cresh_status);
+    }
+
+    printf("blk_sendmcclone: MCCLONE Success status: %x\n",
+           cmdresp_mcclone.header.cresh_status);
+
+    /* NOTE(review): *new_r_handle is never written, so the caller's
+     * output stays uninitialized.  The response field carrying the new
+     * handle is not visible here; TODO copy it out once known.
+     */
+    return(0);
+}
+
+/*************************************************************************
+ * NAME:  blk_sendmctxlba
+ *
+ * FUNCTION:
+ *        This will send a MCTXLBA cmd to the MC process
+ *
+ * PARAMETERS: None
+ *
+ * RETURNS:    None
+ ************************************************************************/
+int
+blk_sendmctxlba(dev_t adap_devno, ctx_handle_t ctx_handle,
+                res_handle_t r_handle,
+                int64_t v_lba, int64_t *p_lba)
+{
+    CReqBlk_MCTxLBA_t cmd_mctxlba;
+    CRespBlk_MCTxLBA_t cmdresp_mctxlba;
+    ctx_id_t ctx_id = CTX_ID(ctx_handle);
+    int rc;
+
+    memset(&cmd_mctxlba, '\0', sizeof(CReqBlk_MCTxLBA_t));
+    memset(&cmdresp_mctxlba, '\0', sizeof(CRespBlk_MCTxLBA_t));
+
+    cmd_mctxlba.header.creqh_command = CMDBLK_CMD_MCTXLBA;
+    cmd_mctxlba.header.creqh_cmdsize = sizeof(CReqBlk_MCTxLBA_t);
+
+    cmdresp_mctxlba.header.cresh_respsize = sizeof(CRespBlk_MCTxLBA_t);
+
+    cmd_mctxlba.ctx_handle = ctx_handle;
+    cmd_mctxlba.adap_devno = adap_devno;
+    cmd_mctxlba.r_handle = r_handle;
+    cmd_mctxlba.v_lba = v_lba;
+
+    rc = blk_send_command(conn_fd, &cmd_mctxlba, &cmdresp_mctxlba);
+
+    if (cmdresp_mctxlba.header.cresh_status != 0) {
+        printf("blk_sendmctxlba: MCTXLBA failed - status: %x \n",
+               cmdresp_mctxlba.header.cresh_status);
+
return(cmdresp_mctxlba.header.cresh_status); + } + + printf("blk_sendmcgetsize: MCTXLBA Success status: %x\n", + cmdresp_mctxlba.header.cresh_status); + + *p_lba = cmdresp_mctxlba.p_lba; + + return(0); +} + +/************End of MC Block Allocator Command Functions**********************/ \ No newline at end of file diff --git a/src/common/cflash_scsi_user.c b/src/common/cflash_scsi_user.c new file mode 100644 index 00000000..71ce5246 --- /dev/null +++ b/src/common/cflash_scsi_user.c @@ -0,0 +1,1106 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/common/cflash_scsi_user.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include + +/* + * NAME: cflash_build_scsi_inquiry + * + * FUNCTION: Builds a SCSI Inquiry command for the specified + * page. + * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Inquiry command + * + * code_page - Code page to use for Inquiry. -1 indicates + * to use standard inquiry. + * + * data_len - Date length (in bytes) returned inquiry data + * buffer. 
This can not exceed 255 bytes + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_inquiry(scsi_cdb_t *scsi_cdb, int page_code,uint8_t data_size) +{ + int rc = 0; + + if (scsi_cdb == NULL) { + + errno = EINVAL; + return -1; + } + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * INQUIRY Command + *+=====-=======-=======-=======-=======-=======-=======-=======-=======+ + *| Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *|Byte | | | | | | | | | + *|=====+===============================================================| + *| 0 | Operation Code (12h) | + *|-----+---------------------------------------------------------------| + *| 1 | Reserved | CmdDT | EVPD | + *|-----+---------------------------------------------------------------| + *| 2 | Page or Operation Code | + *|-----+---------------------------------------------------------------| + *| 3 | Reserved | + *|-----+---------------------------------------------------------------| + *| 4 | Allocation Length | + *|-----+---------------------------------------------------------------| + *| 5 | Control | + *+=====================================================================+ + */ + + scsi_cdb->scsi_op_code = SCSI_INQUIRY; + if (page_code == -1) { + + /* Standard Inquiry */ + + scsi_cdb->scsi_bytes[0] = 0x00; + scsi_cdb->scsi_bytes[1] = 0x00; + + } else { + scsi_cdb->scsi_bytes[0] = 0x01; + scsi_cdb->scsi_bytes[1] = (uint8_t)(0xff & page_code); + } + + scsi_cdb->scsi_bytes[2] = 0x00; + scsi_cdb->scsi_bytes[3] = data_size; + scsi_cdb->scsi_bytes[4] = 0x00; + + return rc; +} + +/* + * NAME: cflash_process_scsi_inquiry_std_data + * + * FUNCTION: Process the Standard Inquiry data + * + * + * + * INPUTS: + * std_inq_data - Pointer to returned standard inquiry data. + * + * inq_data_len - Length of inquiry data. + * + * vendor_id - returned vendor id found. Must be 8 bytes in + * in size. + * + * product_id - returned product id found. Must be 8 bytes in + * in size. 
+ *
+ * RETURNS:
+ *      0 for good completion, ERRNO on error
+ *
+ */
+
+int cflash_process_scsi_inquiry(void *std_inq_data,int inq_data_len,
+                                void *vendor_id,void *product_id)
+{
+    int rc = 0;
+    struct inqry_data *inq_data;
+    uint8_t ansi_type;
+
+    /* FIX(review): std_inq_data is dereferenced below but was the only
+     * pointer argument not checked for NULL -- guard it like the others.
+     */
+    if ((std_inq_data == NULL) ||
+        (vendor_id == NULL) ||
+        (product_id == NULL)) {
+
+        return EINVAL;
+    }
+
+    inq_data = std_inq_data;
+
+    if ((inq_data->pdevtype & SCSI_PDEV_MASK) != SCSI_DISK) {
+
+        /*
+         * This is not a disk device. Fail the request.
+         */
+
+        return EINVAL;
+
+    }
+    ansi_type = inq_data->versions & 0x7;
+
+    if (!((ansi_type >= 0x3) && (inq_data->resdfmt & NORMACA_BIT) &&
+          (inq_data->flags2 & CMDQUE_BIT))) {
+        /*
+         * This device does not support both
+         * NACA and command tag queuing.
+         *
+         * TODO Maybe we should not impose this now, since
+         * we are not using it.
+         */
+
+        return EINVAL;
+    }
+
+    /* NOTE(review): inq_data_len is never validated; callers should
+     * supply at least sizeof(struct inqry_data) -- TODO add a check.
+     */
+    bcopy(inq_data->vendor,vendor_id,8);
+
+    bcopy(inq_data->product,product_id,8);
+
+    return rc;
+}
+
+/*
+ * NAME:        cflash_process_scsi_inquiry_dev_id_page
+ *
+ * FUNCTION:    Process the Inquiry data from the device ID
+ *              code page (0x83).
+ *
+ *
+ *
+ * INPUTS:
+ *              inq_data     - Pointer to returned inquiry data.
+ *
+ *              inq_data_len - Length of inquiry data.
+ *
+ *              wwid         - returned (lun) world wide id.
+ *
+ * RETURNS:
+ *      0 for good completion, ERRNO on error
+ *
+ */
+
+int cflash_process_scsi_inquiry_dev_id_page(void *inq_data,int inq_data_len,char *wwid)
+{
+    char identifier_type;
+    char *ucp = (char *) NULL;
+    char *end_of_page = (char *) NULL;
+    char *iddesc_p = (char *) NULL;
+    char tmp_buf[SCSI_INQSIZE+1];
+    char *inqdata = inq_data;
+    int rc = 0,
+        page_length = 0,
+        id_len = 0,
+        id_desc_size = 0,
+        id_codeset = 0;
+
+    unsigned long long serial_no_64;
+
+    /* Initialize wwid to a NULL string */
+    *wwid = '\0';
+
+    /*
+     * Obtain the page_length -- actually the amount of space
+     * beyond the device id header stuff allocated for holding
+     * all the ID descriptors.
+ */ + page_length = (int) *(inqdata + + CFLASH_INQ_DEVICEID_PAGELEN_OFFSET); + + if (page_length > (inq_data_len - CFLASH_INQ_DEVICEID_PAGELEN_OFFSET)) { + /* + * If the specified page_length exceeds our inquiry data, then + * trunc it to the max available. + */ + page_length = inq_data_len - CFLASH_INQ_DEVICEID_PAGELEN_OFFSET; + } + + /* + * Now form a ptr. to the last offset within the code page. + * Note that owing to the possible adjustment of page_length + * above, end_of_page is also bounded by the size of the + * inquiry data. + */ + end_of_page = inqdata + page_length + + CFLASH_INQ_DEVICEID_PAGELEN_OFFSET; + + /* + * Set our pointer to the first ID descriptor. + */ + ucp = inqdata + CFLASH_INQ_DEVICEID_IDDESC_LIST_OFFSET; + + /* + * Walk through ID descriptors, searching for ID descriptor associated + * with the LUN. + */ + + DEBUG_PRNT("%s: at start of walk id desc, first at 0x%08X\n", + "sas_get_wwid",ucp); + DEBUG_PRNT(" and end_of_page is 0x%08X\n", end_of_page); + + + while (!iddesc_p && (ucp < end_of_page)) { + + DEBUG_PRNT("%s: Looking at id_descriptor at 0x%08X\n", + "sas_get_wwid", + ucp); + + /* + * Check if this ID type is one of the ones we support. + */ + identifier_type = (char) (*(ucp + + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_OFFSET) & + ((char) CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_MASK)); + + /* + * Check if the the ID Descriptor is for the LUN + */ + if ( ( (char) *(ucp + CFLASH_INQ_DEVICEID_IDDESC_ASSOC_OFFSET) & + CFLASH_INQ_DEVICEID_IDDESC_ASSOC_MASK) == + CFLASH_INQ_DEVICEID_IDDESC_ASSOC_LUN ) { + + /* ID Descriptor is for addressed LUN */ + + DEBUG_PRNT ("%s: Found ID Desc for address LUN\n", + "sas_get_wwid"); + + if ((identifier_type == + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NAA) || + (identifier_type == + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_EUI64) ) { + /* If ID Descriptor is NAA Type or EUI64. Then + * store LUN ID Descriptor + */ + + DEBUG_PRNT("ID Desc is type NAA or EUI64. 
type %x\n", + identifier_type); + iddesc_p = ucp; + } + } + + /* + * Get the overall size of this ID descriptor so we can advance + * to the next one. + */ + id_desc_size = CFLASH_INQ_DEVICEID_IDESC_HEADER_SIZE + + (int) *(ucp + CFLASH_INQ_DEVICEID_IDDESC_IDLEN_OFFSET); + ucp += id_desc_size; + } /* end-while !iddesc_p && (ucp < end_of_page) */ + + if (iddesc_p) { + /* Found a valid iddesc_p, update ww_id attribute with value */ + + DEBUG_PRNT("%s: updating %llx,%llx \n", + "sas_get_wwid", sid, lun); + + /* + * Grab the id length. + */ + id_len = (int)*(iddesc_p + + CFLASH_INQ_DEVICEID_IDDESC_IDLEN_OFFSET); + + DEBUG_PRNT("%s: id_len = %d = 0x%x\n", "sas_get_wwid", id_len, id_len); + + /* + * The format (as indicated by id_codeset) of the identifier + * will be either in binary or ascii. + */ + id_codeset = (int) *(iddesc_p + + CFLASH_INQ_DEVICEID_IDDESC_CODESET_OFFSET) & + CFLASH_INQ_DEVICEID_IDDESC_CODESET_MASK; + + /* + * Ensure that identifier isn't specified incorrectly such that + * the length would put us past the end of the entire code page. + */ + if ((iddesc_p + id_len + + CFLASH_INQ_DEVICEID_IDDESC_IDENT_OFFSET - 1) > end_of_page) { + + DEBUG_PRNT("%s: id_len %d puts us past the entire code page\n", + "sas_get_wwid", id_len); + + return(EINVAL); + } + + /* + * Ensure that the id_len isn't too long for the target buf. + */ + if (id_codeset == CFLASH_INQ_DEVICEID_IDDESC_CODESET_ASCII) { + if ((id_len+1) > SCSI_INQSIZE) { + + DEBUG_PRNT("%s: code page 0x83 ASCII id_len %d > INQ_SIZE\n", + "sas_get_wwid", id_len); + + return(EINVAL); + } + } + else { + /* + * Code is binary, so representing in ASCII will require + * two bytes per binary byte. 
+ */ + if (((id_len * 2)+1) > SCSI_INQSIZE) { + + DEBUG_PRNT("%s: code page 0x83 BINARY id_len %d * 2 > INQ_SIZE\n", + "sas_get_wwid", id_len); + + return(EINVAL); + } + } + + DEBUG_PRNT("%s: identifer_type = %d = 0x%x\n", "sas_get_wwid", + identifier_type, identifier_type); + + + /* + * The following ID types have a specified length. Sanity check + * that they conform to the spec. + */ + if (identifier_type == CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_EUI64) { + /* + * Given the particular identifier types satisfied by the + * condition above, we assume that the data herein is + * ALWAYS binary and of a fixed-length called out in + * the scsi spec. + */ + + + if (sizeof(serial_no_64) != CFLASH_INQ_DEVICEID_IDDESC_EUI64_LEN) { + return(EINVAL); + } + + } + else { + if ((identifier_type != + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NOAUTH) && + (identifier_type != + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_VENDOR_PLUS) && + (identifier_type != + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NAA)) { + + DEBUG_PRNT("%s:For code page 0x83, dont support idtype 0x%02x\n", + "sas_get_wwid", identifier_type); + + return(EINVAL); + } + } + + switch(id_codeset) { + case CFLASH_INQ_DEVICEID_IDDESC_CODESET_BIN: + /* + * Data is binary. 
+ */ + if ((identifier_type == + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NOAUTH) || + (identifier_type == + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_VENDOR_PLUS) || + (identifier_type == + CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NAA) ) { + + int j; + char *ucp2; + + + ucp = iddesc_p + CFLASH_INQ_DEVICEID_IDDESC_IDENT_OFFSET; + tmp_buf[0] = '\0'; + ucp2 = tmp_buf; + for (j = 0; j < id_len; j++, ucp++) { + ucp2 += sprintf(ucp2,"%02x", *ucp); + } + *ucp2 = '\0'; + strcpy(wwid, tmp_buf); + } + else { + memcpy((void *) &serial_no_64, + (void *) (iddesc_p + + CFLASH_INQ_DEVICEID_IDDESC_IDENT_OFFSET), + (size_t) id_len); + + sprintf(wwid,"%016llx",serial_no_64); + } + break; + + case CFLASH_INQ_DEVICEID_IDDESC_CODESET_ASCII: + strncpy(wwid, iddesc_p + + CFLASH_INQ_DEVICEID_IDDESC_IDENT_OFFSET, + id_len); + if (strlen(wwid) < 1) { + /* + * For some reason the src string was all blanks + * so just set it to empty. + */ + *wwid = '\0'; + } + break; + + default: + /* + * Unknown id_codeset. + */ + return(EINVAL); + } + } + + return rc; +} + + +/* + * NAME: cflash_build_scsi_tur + * + * FUNCTION: Builds a SCSI Test Unit Ready command. + * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Test Unit Read + * command. 
+ * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_tur(scsi_cdb_t *scsi_cdb) +{ + int rc = 0; + + if (scsi_cdb == NULL) { + + errno = EINVAL; + return -1; + } + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* TEST UNIT READY Command + *+=====-=======-=======-=======-=======-=======-=======-=======-=======+ + *| Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *|Byte | | | | | | | | | + *|=====+===============================================================| + *| 0 | Operation Code (00h) | + *|-----+---------------------------------------------------------------| + *| 1 | Logical Unit Number | Reserved | + *|-----+---------------------------------------------------------------| + *| 2 | Reserved | + *|-----+---------------------------------------------------------------| + *| 3 | Reserved | + *|-----+---------------------------------------------------------------| + *| 4 | Reserved | + *|-----+---------------------------------------------------------------| + *| 5 | Control | + *+=====================================================================+ + */ + + scsi_cdb->scsi_op_code = SCSI_TEST_UNIT_READY; + + return rc; +} + +/* + * NAME: cflash_build_scsi_report_luns + * + * FUNCTION: Builds a SCSI Report Luns command. 
+ * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Report Luns command + * + * length_list - Length List (in bytes) of returned list of luns + + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_report_luns(scsi_cdb_t *scsi_cdb, uint32_t length_list) +{ + int rc = 0; + + if (scsi_cdb == NULL) { + + errno = EINVAL; + return -1; + } + + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * + * REPORT LUNS command + * +====-======-======-======-======-======-======-======-======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte| | | | | | | | | + * |=====+======================================================| + * | 0 | Operation code (A0h) | + * |-----+------------------------------------------------------| + * | 1 | Reserved | + * |-----+-- ---| + * | 5 | | + * |-----+------------------------------------------------------| + * | 6 |(MSB) | + * |- - -+- - Allocation length - -| + * | 9 | (LSB)| + * |-----+------------------------------------------------------| + * | 10 | Reserved | + * |-----+------------------------------------------------------| + * | 11 | Control | + * +============================================================+ + + */ + + scsi_cdb->scsi_op_code = SCSI_REPORT_LUNS; + + scsi_cdb->scsi_bytes[5] = (length_list >>24) & 0xff; + scsi_cdb->scsi_bytes[6] = (length_list >> 16) & 0xff; + scsi_cdb->scsi_bytes[7] = (length_list >> 8) & 0xff; + scsi_cdb->scsi_bytes[8] = length_list & 0xff; + + + return rc; +} + + +/* + * NAME: cflash_process_scsi_report_luns + * + * FUNCTION: Processes the returned lun list from the SCSI + * Report Luns command. + * + * + * INPUTS: + * lun_list_rsp - Pointer to lin list returned from Report + * Luns command. + * + * length_list - List (in bytes) of the lun_list_rsp buffer. + * + * actual_lun_list - List of Lun ids returned (header removed) + * It is assumed this buffer is large enough + * hold the full number of lun_ids that + * can fit in the returned lun_list_rsp. 
+ * + * num_actual_luns - Number of lun ids in the actual_lun_list. + * + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_process_scsi_report_luns(void *lun_list_rsp, uint32_t length_list, + uint64_t **actual_lun_list, int *num_actual_luns) +{ + int rc = 0; + char *lun_table; /* data returned from Report */ + /* Luns command. */ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + uint64_t *lun_ptr; + int i; +#endif /* CFLASH_LITTLE_ENDIAN_HOST */ + + struct lun_list_hdr *list_hdr; /* Header of data returned from*/ + /* Report Luns command. */ + + + + if (lun_list_rsp == NULL) { + + errno = EINVAL; + return -1; + } + + /* + * Report luns command was successful. + * Now verify if the list contains all valid + * luns for this device. + */ + + + /* + * SCSI-3 (SPC Rev 8) LUN reporting parameter list format + * +=====-====-====-====-====-====-====-====-====+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================================| + * | 0 | (MSB) | + * |- - -+- - LUN list length (n-7) - -| + * | 3 | (LSB) | + * |-----+---------------------------------------| + * | 4 | | + * |- - -+- - Reserved - -| + * | 7 | | + * |=====+=======================================| + * | | LUN list | + * |=====+=======================================| + * | 8 | (MSB) | + * |- - -+- - LUN - -| + * | 15 | (LSB) | + * |-----+---------------------------------------| + * | | . | + * | | . | + * | | . | + * |-----+---------------------------------------| + * | n-7 | (MSB) | + * |- - -+- - LUN - -| + * | n | (LSB) | + * +=============================================+ + */ + + + list_hdr = lun_list_rsp; + lun_table = lun_list_rsp; + + + + if ((CFLASH_FROM_ADAP32(list_hdr->lun_list_length)) > length_list ) { + /* + * Lun table does not contain all lun ids. + * So retry the Report Luns command again with + * a lun table of the correct length. 
+ * NOTE: In the situation where the lun list allocated + * is not big enough for all luns, the returned + * lun_list_length will indicate how big it should be. + */ + + return EAGAIN; + } else { + + /* + * lun table contains all lun ids that we + * could extract for this SCSI id. So copy them + * out for the caller. + * + * NOTE: list_hdr->lun_list_length is the length + * in bytes of the lun list after the + * header. + */ + + if ((CFLASH_FROM_ADAP32(list_hdr->lun_list_length)) == 0) { + + + /* + * No luns found. + */ + + *actual_lun_list = NULL; + rc = ENODEV; + + + } else if ((CFLASH_FROM_ADAP32(list_hdr->lun_list_length)) > sizeof(*list_hdr)) { + + /* + * Return the list of luns after the header to the + * the caller if it is a multiple of 8 in length + * + * NOTE: We return the number of valid lun ids found. + * If lun_list_length is not a multiple of 8, + * then the last fragment is not counted as lun id. + */ + + + *num_actual_luns = (CFLASH_FROM_ADAP32(list_hdr->lun_list_length)) / sizeof(uint64_t); + + if ((CFLASH_FROM_ADAP32(list_hdr->lun_list_length)) % sizeof(uint64_t)) { + + *actual_lun_list = NULL; + rc = EINVAL; + } else { + *actual_lun_list = (uint64_t *)&lun_table[START_LUNS]; +#ifdef CFLASH_LITTLE_ENDIAN_HOST + lun_ptr = (uint64_t *)&lun_table[START_LUNS]; + for (i=0; i < *num_actual_luns; i++) { + lun_ptr[i] = CFLASH_FROM_ADAP64(lun_ptr[i]); + + } +#endif /* CFLASH_LITTLE_ENDIAN_HOST */ + } + } else { + + *actual_lun_list = NULL; + } + + } + + return rc; +} + +/* + * NAME: cflash_build_scsi_mode_sense_10 + * + * FUNCTION: Builds a SCSI Mode Sense 10 command. + * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Mode Sense 10 command + * + * data_len - Date length (in bytes) returned mode sense data + * buffer. 
This can not exceed 64K bytes + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_mode_sense_10(scsi_cdb_t *scsi_cdb, + uint16_t data_len, int flags) +{ + int rc = 0; + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * MODE SENSE(10) Command + *+=====-=======-=======-=======-=======-=======-=======-=======-=======+ + *| Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *|Byte | | | | | | | | | + *|=====+===============================================================| + *| 0 | Operation Code (5Ah) | + *|-----+---------------------------------------------------------------| + *| 1 | Resvd | DBD | Reserved | + *|-----+---------------------------------------------------------------| + *| 2 | PC | Page Code | + *|-----+---------------------------------------------------------------| + *| 3 | Reserved | + *|-----+---------------------------------------------------------------| + *| 4 | Reserved | + *|-----+---------------------------------------------------------------| + *| 5 | Reserved | + *|-----+---------------------------------------------------------------| + *| 6 | Reserved | + *|-----+---------------------------------------------------------------| + *| 7 | (MSB) | + *|-----+-- Allocation Length ---| + *| 8 | (LSB) | + *|-----+---------------------------------------------------------------| + *| 9 | Control | + *+=====================================================================+ + * + */ + + scsi_cdb->scsi_op_code = SCSI_MODE_SENSE_10; + + /* + * Set Page Code byte to request all supported pages....set Page + * Control bits appropriately depending if we want changeable or + * current mode data + */ + + + scsi_cdb->scsi_bytes[1] = (0x3F | + ((flags & SCSI_MODE_SNS_CHANGEABLE) ? 
+ 0x40 : 0x0)); + scsi_cdb->scsi_bytes[6] = + (uint8_t) ((data_len >> 8) & 0xff); + scsi_cdb->scsi_bytes[7] = + (uint8_t)(data_len & 0xff); + + + + return rc; +} + +/* + * NAME: cflash_build_scsi_mode_select_10 + * + * FUNCTION: Builds a SCSI Mode Select 10 command. + * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Mode Select 10 command + * + * data_len - Date length (in bytes) returned mode sense data + * buffer. This can not exceed 64K bytes + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_mode_select_10(scsi_cdb_t *scsi_cdb, + uint16_t data_len, int flags) +{ + int rc = 0; + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * MODE SELECT(10) Command + *+=====-=======-=======-=======-=======-=======-=======-=======-=======+ + *| Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *|Byte | | | | | | | | | + *|=====+===============================================================| + *| 0 | Operation Code (55h) | + *|-----+---------------------------------------------------------------| + *| 1 | Reserved | PF | Reserved | SP | + *|-----+---------------------------------------------------------------| + *| 2 | Reserved | + *|-----+---------------------------------------------------------------| + *| 3 | Reserved | + *|-----+---------------------------------------------------------------| + *| 4 | Reserved | + *|-----+---------------------------------------------------------------| + *| 5 | Reserved | + *|-----+---------------------------------------------------------------| + *| 6 | Reserved | + *|-----+---------------------------------------------------------------| + *| 7 | (MSB) | + *|-----+-- Parameter List Length ---| + *| 8 | (LSB) | + *|-----+---------------------------------------------------------------| + *| 9 | Control | + *+=====================================================================+ + * + */ + + scsi_cdb->scsi_op_code = SCSI_MODE_SELECT_10; + + + /* + * Indicates mode pages comply to Page Format, by setting 
+ * PF bit + */ + scsi_cdb->scsi_bytes[0] = 0x10; + scsi_cdb->scsi_bytes[6] = + (uint8_t) ((data_len >> 8) & 0xff); + scsi_cdb->scsi_bytes[7] = + (uint8_t)(data_len & 0xff); + + + + return rc; +} + +/* + * NAME: cflash_build_scsi_read_cap + * + * FUNCTION: Builds a SCSI Read Capacity (10 command. + * + * NOTE: There is no data length for Read Capacity since it is assumed + * to always be 8 bytes. + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Read Capacity 10 command + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_read_cap(scsi_cdb_t *scsi_cdb) +{ + int rc = 0; + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * READ CAPACITY Command + * +=====-=======-=======-=======-=======-=======-=======-=======-=======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+===============================================================| + * | 0 | Operation Code (25h) | + * |-----+---------------------------------------------------------------| + * | 1 | Logical Unit Number | Reserved |RelAdr | + * |-----+---------------------------------------------------------------| + * | 2 | (MSB) | + * |-----+----- -------| + * | 3 | Logical Block Address | + * |-----+----- -------| + * | 4 | | + * |-----+----- -------| + * | 5 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 6 | Reserved | + * |-----+---------------------------------------------------------------| + * | 7 | Reserved | + * |-----+---------------------------------------------------------------| + * | 8 | Reserved | PMI | + * |-----+---------------------------------------------------------------| + * | 9 | Control | + * +=====================================================================+ + */ + + scsi_cdb->scsi_op_code = SCSI_READ_CAPACITY; + + return rc; +} + +/* + * NAME: cflash_build_scsi_read_cap16 + * + * FUNCTION: Builds a SCSI Read Capacity 16 command. 
+ * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Read Capacity 16 command + * + * data_len - Date length (in bytes) of returned Read Capacity 16 + * data buffer. This can not exceed 255 bytes. Ideally + * this should sizeof(struct readcap16_data). + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_read_cap16(scsi_cdb_t *scsi_cdb, uint8_t data_len) +{ + int rc = 0; + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * SERVICE ACTION IN Command for issuing + * READ CAPACITY 16 + * +=====-=======-=======-=======-=======-=======-=======-=======-=======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+===============================================================| + * | 0 | Operation Code (9Eh) | + * |-----+---------------------------------------------------------------| + * | 1 | Reserved | SERVICE ACTION (10h) | + * |-----+---------------------------------------------------------------| + * | 2 | (MSB) | + * |-----+----- -------| + * | 3 | | + * |-----+----- -------| + * | 4 | | + * |-----+----- -------| + * | 5 | Logical Block Address | + * |-----+----- -------| + * | 6 | | + * |-----+----- -------| + * | 7 | | + * |-----+----- -------| + * | 8 | | + * |-----+----- -------| + * | 9 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 10 | (MSB) | + * |-----+----- --------| + * | 11 | | + * |-----+----- Allocation Length --------| + * | 12 | | + * |-----+----- --------| + * | 13 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 14 | Reserved | PMI | + * |-----+---------------------------------------------------------------| + * | 15 | Control | + * +=====================================================================+ + */ + + scsi_cdb->scsi_op_code = SCSI_SERVICE_ACTION_IN; + scsi_cdb->scsi_bytes[0] = SCSI_READ_CAP16_SRV_ACT_IN; + scsi_cdb->scsi_bytes[12] = data_len; + + return rc; +} + + +/* + * NAME: 
cflash_process_scsi_read_cap16 + * + * FUNCTION: Processes the returned lun list from the SCSI + * Report Luns command. + * + * + * INPUTS: + * lun_list_rsp - Pointer to lin list returned from Report + * Luns command. + * + * length_list - List (in bytes) of the lun_list_rsp buffer. + * + * actual_lun_list - List of Lun ids returned (header removed) + * It is assumed this buffer is large enough + * hold the full number of lun_ids that + * can fit in the returned lun_list_rsp. + * + * num_actual_luns - Number of lun ids in the actual_lun_list. + * + * + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_process_scsi_read_cap16(struct readcap16_data *readcap16_data, uint32_t *block_size, + uint64_t *last_lba) +{ + int rc = 0; + + + if (readcap16_data == NULL) { + + return -1; + } + + *block_size = CFLASH_FROM_ADAP32(readcap16_data->len); + *last_lba = CFLASH_FROM_ADAP64(readcap16_data->lba); + + return rc; +} + +/* + * NAME: cflash_build_scsi_request_sense + * + * FUNCTION: Builds a SCSI Request Sense command. + * + * + * INPUTS: + * cdb - Pointer to SCSI CDB to contain Request Sense command + * + * data_len - Date length (in bytes) of returned sense data. 
+ * This can not exceed 255 bytes + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int cflash_build_scsi_request_sense(scsi_cdb_t *scsi_cdb, uint8_t data_len) +{ + int rc = 0; + + bzero(scsi_cdb,sizeof(*scsi_cdb)); + + /* + * REQUEST SENSE Command + *+=====-=======-=======-=======-=======-=======-=======-=======-=======+ + *| Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *|Byte | | | | | | | | | + *|=====+===============================================================| + *| 0 | Operation Code (03h) | + *|-----+---------------------------------------------------------------| + *| 1 | Logical Unit Number | Reserved | + *|-----+---------------------------------------------------------------| + *| 2 | Reserved | + *|-----+---------------------------------------------------------------| + *| 3 | Reserved | + *|-----+---------------------------------------------------------------| + *| 4 | Allocation Length | + *|-----+---------------------------------------------------------------| + *| 5 | Control | + *+=====================================================================+ + */ + + scsi_cdb->scsi_op_code = SCSI_REQUEST_SENSE; + scsi_cdb->scsi_bytes[3] = data_len; + + return rc; +} diff --git a/src/common/cflash_tools_user.c b/src/common/cflash_tools_user.c new file mode 100644 index 00000000..a93d5582 --- /dev/null +++ b/src/common/cflash_tools_user.c @@ -0,0 +1,154 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/common/cflash_tools_user.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + + +#include +#include +#include + +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && !_MACOSX */ + +#include + +/* + * NAME: dumppage + * + * FUNCTION: Dumps a page of data to the screen. + * + * + * INPUTS: + * buffer - Buffer to dump data. + * size - Size of buffer. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ +void +dumppage(void *buffer, int size) +{ + int i; + + for (i =0 ; i=32 && j<=126) + str[k++] = (char) j; + else + str[k++] = '.'; + fprintf(stderr,"%02x ",j); + + /* Add an extra space at 8 byte bndry */ + if (!(i%8)) + { + fprintf(stderr," "); + str[k++] = ' '; + } + + /* Print the ascii at 16 byte bndry */ + if (!(i%16)) + { + str[k] = '\0'; + fprintf(stderr," %s\n",str); + k = 0; + } + } + + /* If we didn't end on an even 16 byte bndry, print ascii for partial + * line. */ + if ((j = i%16)) { + /* First, space over to ascii region */ + while (i%16) + { + /* Extra space at 8 byte bndry--but not if we + * started there (was already inserted) */ + if (!(i%8) && j != 8) + fprintf(stderr," "); + fprintf(stderr," "); + i++; + } + /* Terminate the ascii and print it */ + str[k]='\0'; + fprintf(stderr," %s\n",str); + } + fflush(stderr); + + return; +} diff --git a/src/common/makefile b/src/common/makefile new file mode 100644 index 00000000..ea0d9e59 --- /dev/null +++ b/src/common/makefile @@ -0,0 +1,38 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. 
+# +# $Source: src/common/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../.. + +OBJS = cflash_scsi_user.o cflash_tools_user.o + +OBJS64 = cflash_scsi_user.64o cflash_tools_user.64o + +MODULE = cflshcom + +EXPFLAGS = -bexpall + +SUBDIRS = + + +include ${ROOTPATH}/config.mk diff --git a/src/include/afu.h b/src/include/afu.h new file mode 100644 index 00000000..480a39be --- /dev/null +++ b/src/include/afu.h @@ -0,0 +1,72 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/afu.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _AFU_MEMCOPY_H_ +#define _AFU_MEMCOPY_H_ + +#include + +#ifndef VERBOSE +#define VERBOSE 3 +#endif + +#if VERBOSE > 0 +#define debug printf +#else +#define debug (...) +#endif + +struct afu { + int fd; /* file descriptor */ + void *ps_addr; /* problem state registers */ + struct cxl_ioctl_start_work work; + __u32 process_element; + int started; /* AFU state */ +}; + +// Create and open AFU device then map MMIO registers +struct afu *afu_map (); + +// Unmap AFU device +void afu_unmap (struct afu *afu); + +// Set WED address and have PSL send reset and start to AFU +void afu_start (struct afu *afu); + +// MMIO write based on AFU offset, 32-bit +void afu_mmio_write_sw (struct afu *afu, unsigned offset, __u32 value); + +// MMIO write based on AFU offset, 64-bit +void afu_mmio_write_dw (struct afu *afu, unsigned offset, __u64 value); + +// MMIO read based on AFU offset, 32-bit +void afu_mmio_read_sw (struct afu *afu, unsigned offset, __u32 *value); + +// MMIO read based on AFU offset, 64-bit +void afu_mmio_read_dw (struct afu *afu, unsigned offset, __u64 *value); + +// Wait for AFU to complete job +void afu_wait (struct afu *afu); + +#endif /* #define _AFU_MEMCOPY_H_ */ diff --git a/src/include/afu_fc.h b/src/include/afu_fc.h new file mode 100644 index 00000000..6f8709e4 --- /dev/null +++ b/src/include/afu_fc.h @@ -0,0 +1,121 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/afu_fc.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef AFU_FC_H +#define AFU_FC_H + + +/** + * @enum AFU_PORT_ID + * @brief Typedef of the two supported error log levels. + * Informational logs will be hidden from the user. Predictive logs will + * be reported. All predictive errors MUST have a callout. + * MAX_WWPNS_PER_ADAPTER must be the last item in the list! + */ +typedef enum +{ + AFU_PORT_ID_TOP = 0x00, + AFU_PORT_ID_BOTTOM = 0x01, + MAX_WWPNS_PER_ADAPTER // This MUST be last +} AFU_PORT_ID; + + +// FC module register address offset for each port (byte address) +#define FC_PORT_REG_SIZE 0x1000 +#define FC_PORT_BASE_OFFSET 0x2012000 +#define FC_PORT_MMAP_SIZE ((FC_PORT_BASE_OFFSET) + (MAX_WWPNS_PER_ADAPTER) * (FC_PORT_REG_SIZE)) + + +// FC module register offset (byte address) +#define FC_MTIP_REV 0x000 +#define FC_MTIP_SCRATCH 0x008 +#define FC_MTIP_CMDCONFIG 0x010 +#define FC_MTIP_STATUS 0x018 +#define FC_MTIP_INITTIMER 0x020 +#define FC_MTIP_EVENTTIME 0x028 +#define FC_MTIP_CREDIT 0x030 +#define FC_MTIP_BB_SCN 0x038 +#define FC_MTIP_RX_SF 0x040 +#define FC_MTIP_TX_SE 0x048 +#define FC_MTIP_TX_SF 0x050 +#define FC_MTIP_RX_AE 0x058 +#define FC_MTIP_RX_AF 0x060 +#define FC_MTIP_TX_AE 0x068 +#define FC_MTIP_TX_AF 0x070 +#define FC_MTIP_FRMLEN 0x078 +#define FC_MTIP_SD_RCFG_CMD 0x100 +#define FC_MTIP_SD_RCFG_WRDAT 0x108 +#define FC_MTIP_SD_RCFG_RDDAT 0x110 +#define FC_MTIP_TX_FRM_CNT 0x200 +#define FC_MTIP_TX_CRC_ERR_CNT 0x208 +#define FC_MTIP_RX_FRM_CNT 0x210 +#define FC_MTIP_RX_CRC_ERR_CNT 0x218 +#define 
FC_MTIP_RX_LGTH_ERR_CNT 0x220 +#define FC_MTIP_FRM_DISC_CNT 0x228 + +#define FC_PNAME 0x300 +#define FC_NNAME 0x308 +#define FC_PORT_ID 0x310 +#define FC_CONFIG 0x320 +#define FC_CONFIG2 0x328 +#define FC_STATUS 0x330 +#define FC_TIMER 0x338 +#define FC_E_D_TOV 0x340 +#define FC_ERROR 0x380 +#define FC_ERRCAP 0x388 +#define FC_ERRMSK 0x390 +#define FC_ERRINJ 0x3A0 +#define FC_TGT_D_ID 0x400 +#define FC_TGT_PNAME 0x408 +#define FC_TGT_NNAME 0x410 +#define FC_TGT_LOGI 0x418 +#define FC_TGT_B2BCR 0x420 +#define FC_TGT_E_D_TOV 0x428 +#define FC_TGT_CLASS3 0x430 +#define FC_CNT_CLKRATIO 0x500 +#define FC_CNT_FCREFCLK 0x508 +#define FC_CNT_PCICLK 0x510 +#define FC_CNT_TXRDWR 0x518 +#define FC_CNT_TXLOGI 0x520 +#define FC_CNT_TXDATA 0x528 +#define FC_CNT_LINKERR 0x530 +#define FC_CNT_CRCERR 0x538 +#define FC_CNT_CRCERR_RO 0x540 +#define FC_CNT_OTHERERR 0x548 +#define FC_CNT_TIMEOUT 0x550 +#define FC_CRC_THRESH 0x580 +#define FC_DBGDISP 0x600 +#define FC_DBGDATA 0x608 +#define FC_LOOPBACK_TXCNT 0x670 +#define FC_LOOPBACK_PASSCNT 0x678 +#define FC_LOOPBACK_ERRCNT 0x680 + +#define FC_MTIP_CMDCONFIG_ONLINE 0x20ull +#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ull + +#define FC_MTIP_STATUS_MASK 0x30ull +#define FC_MTIP_STATUS_ONLINE 0x20ull +#define FC_MTIP_STATUS_OFFLINE 0x10ull + +#endif diff --git a/src/include/capi_aix_types.h b/src/include/capi_aix_types.h new file mode 100644 index 00000000..4c6a5a24 --- /dev/null +++ b/src/include/capi_aix_types.h @@ -0,0 +1,38 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/capi_aix_types.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + + +#ifndef _H_CAPI_AIX_TYPES_H +#define _H_CAPI_AIX_TYPES_H + +typedef unsigned char __u8; +typedef unsigned short __u16; +typedef short __s16; +typedef unsigned int __u32; +typedef int __s32; +typedef unsigned long long __u64; + +#endif /* _H_CAPI_AIX_TYPES_H */ diff --git a/src/include/capiblock.h b/src/include/capiblock.h new file mode 100644 index 00000000..2abfec6b --- /dev/null +++ b/src/include/capiblock.h @@ -0,0 +1,336 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/capiblock.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _H_CFLASH_BLOCK +#define _H_CFLASH_BLOCK + +#include +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && !_NACOSX */ + +/* + * This header file specifies the API for the CAPI flash + * user space block layer. + */ + + +#define NULL_CHUNK_ID -1 + +typedef int chunk_id_t; +typedef uint64_t chunk_ext_arg_t; + +/************************************************************************/ +/* Chunk statistics */ +/************************************************************************/ + +typedef struct chunk_stats_s { + uint32_t block_size; /* Block size of this chunk. */ + uint32_t num_paths; /* Number of paths of this chunk. */ + uint64_t max_transfer_size; /* Maximum transfer size in */ + /* blocks of this chunk. */ + uint64_t num_reads; /* Total number of reads issued */ + /* via cblk_read interface */ + uint64_t num_writes; /* Total number of writes issued */ + /* via cblk_write interface */ + uint64_t num_areads; /* Total number of async reads */ + /* issued via cblk_aread interface */ + uint64_t num_awrites; /* Total number of async writes */ + /* issued via cblk_awrite interface*/ + uint32_t num_act_reads; /* Current number of reads active */ + /* via cblk_read interface */ + uint32_t num_act_writes; /* Current number of writes active */ + /* via cblk_write interface */ + uint32_t num_act_areads; /* Current number of async reads */ + /* active via cblk_aread interface */ + uint32_t num_act_awrites; /* Current number of async writes */ + /* active via cblk_awrite interface*/ + uint32_t max_num_act_writes; /* High water mark on the maximum */ + /* number of writes active at once */ + uint32_t max_num_act_reads; /* High water mark on the maximum */ + /* number of reads active at once */ + uint32_t max_num_act_awrites; /* High water mark on the maximum */ + /* number of asyync writes active */ + /* at once. 
*/ + uint32_t max_num_act_areads; /* High water mark on the maximum */ + /* number of asyync reads active */ + /* at once. */ + uint64_t num_blocks_read; /* Total number of blocks read */ + uint64_t num_blocks_written; /* Total number of blocks written */ + uint64_t num_errors; /* Total number of all error */ + /* responses seen */ + uint64_t num_aresult_no_cmplt; /* Number of times cblk_aresult */ + /* returned with no command */ + /* completion */ + uint64_t num_retries; /* Total number of all commmand */ + /* retries. */ + uint64_t num_timeouts; /* Total number of all commmand */ + /* time-outs. */ + uint64_t num_fail_timeouts; /* Total number of all commmand */ + /* time-outs that led to a command */ + /* failure. */ + uint64_t num_no_cmds_free; /* Total number of times we didm't */ + /* have free command available */ + uint64_t num_no_cmd_room ; /* Total number of times we didm't */ + /* have room to issue a command to */ + /* the AFU. */ + uint64_t num_no_cmds_free_fail; /* Total number of times we didn't */ + /* have free command available and */ + /* failed a request because of this*/ + uint64_t num_fc_errors; /* Total number of all FC */ + /* error responses seen */ + uint64_t num_port0_linkdowns; /* Total number of all link downs */ + /* seen on port 0. */ + uint64_t num_port1_linkdowns; /* Total number of all link downs */ + /* seen on port 1. */ + uint64_t num_port0_no_logins; /* Total number of all no logins */ + /* seen on port 0. */ + uint64_t num_port1_no_logins; /* Total number of all no logins */ + /* seen on port 1. */ + uint64_t num_port0_fc_errors; /* Total number of all general FC */ + /* errors seen on port 0. */ + uint64_t num_port1_fc_errors; /* Total number of all general FC */ + /* errors seen on port 1. 
*/ + uint64_t num_cc_errors; /* Total number of all check */ + /* condition responses seen */ + uint64_t num_afu_errors; /* Total number of all AFU error */ + /* responses seen */ + uint64_t num_capi_false_reads; /* Total number of all times */ + /* poll indicated a read was ready */ + /* but there was nothing to read. */ + uint64_t num_capi_read_fails; /* Total number of all */ + /* CXL_EVENT_READ_FAIL responses */ + /* seen. */ + uint64_t num_capi_adap_resets; /* Total number of all adapter */ + /* reset errors. */ + uint64_t num_capi_adap_chck_err;/* Total number of all check */ + /* adapter errors. */ + uint64_t num_capi_reserved_errs;/* Total number of all */ + /* CXL_EVENT_RESERVED responses */ + /* seen. */ + uint64_t num_capi_data_st_errs; /* Total number of all */ + /* CAPI data storage event */ + /* responses seen. */ + uint64_t num_capi_afu_errors; /* Total number of all */ + /* CAPI error responses seen */ + uint64_t num_capi_afu_intrpts; /* Total number of all */ + /* CAPI AFU interrupts for command */ + /* responses seen. */ + uint64_t num_capi_unexp_afu_intrpts; /* Total number of all of */ + /* unexpected AFU interrupts */ + uint64_t num_success_threads; /* Total number of pthread_creates */ + /* that succeed. */ + uint64_t num_failed_threads; /* Total number of pthread_creates */ + /* that failed. */ + uint64_t num_canc_threads; /* Number of threads we had to */ + /* cancel, which succeeded. */ + uint64_t num_fail_canc_threads; /* Number of threads we had to */ + /* cancel, but the cancel failed */ + uint64_t num_fail_detach_threads;/* Number of threads we detached */ + /* but the detach failed */ + uint64_t num_active_threads; /* Current number of threads */ + /* running. */ + uint64_t max_num_act_threads; /* Maximum number of threads */ + /* running simultaneously. 
*/ + uint64_t num_cache_hits; /* Total number of cache hits */ + /* seen on all reads */ + uint64_t num_reset_contexts; /* Total number of reset contexts */ + /* done */ + uint64_t num_reset_contxt_fails;/* Total number of reset context */ + /* failures */ + uint32_t primary_path_id; /* Primary path id */ + uint64_t num_path_fail_overs; /* Total number of times a request */ + /* has failed over to another path.*/ +} chunk_stats_t; + + +/************************************************************************/ +/* General flags */ +/************************************************************************/ + +#define CBLK_SCRUB_DATA_FLG 1 /* Scrub virtual lun data blocks, */ + /* when they are no longer in use. */ + + +#ifdef _AIX +typedef offset_t cflash_offset_t; +#else +typedef off_t cflash_offset_t; +#endif +/************************************************************************/ +/* Open flags */ +/************************************************************************/ + +#define CBLK_OPN_SCRUB_DATA CBLK_SCRUB_DATA_FLG + +#define CBLK_OPN_VIRT_LUN 2 /* Use a virtual lun */ + +#define CBLK_OPN_NO_INTRP_THREADS 4 /* Do not use back threads for */ + /* handling interrupts processing */ + +#define CBLK_OPN_SHARE_CTXT 8 /* Share context in same process */ + +#ifdef _AIX +#define CBLK_OPN_RESERVE 0x10 /* Tell master context to use */ + /* reservations on this lun. */ +#define CBLK_OPN_FORCED_RESERVE 0x20 /* Tell master context to break */ + /* reservations for this lun and */ + /* establish a new reservation */ +#define CBLK_OPN_MPIO_FO 0x40 /* Use multi-path I/O fail over */ + /* this lun. */ +#endif /* _AIX */ + + +/************************************************************************/ +/* cblk_aread and cblk_awrite flags */ +/************************************************************************/ + + +#define CBLK_ARW_WAIT_CMD_FLAGS 1 /* Wait for commmand for cblk_aread */ + /* or cblk_awrite. 
*/ +#define CBLK_ARW_USER_TAG_FLAG 2 /* The caller is specifying a user */ + /* defined tag for this request. */ +#define CBLK_ARW_USER_STATUS_FLAG 4 /* The caller has set the status */ + /* parameter to the address which it */ + /* expects command completion status */ + /* to be posted. */ +typedef enum { + + CBLK_ARW_STATUS_PENDING = 0, /* Command has not completed */ + CBLK_ARW_STATUS_SUCCESS = 1, /* Command completed successfully */ + CBLK_ARW_STATUS_INVALID = 2, /* Caller's request was invalid */ + CBLK_ARW_STATUS_FAIL = 3, /* Command completed with error */ +} cblk_status_type_t; + + +typedef struct cblk_arw_status_s { + cblk_status_type_t status; /* Status of command */ + /* See errno field for additional */ + /* details on failure. */ + size_t blocks_transferred; /* Number of blocks transferred for */ + /* this request. */ + int fail_errno; /* Errno when status indicates */ + /* CBLK_ARW_STAT_FAIL. */ +} cblk_arw_status_t; + +/************************************************************************/ +/* cblk_aresult flags */ +/************************************************************************/ + +#define CBLK_ARESULT_NEXT_TAG 1 /* cblk_aresult will return the tag */ + /* of the next async I/O to complete */ + /* for this chunk. If this flag is not*/ + /* set then caller should have passed */ + /* the address of the tag for which */ + /* they are waiting to complete. */ + +#define CBLK_ARESULT_BLOCKING 2 /* If set then cblk_aresult will block*/ + /* until the specified tag completes. */ + /* Otherwise cblk_aresult will return */ + /* immediately with a value of 1 if */ + /* the specified tag has not yet */ + /* completed */ + +#define CBLK_ARESULT_USER_TAG 4 /* If set then the tag parameter */ + /* specifies a user defined tag that */ + /* was provided when the cblk_aread */ + /* or cblk_awrite call was issued. 
*/ + +/************************************************************************/ +/* cblk_listio flags and structure */ +/************************************************************************/ + +#define CBLK_LISTIO_WAIT_ISSUE_CMD 1 /* Wait for commmand for all commands */ + /* in issue_io_list. */ + +typedef struct cblk_io { + uint8_t version; /* Version of structure */ + int flags; /* Flags for the request */ +#define CBLK_IO_USER_TAG 0x0001 /* Caller is specifying a user defined*/ + /* tag. */ +#define CBLK_IO_USER_STATUS 0x0002/* Caller is specifying a status */ + /* location to be updated. */ +#define CBLK_IO_PRIORITY_REQ 0x0004/* This is a (high) priority request */ + /* that should be expedited vs non- */ + /* priority requests. */ + uint8_t request_type; /* Type of request */ +#define CBLK_IO_TYPE_READ 0x01 /* Read data request */ +#define CBLK_IO_TYPE_WRITE 0x02 /* Write data request */ + void *buf; /* Data buffer for request. */ + cflash_offset_t lba; /* Starting logical block address for */ + /* request. */ + size_t nblocks; /* Size of request based on number of */ + /* blocks. */ + int tag; /* Tag for request */ + cblk_arw_status_t stat; /* Status of request. 
*/ +} cblk_io_t; + + +int cblk_init(void *arg,uint64_t flags); +int cblk_term(void *arg,uint64_t flags); + +chunk_id_t cblk_open(const char *path, int max_num_requests, int mode, chunk_ext_arg_t ext, int flags); +int cblk_close(chunk_id_t chunk_id,int flags); + +/* Determine number blocks on CAPI flash device (lun) */ +int cblk_get_lun_size(chunk_id_t chunk_id, size_t *nblocks, int flags); + +/* Determine number blocks on CAPI flash chunk */ +int cblk_get_size(chunk_id_t chunk_id, size_t *nblocks, int flags); + +/* Allocate/deallocate blocks on CAPI flash chunk */ +int cblk_set_size(chunk_id_t chunk_id, size_t nblocks, int flags); + +/* Get statistics for a CAPI flash chunk */ +int cblk_get_stats(chunk_id_t chunk_id, chunk_stats_t *stats, int flags); + +/* Blocking CAPI flash read */ +int cblk_read(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int flags); + +/* Blocking CAPI flash write */ +int cblk_write(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int flags); + +/* Asynchronous CAPI flash read */ +int cblk_aread(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, cblk_arw_status_t *status, int flags); + +/* Asynchronous CAPI flash write */ +int cblk_awrite(chunk_id_t chunk_id,void *buf,cflash_offset_t lba, size_t nblocks, int *tag, cblk_arw_status_t *status, int flags); + +/* Wait for completion and results of asynchronous read/write */ +int cblk_aresult(chunk_id_t chunk_id,int *tag, uint64_t *status, int flags); + +/* CAPI flash I/O request interface */ +int cblk_listio(chunk_id_t chunk_id, + cblk_io_t *issue_io_list[],int issue_items, + cblk_io_t *pending_io_list[], int pending_items, + cblk_io_t *wait_io_list[],int wait_items, + cblk_io_t *completion_io_list[],int *completion_items, + uint64_t timeout,int flags); + +/* Clone a chunk (such as a parent and chilld process' chunk */ +int cblk_clone_after_fork(chunk_id_t chunk_id, int mode, int flags); + +#endif /* _H_CFLASH_BLOCK */ diff --git 
a/src/include/cblk.h b/src/include/cblk.h new file mode 100644 index 00000000..74ce645e --- /dev/null +++ b/src/include/cblk.h @@ -0,0 +1,207 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cblk.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _CBLK_H +#define _CBLK_H + +#include +#include +#include + +/*************************************************************************** + This file defines IPC messages to the master context deamon. + + The daemon listens on a unique per AFU UNIX socket. The + name of the socket can be derived from the path name to the + master AFU device node as follows: + master_device = /dev/cxl/afu0.0m + corresponding socket = MC_SOCKET_DIR/dev/cxl/afu0.0m + If MC_SOCKET_DIR is /tmp, the socket is /tmp/dev/cxl/afu0.0m + + A client wishing to do direct IPC with the master selects the + correct socket name to connect to based on the target AFU. All + messages exchanged on that connection are executed by the daemon + in the context of the AFU implied by the socket name. 
+***************************************************************************/
+
+#define MC_SOCKET_DIR "/opt/ibm/capikv/data"
+
+#define XFER_OP_READ  1 // Read from the Blk-MC Socket
+#define XFER_OP_WRITE 2 // Write to the Blk-MC Socket
+
+/* Command opcodes in IPC requests */
+#define CMD_MCREG   1 // Register an AFU + context handle with MC
+#define CMD_MCUNREG 2 // Unregister a context handle with MC
+
+#define CMD_MCOPEN  3 // Get a new & zero sized Resource Handle
+#define CMD_MCCLOSE 4 // Free a Resource Handle
+
+#define CMD_MCSIZE  5 // Resize a Resource Handle (grow or shrink)
+#define CMD_UNUSED  6
+
+#define CMD_MCXLATE_LBA 7 // Translate Virtual LBA to Physical LBA
+
+#define CMD_MCCLONE 8 // Clone Resource Handles from a context
+
+#define CMD_MCDUP   9 // Dup one context to another
+
+#define CMD_MCSTAT  10 // Query a Resource Handle
+
+#define CMD_MCNOTIFY 11 // Notify master of certain events
+
+/* Fixed header prefixed to every IPC request sent to the master daemon. */
+typedef struct mc_req_header
+{
+    uint8_t version; // 0
+    uint8_t command; // one of the CMD_* opcodes above
+    uint8_t size;
+    uint8_t tag;     // command tag to identify the active request
+} mc_req_header_t;
+
+
+/* Fixed header prefixed to every IPC response from the master daemon. */
+typedef struct mc_resp_header
+{
+    uint8_t version; // 0
+    uint8_t command; // same as command in the request
+    uint8_t size;
+    uint8_t tag;     // same as tag in request
+    int status;      // 0: success, otherwise set to an errno value
+} mc_resp_header_t;
+
+typedef struct mc_req
+{
+    mc_req_header_t header;
+
+    union {
+        // The client sends a MCREG with the challenge field set to something
+        // the server can use to validate that the client is the true owner of
+        // the AFU context it wants to register. The specifics of this protocol
+        // is left to the implementation.
+        //
+        // Since the server remembers the afu (implicitly tied to connection) and
+        // the registered ctx_hndl, these are not needed to be sent again in
+        // subsequent IPC messages.
+        //
+        // MCREG must be the first command on a new connection.
If the registation + // fails, the MCREG can be retried on the same connection any number of + // times. Once successful, subsequent MCREGs are failed until after a + // MCUNREG. + // + struct { + pid_t client_pid; + int client_fd; + __u8 mode; +#define MCREG_INITIAL_REG 0x1 // fresh registration +#define MCREG_DUP_REG 0x0 // dup + ctx_hndl_t ctx_hndl; + __u64 challenge; + } reg; + + // After MCUNREG, the client can send a new MCREG on the existing + // connection or close it. + // + struct { + } unreg; + + struct { + __u64 flags; + } open; + + struct { + res_hndl_t res_hndl; + } close; + + struct { + res_hndl_t res_hndl; + __u64 new_size; // size in chunks + } size; + + struct { + res_hndl_t res_hndl; + __u64 v_lba; + } xlate_lba; + + struct { + ctx_hndl_t ctx_hndl_src; + __u64 flags; + __u64 challenge; + } clone; + + struct { + ctx_hndl_t ctx_hndl_cand; + __u64 challenge; + } dup; + + struct { + res_hndl_t res_hndl; + } stat; + + mc_notify_t notify; + + }; +} mc_req_t; + + +typedef struct mc_resp +{ + mc_resp_header_t header; + + union { + struct { + } reg; + + struct { + } unreg; + + struct { + res_hndl_t res_hndl; + } open; + + struct { + } close; + + struct { + __u64 act_new_size; + } size; + + struct { + __u64 p_lba; + } xlate_lba; + + struct { + } clone; + + struct { + } dup; + + mc_stat_t stat; + + struct { + } notify; + + }; + +} mc_resp_t; + + +#endif diff --git a/src/include/cflash_eras.h b/src/include/cflash_eras.h new file mode 100644 index 00000000..ce9eac9d --- /dev/null +++ b/src/include/cflash_eras.h @@ -0,0 +1,46 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cflash_eras.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _H_CFLASH_ERAS_H +#define _H_CFLASH_ERAS_H + + +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && !_MACOSX */ + +typedef uint32_t eye_catch4b_t; +/* + * Macros to define eye-catchers using character literals + */ + +#define __EYEC2(__a,__b) (((__a)<< 8) | (__b)) +#define __EYEC4(__a,__b,__c,__d) ((__EYEC2(__a,__b) << 16) | __EYEC2(__c,__d)) + + + +#endif /* _H_CFLASH_ERAS_H */ diff --git a/src/include/cflash_mmio.h b/src/include/cflash_mmio.h new file mode 100644 index 00000000..62be2b7d --- /dev/null +++ b/src/include/cflash_mmio.h @@ -0,0 +1,169 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cflash_mmio.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _H_CFLASH_MMIO +#define _H_CFLASH_MMIO + + +#include +#include + +#if !defined(_AIX) && !defined(_MACOSX) + +/* + * TODO: ?? remove the out_be, and in_be functions, since + * we should routines that can under the covers + * handle the host endianess. + */ + +// MMIO write, 64 bit +static inline void out_be64 (__u64 *addr, __u64 val) +{ + __u64 val2 = CFLASH_TO_ADAP64(val); + __asm__ __volatile__ ("sync; std%U0%X0 %1, %0" + : "=m" (*addr) : "r" (val2) : "memory"); +} + + +// MMIO read, 64 bit +static inline __u64 in_be64 (__u64 *addr) +{ + __u64 ret; + __asm__ __volatile__ ("sync; ld%U1%X1 %0, %1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr) : "memory"); + return CFLASH_FROM_ADAP64(ret); +} + + + +// MMIO write, 64 bit +static inline void out_mmio64 (volatile __u64 *addr, __u64 val) +{ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + __u64 zero = 0; + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u64 *)(addr)) = val; +#endif /* _AIX */ +} + + +// MMIO read, 64 bit +static inline __u64 in_mmio64 (volatile __u64 *addr) +{ + __u64 val; +#ifdef CFLASH_LITTLE_ENDIAN_HOST + __u64 zero = 0; + asm volatile ( "ldbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u64 *)(addr)); +#endif /* _AIX */ + + return val; +} + +// MMIO write, 32 bit +static inline void out_mmio32 (volatile __u64 *addr, __u32 val) +{ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + __u32 zero = 0; + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), 
"r"(zero), "r"(addr) ); +#else + *((volatile __u32 *)(addr)) = val; +#endif /* _AIX */ +} + +// MMIO read, 32 bit +static inline __u32 in_mmio32 (volatile __u64 *addr) +{ + __u32 val; +#ifdef CFLASH_LITTLE_ENDIAN_HOST + __u32 zero = 0; + asm volatile ( "lwbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u64 *)(addr)); +#endif /* _AIX */ + + return val; +} + +#else +static inline void out_be64 (__u64 *addr, __u64 val) +{ + __u64 val2 = CFLASH_TO_ADAP64(val); + + *((volatile __u64 *)addr) = val2; +} + + +// MMIO read, 64 bit +static inline __u64 in_be64 (__u64 *addr) +{ + __u64 ret; + + ret = *((volatile __u64 *)addr); + + return CFLASH_FROM_ADAP64(ret); +} + +static inline void out_mmio64 (volatile __u64 *addr, __u64 val) +{ + __u64 val2 = CFLASH_TO_ADAP64(val); + + *((volatile __u64 *)addr) = val2; +} + + +// MMIO read, 64 bit +static inline __u64 in_mmio64 (volatile __u64 *addr) +{ + __u64 ret; + + ret = *((volatile __u64 *)addr); + + return CFLASH_FROM_ADAP64(ret); +} + + +static inline void out_mmio32 (volatile __u64 *addr, __u32 val) +{ + __u32 val2 = CFLASH_TO_ADAP32(val); + + *((volatile __u32 *)addr) = val2; +} + + +// MMIO read, 64 bit +static inline __u32 in_mmio32 (volatile __u64 *addr) +{ + __u64 ret; + + ret = *((volatile __u32 *)addr); + + return CFLASH_FROM_ADAP32(ret); +} +#endif /* AIX or MAC OSX */ + +#endif /* _H_CFLASH_MMIO */ diff --git a/src/include/cflash_scsi_user.h b/src/include/cflash_scsi_user.h new file mode 100644 index 00000000..86174e1b --- /dev/null +++ b/src/include/cflash_scsi_user.h @@ -0,0 +1,487 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cflash_scsi_user.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _H_CFLASH_SCSI_USER +#define _H_CFLASH_SCSI_USER + +#include +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && !_MACOSX */ + + +/*****************************************************************************/ +/* */ +/* SCSI Op codes */ +/* */ +/*****************************************************************************/ + +#define SCSI_INQUIRY 0x12 +#define SCSI_MODE_SELECT 0x15 +#define SCSI_MODE_SELECT_10 0x55 +#define SCSI_MODE_SENSE 0x1A +#define SCSI_MODE_SENSE_10 0x5A +#define SCSI_PERSISTENT_RESERVE_IN 0x5E +#define SCSI_PERSISTENT_RESERVE_OUT 0x5F +#define SCSI_READ 0x08 +#define SCSI_READ_16 0x88 +#define SCSI_READ_CAPACITY 0x25 +#define SCSI_READ_EXTENDED 0x28 +#define SCSI_REPORT_LUNS 0xA0 +#define SCSI_REQUEST_SENSE 0x03 +#define SCSI_SERVICE_ACTION_IN 0x9E +#define SCSI_SERVICE_ACTION_OUT 0x9F +#define SCSI_START_STOP_UNIT 0x1B +#define SCSI_TEST_UNIT_READY 0x00 +#define SCSI_WRITE 0x0A +#define SCSI_WRITE_16 0x8A +#define SCSI_WRITE_AND_VERIFY 0x2E +#define SCSI_WRITE_AND_VERIFY_16 0x8E +#define SCSI_WRITE_EXTENDED 0x2A +#define SCSI_WRITE_SAME 0x41 +#define SCSI_WRITE_SAME_16 0x93 + + + + +/* + A simple subset of the ANSI SCSI SERVICE ACTION + IN codes (in alphabetical order) + */ +#define SCSI_READ_CAP16_SRV_ACT_IN 0x10 + + + +#define SCSI_MODE_SNS_CHANGEABLE 0x1 + 
+/* + * SCSI CDB structure + */ +typedef struct scsi_cdb { /* structure of the SCSI cmd block */ + uint8_t scsi_op_code; /* first byte of SCSI cmd block */ + uint8_t scsi_bytes[15]; /* other bytes of SCSI cmd block */ +} scsi_cdb_t; + + + +/* + * + * + * READ(16) Command + * +=====-======-======-======-======-======-======-======-======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================================================| + * | 0 | Operation Code (88h) | + * |-----+-------------------------------------------------------| + * | 1 | | DPO | FUA | Reserved |RelAdr| + * |-----+-------------------------------------------------------| + * | 2 | (MSB) | + * |-----+--- ---| + * | 3 | | + * |-----+--- ---| + * | 4 | | + * |-----+--- ---| + * | 5 | Logical Block Address | + * |-----+--- ---| + * | 6 | | + * |-----+--- ---| + * | 7 | | + * |-----+--- ---| + * | 8 | | + * |-----+--- ---| + * | 9 | (LSB) | + * |-----+-------------------------------------------------------| + * | 10 | (MSB) | + * |-----+--- ---| + * | 11 | | + * |-----+--- Transfer Length ---| + * | 12 | | + * |-----+--- ---| + * | 13 | (LSB) | + * |-----+-------------------------------------------------------| + * | 14 | Reserved | + * |-----+-------------------------------------------------------| + * | 15 | Control | + * +=============================================================+ + * + * + * WRITE(16) Command + * +=====-======-======-======-======-======-======-======-======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================================================| + * | 0 | Operation Code (8Ah) | + * |-----+-------------------------------------------------------| + * | 1 | | DPO | FUA | Reserved |RelAdr| + * |-----+-------------------------------------------------------| + * | 2 | (MSB) | + * |-----+--- ---| + * | 3 | | + * |-----+--- ---| + * | 4 | | + * |-----+--- ---| + * | 5 | Logical Block Address | 
+ * |-----+--- ---| + * | 6 | | + * |-----+--- ---| + * | 7 | | + * |-----+--- ---| + * | 8 | | + * |-----+--- ---| + * | 9 | (LSB) | + * |-----+-------------------------------------------------------| + * | 10 | (MSB) | + * |-----+--- ---| + * | 11 | | + * |-----+--- Transfer Length ---| + * | 12 | | + * |-----+--- ---| + * | 13 | (LSB) | + * |-----+-------------------------------------------------------| + * | 14 | Reserved | + * |-----+-------------------------------------------------------| + * | 15 | Control | + * +=============================================================+ + * + * + */ +static inline void CFLASH_BUILD_RW_16(scsi_cdb_t *scsi_cdb,uint64_t lba, + uint32_t blk_cnt) +{ + uint64_t *lba_offset; + + scsi_cdb->scsi_bytes[0] = 0x00; + + /* Set LBA */ + lba_offset = (uint64_t *)&scsi_cdb->scsi_bytes[1]; + *lba_offset = CFLASH_TO_ADAP64(lba); + + /* Set transfer size */ + scsi_cdb->scsi_bytes[9] = ((blk_cnt >> 24) & 0xff); + scsi_cdb->scsi_bytes[10] = ((blk_cnt >> 16) & 0xff); + scsi_cdb->scsi_bytes[11] = ((blk_cnt >> 8) & 0xff); + scsi_cdb->scsi_bytes[12] = (blk_cnt & 0xff); + + scsi_cdb->scsi_bytes[13] = 0x00; + + return; +} + +#define SCSI_INQSIZE 256 + +/* + * Standard SCSI-3 INQUIRY data format + * +=====-=======-=======-=======-=======-=======-=======-=======-=======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================+=======================================| + * | 0 | Peripheral qualifier | Peripheral device type | + * |-----+---------------------------------------------------------------| + * | 1 | RMB | Device-type modifier | + * |-----+---------------------------------------------------------------| + * | 2 | ISO version | ECMA version | ANSI-approved version| + * |-----+-----------------+---------------------------------------------| + * | 3 | AERC | TrmTsk|NormACA|Reserve| Response data format | + * |-----+---------------------------------------------------------------| + * | 4 | 
Additional length (n-4) | + * |-----+---------------------------------------------------------------| + * | 5 | Reserved | + * |-----+---------------------------------------------------------------| + * | 6 |Reserve|EncServ| Port | DualP | MChngr|ACKQREQ|Addr32 |Addr16 | + * |-----+---------------------------------------------------------------| + * | 7 | RelAdr| WBus32| WBus16| Sync | Linked|TranDis| CmdQue| SftRe | + * |-----+---------------------------------------------------------------| + * | 8 | (MSB) | + * |- - -+--- Vendor identification ---| + * | 15 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 16 | (MSB) | + * |- - -+--- Product identification ---| + * | 31 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 32 | (MSB) | + * |- - -+--- Product revision level ---| + * | 35 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 36 | | + * |- - -+--- Vendor-specific ---| + * | 55 | | + * |-----+---------------------------------------------------------------| + * | 56 | | + * |- - -+--- Reserved ---| + * | 95 | | + * |=====+===============================================================| + * | | Vendor-specific parameters | + * |=====+===============================================================| + * | 96 | | + * |- - -+--- Vendor-specific ---| + * | n | | + * +=====================================================================+ + */ +struct inqry_data { + uint8_t pdevtype; /* Peripherial device/qualifier */ + +#define SCSI_PDEV_MASK 0x1F /* Mask to extract Peripheral device*/ + /* type from byte 0 of Inquiry data */ +#define SCSI_DISK 0x00 /* Peripheral Device Type of SCSI */ + /* Disk. 
*/ + + uint8_t rmbdevtq; + uint8_t versions; /* Versions field */ + uint8_t resdfmt; + +#define NORMACA_BIT 0x20 /* Mask to extract NormACA bit */ + /* from standard inquiry data */ + uint8_t add_length; /* Additional length */ + uint8_t reserved; + uint8_t flags1; +#define SES_MSK 0x40 /* Mask to extract the EncServ bit */ + uint8_t flags2; +#define CMDQUE_BIT 0x02 /* Mask to extract cmdque bit */ + /* from standard inquiry data */ + char vendor[8]; /* Vendor ID */ + char product[8]; /* Product ID */ + char res[12]; + char sno[8]; + char misc[212]; +}; + + + +/************************************************************************/ +/* Device ID Code Page defines used for extracting WWID */ +/************************************************************************/ +#define CFLASH_INQ_DEVICEID_CODE_PAGE 0x83 +#define CFLASH_INQ_DEVICEID_PAGELEN_OFFSET 3 +#define CFLASH_INQ_DEVICEID_IDESC_HEADER_SIZE 4 +#define CFLASH_INQ_DEVICEID_IDDESC_LIST_OFFSET 4 +#define CFLASH_INQ_DEVICEID_IDDESC_CODESET_OFFSET 0 +#define CFLASH_INQ_DEVICEID_IDDESC_CODESET_MASK 0xF +#define CFLASH_INQ_DEVICEID_IDDESC_CODESET_BIN 1 +#define CFLASH_INQ_DEVICEID_IDDESC_CODESET_ASCII 2 +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_OFFSET 1 +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_MASK 0xF +#define CFLASH_INQ_DEVICEID_IDDESC_ASSOC_OFFSET 1 +#define CFLASH_INQ_DEVICEID_IDDESC_ASSOC_MASK 0x30 + +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NOAUTH 0 + +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_VENDOR_PLUS 1 +#define CFLASH_INQ_DEVICEID_IDDESC_VENDOR_PORTION_LEN 8 + +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_EUI64 2 +#define CFLASH_INQ_DEVICEID_IDDESC_EUI64_LEN 8 + +#define CFLASH_INQ_DEVICEID_IDDESC_IDTYPE_NAA 3 +#define CFLASH_INQ_DEVICEID_IDDESC_NAA_LEN 8 + +#define CFLASH_INQ_DEVICEID_IDDESC_ASSOC_LUN 0x00 +#define CFLASH_INQ_DEVICEID_IDDESC_ASSOC_PORT 0x10 +#define CFLASH_INQ_DEVICEID_IDDESC_ASSOC_TARGET 0x20 + +#define CFLASH_INQ_DEVICEID_IDDESC_IDLEN_OFFSET 3 +#define 
CFLASH_INQ_DEVICEID_IDDESC_IDENT_OFFSET 4 + +/* + * Standard SCSI-3 Report Luns data header + * +=====-=======-=======-=======-=======-=======-=======-=======-=======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================+=======================================| + * | 0 | (MSB) | + * |-----+--- Lun List Length ---| + * | 1 | | + * |-----+--- ---| + * | 2 | | + * |-----+--- ---| + * | 3 | (LSB) | + * |-----+---------------------------------------------------------------| + * | 4 | | + * |-----+---- Reserved -------| + * | 7 | | + * +---------------------------------------------------------------------+ + */ + +struct lun_list_hdr { + uint32_t lun_list_length; + uint32_t resvd; +}; +#define START_LUNS 8 /* The offset in the report luns */ + /* where the actual lun list starts.*/ + + +/************************************************************************/ +/* Read Capacity 10 returned data */ +/************************************************************************/ +struct readcap_data { + uint32_t lba; /* last logical block address */ + int len; /* block length in bytes */ +}; + + +/************************************************************************/ +/* Read Capacity 16 returned data */ +/************************************************************************/ +struct readcap16_data { + uint64_t lba; /* last logical block address */ + int len; /* block length in bytes */ + uint32_t reserved:4; /* reserved field */ + int p_type:3; /* The T10 protection type to */ + /* which this lun is formatted*/ + + int prot_en:1; /* Indicates the protection */ + /* is enabled or not */ + uint8_t reserved1[19]; /* Reserved */ +}; + + + +/************************************************************************/ +/* SCSI Status */ +/************************************************************************/ +#define SCSI_GOOD_STATUS 0x00 /* target completed successfully */ +#define SCSI_CHECK_CONDITION 0x02 /* target is 
reporting an error, + exception, or abnormal condition */ +#define SCSI_BUSY_STATUS 0x08 /* target is busy and cannot accept + a command from initiator */ +#define SCSI_INTMD_GOOD 0x10 /* intermediate status good when using + linked commands */ + +#define SCSI_RESERVATION_CONFLICT 0x18 /* attempted to access a LUN which is + reserved by another initiator */ +#define SCSI_COMMAND_TERMINATED 0x22 /* Command has been terminated by + the device. */ +#define SCSI_QUEUE_FULL 0x28 /* Device's command queue is full */ +#define SCSI_ACA_ACTIVE 0x30 /* The device has an ACA condition + that requires a Clear ACA task + management request to clear. */ +#define SCSI_TASK_ABORTED 0x40 /* The device has aborted a command */ + + +/************************************************************************/ +/* Request (Auto) Sense Data Block */ +/************************************************************************/ +/* + * Error Codes 70h and 71h Fixed Sense Data Format + * +=====-======-======-======-======-======-======-======-======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+======+================================================| + * | 0 | Valid| Error Code (70h or 71h) | + * |-----+-------------------------------------------------------| + * | 1 | Segment Number | + * |-----+-------------------------------------------------------| + * | 2 |Filema| EOM | ILI |Reserv| Sense Key | + * |-----+-------------------------------------------------------| + * | 3 | (MSB) | + * |- - -+--- Information ---| + * | 6 | (LSB) | + * |-----+-------------------------------------------------------| + * | 7 | Additional Sense Length (n-7) | + * |-----+-------------------------------------------------------| + * | 8 | (MSB) | + * |- - -+--- Command-Specific Information ---| + * | 11 | (LSB) | + * |-----+-------------------------------------------------------| + * | 12 | Additional Sense Code | + * 
|-----+-------------------------------------------------------| + * | 13 | Additional Sense Code Qualifier | + * |-----+-------------------------------------------------------| + * | 14 | Field Replaceable Unit Code | + * |-----+-------------------------------------------------------| + * | 15 | SKSV| | + * |- - -+------------ Sense-Key Specific ---| + * | 17 | | + * |-----+-------------------------------------------------------| + * | 18 | | + * |- - -+--- Additional Sense Bytes ---| + * | n | | + * +=============================================================+ + * + * Structure for Fixed Sense Data Format + */ + +struct request_sense_data { + uint8_t err_code; /* error class and code */ + uint8_t rsvd0; + uint8_t sense_key; +#define CFLSH_NO_SENSE 0x00 +#define CFLSH_RECOVERED_ERROR 0x01 +#define CFLSH_NOT_READY 0x02 +#define CFLSH_MEDIUM_ERROR 0x03 +#define CFLSH_HARDWARE_ERROR 0x04 +#define CFLSH_ILLEGAL_REQUEST 0x05 +#define CFLSH_UNIT_ATTENTION 0x06 +#define CFLSH_DATA_PROTECT 0x07 +#define CFLSH_BLANK_CHECK 0x08 +#define CFLSH_VENDOR_UNIQUE 0x09 +#define CFLSH_COPY_ABORTED 0x0A +#define CFLSH_ABORTED_COMMAND 0x0B +#define CFLSH_EQUAL_CMD 0x0C +#define CFLSH_VOLUME_OVERFLOW 0x0D +#define CFLSH_MISCOMPARE 0x0E + + uint8_t sense_byte0; + uint8_t sense_byte1; + uint8_t sense_byte2; + uint8_t sense_byte3; + uint8_t add_sense_length; + uint8_t add_sense_byte0; + uint8_t add_sense_byte1; + uint8_t add_sense_byte2; + uint8_t add_sense_byte3; + uint8_t add_sense_key; + uint8_t add_sense_qualifier; + uint8_t fru; + uint8_t flag_byte; + uint8_t field_ptrM; + uint8_t field_ptrL; +}; + + +/************************************************************************/ +/* Function prototypes */ +/************************************************************************/ +int cflash_build_scsi_inquiry(scsi_cdb_t *scsi_cdb, int page_code,uint8_t data_size); +int cflash_process_scsi_inquiry(void *std_inq_data,int inq_data_len, + void *vendor_id,void *product_id); +int 
cflash_process_scsi_inquiry_dev_id_page(void *inqdata,int inq_data_len,char *wwid); +int cflash_build_scsi_tur(scsi_cdb_t *scsi_cdb); +int cflash_build_scsi_report_luns(scsi_cdb_t *scsi_cdb, uint32_t length_list); +int cflash_process_scsi_report_luns(void *lun_list_rsp, uint32_t length_list, + uint64_t **actual_lun_list, int *num_actual_luns); +int cflash_build_scsi_mode_sense_10(scsi_cdb_t *scsi_cdb, + uint16_t data_len, int flags); +int cflash_build_scsi_mode_select_10(scsi_cdb_t *scsi_cdb, + uint16_t data_len, int flags); +int cflash_build_scsi_read_cap(scsi_cdb_t *scsi_cdb); +int cflash_build_scsi_read_cap16(scsi_cdb_t *scsi_cdb, uint8_t data_len); +int cflash_process_scsi_read_cap16(struct readcap16_data *readcap16_data, uint32_t *block_size, + uint64_t *last_lba); +int cflash_build_scsi_request_sense(scsi_cdb_t *scsi_cdb, uint8_t data_len); + +#endif /* _H_CFLASH_SCSI_USER */ diff --git a/src/include/cflash_sisl.h b/src/include/cflash_sisl.h new file mode 100755 index 00000000..a8f95098 --- /dev/null +++ b/src/include/cflash_sisl.h @@ -0,0 +1,504 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cflash_sisl.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +/****************************************************************************** + * COMPONENT_NAME: sysxcflash + * + * FUNCTION: CAPI Flash sislite specification definitions. + * + * FUNCTIONS: NONE + * + *****************************************************************************/ + +#ifndef _SISLITE_H +#define _SISLITE_H + +#include +#if 0 +#include +#endif +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && ! _MACOSX */ +#if defined(_AIX) || defined(_MACOSX) +typedef unsigned char __u8; +typedef unsigned short __u16; +typedef short __s16; +typedef unsigned int __u32; +typedef int __s32; +typedef unsigned long long __u64; +#endif /* AIX */ + +/************************************************************************/ +/* Sislite AFU Alignment */ +/************************************************************************/ +#define SISL_BYTE_ALIGN 16 +#define SISL_ALIGN_MASK 0xFFFFFFFFFFFFFFF0ULL /* mask off the offset */ + + +/************************************************************************/ +/* Sislite Macros */ +/************************************************************************/ +#define SISL_PER_CONTEXT_MMIO_SIZE 0x10000 /* Per context MMIO space*/ +#define SISL_TOTAL_MMIO_SIZE 0X20A0000 /* Total MMIO space */ + +#define SISL_NUM_FC_PORTS 2 +#define SISL_MAX_CONTEXT 512 /* how many contexts per afu */ + +/* make the fp byte */ +#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm)) + +/* make the fp byte for a clone from a source fp and clone flags + * flags must be only 2 LSB bits. 
+ */ +#define SISL_RHT_FP_CLONE(src_fp, clone_flags) ((src_fp) & (0xFC | (clone_flags))) + +/* extract the perm bits from a fp */ +#define SISL_RHT_PERM(fp) ((fp) & 0x3) + +#define SISL_RHT_PERM_READ 0x01u +#define SISL_RHT_PERM_WRITE 0x02u + +/* AFU Sync Commands */ +#define SISL_AFU_SYNC_CMD 0xC0 + +/* AFU Sync Mode byte */ +#define SISL_AFU_LW_SYNC 0x0u +#define SISL_AFU_HW_SYNC 0x1u +#define SISL_AFU_GSYNC 0x2u + +/* AFU Task Management Function Commands */ +#define SISL_TMF_LUN_RESET 0x1 +#define SISL_TMF_CLEAR_ACA 0x2 + +/************************************************************************/ +/* IOARCB: 64 bytes, min 16 byte alignment required */ +/************************************************************************/ + +typedef __u16 ctx_hndl_t; +typedef __u32 res_hndl_t; + + +/*************************************************************************** + * + * Below this point are data structures that make up the adapter API. + * + * We do not want the compiler to insert any extra padding in these + * structures. 
+ * + ***************************************************************************/ +#pragma pack(1) + + +/* + * IOARCB strcuture for AFU + */ +typedef struct sisl_ioarcb_s { + __u16 ctx_id; /* ctx_hndl_t */ + __u16 req_flags; +#define SISL_REQ_FLAGS_RES_HNDL 0x8000u /* bit 0 (MSB) */ +#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000u + +#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000u /* bit 1 */ + +#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000u /* bits 8,9 */ +#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040u +#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080u +#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0u + +#define SISL_REQ_FLAGS_TMF 0x0004u /* bit 13 */ +#define SISL_REQ_FLAGS_AFU_CMD 0x0002u /* bit 14 */ + +#define SISL_REQ_FLAGS_HOST_WRITE 0x0001u /* bit 15 (LSB) */ +#define SISL_REQ_FLAGS_HOST_READ 0x0000u + + union { + __u32 res_hndl; /* res_hndl_t */ + __u32 port_sel; /* this is a selection mask: + * 0x1 -> port#0 can be selected, + * 0x2 -> port#1 can be selected. + * Can be bitwise ORed. + */ + }; + __u64 lun_id; + __u32 data_len; /* 4K for read/write */ + __u32 ioadl_len; + union { + __u64 data_ea; /* min 16 byte aligned */ + __u64 ioadl_ea; + }; + __u8 msi; /* LISN to send on RRQ write */ +#define SISL_MSI_PSL_XLATE 0 /* reserved for PSL */ +#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */ +#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */ +#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */ + /* The above LISN allocation permits user contexts to use 3 interrupts. + * Only master needs 4. This saves IRQs on the system. 
+ */ + + __u8 rrq; /* 0 for a single RRQ */ + __u16 timeout; /* in units specified by req_flags */ + __u32 rsvd1; + __u8 cdb[16]; /* must be in big endian */ + __u64 rsvd2; +} sisl_ioarcb_t; + + + +struct sisl_rc { + __u8 flags; +#define SISL_RC_FLAGS_SENSE_VALID 0x80u +#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40u +#define SISL_RC_FLAGS_OVERRUN 0x20u +#define SISL_RC_FLAGS_UNDERRUN 0x10u + + __u8 afu_rc; +#define SISL_AFU_RC_RHT_INVALID 0x01u /* user error */ +#define SISL_AFU_RC_RHT_UNALIGNED 0x02u /* should never happen */ +#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */ +#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra + may retry if afu_retry is off + possible on master exit + */ +#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */ +#define SISL_AFU_RC_LXT_UNALIGNED 0x12u /* should never happen */ +#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */ +#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra + may retry if afu_retry is off + possible on master exit + */ +#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */ + +#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible when master exited */ + + /* NO_CHANNELS means the FC ports selected by dest_port in + * IOARCB or in the LXT entry are down when the AFU tried to select + * a FC port. If the port went down on an active IO, it will set + * fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead. 
+ */ +#define SISL_AFU_RC_NO_CHANNELS 0x20u /* see afu_extra, may retry */ +#define SISL_AFU_RC_CAP_VIOLATION 0x21u /* either user error or + afu reset/master restart + */ +#define SISL_AFU_RC_SYNC_IN_PROGRESS 0x22u /* AFU Sync issued when previous + sync is in progress + */ +#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30u /* always retry */ +#define SISL_AFU_RC_DATA_DMA_ERR 0x31u /* see afu_extra + may retry if afu_retry is off + */ +#define SISL_AFU_RC_RCB_TIMEOUT_PRE_FC 0x50u /* RCB timeout before reaching + FC layers inside AFU + */ +#define SISL_AFU_RC_RCB_TIMEOUT_POST_FC 0x51u /* RCB timeout after reaching + FC layers inside AFU + */ + + __u8 scsi_rc; /* SCSI status byte, retry as appropriate */ +#define SISL_SCSI_RC_CHECK 0x02u +#define SISL_SCSI_RC_BUSY 0x08u + + __u8 fc_rc; /* retry */ + /* + * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) + * for commands that are in flight when a link goes down or is logged out. + * If the link is down or logged out before AFU selects the port, either + * it will choose the other port or we will get afu_rc=0x20 (no_channel) + * if there is no valid port to use. + * + * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these + * would happen if a frame is dropped and something times out. + * NOLOGI or LINKDOWN can be retried if the other port is up. + * RESIDERR can be retried as well. + * + * ABORTFAIL might indicate that lots of frames are getting CRC errors. + * So it maybe retried once and reset the link if it happens again. + * The link can also be reset on the CRC error threshold interrupt. 
+ */ +#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */ +#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */ +#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */ +#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */ +#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */ +#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */ +#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */ +#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */ +#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */ +#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI + reported len, possibly due to dropped + frames */ +#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */ + +}; + + +#define SISL_SENSE_DATA_LEN 20 /* Sense data length */ + +/* + * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required + */ + +typedef struct sisl_ioasa_s { + union { + struct sisl_rc rc; + __u32 ioasc; +#define SISL_IOASC_GOOD_COMPLETION 0x00000000u + }; + __u32 resid; + __u8 port; + __u8 afu_extra; + /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR): + * afu_extra contains PSL response code. Useful codes are: + */ +#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein SW_Implication + * Enabled N/A + * Disabled retry + */ +#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error + * afu_rc SW_Implication + * 0x04, 0x14 Indicates master exit. + * 0x31 user error. + */ + /* when afu rc=0x20 (no channels): + * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask. 
+ */ +#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2) +#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03) + + __u8 scsi_extra; + __u8 fc_extra; + __u8 sense_data[SISL_SENSE_DATA_LEN]; + + union { + __u64 host_use[4]; + __u8 host_use_b[32]; + __u64 next_cmd[4]; + }; +} sisl_ioasa_t; + + +/* single request+response block: 128 bytes. + cache line aligned for better performance. +*/ +typedef struct sisl_iocmd_s { + sisl_ioarcb_t rcb; + sisl_ioasa_t sa; +} sisl_iocmd_t __attribute__ ((aligned (128))); + +#define SISL_RESP_HANDLE_T_BIT 0x1ull /* Toggle bit */ +#define SISL_RESP_HANDLE_BADDR_MASK 0xFull /* Toggle bit */ + +/* MMIO space is required to support only 64-bit access */ + +/* per context host transport MMIO */ +struct sisl_host_map { + __u64 endian_ctrl; +#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ull + +__u64 intr_status; /* this sends LISN# programmed in ctx_ctrl. + * Only recovery in a PERM_ERR is a context exit since + * there is no way to tell which command caused the error. + */ +#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ull /* b59, user error */ +#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ull /* b60, user error */ +#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ull /* b61, user error */ +#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ull /* b62, user error */ + /* Page in wait accessing RCB/IOASA/RRQ is reported in b63. + * Same error in data/LXT/RHT access is reported via IOASA. + */ +#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ull /* b63, can be generated + * only when AFU auto retry is + * disabled. If user can determine + * the command that caused the error, + * it can be retried. 
+ */ +#define SISL_ISTATUS_UNMASK (0x001Full) /* 1 means unmasked */ +#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */ + + __u64 intr_clear; + __u64 intr_mask; + __u64 ioarrin; /* only write what cmd_room permits */ +#define SISL_IOARRIN_CTX_RST 0x01 + __u64 rrq_start; /* start & end are both inclusive */ + __u64 rrq_end; /* write sequence: start followed by end */ + __u64 cmd_room; + __u64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ + __u64 mbox_w; /* restricted use */ +}; + +/* per context provisioning & control MMIO */ +struct sisl_ctrl_map { + __u64 rht_start; + __u64 rht_cnt_id; + /* both cnt & ctx_id args must be ull */ +#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32)) + + __u64 ctx_cap; /* afu_rc below is when the capability is violated */ +#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ull /* afu_rc 0x1a */ +#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ull /* afu_rc 0x21 */ +#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ull /* afu_rc 0x21 */ + __u64 mbox_r; +}; + +/* single copy global regs */ +struct sisl_global_regs { + __u64 aintr_status; + /* In surelock, each FC port/link gets a byte of status */ +#define SISL_ASTATUS_FC0_OTHER 0x8000ull /* b48, other err, FC_ERRCAP[31:20] */ +#define SISL_ASTATUS_FC0_LOGO 0x4000ull /* b49, target sent FLOGI/PLOGI/LOGO + while logged in */ +#define SISL_ASTATUS_FC0_CRC_T 0x2000ull /* b50, CRC threshold exceeded */ +#define SISL_ASTATUS_FC0_LOGI_R 0x1000ull /* b51, login state machine timed out + and retrying */ +#define SISL_ASTATUS_FC0_LOGI_F 0x0800ull /* b52, login failed, FC_ERROR[19:0] */ 
+#define SISL_ASTATUS_FC0_LOGI_S 0x0400ull /* b53, login succeeded */ +#define SISL_ASTATUS_FC0_LINK_DN 0x0200ull /* b54, link online to offline */ +#define SISL_ASTATUS_FC0_LINK_UP 0x0100ull /* b55, link offline to online */ + +#define SISL_ASTATUS_FC1_OTHER 0x0080ull /* b56 */ +#define SISL_ASTATUS_FC1_LOGO 0x0040ull /* b57 */ +#define SISL_ASTATUS_FC1_CRC_T 0x0020ull /* b58 */ +#define SISL_ASTATUS_FC1_LOGI_R 0x0010ull /* b59 */ +#define SISL_ASTATUS_FC1_LOGI_F 0x0008ull /* b60 */ +#define SISL_ASTATUS_FC1_LOGI_S 0x0004ull /* b61 */ +#define SISL_ASTATUS_FC1_LINK_DN 0x0002ull /* b62 */ +#define SISL_ASTATUS_FC1_LINK_UP 0x0001ull /* b63 */ + +#define SISL_ASTATUS_UNMASK 0xFFFFull /* 1 means unmasked */ +#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ + + __u64 aintr_clear; + __u64 aintr_mask; + __u64 afu_ctrl; +#define SISL_CTRL_LOCAL_LUN 0x01ull /*b63 */ + __u64 afu_hb; + __u64 afu_scratch_pad; + __u64 afu_port_sel; +#define SISL_PORT_SEL_PORT0 0x01ull /*b63 */ +#define SISL_PORT_SEL_PORT1 0x02ull /*b62 */ + __u64 afu_config; +#define SISL_CONFIG_MBOX_CLR_READ 0x00010ull /* b59 */ +#define SISL_CONFIG_LE_MODE 0x00020ull /* b58 */ +#define SISL_CONFIG_RRQ_PAGIN_RETRY 0x00100ull /* b55 */ +#define SISL_CONFIG_IOASA_PAGIN_RETRY 0x00200ull /* b54 */ +#define SISL_CONFIG_RESRC_ERR_RETRY 0x00400ull /* b53 */ +#define SISL_CONFIG_DATA_PAGIN_RETRY 0x00800ull /* b52 */ +#define SISL_CONFIG_RHT_PAGIN_TRYRY 0x01000ull /* b51 */ +#define SISL_CONFIG_LXT_PAGIN_RETRY 0x02000ull /* b50 */ +#define SISL_CONFIG_IOARCB_PAGIN_RETRY 0x04000ull /* b49 */ + __u64 rrin_read_to; /* RRIN read timeout */ + __u64 cont_retry_to; /* Continue response retry timeout */ + __u64 rsvd[0xf6]; + __u64 afu_version; +}; + + +struct sisl_global_map { + union { + struct sisl_global_regs regs; + char page0[0x1000]; /* page 0 */ + }; + + char page1[0x1000]; /* page 1 */ + __u64 fc_regs[SISL_NUM_FC_PORTS][512]; /* pages 2 & 3, see afu_fc.h */ + __u64 fc_port[SISL_NUM_FC_PORTS][512]; 
/* pages 4 & 5 (lun tbl) */ + + char page6_15[0xA000]; +}; + + +struct surelock_afu_map { + union { + struct sisl_host_map host; + char harea[0x10000]; /* 64KB each */ + } hosts[SISL_MAX_CONTEXT]; + + union { + struct sisl_ctrl_map ctrl; + char carea[128]; /* 128B each */ + } ctrls[SISL_MAX_CONTEXT]; + + union { + struct sisl_global_map global; + char garea[0x10000]; /* 64KB single block */ + }; + char reserved[0x20000]; + char afu_dbg[0x20000]; + char fc0_dbg[0x20000]; + char fc1_dbg[0x20000]; +}; + + +/* LBA translation control blocks */ + +typedef struct sisl_lxt_entry { + __u64 rlba_base; /* bits 0:47 is base + * b48:55 is lun index + * b58:59 is write & read perms + * (if no perm, afu_rc=0x15) + * b60:63 is port_sel mask + */ + +} sisl_lxt_entry_t; + + +typedef struct sisl_vrht_entry { + sisl_lxt_entry_t *lxt_start; + __u32 lxt_cnt; + __u16 rsvd; + __u8 fp; /* format & perm nibbles. + * (if no perm, afu_rc=0x05) + */ + __u8 nmask; +} sisl_vrht_entry_t __attribute__ ((aligned (16))); + +typedef struct sisl_prht_entry { + __u64 lun_id; + __u8 valid; + __u8 rsvd[5]; + __u8 fp; /* format & perm nibbles. + * (if no perm, afu_rc=0x05) + */ + __u8 pmask; +} sisl_prht_entry_t __attribute__ ((aligned (16))); + +typedef union sisl_rht_entry { + sisl_vrht_entry_t vrht; + sisl_prht_entry_t prht; +} sisl_rht_entry_t; + + + +#pragma pack(pop) + + +#endif /* _SISLITE_H */ + diff --git a/src/include/cflash_tools_user.h b/src/include/cflash_tools_user.h new file mode 100644 index 00000000..7146e668 --- /dev/null +++ b/src/include/cflash_tools_user.h @@ -0,0 +1,133 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cflash_tools_user.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _H_CFLASH_TOOLS_USER +#define _H_CFLASH_TOOLS_USER + +//the if / elif / else syntax below is intentionally explicit +//to help find cases where the TARGET_ARCH is set to an unknown +//value (e.g. someone mis-spells PPC64EL and PPC64LE). +#if defined TARGET_ARCH_PPC64EL + #define CFLASH_LITTLE_ENDIAN_HOST 1 +#elif defined TARGET_ARCH_PPC64BE + #define CFLASH_BIG_ENDIAN_HOST 1 +#else + #error "Unknown Architecture. This typically indicates a makefile or environment variable error. Check the TARGET_ARCH env var." +#endif + +/* + * Macros to convert to and from bus byte order. Note that these macros + * assume (and require) that shorts are 16 bits and uint32_ts are 32 bits. + * + * + * CFLSH_REV16() Byte reverse 16-bit value. + * CFLSH_REV24() Byte reverse 24-bit value. + * CFLSH_REV32() Byte reverse 32-bit value. + * CFLSH_REV64() Byte reverse 64-bit value. 
+ */ +#define CFLASH_REV16(h) ((ushort) ( \ + (((ushort)(h)<<8) & 0xff00) | \ + (((ushort)(h)>>8) & 0x00ff) \ + )) +#define CFLASH_REV24(w) ((uint32_t) ( \ + (((uint32_t)(w)>>16) & 0x000000ff) | \ + ((uint32_t)(w) & 0x0000ff00) | \ + (((uint32_t)(w)<<16) & 0x00ff0000) \ + )) + +#define CFLASH_REV32(w) ((uint32_t) ( \ + (((uint32_t)(w)>>24) & 0x000000ff) | \ + (((uint32_t)(w)>> 8) & 0x0000ff00) | \ + (((uint32_t)(w)<< 8) & 0x00ff0000) | \ + (((uint32_t)(w)<<24) & 0xff000000) \ + )) + +#define CFLASH_MASKSHIFT(x) ((uint64_t)0xff << (x)) + +#define CFLASH_REV64(w) ((uint64_t) ( \ + (((uint64_t)(w)>>56) & CFLASH_MASKSHIFT( 0)) | \ + (((uint64_t)(w)>>40) & CFLASH_MASKSHIFT( 8)) | \ + (((uint64_t)(w)>>24) & CFLASH_MASKSHIFT(16)) | \ + (((uint64_t)(w)>> 8) & CFLASH_MASKSHIFT(24)) | \ + (((uint64_t)(w)<< 8) & CFLASH_MASKSHIFT(32)) | \ + (((uint64_t)(w)<<24) & CFLASH_MASKSHIFT(40)) | \ + (((uint64_t)(w)<<40) & CFLASH_MASKSHIFT(48)) | \ + (((uint64_t)(w)<<56) & CFLASH_MASKSHIFT(56)) \ + )) + +#ifdef CFLASH_LITTLE_ENDIAN_HOST + +/* + * Little Endian Host + */ +// TODO:?? Maybe use byteswap.h and __bswap_ routines here for Linux. +#define CFLASH_TO_ADAP16(h) CFLASH_REV16(h) + +#define CFLASH_TO_ADAP32(w) CFLASH_REV32(w) + +#define CFLASH_TO_ADAP64(w) CFLASH_REV64(w) + +#define CFLASH_FROM_ADAP16(h) CFLASH_REV16(h) + +#define CFLASH_FROM_ADAP32(w) CFLASH_REV32(w) + +#define CFLASH_FROM_ADAP64(w) CFLASH_REV64(w) + +#else + +/* + * BIG Endian Host + */ +#define CFLASH_TO_ADAP16(h) (h) + +#define CFLASH_TO_ADAP32(w) (w) + +#define CFLASH_TO_ADAP64(w) (w) + +#define CFLASH_FROM_ADAP16(h) (h) + +#define CFLASH_FROM_ADAP32(w) (w) + +#define CFLASH_FROM_ADAP64(w) (w) + + +#endif + + +#ifdef DEBUG +#include +#define DEBUG_PRNT(fmt, ...) fprintf(stderr,fmt,## __VA_ARGS__) +#else +#define DEBUG_PRNT(...) 
+#endif + + +/************************************************************************/ +/* Function prototypes */ +/************************************************************************/ +void dumppage(void *buffer, int size); +void hexdump(void *data, long len, const char *hdr); + + +#endif /* _H_CFLASH_TOOLS_USER */ diff --git a/src/include/cxl.h b/src/include/cxl.h new file mode 100644 index 00000000..18a1ab70 --- /dev/null +++ b/src/include/cxl.h @@ -0,0 +1,109 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/cxl.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _MISC_CXL_H +#define _MISC_CXL_H + + +#if !defined(_AIX) && !defined(_MACOSX) +#include +#include +#else +#include +#endif /* !_AIX */ + + +struct cxl_ioctl_start_work { + __u64 flags; + __u64 work_element_descriptor; + __u64 amr; + __s16 num_interrupts; + __s16 reserved1; + __s32 reserved2; + __u64 reserved3; + __u64 reserved4; + __u64 reserved5; + __u64 reserved6; +}; + +#define CXL_START_WORK_AMR 0x0000000000000001ULL +#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL +#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ + CXL_START_WORK_NUM_IRQS) + +/* ioctl numbers */ +#define CXL_MAGIC 0xCA +#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work) +#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32) + +#define CXL_READ_MIN_SIZE 0x1000 /* 4K */ + +/* Events from read() */ +enum cxl_event_type { + CXL_EVENT_RESERVED = 0, + CXL_EVENT_AFU_INTERRUPT = 1, + CXL_EVENT_DATA_STORAGE = 2, + CXL_EVENT_AFU_ERROR = 3, +}; + +struct cxl_event_header { + __u16 type; + __u16 size; + __u16 process_element; + __u16 reserved1; +}; + +struct cxl_event_afu_interrupt { + __u16 flags; + __u16 irq; /* Raised AFU interrupt number */ + __u32 reserved1; +}; + +struct cxl_event_data_storage { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 addr; + __u64 dsisr; + __u64 reserved3; +}; + +struct cxl_event_afu_error { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 error; +}; + +struct cxl_event { + struct cxl_event_header header; + union { + struct cxl_event_afu_interrupt irq; + struct cxl_event_data_storage fault; + struct cxl_event_afu_error afu_error; + }; +}; + +#endif /* _MISC_CXL_H */ diff --git a/src/include/libcxl.h b/src/include/libcxl.h new file mode 100644 index 00000000..de6daa9e --- /dev/null +++ b/src/include/libcxl.h @@ -0,0 +1,148 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/include/libcxl.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _LIBCXL_H +#define _LIBCXL_H + +#include +#include "cxl.h" + +/* + * This is a very early library to simplify userspace code accessing a CXL + * device. + * + * Currently there are only a couple of functions here - more is on the way. + * + * Suggestions to improve the library, simplify it's usage, add additional + * functionality, etc. are welcome + */ + +#define CXL_SYSFS_CLASS "/sys/class/cxl" +#define CXL_DEV_DIR "/dev/cxl" + +/* + * Opaque types + */ +struct cxl_adapter_h; +struct cxl_afu_h; + +/* + * Adapter Enumeration + * + * Repeatedly call cxl_adapter_next() (or use the cxl_for_each_adapter macro) + * to enumerate the available CXL adapters. + * + * cxl_adapter_next() will implicitly free used buffers if it is called on the + * last adapter, or cxl_adapter_free() can be called explicitly. 
+ */ +struct cxl_adapter_h * cxl_adapter_next(struct cxl_adapter_h *adapter); +char * cxl_adapter_devname(struct cxl_adapter_h *adapter); +void cxl_adapter_free(struct cxl_adapter_h *adapter); +#define cxl_for_each_adapter(adapter) \ + for (adapter = cxl_adapter_next(NULL); adapter; adapter = cxl_adapter_next(adapter)) + +/* + * AFU Enumeration + * + * Repeatedly call cxl_adapter_afu_next() (or use the + * cxl_for_each_adapter_afu macro) to enumerate AFUs on a specific CXL + * adapter, or use cxl_afu_next() or cxl_for_each_afu to enumerate AFUs over + * all CXL adapters in the system. + * + * For instance, if you just want to find any AFU attached to the system but + * don't particularly care which one, just do: + * struct cxl_afu_h *afu_h = cxl_afu_next(NULL); + * + * cxl_[adapter]_afu_next() will implicitly free used buffers if it is called + * on the last AFU, or cxl_afu_free() can be called explicitly. + */ +struct cxl_afu_h * cxl_adapter_afu_next(struct cxl_adapter_h *adapter, struct cxl_afu_h *afu); +struct cxl_afu_h * cxl_afu_next(struct cxl_afu_h *afu); +char * cxl_afu_devname(struct cxl_afu_h *afu); +#define cxl_for_each_adapter_afu(adapter, afu) \ + for (afu = cxl_adapter_afu_next(adapter, NULL); afu; afu = cxl_adapter_afu_next(NULL, afu)) +#define cxl_for_each_afu(afu) \ + for (afu = cxl_afu_next(NULL); afu; afu = cxl_afu_next(afu)) + + +/* + * Open AFU - either by path, by AFU being enumerated, or tie into an AFU file + * descriptor that has already been opened. The AFU file descriptor will be + * closed by cxl_afu_free() regardless of how it was opened. 
+ */ +struct cxl_afu_h * cxl_afu_open_dev(char *path); +int cxl_afu_open_h(struct cxl_afu_h *afu, unsigned long master); +struct cxl_afu_h * cxl_afu_fd_to_h(int fd); +void cxl_afu_free(struct cxl_afu_h *afu); + +/* + * Attach AFU context to this process + */ +int cxl_afu_attach_full(struct cxl_afu_h *afu, __u64 wed, __u16 num_interrupts, + __u64 amr); +int cxl_afu_attach(struct cxl_afu_h *afu, __u64 wed); + +/* + * Get AFU process element + */ +int cxl_afu_get_process_element(struct cxl_afu_h *afu); + +/* + * Returns the file descriptor for the open AFU to use with event loops. + * Returns -1 if the AFU is not open. + */ +int cxl_afu_fd(struct cxl_afu_h *afu); + +/* + * TODO: All in one function - opens an AFU, verifies the operating mode and + * attaches the context. + * int cxl_afu_open_and_attach(struct cxl_afu_h *afu, mode) + */ + +/* + * sysfs helpers + * + * NOTE: On success, these functions automatically allocate the returned + * buffers, which must be freed by the caller (much like asprintf). + */ +int cxl_afu_sysfs_pci(char **pathp, struct cxl_afu_h *afu); + +/* + * Events + */ +bool cxl_pending_event(struct cxl_afu_h *afu); +int cxl_read_event(struct cxl_afu_h *afu, struct cxl_event *event); +int cxl_read_expected_event(struct cxl_afu_h *afu, struct cxl_event *event, + __u32 type, __u16 irq); + +/* + * fprint wrappers to print out CXL events - useful for debugging. + * fprint_cxl_event will select the appropriate implementation based on the + * event type and fprint_cxl_unknown_event will print out a hex dump of the + * raw event. + */ +int fprint_cxl_event(FILE *stream, struct cxl_event *event); +int fprint_cxl_unknown_event(FILE *stream, struct cxl_event *event); + +#endif diff --git a/src/include/mclient.h b/src/include/mclient.h new file mode 100644 index 00000000..0254bd90 --- /dev/null +++ b/src/include/mclient.h @@ -0,0 +1,479 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/include/mclient.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _MCLIENT_H +#define _MCLIENT_H + +#include + +/**************************************************************************** + * Each user process needs two interfaces to use the AFU: + * + * 1. a host transport MMIO map to issue commands directly to the AFU. + * The client opens the regular AFU device, registers its process + * context with the AFU driver (CXL_IOCTL_START_WORK ioctl) and + * then mmaps a 64KB host transport that has registers to issue + * commands and setup responses. + * + * 2. an interface to the master context daemon via a mc_hndl_t. + * This is used to create & size resource handles which are equivalent + * to virtual disks. The resource handle is written into the command + * block (IOARCB) sent to the AFU using interface #1. + * + * This header file describes the second interface. See sislite.h for the + * first interface. + * + * User must create interface #1 first. Parameters returned by #1 are + * required to establish interface #2. + * + * All routines described here return 0 on success and -1 on failure. + * errno is set on failure. 
+ ***************************************************************************/ + +/* Max pathlen - e.g. for AFU device path */ +#define MC_PATHLEN 64 + +/* Permission Flags */ +#define MC_RDONLY 0x1u +#define MC_WRONLY 0x2u +#define MC_RDWR 0x3u + +/* mc_hndl_t is the client end point for communication to the + * master. After a fork(), the child must call mc_unregister + * on any inherited mc handles. It is a programming error to pass + * an inherited mc handle to any other API in the child. + */ +typedef void* mc_hndl_t; + + +/* mc_init & mc_term are called once per process */ +int mc_init(); +int mc_term(); + +/* + * mc_register outputs a client handle (mc_hndl_t) when successful. + * The handle is bound to the specified AFU and context handle. + * Subsequent API calls using the handle apply to that context + * and that AFU. The specified context handle must be owned by the + * requesting user process. + * + * The mc_hndl_t is a client endpoint for communication with the master + * context. It can be used to set up resources only for the single context + * on a single AFU bound to the mc handle. + * + * Users opening multiple AFU contexts must create multiple mc handles by + * registering each context. + * + * Client is responsible for serialization when the same mc handle is + * shared by multiple threads. + * + * Different threads can work on different mc handles in parallel w/o + * any serialization. + * + * It is possible to mc_register the same (AFU + ctx_hndl) multiple times + * creating multiple mc endpoints (mc_hndl) bound to the same context. + * However, to accomplish this, the mc_hdup function must be used to + * duplicate an existing mc_hndl. Calling mc_register more than once with + * the same parameters will cancel all but the most recent registration. + * + * Inputs: + * master_dev_path - e.g. /dev/cxl/afu0.0m + * The path must be to the master AFU device even if the + * user opened the corresponding regular device. 
+ * ctx_hndl - User's context handle to register with master. + * This is the AFU ctx_id or context handle returned by the + * CXL_IOCTL_START_WORK ioctl. For surelock, this is + * a number between 0 & 511. The ctx_hndl is allocated by + * the kernel AFU driver without any involvement of the + * master context. + * p_mmio_map - pointer to user's MMIO map + * + * Output: + * p_mc_hndl - The output mc_hndl_t is written using this pointer. + * The mc_hndl_t is passed in all subsequent calls. + * + */ +int mc_register(char *master_dev_path, ctx_hndl_t ctx_hndl, + volatile __u64 *p_mmio_map, mc_hndl_t *p_mc_hndl); + + +/* Duplicate a previously registered mc handle - e.g. for use in + * another thread. + * + * Inputs: + * mc_hndl - registered handle identifying (AFU + context) + * + * Output: + * p_mc_hndl_new - The output mc_hndl_t is written using this pointer. + * The new handle is bound to the same (AFU + context) + * and is interchangeable with the original handle. + * + * Note: mc_unregister must be called on the duplicated handle when no + * longer needed. The duplicate mc handle and the original access the + * same resource handles because they refer to the same (AFU + context). + */ +int mc_hdup(mc_hndl_t mc_hndl, mc_hndl_t *p_mc_hndl_new); + + +/* mc_unregister invalidates the mc handle. All resource + * handles under the input mc handle are released if this mc_hndl is + * the only reference to those resources. + * + * Note that after a dup (see below), two contexts can be referencing + * the same resource handles. In that case, unregistering one context + * must not release resource handles since they are still referenced + * by the other context. + * + * Inputs: + * mc_hndl - a mc_hndl that specifies a (context + AFU) + * to unregister + * + * Output: + * none + */ +int mc_unregister(mc_hndl_t mc_hndl); + + +/* mc_open creates a zero sized virtual LBA space or a virtual disk + * on the AFU and context bound to the mc handle. 
+ * + * Inputs: + * mc_hndl - mc handle that specifies a (context + AFU) + * + * flags - permission flags (see #defines) + * + * Output: + * p_res_hndl - A resource handle allocated by master is written using + * this pointer. The handle is valid only in the scope of + * the AFU context bound to the input mc_hndl or any + * contexts dup'ed to it. User must not mix up resource + * handles from different contexts. Resource handle A for + * AFU context 1 and handle B for AFU context 2 can have the + * same internal representation, but they refer to two + * completely different virtual LBA spaces. + */ +int mc_open(mc_hndl_t mc_hndl, __u64 flags, res_hndl_t *p_res_hndl); + + +/* mc_close deallocates any resources (LBAs) allocated to a virtual + * disk if this context is the last reference to those resources. + * + * Inputs: + * mc_hndl - client handle that specifies a (context + AFU) + * res_hndl - resource handle identifying the virtual disk + * to close + * + * Output: + * none + */ +int mc_close(mc_hndl_t mc_hndl, res_hndl_t res_hndl); + + +/* mc_size grows or shrinks a virtual disk to a new size. The size is + * in units of a chunk which is the minimum allocation unit for a + * virtual disk. + * + * Inputs: + * mc_hndl - client handle that specifies a (context + AFU) + * res_hndl - resource handle identifying the virtual disk + * to resize + * new_size - desired size in chunks + * + * Output: + * p_actual_new_size - points to location where actual new size is + * written. For a grow request, the actual size can + * be less than the requested size. + */ +int mc_size(mc_hndl_t mc_hndl, res_hndl_t res_hndl, + __u64 new_size, __u64 *p_actual_new_size); + + +/* mc_xlate_lba returns a physical LBA for a given virtual LBA of + * the input virtual disk. 
+ * + * Inputs: + * mc_hndl - client handle that specifies a (context + AFU) + * res_hndl - resource handle identifying the virtual disk + * to translate + * v_lba - virtual LBA in res_hndl + * + * Output: + * p_lba - pointer to location that will contain the + * physical LBA. + * Note: + * This function is of limited use to a user since all users must + * run with LBA translation on. Normal user processes are not + * permitted to issue requests directly to physical LBAs. + * + */ +int mc_xlate_lba(mc_hndl_t mc_hndl, res_hndl_t res_hndl, + __u64 v_lba, __u64 *p_lba); + +/* mc_clone copies all virtual disks (resource handles) in a source + * context into a destination context. The destination context must + * not have any virtual disks open or the clone will fail. + * + * The source context must be owned by the requesting user process. + * + * Clone is a snapshot facility. After the clone, changes to the + * source context does not affect the destination and vice versa. + * If the source context opened a new virtual disk after the clone + * and the destination did the same, the two new disks are completely + * different and are seen only by the sole context that created it. + * Similarly, if a virtual disk was resized after the clone in the + * source context, the destination still has the original size. + * However, until copy-on-write is supported, cloned and original + * resource handles point to the same LBAs and their access must + * be coordinated by the user. + * + * Clone is supported only on the same AFU and NOT across AFUs. + * + * Inputs: + * mc_hndl - a mc_hndl that specifies a (context + AFU) + * as the destination of the clone + * This context must have no resources open. + * mc_hndl_src - source mc handle to clone from + * flags - permission flags (see #defines) + * + * Output: + * none + * + * Note: + * + * Clone copies all open resource handles in the source context to + * the destination. 
There is no interface to select which resource + * handles to clone. + * + * After clone, the state of resource handles in the destination + * are as if they have been opened and sized. + * + * There is no interface to query the resource handles that get + * cloned into and are valid in the destination. These handles are + * the same handles valid in the source context that the user + * already knows about. + * Example: + * 1. user created res_hndls 0, 1, 2 & 3 in context=5. + * 2. closed handles 1 & 2 in context 5. + * 3. then cloned context 5 into a new context 8. + * 4. The resource handles valid in context 8 now are 0 & 3. + * + * The flags can only restrict the clone to a subset of the + * permissions in the original. It cannot be used to gain new + * permissions not in the original. For example, if the original + * was Readonly, and flags=RW, the clone is Readonly. + * + */ +int mc_clone(mc_hndl_t mc_hndl, mc_hndl_t mc_hndl_src, + __u64 flags); + +/* + * mc_dup allows any number of contexts to be linked together so that + * they are interchangeable and share all resources (virtual disks). + * Two contexts are linked per call: a destination context to a + * candidate context. The call can be repeated to link more than 2 + * contexts. The destination context must not have any resources open + * or be in a dup'ed group already, else dup will fail. + * + * The candidate context must be owned by the requesting user process. + * + * A resource handle created using one context is valid in any other + * context dup'ed to it. By default (w/o dup), resource handles are + * local and private to the context that created it. Further, a resource + * handle can be opened in one context and closed in another dup'ed + * context. Resizing a resource changes the virtual disk for all dup'ed + * contexts. + * + * dup allows 2 or more contexts to do IO to the same virtual disks w/o + * any restrictions. 
+ * + * Note that each context has an implementation defined limit (N) on how + * many virtual disks it can open. mc_dup reduces the total number of + * virtual disks for the process as a whole. If a process opens 2 contexts, + * without dup, each context can open N virtual disks for a total of 2*N. + * If instead the 2 contexts are duped to each other, the same process can + * now open only N disks. + * + * Inputs: + * mc_hndl - a mc_hndl that specifies a (context + AFU) + * as the context to dup to (destination) + * This context must have no resources open. + * mc_hndl_cand - candidate context to dup from + * + * Output: + * none + * + * Dup is supported only on the same AFU and NOT across AFUs. + * + */ +int mc_dup(mc_hndl_t mc_hndl, mc_hndl_t mc_hndl_cand); + + +/* mc_stat is analogous to fstat in POSIX. It returns information on + * a virtual disk. + * + * Inputs: + * mc_hndl - client handle that specifies a (context + AFU) + * res_hndl - resource handle identifying the virtual disk + * to query + * + * Output: + * p_mc_stat - pointer to location that will contain the + * output data + */ +typedef struct mc_stat_s { + __u32 blk_len; /* length of 1 block in bytes as reported by device */ + __u8 nmask; /* chunk_size = (1 << nmask) in device blocks */ + __u8 rsvd[3]; + __u64 size; /* current size of the res_hndl in chunks */ + __u64 flags; /* permission flags */ +} mc_stat_t; + +int mc_stat(mc_hndl_t mc_hndl, res_hndl_t res_hndl, + mc_stat_t *p_mc_stat); + + +/* In the course of doing IOs, the user may be the first to notice certain + * critical events on the AFU or the backend storage. mc_notify allows a + * user to pass such information to the master. The master will verify the + * events and can take appropriate action. + * + * Inputs: + * mc_hndl - client handle that specifies a (context + AFU) + * The event pertains to this AFU. 
+ * + * p_mc_notify - pointer to location that contains the event + * + * Output: + */ +typedef struct mc_notify_s { + __u8 event; /* MC_NOTIF_xxx */ +#define MC_NOTIFY_CMD_TIMEOUT 0x01 /* user command timeout */ +#define MC_NOTIFY_SCSI_SENSE 0x02 /* interesting sense data */ +#define MC_NOTIFY_AFU_EEH 0x03 /* user detected AFU is frozen */ +#define MC_NOTIFY_AFU_RST 0x04 /* user detected AFU has been reset */ +#define MC_NOTIFY_AFU_ERR 0x05 /* other AFU error, unexpected response */ + /* + * Note: the event must be sent on a mc_hndl_t that pertains to the + * affected AFU. This is important when the user interacts with multiple + * AFUs. + */ + + union { + struct { + res_hndl_t res_hndl; + } cmd_timeout; + + struct { + res_hndl_t res_hndl; + char data[20]; /* 20 bytes of sense data */ + } scsi_sense; + + struct { + } afu_eeh; + + struct { + } afu_rst; + + struct { + } afu_err; + }; +} mc_notify_t; + +int mc_notify(mc_hndl_t mc_hndl, mc_notify_t *p_mc_notify); + + +// can we force r0 in asm ? so we do not need the "zero" to alloc a register + +/* The write_nn or read_nn routines can be used to do byte reversed MMIO + or byte reversed SCSI CDB/data. 
+*/ +static inline void write_64(volatile __u64 *addr, __u64 val) +{ + __u64 zero = 0; +#ifndef _AIX + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u64 *)(addr)) = val; +#endif /* _AIX */ +} + +static inline void write_32(volatile __u32 *addr, __u32 val) +{ + __u32 zero = 0; +#ifndef _AIX + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u32 *)(addr)) = val; +#endif /* _AIX */ +} + +static inline void write_16(volatile __u16 *addr, __u16 val) +{ + __u16 zero = 0; +#ifndef _AIX + asm volatile ( "sthbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + *((volatile __u16 *)(addr)) = val; +#endif /* _AIX */ +} + +static inline __u64 read_64(volatile __u64 *addr) +{ + __u64 val; + __u64 zero = 0; +#ifndef _AIX + asm volatile ( "ldbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u64 *)(addr)); +#endif /* _AIX */ + + return val; +} + +static inline __u32 read_32(volatile __u32 *addr) +{ + __u32 val; + __u32 zero = 0; +#ifndef _AIX + asm volatile ( "lwbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u32 *)(addr)); +#endif /* _AIX */ + return val; +} + +static inline __u16 read_16(volatile __u16 *addr) +{ + __u16 val; + __u16 zero = 0; +#ifndef _AIX + asm volatile ( "lhbrx %0, %1, %2" : "=r"(val) : "r"(zero), "r"(addr) ); +#else + val = *((volatile __u16 *)(addr)); +#endif /* _AIX */ + return val; +} + +#endif /* ifndef _MCLIENT_H */ + diff --git a/src/include/prov.h b/src/include/prov.h new file mode 100644 index 00000000..ab9b8715 --- /dev/null +++ b/src/include/prov.h @@ -0,0 +1,148 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/prov.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +#ifndef _PROV_H +#define _PROV_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include /* for NUM_FC_PORTS */ + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +#define SL_INI_SINI_MARKER 0x53494e49 +#define SL_INI_ELMD_MARKER 0x454c4d44 +/*----------------------------------------------------------------------------*/ +/* Types */ +/*----------------------------------------------------------------------------*/ +/** + * @brief mserv packed binary config element + * There may be any number of these. Each element describes all necessary info + * for MServ to initialize the resources described by the element. This + * typically includes the AFU, port names, and other info. 
+ */
+typedef struct capikv_ini_elm {
+ __u64 elmd_marker; // element data marker: set to 0x454c4d44
+ __u64 lun_id; // lun_id to use (only 1)
+ __u64 wwpn[SURELOCK_NUM_FC_PORTS]; // wwpn of AFU ports
+ char afu_dev_path[64]; // non master path to /dev
+ char afu_pci_path[64]; // non master path to /sys
+ char afu_dev_pathm[64]; // master path to /dev
+ char afu_pci_pathm[64]; // master path to /sys
+ /* future expansions go here */
+}capikv_ini_elm_t;
+
+/**
+ * @brief mserv config header
+ * This header structure provides a description of how many init elements are
+ * contained in the variable-length struct. Note that we may have anywhere from
+ * 0 to N
+ */
+typedef struct capikv_ini {
+ __u32 sini_marker; // set to 0x53494e49
+ __u32 flags; // to version or for other purposes, presently 0
+ __u32 nelm; // number of elements
+ __u32 size; // size of each element
+ __u32 rsvd[8]; // must be zeroed
+
+ /*
+ * NOTE:
+ *
+ * 1. to maintain backward compatibility, the header of the ini file
+ * (i.e. the fields above) cannot change nor can the header be
+ * expanded.
+ *
+ * 2. Each element can be expanded by adding fields to the end but they
+ * cannot be shrunk.
+ *
+ * 3. any reserved field must be zeroed
+ *
+ */
+
+ struct capikv_ini_elm elm[1]; // variable length elements, 1 per AFU
+ // minimum 1 AFU required
+}capikv_ini_t;
+
+
+
+/*----------------------------------------------------------------------------*/
+/* Function Prototypes */
+/*----------------------------------------------------------------------------*/
+
+
+/**
+ * @brief Initialize an adapter
+ * @param i_adapter device to be init'd
+ */
+bool provInitAdapter(const prov_adapter_info_t* i_adapter);
+
+/**
+ * @brief Get All WWPNs for ALL adapters in the system
+ * Note - caller is responsible for allocating and freeing o_wwpn_info!
+ * Code will return up to io_num_wwpns.
+ * @param io_wwpn_info empty buffer for wwpn_info.
will be filled in on success
+ * @param io_num_wwpns input - max # of wwpn structs caller would like; output - actual # of wwpn structs filled in by fn. may be zero on failure.
+ * @returns TRUE if an error occurs, or if the buffer is not large enough according to io_num_wwpns
+ */
+
+uint8_t provGetAllWWPNs(prov_wwpn_info_t* io_wwpn_info, uint16_t *io_num_wwpns);
+
+/**
+ * @brief List all Adapters found
+ * @param o_info output parm in which to place found adapter(s). If no adapters
+ * are found, this pointer will remain valid. Check io_num_adapters
+ * to know how many entries are returned.
+ * @param io_num_adapters input - max # of prov_adapter_info_t the caller will accept
+ * output - actual number of prov_adapter_info_t returned by the function
+ *
+ */
+bool provGetAllAdapters(prov_adapter_info_t* o_info, int* io_num_adapters);
+
+/**
+ * @brief Write config data for Master Context
+ * Write configuration file for capikv Master Context to the file system.
+ * @param i_inielem initialization element array starting member
+ * @param i_numelems number of i_inielem members
+ * @param i_flags any configuration flags (opaque)
+ * @param i_cfgfilename destination file to be written
+ * @returns true on success, false on failure
+ */
+bool provWriteMasterCfg(capikv_ini_elm_t* i_inielem, __u32 i_numelems, __u32 i_flags, char* i_cfgfilename);
+
+/**
+ * @brief Run a loopback test on a targeted adapter for a configurable amount of time.
+ * @param i_adapter Target card
+ * @param i_test_time_us Test time in microseconds for this run. Time must be at least 1 second, and can be up to 24 hours.
+ * @returns false on failure and true on pass of the test + */ +bool provLoopbackTest(const prov_adapter_info_t* i_adapter, uint64_t i_test_time_us); + +#endif //_PROV_H diff --git a/src/include/provextstructs.h b/src/include/provextstructs.h new file mode 100644 index 00000000..56e58c38 --- /dev/null +++ b/src/include/provextstructs.h @@ -0,0 +1,90 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/provextstructs.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + * @file provextstructs.h + * @brief Contains all external structures and types required for provisioning + * + * +*/ + +#ifndef _PROVEXTSTRUCTS_H +#define _PROVEXTSTRUCTS_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + +/*@{*/ // Special tag to say everything between it and the ending + // brace is a part of the #defines in doxygen. + +//def size of a WWPN in bytes, including a null terminator +#define WWPN_BUFFER_LENGTH 17 +//def max size of device path buffer, including a null terminator +#define DEV_PATH_LENGTH 64 + +/*@}*/ // Ending tag for external constants in doxygen + +/*----------------------------------------------------------------------------*/ +/* Enumerations */ +/*----------------------------------------------------------------------------*/ +/** + * \defgroup ExternalEnum External Enumerations + */ +/*@{*/ // Special tag to say everything between it and the ending + // brace is a part of the external enum module in doxygen. + +typedef struct prov_adapter_info +{ + char wwpn[MAX_WWPNS_PER_ADAPTER][WWPN_BUFFER_LENGTH]; + char pci_path[DEV_PATH_LENGTH]; + char afu_name[DEV_PATH_LENGTH]; +}prov_adapter_info_t; + +/** + * @struct prov_wwpn_info_t + * @brief typedef struct that provides a given WWPN for provisioning + */ +typedef struct prov_wwpn_info +{ + char wwpn[WWPN_BUFFER_LENGTH]; /** ASCII representation of the WWPN (16 char + a null) */ + AFU_PORT_ID port_id; /** What port is this WWPN describing? 
*/ + char afu_path[DEV_PATH_LENGTH]; /** CXL Device that owns this WWPN */ + char pci_path[DEV_PATH_LENGTH]; /** PCI device that owns this WWPN */ +}prov_wwpn_info_t; + + +/*@}*/ // Ending tag for external structure module in doxygen + + + + +#endif //_PROVEXTSTRUCTS_H diff --git a/src/include/provutil.h b/src/include/provutil.h new file mode 100755 index 00000000..4272e81b --- /dev/null +++ b/src/include/provutil.h @@ -0,0 +1,91 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/provutil.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _PROVUTIL_H +#define _PROVUTIL_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ + +extern int32_t g_traceE; /* error traces */ +extern int32_t g_traceI; /* informative 'where we are in code' traces */ +extern int32_t g_traceF; /* function exit/enter */ +extern int32_t g_traceV; /* verbose trace...lots of information */ + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + +#define TRACEE(FMT, args...) if(g_traceE) \ +do \ +{ \ +char __data__[256]; \ +memset(__data__,0,256); \ +sprintf(__data__,"%s %s: " FMT, __FILE__, __func__, ## args); \ +perror(__data__); \ +} while(0) + +#define TRACEF(FMT, args...) if(g_traceF) \ +{ \ +printf("%s %s: " FMT, __FILE__, __func__ ,## args); \ +} + +#define TRACEI(FMT, args...) if(g_traceI) \ +{ \ +printf("%s %s: " FMT, __FILE__, __func__ ,## args); \ +} + +#define TRACEV(FMT, args...) if(g_traceV) printf("%s %s: " FMT, __FILE__, __func__ ,## args) + +#define TRACED(FMT, args...) printf(FMT,## args) + +#if 0 + +#define TRACEE(FMT, args...) +#define TRACEF(FMT, args...) +#define TRACEI(FMT, args...) +#define TRACEV(FMT, args...) +#define TRACED(FMT, args...) 
+ +#endif + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + + +void prov_pretty_print(uint8_t *buffer, + uint32_t buffer_length); + + +#endif diff --git a/src/include/revtags.h b/src/include/revtags.h new file mode 100644 index 00000000..46cde036 --- /dev/null +++ b/src/include/revtags.h @@ -0,0 +1,29 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/revtags.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +//If we allow this, then add the following +#define REVISION_TAGS(MOD) \ +const char REVISION_ ## MOD ## _gitrev[] = "$Rev: " GITREVISION " $"; \ +const char REVISION_ ## MOD ## _timestamp[] = "$Timestamp: " __DATE__ " " __TIME__ " $"; diff --git a/src/include/sislite.h b/src/include/sislite.h new file mode 100755 index 00000000..1d29db09 --- /dev/null +++ b/src/include/sislite.h @@ -0,0 +1,416 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/include/sislite.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _SISLITE_H +#define _SISLITE_H + +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif /* !_AIX && ! 
_MACOSX */
+#if defined(_AIX) || defined(_MACOSX)
+#include
+#endif /* AIX */
+
+/************************************************************************/
+/* CAPI Flash SIS LITE Register offsets */
+/************************************************************************/
+
+/* Byte offsets to various host interface registers */
+#define CAPI_ENDIAN_CTRL_OFFSET 0x00 /* Endian control */
+#define CAPI_INTR_STATUS_OFFSET 0x08 /* Interrupt Status register */
+#define CAPI_INTR_CLEAR_OFFSET 0x10 /* Interrupt Clear register */
+#define CAPI_INTR_MASK_OFFSET 0x18 /* Interrupt Mask register */
+#define CAPI_IOARRIN_OFFSET 0x20 /* IOARRIN register */
+#define CAPI_RRQ0_START_EA_OFFSET 0x28 /* RRQ#0 start EA register */
+#define CAPI_RRQ0_END_EA_OFFSET 0x30 /* RRQ#0 end EA register */
+#define CAPI_CMD_ROOM_OFFSET 0x38 /* CMD_ROOM register */
+#define CAPI_CTX_CTRL_OFFSET 0x40 /* Context Control register */
+#define CAPI_MBOX_W_OFFSET 0x48 /* Mailbox write register */
+
+
+#define CAPI_AFU_GLOBAL_OFFSET 0x10000 /* Offset of AFU Global area */
+
+/************************************************************************/
+/* IOARCB: 64 bytes, min 16 byte alignment required */
+/************************************************************************/
+
+typedef __u16 ctx_hndl_t;
+typedef __u32 res_hndl_t;
+
+typedef struct sisl_ioarcb_s {
+ __u16 ctx_id; /* ctx_hndl_t */
+ __u16 req_flags;
+#define SISL_REQ_FLAGS_RES_HNDL 0x8000u /* bit 0 (MSB) */
+#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000u
+
+#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000u /* bit 1 */
+
+#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000u /* bits 8,9 */
+#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040u
+#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080u
+#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0u
+
+#define SISL_REQ_FLAGS_AFU_CMD 0x0002u /* bit 14 */
+
+#define SISL_REQ_FLAGS_HOST_WRITE 0x0001u /* bit 15 (LSB) */
+#define SISL_REQ_FLAGS_HOST_READ 0x0000u
+
+ union {
+ __u32 res_hndl; /* res_hndl_t */
+ __u32 port_sel; /* 
this is a selection mask: + * 0x1 -> port#0 can be selected, + * 0x2 -> port#1 can be selected. + * Can be bitwise ORed. + */ + }; + __u64 lun_id; + __u32 data_len; /* 4K for read/write */ + __u32 ioadl_len; + union { + __u64 data_ea; /* min 16 byte aligned */ + __u64 ioadl_ea; + }; + __u8 msi; /* LISN to send on RRQ write */ +#define SISL_MSI_PSL_XLATE 0 /* reserved for PSL */ +#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */ +#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */ +#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */ + /* The above LISN allocation permits user contexts to use 3 interrupts. + * Only master needs 4. This saves IRQs on the system. + */ + + __u8 rrq; /* 0 for a single RRQ */ + __u16 timeout; /* in units specified by req_flags */ + __u32 rsvd1; + __u8 cdb[16]; /* must be in big endian */ + __u64 rsvd2; +} sisl_ioarcb_t; + +struct sisl_rc { + __u8 flags; +#define SISL_RC_FLAGS_SENSE_VALID 0x80u +#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40u +#define SISL_RC_FLAGS_OVERRUN 0x20u +#define SISL_RC_FLAGS_UNDERRUN 0x10u + + __u8 afu_rc; +#define SISL_AFU_RC_RHT_INVALID 0x01u /* user error */ +#define SISL_AFU_RC_RHT_UNALIGNED 0x02u /* should never happen */ +#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */ +#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra + may retry if afu_retry is off + possible on master exit + */ +#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */ +#define SISL_AFU_RC_LXT_UNALIGNED 0x12u /* should never happen */ +#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */ +#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra + may retry if afu_retry is off + possible on master exit + */ +#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */ + +#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible when master exited */ + + /* NO_CHANNELS means the FC ports selected by dest_port in + * IOARCB or in the LXT entry are down 
when the AFU tried to select + * a FC port. If the port went down on an active IO, it will set + * fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead. + */ +#define SISL_AFU_RC_NO_CHANNELS 0x20u /* see afu_extra, may retry */ +#define SISL_AFU_RC_CAP_VIOLATION 0x21u /* either user error or + afu reset/master restart + */ +#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30u /* always retry */ +#define SISL_AFU_RC_DATA_DMA_ERR 0x31u /* see afu_extra + may retry if afu_retry is off + */ +#define SISL_AFU_RC_TIMED_OUT_PRE_FC 0x50u /* The IOARCB timed out in the AFU + prior to it be sent to the AFU's + FC module. This most likely + indicates AFU is under heavy load + */ +#define SISL_AFU_RC_TIMED_OUT 0x51u /* The IOARCB timed out in the AFU + after it was sent to the AFU's + FC module. + */ + + __u8 scsi_rc; /* SCSI status byte, retry as appropriate */ +#define SISL_SCSI_RC_CHECK 0x02u +#define SISL_SCSI_RC_BUSY 0x08u + + __u8 fc_rc; /* retry */ + /* + * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) + * for commands that are in flight when a link goes down or is logged out. + * If the link is down or logged out before AFU selects the port, either + * it will choose the other port or we will get afu_rc=0x20 (no_channel) + * if there is no valid port to use. + * + * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these + * would happen if a frame is dropped and something times out. + * NOLOGI or LINKDOWN can be retried if the other port is up. + * RESIDERR can be retried as well. + * + * ABORTFAIL might indicate that lots of frames are getting CRC errors. + * So it maybe retried once and reset the link if it happens again. + * The link can also be reset on the CRC error threshold interrupt. 
+ */
+#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
+#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
+#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
+#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
+#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
+#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
+#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
+#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
+#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
+#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
+ reported len, possibly due to dropped
+ frames */
+#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
+
+};
+
+#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
+
+/* IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required */
+typedef struct sisl_ioasa_s {
+ union {
+ struct sisl_rc rc;
+ __u32 ioasc;
+#define SISL_IOASC_GOOD_COMPLETION 0x00000000u
+ };
+ __u32 resid;
+ __u8 port;
+ __u8 afu_extra;
+ /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
+ * afu_extra contains PSL response code. Useful codes are:
+ */
+#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein SW_Implication
+ * Enabled N/A
+ * Disabled retry
+ */
+#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
+ * afu_rc SW_Implication
+ * 0x04, 0x14 Indicates master exit.
+ * 0x31 user error.
+ */
+ /* when afu rc=0x20 (no channels):
+ * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
+ */
+#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
+#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
+
+ __u8 scsi_extra;
+ __u8 fc_extra;
+ __u8 sense_data[SISL_SENSE_DATA_LEN];
+
+ union {
+ __u64 host_use[4];
+ __u8 host_use_b[32];
+ };
+} sisl_ioasa_t;
+
+/* single request+response block: 128 bytes. 
+ cache line aligned for better performance. +*/ +typedef struct sisl_iocmd_s { + sisl_ioarcb_t rcb; + sisl_ioasa_t sa; +} sisl_iocmd_t __attribute__ ((aligned (128))); + +#define SISL_RESP_HANDLE_T_BIT 0x1ull /* Toggle bit */ + +/* MMIO space is required to support only 64-bit access */ + +/* per context host transport MMIO */ +struct sisl_host_map { + __u64 endian_ctrl; + __u64 intr_status; /* this sends LISN# programmed in ctx_ctrl. + * Only recovery in a PERM_ERR is a context exit since + * there is no way to tell which command caused the error. + */ +#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ull /* b59, user error */ +#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ull /* b60, user error */ +#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ull /* b61, user error */ +#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ull /* b62, user error */ + /* Page in wait accessing RCB/IOASA/RRQ is reported in b63. + * Same error in data/LXT/RHT access is reported via IOASA. + */ +#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ull /* b63, can be generated + * only when AFU auto retry is + * disabled. If user can determine + * the command that caused the error, + * it can be retried. 
+ */
+#define SISL_ISTATUS_UNMASK (0x001Full) /* 1 means unmasked */
+#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
+
+ __u64 intr_clear;
+ __u64 intr_mask;
+ __u64 ioarrin; /* only write what cmd_room permits */
+ __u64 rrq_start; /* start & end are both inclusive */
+ __u64 rrq_end; /* write sequence: start followed by end */
+ __u64 cmd_room;
+ __u64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
+ __u64 mbox_w; /* restricted use */
+};
+
+/* per context provisioning & control MMIO */
+struct sisl_ctrl_map {
+ __u64 rht_start;
+ __u64 rht_cnt_id;
+ /* both cnt & ctx_id args must be ull */
+#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
+
+ __u64 ctx_cap; /* afu_rc below is when the capability is violated */
+#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ull /* afu_rc 0x1a */
+#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ull /* afu_rc 0x21 */
+#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ull /* afu_rc 0x21 */
+ __u64 mbox_r;
+};
+
+/* single copy global regs */
+struct sisl_global_regs {
+ __u64 aintr_status;
+ /* In surelock, each FC port/link gets a byte of status */
+#define SISL_ASTATUS_FC0_OTHER 0x8000ull /* b48, other err, FC_ERRCAP[31:20] */
+#define SISL_ASTATUS_FC0_LOGO 0x4000ull /* b49, target sent FLOGI/PLOGI/LOGO
+ while logged in */
+#define SISL_ASTATUS_FC0_CRC_T 0x2000ull /* b50, CRC threshold exceeded */
+#define SISL_ASTATUS_FC0_LOGI_R 0x1000ull /* b51, login state machine timed out
+ and retrying */
+#define SISL_ASTATUS_FC0_LOGI_F 0x0800ull /* b52, login failed, FC_ERROR[19:0] */
+#define SISL_ASTATUS_FC0_LOGI_S 
0x0400ull /* b53, login succeeded */ +#define SISL_ASTATUS_FC0_LINK_DN 0x0200ull /* b54, link online to offline */ +#define SISL_ASTATUS_FC0_LINK_UP 0x0100ull /* b55, link offline to online */ + +#define SISL_ASTATUS_FC1_OTHER 0x0080ull /* b56 */ +#define SISL_ASTATUS_FC1_LOGO 0x0040ull /* b57 */ +#define SISL_ASTATUS_FC1_CRC_T 0x0020ull /* b58 */ +#define SISL_ASTATUS_FC1_LOGI_R 0x0010ull /* b59 */ +#define SISL_ASTATUS_FC1_LOGI_F 0x0008ull /* b60 */ +#define SISL_ASTATUS_FC1_LOGI_S 0x0004ull /* b61 */ +#define SISL_ASTATUS_FC1_LINK_DN 0x0002ull /* b62 */ +#define SISL_ASTATUS_FC1_LINK_UP 0x0001ull /* b63 */ + +#define SISL_ASTATUS_UNMASK 0xFFFFull /* 1 means unmasked */ +#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ + + __u64 aintr_clear; + __u64 aintr_mask; + __u64 afu_ctrl; + __u64 afu_hb; + __u64 afu_scratch_pad; + __u64 afu_port_sel; + __u64 afu_config; + __u64 rsvd[0xf8]; + __u64 afu_version; +}; + +#define SURELOCK_NUM_FC_PORTS 2 +#define SURELOCK_MAX_CONTEXT 512 /* how many contexts per afu */ + +struct sisl_global_map { + union { + struct sisl_global_regs regs; + char page0[0x1000]; /* page 0 */ + }; + + char page1[0x1000]; /* page 1 */ + __u64 fc_regs[SURELOCK_NUM_FC_PORTS][512]; /* pages 2 & 3, see afu_fc.h */ + __u64 fc_port[SURELOCK_NUM_FC_PORTS][512]; /* pages 4 & 5 (lun tbl) */ + +}; + + +struct surelock_afu_map { + union { + struct sisl_host_map host; + char harea[0x10000]; // 64KB each + } hosts[SURELOCK_MAX_CONTEXT]; + + union { + struct sisl_ctrl_map ctrl; + char carea[128]; // 128B each + } ctrls[SURELOCK_MAX_CONTEXT]; + + union { + struct sisl_global_map global; + char garea[0x10000]; // 64KB single block + }; +}; + + +/* LBA translation control blocks */ + +typedef struct sisl_lxt_entry { + __u64 rlba_base; /* bits 0:47 is base + * b48:55 is lun index + * b58:59 is write & read perms + * (if no perm, afu_rc=0x15) + * b60:63 is port_sel mask + */ + +} sisl_lxt_entry_t; + +typedef struct sisl_rht_entry { + 
sisl_lxt_entry_t *lxt_start; + __u32 lxt_cnt; + __u16 rsvd; + __u8 fp; /* format & perm nibbles. + * (if no perm, afu_rc=0x05) + */ + __u8 nmask; +} sisl_rht_entry_t __attribute__ ((aligned (16))); + +/* make the fp byte */ +#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm)) + +/* make the fp byte for a clone from a source fp and clone flags + * flags must be only 2 LSB bits. + */ +#define SISL_RHT_FP_CLONE(src_fp, clone_flags) ((src_fp) & (0xFC | (clone_flags))) + +/* extract the perm bits from a fp */ +#define SISL_RHT_PERM(fp) ((fp) & 0x3) + +#define RHT_PERM_READ 0x01u +#define RHT_PERM_WRITE 0x02u + +// AFU Sync Mode byte +#define AFU_LW_SYNC 0x0u +#define AFU_HW_SYNC 0x1u +#define AFU_GSYNC 0x2u + +#endif /* _SISLITE_H */ + diff --git a/src/include/trace_log.h b/src/include/trace_log.h new file mode 100644 index 00000000..8e7ee9e6 --- /dev/null +++ b/src/include/trace_log.h @@ -0,0 +1,544 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/trace_log.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
 */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+#ifndef _H_TRACE_LOG
+#define _H_TRACE_LOG
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/timeb.h>
+#include <pthread.h>
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+
+
+typedef struct trace_log_ext_arg_s {
+ int flags;
+#define TRACE_LOG_START_VALID 0x0001 /* Start time valid. */
+#define TRACE_LOG_NO_USE_LOG_NUM 0x0002 /* Use log_number - 1 for trace */
+ struct timeb start_time;
+ struct timeb last_time;
+ uint log_number;
+} trace_log_ext_arg_t;
+
+
+#define TRACE_LOG_EXT_OPEN_APPEND_FLG 0x1
+
+
+
+/*-------------------------------------------------------------------------
+ *
+ *
+ * To use these convenience macros for these trace facilities, one
+ * needs to create three global variables (static is optional)
+ * that are analogous to the log_filename,
+ * log_verbosity, and logfp below. Then the macros listed here would
+ * be cloned (renamed) and modified to use these three variables
+ */
+#ifdef _SAMPLE_
+static char *log_filename; /* Trace log filename */
+ /* This traces internal */
+ /* activities */
+static int log_verbosity = 0; /* Verbosity of traces in*/
+ /* log. */
+static FILE *logfp; /* File descriptor for */
+ /* trace log */
+
+static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
+
+#define TRACE_LOG_FILE_SETUP(filename) \
+do { \
+ setup_trace_log_file(&log_filename, &logfp, filename); \
+} while (0)
+
+
+#define TRACE_LOG_FILE_SET_VERBOSITY(new_verbosity) \
+do { \
+ log_verbosity = new_verbosity; \
+} while (0)
+
+#define TRACE_LOG_FILE_CLEANUP() \
+do { \
+ cleanup_trace_log_file(log_filename,logfp); \
+} while (0)
+
+
+#define TRACE_LOG_FILE_TRC(verbosity,fmt, ...) 
\ +do { \ + if ((log_filename != NULL) && \ + (verbosity <= log_verbosity)) { \ + pthread_mutex_lock(&log_lock); \ + trace_log_data(logfp,__FILE__,__FUNCTION__,__LINE__, \ + fmt,## __VA_ARGS__); \ + pthread_mutex_unlock(&log_lock); \ + } \ +} while (0) + + +#endif /* _SAMPLE_ */ + +/* ----------- End of sample macros ---------------------------*/ + + + +#define TRACE_LOG_DATA_TRC(tlog_lock,tlogfp,tlog_verbosity,tfilename,verbosity,fmt, ...) \ +do { \ + if ((tfilename != NULL) && \ + (verbosity <= tlog_verbosity)) { \ + pthread_mutex_lock(&tlog_lock); \ + trace_log_data(tlogfp,__FILE__,__FUNCTION__, \ + __LINE__,fmt,## __VA_ARGS__); \ + pthread_mutex_unlock(&tlog_lock); \ + } \ +} while (0) + + +/* ---------------------------------------------------------------------------- + * + * NAME: trace_log_data + * + * FUNCTION: Print a message to trace log. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline void trace_log_data(FILE *logfp,char *filename, char *function, + uint line_num,char *msg, ...) +{ + va_list ap; + static uint log_number = 0; + static struct timeb last_time; + struct timeb cur_time, log_time, delta_time; + static struct timeb start_time; + static int start_time_valid = FALSE; + + + ftime(&cur_time); + + if (!start_time_valid) { + + /* + * If start time is not set, then + * set it now. 
+ */ + + start_time = cur_time; + + start_time_valid = TRUE; + + log_time.time = 0; + log_time.millitm = 0; + + delta_time.time = 0; + delta_time.millitm = 0; + + + /* + * Print header + */ + fprintf(logfp,"---------------------------------------------------------------------------\n"); + fprintf(logfp,"Date for %s is %s at %s\n",__FILE__,__DATE__,__TIME__); + fprintf(logfp,"Index Sec msec delta dmsec Filename function, line ...\n"); + fprintf(logfp,"------- ----- ----- ----- ----- -------------------- ---------------------\n"); + + } else { + + /* + * Find time offset since starting time. + */ + + log_time.time = cur_time.time - start_time.time; + log_time.millitm = cur_time.millitm - start_time.millitm; + + delta_time.time = log_time.time - last_time.time; + delta_time.millitm = log_time.millitm - last_time.millitm; + } + + fprintf(logfp,"%7d %5d.%05d %5d.%05d %20s %s, line = %d :", + log_number,(int)log_time.time,log_time.millitm,(int)delta_time.time,delta_time.millitm,filename, function, line_num); + /* + * Initialize ap to store arguments after msg + */ + + va_start(ap,msg); + vfprintf(logfp, msg, ap); + va_end(ap); + + fprintf(logfp,"\n"); + + fflush(logfp); + + log_number++; + + last_time = log_time; + + return; + +} + +/* ---------------------------------------------------------------------------- + * + * NAME: trace_log_data_ext + * + * FUNCTION: Print a message to trace log. This + * function is the same as trace_log_data, except + * this function requires the caller to maintain + * the static variables via the extended argument. In addition + * it gives the caller additional control over logging. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline void trace_log_data_ext(trace_log_ext_arg_t *ext_arg, FILE *logfp,char *filename, char *function, + uint line_num,char *msg, ...) +{ + va_list ap; + struct timeb cur_time, log_time, delta_time; + uint print_log_number; + + if (ext_arg == NULL) { + + return; + } + + + if (ext_arg->flags & TRACE_LOG_NO_USE_LOG_NUM) { + + if (ext_arg->log_number > 0) { + print_log_number = ext_arg->log_number - 1; + } else { + print_log_number = 0; + } + + } else { + print_log_number = ext_arg->log_number; + } + + ftime(&cur_time); + + if (!(ext_arg->flags & TRACE_LOG_START_VALID)) { + + /* + * If start time is not set, then + * set it now. + */ + + ext_arg->start_time = cur_time; + + + ext_arg->flags |= TRACE_LOG_START_VALID; + + + log_time.time = 0; + log_time.millitm = 0; + + delta_time.time = 0; + delta_time.millitm = 0; + + + /* + * Print header + */ + fprintf(logfp,"---------------------------------------------------------------------------\n"); + fprintf(logfp,"Date for %s is %s at %s\n",__FILE__,__DATE__,__TIME__); + fprintf(logfp,"Index Sec msec delta dmsec Filename function, line ...\n"); + fprintf(logfp,"------- ----- ----- ----- ----- -------------------- ---------------------\n"); + + } else { + + /* + * Find time offset since starting time. 
+ */ + + log_time.time = cur_time.time - ext_arg->start_time.time; + log_time.millitm = cur_time.millitm - ext_arg->start_time.millitm; + + delta_time.time = log_time.time - ext_arg->last_time.time; + delta_time.millitm = log_time.millitm - ext_arg->last_time.millitm; + } + + fprintf(logfp,"%7d %5d.%05d %5d.%05d %20s %s, line = %d :", + print_log_number,(int)log_time.time,log_time.millitm,(int)delta_time.time,delta_time.millitm,filename, function, line_num); + /* + * Initialize ap to store arguments after msg + */ + + va_start(ap,msg); + vfprintf(logfp, msg, ap); + va_end(ap); + + fprintf(logfp,"\n"); + + fflush(logfp); + + if (!(ext_arg->flags & TRACE_LOG_NO_USE_LOG_NUM)) { + ext_arg->log_number++; + } + + ext_arg->last_time = log_time; + + return; + +} + + + + +/* ---------------------------------------------------------------------------- + * + * NAME: setup_log_file + * + * FUNCTION: Set up trace_log file + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline int setup_trace_log_file(char **log_filename, FILE **logfp,char *filename) +{ + + if ((*log_filename) && + (*logfp)) { + + fflush(*logfp); + fclose(*logfp); + } + + *log_filename = filename; + + if ((*logfp = fopen(*log_filename, "a")) == NULL) { + + fprintf (stderr, + "\nFailed to open log trace file %s\n",*log_filename); + + *log_filename = NULL; + + return 1; + } + + return 0; +} + + +/* ---------------------------------------------------------------------------- + * + * NAME: setup_log_file_ext + * + * FUNCTION: Set up trace_log file + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline int setup_trace_log_file_ext(char **log_filename, 
FILE **logfp,char *filename,int flags) +{ + char *open_mode_str = "w"; + + + if ((*log_filename) && + (*logfp)) { + + fflush(*logfp); + fclose(*logfp); + } + + *log_filename = filename; + + if (flags & TRACE_LOG_EXT_OPEN_APPEND_FLG) { + + open_mode_str = "a"; + } + + if ((*logfp = fopen(*log_filename, open_mode_str)) == NULL) { + + fprintf (stderr, + "\nFailed to open log trace file %s\n",*log_filename); + + *log_filename = NULL; + + return 1; + } + + return 0; +} + + + +/* ---------------------------------------------------------------------------- + * + * NAME: cleanup_trace_log_file + * + * FUNCTION: clean up and close trace log file. + * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline void cleanup_trace_log_file(char *log_filename,FILE *logfp) +{ + + + if ((log_filename) && + (logfp)) { + + fflush(logfp); + fclose(logfp); + } + + log_filename = NULL; + + return; +} +#ifdef _REMOVE +// This function is too specific to a an implementation so remove it. +/* ---------------------------------------------------------------------------- + * + * NAME: trace_log_init + * + * FUNCTION: Get environment variables. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +static inline void log_init(char **log_filename, FILE **logfp,int *trace_log_verbosity) +{ + + char *log = getenv("ZLIB_LOG"); + char *env_verbosity = getenv("ZLIB_LOG_VERBOSITY"); + int verbosity = 0; + + + + + if (log && env_verbosity) { + + /* + * Turn on logging + */ + + verbosity = atoi(env_verbosity); + + *trace_log_verbosity = verbosity; + + setup_trace_log_file(log_filename,logfp,log); + + } + return; +} + +#endif /* _REMOVE */ +#endif /* H_TRACE_LOG */ diff --git a/src/include/zmalloc.h b/src/include/zmalloc.h new file mode 100644 index 00000000..7e933d43 --- /dev/null +++ b/src/include/zmalloc.h @@ -0,0 +1,36 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/include/zmalloc.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
 */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __ZMALLOC_H_
+#define __ZMALLOC_H_
+
+#include <stddef.h>
+
+void *zmalloc(size_t size);
+
+void *zrealloc(void *ptr, size_t size);
+
+void zfree(void *ptr);
+
+#endif
diff --git a/src/kv/README.md b/src/kv/README.md
new file mode 100644
index 00000000..030ca155
--- /dev/null
+++ b/src/kv/README.md
@@ -0,0 +1,755 @@
+**ark\_create API**
+===================
+
+Purpose
+-------
+
+Create a key/value store instance
+
+Syntax
+------
+
+int ark\_create(path, ark, flags)
+
+**char \*** path;
+
+**ARK \*\*** ark;
+
+uint64\_t flags;
+
+Description
+-----------
+
+The **ark\_create** API will create a key/value store instance on the host system.
+
+The **path** parameter can be used to specify the special file (e.g. /dev/sdx) representative of the physical LUN created on the flash store. If the **path** parameter is not a special file, the API will assume it is a file to be used for the key/value store. If the file does not exist, it will be created. If **path** is NULL, memory will be used for the key/value store.
+
+The parameter, **flags**, will indicate the properties of the KV store. In the case of specifying a special file for the physical LUN, the user can specify whether the KV store is to use the physical LUN as is or to create the KV store in a virtual LUN. By default, the entire physical LUN will be used for the KV store. If a virtual LUN is desired, the **ARK\_KV\_VIRTUAL\_LUN** bit must be set in the flags parameter.
+
+In this revision, a KV store configured to use the entire physical LUN can be persisted. Persistence of a KV store allows the user to shut down an ARK instance and at a later time open the same physical LUN and load the previous ARK instance to the same state as it was when it closed. To configure an ARK instance to be persisted at shut down (ark\_delete), set the **ARK\_KV\_PERSIST\_STORE** bit in the flags parameter. By default, an ARK instance is not configured to be persisted. 
To load the persisted ARK instance resident on the physical LUN, set the **ARK\_KV\_PERSIST\_LOAD** bit in the flags parameter. By default, the persisted store, if present, will not be loaded and will potentially be overwritten by any new persisted data. + +In this revision, only physical LUN KV stores can be persisted. + +Upon successful completion, the handle parameter will represent the newly created key/value store instance to be used for future API calls. + +Parameters +---------- + +| | | +|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Parameter** | **Description** | +| path | Allows the user to specify a specific CAPI adapter, a file, or memory for the key/value store. | +| ark | Handle representing the key/value store | +| flags | Collection of bit flags determining the properties of the KV store. + | + | | **- ARK_KV_VIRTUAL_LUN**: KV store will use a virtual LUN created from the physical LUN represented by the special file, **file.** + | | + | | **- ARK\_KV\_PERSIST\_STORE:** Configure the ARK instance to be persisted upon closing (ark\_delete) + | + | | **- ARK\_KV\_PERSIST\_LOAD:** If persistence data is present on the physical LUN, then load the configuration stored. | + +Return Values +------------- + +Upon successful completion, the **ark\_create** API will return 0, and the handle parameter will point to the newly created key/value store instance. 
If unsuccessful, the **ark\_create** API will return a non-zero error code: + +| | | +|-----------|--------------------------------------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid value for one of the parameters | +| ENOSPC | Not enough memory or flash storage | +| ENOTREADY | System not ready for key/value store supported configuration | +| | | + +**ark\_delete API** +=================== + +Purpose +------- + +Delete a key/value store instance + +Syntax +------ + +int ark\_delete(ark) + +**ARK \*** ark; + +Description +----------- + +The **ark\_delete** API will delete a key/value store instance, specified by the **ark** parameter, on the host system. Upon successfully completion all associated in memory and storage resources will be released at this time. + +If the ARK instance is configured to be persisted, it is at this time the configuration will be persisted so that it may be loaded at a future time. + +Parameters +---------- + +| | | +|---------------|---------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | + +Return Values +------------- + +Upon successful completion, the **ark\_delete** API will clean and remove all resources associated with the key/value store instance and return 0. 
If unsuccessful, the **ark\_delete** API will return a non-zero error code: + +| | | +|-----------|-------------------------------------| +| **Error** | **Reason** | +| EINVAL | key/value store handle is not valid | +| | | + +**ark\_set, ark\_set\_async\_cb API** +===================================== + +Purpose +------- + +Write a key/value pair + +Syntax +------ + +int ark\_set(ark, klen, key, vlen, val, res) + +int ark\_set\_async\_cb(ark, klen, key, vlen, val, callback, dt) + +**ARK \*** ark; + +**uint64\_t** klen; + +**void \*** key; + +**uint64\_t** vlen; + +**void \*** val; + +**void \***(\*callback)(int errcode, uint64\_t dt, uint64\_t res); + +**uint64\_t** dt; + +Description +----------- + +The **ark\_set** API will store the key, **key**, and value, **val**, into the store for the key/value store instance represented by the **ark** parameter The API, **ark\_set\_async\_cb**, will behave in the same manner, but in an asynchronous fashion, where the API immediately returns to the caller and the actual operation is scheduled to run. After the operation is executed, the **callback** function will be called to notify the caller of completion. + +If the **key** is present, the stored value will be replaced with the **val** value. + +Upon successful completion, the key/value pair will be present in the store and the number of bytes written will be returned to the caller through the **res** parameter. + +Parameters +---------- + +| | | +|---------------|-------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance connection | +| klen | Length of the key in bytes. | +| key | Key | +| vlen | Length of the value in bytes. | +| val | Value | +| res | Upon success, number of bytes written to the store. | +| callback | Function to call upon completion of the I/O operation. | +| dt | 64bit value to tag an asynchronous API call. 
| + +Return Values +------------- + +Upon successful completion, the **ark\_set** and **ark\_set\_async\_cb**API will write the key/value in the store associated with the key/value store instance and return the number of bytes written. The return of **ark\_set** will indicate the status of the operation. The **ark\_set\_async\_cb** API return will indicate whether the asynchronous operation was accepted or rejected. The true status will be stored in the **errcode** parameter when the **callback** function is executed. If unsuccessful, the **ark\_set** and **ark\_set\_async\_cb** API will return a non-zero error code: + +| | | +|-----------|------------------------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOSPC | Not enough space left in key/value store store | +| | | + +**ark\_get, ark\_get\_async\_cb API** +===================================== + +Purpose +------- + +Retrieve a value for a given key + +Syntax +------ + +int ark\_get(ark, klen, key, vbuflen, vbuf, voff, res) + +int ark\_get\_async\_cb(ark, klen, key, vbuflen, vbuf, voff, callback, dt) + +**ARK \*** ark; + +**uint64\_t** klen; + +**void \*** key; + +**uint64\_t** vbuflen; + +**void \*** vbuf; + +**uint64\_t** voff; + +**void \***(\*callback)(int errcode, uint64\_t dt, uint64\_t res); + +**uint64\_t** dt; + +Description +----------- + +The **ark\_get** and **ark\_get\_async\_cb** API will query the key/value store store associated with the **ark** paramter for the given key, **key**. If found, the key's value will be returned in the **vbuf** parameter with at most **vbuflen** bytes written starting at the offset, **voff,** in the key's value. The API, **ark\_get\_async\_cb**, will behave in the same manner, but in an asynchronous fashion, where the API immediately returns to the caller and the actual operation is scheduled to run. After the operation is executed, the **callback** function will be called to notify the caller of completion. 
+ +If successful, the length of the key's value is stored in the **res** parameter of the callback function. + +Parameters +---------- + +| | | +|---------------|--------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance connection. | +| klen | Length of the key in bytes. | +| key | Key | +| vbuflen | Length of the buffer, vbuf | +| vbuf | Buffer to store the key's value | +| voff | Offset into the key to start reading. | +| res | If successful, will store the size of the key in bytes | +| callback | Function to call upon completion of the I/O operation. | +| dt | 64bit value to tag an asynchronous API call. | + +Return Values +------------- + +Upon successful completion, the **ark\_get** and **ark\_get\_async\_cb** API will return 0. The return of **ark\_get** will indicate the status of the operation. The **ark\_get\_async\_cb** API return will indicate whether the asynchronous operation was accepted or rejected. The true status of the asynchronous API will be stored in the **errcode** parameter of the **callback** function. 
If unsuccessful, the **ark\_get** and **ark\_set\_async\_cb** API will return a non-zero error code: + +| | | +|-----------|-------------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOENT | Key not found | +| ENOSPC | Buffer not big enough to hold value | + +**ark\_del, ark\_del\_async\_cb API** +===================================== + +Purpose +------- + +Delete the value associated with a given key + +Syntax +------ + +int ark\_del(ark, klen, key, res) + +int ark\_del\_async\_cb(ark, klen, key, callback, dt) + +**ARK \*** ark + +**uint64\_t** klen; + +**void \*** key; + +**void \***(\*callback)(int errcode, uint64\_t dt, uint64\_t res); + +**uint64\_t** dt; + +Description +----------- + +The **ark\_del** and **ark\_del\_async\_cb** API will query the key/value store store associated with the **handle** paramter for the given key, **key,** and if found, will delete the value. The API, **ark\_del\_async\_cb**, will behave in the same manner, but in an asynchronous fashion, where the API immediately returns to the caller and the actual operation is scheduled to run. After the operation is executed, the **callback** function will be called to notify the caller of completion. + +If successful, the length of the key's value is returned to the caller in the **res** parameter of the callback function. + +Parameters +---------- + +| | | +|---------------|--------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance connection. | +| klen | Length of the key in bytes. | +| key | Key | +| res | If successful, will store the size of the key in bytes | +| callback | Function to call upon completion of the I/O operation. | +| dt | 64bit value to tag an asynchronous API call. | + +Return Values +------------- + +Upon successful completion, the **ark\_del** and **ark\_del\_async\_cb** API will return 0. 
The return of **ark\_del** will indicate the status of the operation. The **ark\_del\_async\_cb** API return will indicate whether the asynchronous operation was accepted or rejected. The true status will be returned in the **errcode** parameter when the **callback** function is executed. If unsuccessful, the **ark\_del** and **ark\_del\_async\_cb** API will return a non-zero error code: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOENT | Key not found | +| | | + +**ark\_exists, ark\_exists\_async\_cb API** +=========================================== + +Purpose +------- + +Query the key/value store store to see if a given key is present + +Syntax +------ + +int ark\_exists(ark, klen, key, res) + +int ark\_exists\_async\_cb(ark, klen, key, callback, dt) + +**ARK \*** ark; + +**uint64\_t** klen; + +**void \*** key; + +**void \***(\*callback)(int errcode, uint64\_t dt, uint64\_t res); + +**uint64\_t** dt; + +Description +----------- + +The **ark\_exists** and **ark\_exists\_async\_cb** API will query the key/value store store associated with the **ark or arc** paramter for the given key, **key,** and if found, return the size of the value in bytes in the **res** parameter. The key and it's value will not be altered. The API, **ark\_exists\_async\_cb**, will behave in the same manner, but in an asynchronous fashion, where the API immediately returns to the caller and the actual operation is scheduled to run. After the operation is executed, the **callback** function will be called to notify the caller of completion. + +Parameters +---------- + +| | | +|---------------|--------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance connection. | +| klen | Length of the key in bytes. 
| +| key | Key | +| res | If successful, will store the size of the key in bytes | +| callback | Function to call upon completion of the I/O operation. | +| dt | 64bit value to tag an asynchronous API call. | + +Return Values +------------- + +Upon successful completion, the **ark\_exists** and **ark\_exists\_async\_cb** API will return 0. The return of **ark\_exists** will indicate the status of the operation. The **ark\_exists\_async\_cb** API return will indicate whether the asynchronous operation was accepted or rejected. The true status will be returned in the **errcode** parameter when the **callback** function is executed. If unsuccessful, the **ark\_exists and ark\_exists\_async\_cb** API will return a non-zero error code: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOENT | Key not found | +| | | + +**ark\_first API** +================== + +Purpose +------- + +Return the first key and handle to iterate through store. + +Syntax +------ + +ARI \* ark\_first(ark, kbuflen, klen, kbuf) + +**ARK \* ark**; + +**uint64\_t** kbuflen + +**int64\_t** \*klen; + +**void \*** kbuf; + +Description +----------- + +The **ark\_first** API will return the first key found in the store in the buffer, **kbuf**, and the size of the key in **klen**, as long as the size is less than the size of the kbuf, **kbuflen**. + +If successful, an iterator handle will be returned to the caller to be used to retrieve the next key in the store by calling the **ark\_next** API. + +Parameters +---------- + +| | | +|---------------|--------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance connection. | +| kbuflen | Length of the kbuf parameter. 
| +| klen | Size of the key returned in kbuf | +| kbuf | Buffer to hold the key | + +Return Values +------------- + +Upon successful completion, the **ark\_first** API will return a handle to be used to iterate through the store on subsequent calls using the **ark\_next** API. If unsuccessful, the **ark\_first** API will return NULL with **errno** set to one of the following: + +| | | +|-----------|-------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOSPC | kbuf is too small to hold key | +| | | + +**ark\_next API** +================= + +Purpose +------- + +Return the next key in the store. + +Syntax +------ + +ARI \* ark\_next(iter, kbuflen, klen, kbuf) + +**ARI \*** iter; + +**uint64\_t** kbuflen + +**int64\_t** \*klen; + +**void \*** kbuf; + +Description +----------- + +The **ark\_next** API will return the next key found in the store based on the iterator handle, **iter**, in the buffer, **kbuf**, and the size of the key in **klen**, as long as the size is less than the size of the kbuf, **kbuflen**. + +If successful, a handle will be returned to the caller to be used to retrieve the next key in the store by calling the **ark\_next** API. If the end of the store is reached, a NULL value is returned and errno set to **ENOENT**. + +Because of the dynamic nature of the store, some recently written keys may not be returned. + +Parameters +---------- + +| | | +|---------------|------------------------------------------------| +| **Parameter** | **Description** | +| iter | Iterator handle where to begin search in store | +| kbuflen | Length of the kbuf parameter. | +| klen | Size of the key returned in kbuf | +| kbuf | Buffer to hold the key | + +Return Values +------------- + +Upon successful completion, the **ark\_next** API will return a handle to be used to iterate through the store on subsequent calls using the **ark\_next** API. 
If unsuccessful, the **ark\_next** API will return NULL with **errno** set to one of the following: + +| | | +|-----------|------------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| ENOSPC | kbuf is too small to hold key | +| ENOENT | End of the store has been reached. | + +**ark\_allocated API** +====================== + +Purpose +------- + +Return the number of bytes allocated in the store. + +Syntax +------ + +int ark\_allocated(ark, size) + +**ARK \*** ark; + +**uint64\_t \***size; + +Description +----------- + +The **ark\_allocated** API will return the number of bytes allocated in the store in the **size** parameter. + +Parameters +---------- + +| | | +|---------------|---------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | +| size | Will hold the size of the store in bytes | + +Return Values +------------- + +Upon successful completion, the **ark\_allocated** API will return 0. If unsuccessful, the **ark\_allocated** API will return one of the following error codes. + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + +**ark\_inuse API** +================== + +Purpose +------- + +Return the number of bytes in use in the store. + +Syntax +------ + +int ark\_inuse(ark, size) + +**ARK \*** ark; + +**uint64\_t \***size; + +Description +----------- + +The **ark\_inuse** API will return the number of bytes in use in the store in the **size** parameter. + +Parameters +---------- + +| | | +|---------------|-----------------------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | +| size | Will hold the size of number of blocks in use. Size will be in bytes. | + +Return Values +------------- + +Upon successful completion, the **ark\_inuse** API will return 0. 
If unsuccessful, the **ark\_inuse** API will return one of the following error codes: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + +**ark\_actual API** +=================== + +Purpose +------- + +Return the actual number of bytes in use in the store. + +Syntax +------ + +int ark\_actual(ark, size) + +**ARK \*** ark; + +**uint64\_t \***size; + +Description +----------- + +The **ark\_actual** API will return the actual number of bytes in use in the store in the size parameter. This differs from the **ark\_inuse** API as this takes into account the actual sizes of the individual keys and their values instead of generic allocations based on blocks to store these values. + +Parameters +---------- + +| | | +|---------------|-----------------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | +| size | Will hold the actual number of bytes in use in the store. | + +Return Values +------------- + +Upon successful completion, the **ark\_actual** API will return 0. If unsuccessful, the **ark\_actual** API will return one of the following error codes: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + +**ark\_error, ark\_errorstring API** +==================================== + +Purpose +------- + +Return additional error information on a failure. + +Syntax +------ + +int ark\_error(ark) + +**ARK \* ark**; + +char \* ark\_errorstring(ark) + +**ARK \* ark**; + +Description +----------- + +The **ark\_error** API will return a more descriptive error code for the last error encountered on a key/value store API. + +The **ark\_errorstring** API will return a human readable error string for the last error encountered on a key/value store API. 
+ +Parameters +---------- + +| | | +|---------------|---------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | + +Return Values +------------- + +Upon successful completion, **ark\_error** will return a non-zero value and **ark\_errorstring** will return a non-NULL value. If an error is encountered, **ark\_error** will return a negative error code and **ark\_errorstring** will return NULL with errno set to one of the following: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + +**ark\_fork, ark\_fork\_done API** +================================== + +Purpose +------- + +Fork a key/value store for archiving purposes. + +Syntax +------ + +int ark\_fork(ark) + +int ark\_fork\_done(ark) + +**ARK \*** ark; + +Description +----------- + +The **ark\_fork** and **ark\_fork\_done** APIs are to be called by the parent key/value store process to prepare the key/value store to be forked, fork the child process, and to perform any cleanup once it has been detected the child process has exited. + +The **ark\_fork** API will fork a child process and upon return, will return the process ID of the child in the parent process, and 0 in the child process. Once the parent detects the child has exited, a call to **ark\_fork\_done** will be needed to clean up any state from the **ark\_fork** call. + +Note, the **ark\_fork** API will fail if there are any outstanding asynchronous commands. + +Parameters +---------- + +| | | +|---------------|---------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. 
| + +Return Values +------------- + +Upon successful completion, **ark\_fork** and **ark\_fork\_done** will return 0, otherwise one of the following errors: + +| | | +|-----------|-------------------------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | +| EBUSY | Outstanding asynchronous operations | +| ENOMEM | Not enough space to clone store | + +**ark\_random API** +=================== + +Purpose +------- + +Return a random key from the key/value store. + +Syntax +------ + +int ark\_random(ark, kbuflen, klen, kbuf) + +**ARK \*** ark; + +**uint64\_t** kbuflen + +**int64\_t** \*klen; + +**void \*** kbuf; + +Description +----------- + +The **ark\_random** API will return a random key found in the store based on the handle, **ark**, in the buffer, **kbuf**, and the size of the key in **klen**, as long as the size is less than the size of the kbuf, **kbuflen**. + +Parameters +---------- + +| | | +|---------------|------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store | +| kbuflen | Length of the kbuf parameter. | +| klen | Size of the key returned in kbuf | +| kbuf | Buffer to hold the key | + +Return Values +------------- + +Upon successful completion, **ark\_random** will return 0. Otherwise, **ark\_random** will return the following error codes: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + +**ark\_count API** +================== + +Purpose +------- + +Return the count of the number of keys in the key/value store + +Syntax +------ + +int ark\_count(ark, count) + +**ARK \*** ark; + +**int \*** count; + +Description +----------- + +The **ark\_count** API will return the total number of keys in the store based on the handle, **ark**, and store the result in the **count** parameter. 
+ +Parameters +---------- + +| | | +|---------------|---------------------------------------------------| +| **Parameter** | **Description** | +| ark | Handle representing the key/value store instance. | +| count | Number of keys found in the key/value store. | + +Return Values +------------- + +Upon successful completion, **ark\_count** will return 0. Otherwise, a non-zero error code will be returned: + +| | | +|-----------|-------------------| +| **Error** | **Reason** | +| EINVAL | Invalid parameter | + + + + diff --git a/src/kv/am.c b/src/kv/am.c new file mode 100644 index 00000000..025bb066 --- /dev/null +++ b/src/kv/am.c @@ -0,0 +1,107 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/am.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include + +#include "zmalloc.h" +#include "am.h" + +#include +#include + +int x = 1; +#define H 64 +#define ARK_KV_ALIGN 16 + +void *am_malloc(size_t size) +{ + unsigned char *p=NULL; + + if (FVT_KV_ALLOC_ERROR_INJECT) + { + FVT_KV_CLEAR_ALLOC_ERROR; + errno = ENOMEM; + KV_TRC_FFDC(pAT, "ALLOC_ERROR_INJECT rc = %d", ENOMEM); + return NULL; + } + + if ((int64_t)size > 0) + { +#ifdef _AIX + p = malloc(size + (ARK_KV_ALIGN - 1)); +#else + p = zmalloc(size + (ARK_KV_ALIGN - 1)); +#endif + if (p) memset(p,0x00, size + (ARK_KV_ALIGN - 1)); + } + return p; +} + +void am_free(void *ptr) { +#ifdef _AIX + free(ptr); +#else + zfree(ptr); +#endif +} + +void *am_realloc(void *ptr, size_t size) +{ + unsigned char *p = NULL; + + if (FVT_KV_ALLOC_ERROR_INJECT) + { + FVT_KV_CLEAR_ALLOC_ERROR; + errno = ENOMEM; + KV_TRC_FFDC(pAT, "ALLOC_ERROR_INJECT rc = %d", ENOMEM); + return NULL; + } + + if ((int64_t)size > 0) + { +#ifdef _AIX + p = realloc(ptr, size + (ARK_KV_ALIGN - 1)); +#else + p = zrealloc(ptr, size + (ARK_KV_ALIGN - 1)); +#endif + } + return p; +} + +void *ptr_align(void *ptr) +{ + void *new_ptr = NULL; + + if (ptr != NULL) + { + new_ptr = (void *)(((uintptr_t)(ptr) + ARK_KV_ALIGN - 1) & ~(uintptr_t)(ARK_KV_ALIGN - 1)); + } + + return new_ptr; +} + diff --git a/src/kv/am.h b/src/kv/am.h new file mode 100644 index 00000000..f0c4f008 --- /dev/null +++ b/src/kv/am.h @@ -0,0 +1,36 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/am.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __AM_H__ +#define __AM_H__ + +#include + +void *am_malloc(size_t size); +void am_free(void *ptr); +void *am_realloc(void *ptr, size_t size); + +void *ptr_align(void *ptr); + +#endif diff --git a/src/kv/ari.c b/src/kv/ari.c new file mode 100644 index 00000000..a2ce70a7 --- /dev/null +++ b/src/kv/ari.c @@ -0,0 +1,29 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ari.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + + +#include "ark.h" + +int ari_active = 0; diff --git a/src/kv/ari.h b/src/kv/ari.h new file mode 100644 index 00000000..9f9f3398 --- /dev/null +++ b/src/kv/ari.h @@ -0,0 +1,32 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ari.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __ARI_H__ +#define __ARI_H__ + + + +extern int ari_active; + +#endif diff --git a/src/kv/ark.h b/src/kv/ark.h new file mode 100644 index 00000000..34dfc350 --- /dev/null +++ b/src/kv/ark.h @@ -0,0 +1,313 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ark.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __ARK_H__ +#define __ARK_H__ + +#include + +#include "hash.h" +#include "bl.h" +#include "ea.h" +#include "bt.h" +#include "tag.h" +#include "queue.h" +#include "sq.h" + +#define PT_OFF 0 +#define PT_IDLE 1 +#define PT_RUN 2 +#define PT_EXIT 3 + +/* Ceiling division: add 1 only when there IS a remainder. */ +#define DIVCEIL(_x, _y) (((_x) / (_y)) + (((_x) % (_y)) ? 1 : 0)) + +// Stats to collect on number of K/V ops +// and IOs +typedef struct ark_stats { + volatile uint64_t ops_cnt; + volatile uint64_t io_cnt; + volatile uint64_t kv_cnt; + volatile uint64_t blk_cnt; + volatile uint64_t byte_cnt; +} ark_stats_t; + +#define ARK_P_VERSION_1 1 + +#define ARK_PERSIST_CONFIG 1 +#define ARK_PERSIST_END 2 +#define ARK_PERSIST_HT 3 +#define ARK_PERSIST_HT_BV 4 +#define ARK_PERSIST_HT_IV 5 +#define ARK_PERSIST_BL 6 +#define ARK_PERSIST_BL_IV 7 + +typedef struct p_cntr +{ +#define ARK_P_MAGIC "ARKPERST" + char p_cntr_magic[8]; + uint64_t p_cntr_version; + uint32_t p_cntr_type; + uint64_t p_cntr_size; + uint64_t p_cntr_cfg_offset; + uint64_t p_cntr_cfg_size; + uint64_t p_cntr_ht_offset; + uint64_t p_cntr_ht_size; + uint64_t p_cntr_bl_offset; + uint64_t p_cntr_bl_size; + uint64_t p_cntr_bliv_offset; + uint64_t p_cntr_bliv_size; + char p_cntr_data[]; +} p_cntr_t; + +typedef struct p_ark +{ + uint64_t flags; + uint64_t pblocks; + uint64_t size; + uint64_t bsize; + uint64_t bcount; + uint64_t blkbits; + uint64_t grow; + uint64_t hcount; + uint64_t vlimit; + uint64_t blkused; + ark_stats_t pstats; + int nasyncs; + int basyncs; + int nthrds; + int ntasks; 
+} P_ARK_t; + +#define ARK_VERBOSE_SIZE_DEF 1048576 +#define ARK_VERBOSE_BSIZE_DEF 4096 +#define ARK_VERBOSE_HASH_DEF 1048576 +#define ARK_VERBOSE_VLIMIT_DEF 256 +#define ARK_VERBOSE_BLKBITS_DEF 34 +#define ARK_VERBOSE_GROW_DEF 1024 +#define ARK_VERBOSE_NTHRDS_DEF 20 + +#define ARK_MAX_ASYNC_OPS 128 +#define ARK_MAX_TASK_OPS 32 + +typedef struct _ark { + uint64_t flags; + uint32_t persload; + uint64_t persblocks; + char *persdata; + uint64_t size; + uint64_t bsize; + uint64_t bcount; + uint64_t blkbits; + uint64_t grow; + uint64_t hcount; + uint64_t vlimit; + uint64_t blkused; + uint64_t nasyncs; + uint64_t ntasks; + + ark_stats_t pers_stats; + + uint32_t holds; + + int nthrds; + int basyncs; + int npart; + int nactive; + int ark_exit; + int pcmd; + int rthread; + int astart; + + pthread_mutex_t mainmutex; + + hash_t *ht; // hashtable + BL *bl; // block lists + struct _ea *ea; // in memory store space + struct _rcb *rcbs; + struct _tcb *tcbs; + struct _iocb *iocbs; + struct _scb *poolthreads; + struct _pt *pts; + struct _tags *rtags; + struct _tags *ttags; + +} _ARK; + +#define _ARC _ARK + +typedef struct _scb { + pthread_t pooltid; + queue_t *rqueue; + queue_t *tqueue; + queue_t *ioqueue; + int32_t poolstate; + int32_t rlast; + int32_t dogc; + uint32_t holds; + ark_stats_t poolstats; + pthread_mutex_t poolmutex; + pthread_cond_t poolcond; +} scb_t; + +// pool thread gets its id and the database struct +typedef struct _pt { + int id; + _ARK *ark; +} PT; + + +#define K_NULL 0 +#define K_GET 1 +#define K_SET 2 +#define K_DEL 3 +#define K_EXI 4 +#define K_RAND 5 +#define K_FIRST 6 +#define K_NEXT 7 + +#define A_NULL 0 +#define A_INIT 1 +#define A_COMPLETE 2 +#define A_FINAL 3 + + +// operation +typedef struct _rcb { + _ARK *ark; + uint64_t klen; + void *key; + uint64_t vlen; + void *val; + uint64_t voff; + + uint64_t pos; + uint64_t hash; + + int64_t res; + uint64_t dt; + int32_t rc; + + uint64_t rnum; + int32_t rtag; + int32_t ttag; + int32_t hold; + int32_t sthrd; + 
int32_t cmd; + int32_t stat; + + void (*cb)(int errcode, uint64_t dt,int64_t res); + pthread_cond_t acond; + pthread_mutex_t alock; +} rcb_t; + +typedef struct _ari { + _ARK *ark; + int64_t hpos; + int64_t key; + int32_t ithread; + uint64_t btsize; + BT *bt; + BT *bt_orig; + uint8_t *pos; +} _ARI; + +#define ARK_CMD_INIT 1 +#define ARK_CMD_DONE 2 +#define ARK_IO_HARVEST 3 +#define ARK_SET_START 4 +#define ARK_SET_PROCESS_INB 5 +#define ARK_SET_WRITE 6 +#define ARK_SET_FINISH 7 +#define ARK_GET_START 8 +#define ARK_GET_PROCESS 9 +#define ARK_GET_FINISH 10 +#define ARK_DEL_START 11 +#define ARK_DEL_PROCESS 12 +#define ARK_DEL_FINISH 13 +#define ARK_EXIST_START 14 +#define ARK_EXIST_FINISH 15 +#define ARK_RAND_START 16 +#define ARK_FIRST_START 17 +#define ARK_NEXT_START 18 + +// operation +typedef struct _tcb { + int32_t rtag; + int32_t ttag; + int32_t state; + int32_t sthrd; + + uint64_t inblen; + uint64_t oublen; + BTP inb; // input bucket space - aligned + BTP inb_orig; // input bucket space + BTP oub; // output bucket space - aligned + BTP oub_orig; // output bucket space + uint64_t vbsize; + uint8_t *vb; // value buffer space - aligned + uint8_t *vb_orig; // value buffer space + uint8_t *vval; // value buffer space + uint64_t vblkcnt; + int64_t vblk; + uint64_t hpos; + uint64_t hblk; + int64_t nblk; + uint64_t blen; + uint64_t old_btsize; + int64_t vvlen; + int32_t new_key; +} tcb_t; + +typedef struct _iocb +{ + struct _ea *ea; // in memory store space + int32_t op; + void *addr; + ark_io_list_t *blist; + uint64_t nblks; + uint64_t start; + uint64_t new_start; + uint64_t cnt; + int32_t tag; + uint32_t io_errno; + uint32_t io_done; +} iocb_t; + +int64_t ark_take_pool(_ARK *_arkp, ark_stats_t *stats, uint64_t n); +void ark_drop_pool(_ARK *_arkp, ark_stats_t *stats, uint64_t blk); + +int ark_set_async_tag(_ARK *ark, uint64_t klen, void *key, uint64_t vlen, void *val); +int ark_get_async_tag(_ARK *ark, uint64_t klen,void *key,uint64_t vbuflen,void *vbuf,uint64_t 
voff); +int ark_del_async_tag(_ARK *ark, uint64_t klen, void *key); +int ark_exists_async_tag(_ARK *ark, uint64_t klen, void *key); +int ark_rand_async_tag(_ARK *ark, uint64_t klen, void *key, int32_t ptid); +int ark_first_async_tag(_ARK *ark, uint64_t klen, void *key, _ARI *_arip, int32_t ptid); +int ark_next_async_tag(_ARK *ark, uint64_t klen, void *key, _ARI *_arip, int32_t ptid); + +int ark_anyreturn(_ARK *ark, int *tag, int64_t *res); + +int ea_async_io_mod(_ARK *_arkp, int op, void *addr, ark_io_list_t *blist, + int64_t nblks, int start, int32_t tag, int32_t io_done); + +#endif diff --git a/src/kv/ark_mod.h b/src/kv/ark_mod.h new file mode 100644 index 00000000..b5279cd0 --- /dev/null +++ b/src/kv/ark_mod.h @@ -0,0 +1,101 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ark_mod.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef __ARK_MOD_H__ +#define __ARK_MOD_H__ + +#include +#include "ark.h" +#include "bl.h" +#include "bt.h" + +#define ARK_SET_PROCESS_INB 1 +#define ARK_SET_WRITE 2 +#define ARK_SET_FINISH 3 +#define ARK_IO_DONE 4 +#define ARK_IO_HARVEST 5 +#define ARK_IO_SCHEDULE 6 +#define ARK_CMD_DONE 7 + +// operation +typedef struct _atp { + _ARK *_arkp; + + uint64_t klen; + void *key; + uint64_t vlen; + void *val; + uint64_t voff; + + uint64_t pos; + int64_t res; + + void (*cb)(int errcode, uint64_t dt, int64_t res); + uint64_t dt; + + int32_t cmd; + int32_t state; + int32_t tag; + int32_t task; + int32_t rc; + int32_t error; + + uint64_t inblen; + uint64_t oublen; + BTP inb; // input bucket space - aligned + BTP inb_orig; // input bucket space + BTP oub; // output bucket space - aligned + BTP oub_orig; // output bucket space + uint64_t vbsize; + uint8_t *vb; // value buffer space - aligned + uint8_t *vb_orig; // value buffer space + uint8_t *vval; // value buffer space + uint64_t vblkcnt; + int64_t vblk; + uint64_t hpos; + uint64_t hblk; + int64_t nblk; + uint64_t blen; + int64_t vvlen; + int32_t new_key; + +} ATP; + +typedef struct _aio +{ + EA *ea; + int32_t op; + void *addr; + ark_io_list_t *blist; + uint64_t nblks; + uint64_t start; + uint64_t new_start; + uint64_t cnt; + int32_t task; + uint32_t io_errno; + uint32_t io_done; +} AIO; + +#endif diff --git a/src/kv/arkdb.c b/src/kv/arkdb.c new file mode 100644 index 00000000..3eb95413 --- /dev/null +++ b/src/kv/arkdb.c @@ -0,0 +1,1737 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/arkdb.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _AIX +#include +REVISION_TAGS(arkdb); +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include "am.h" +#include "ut.h" +#include "tag.h" +#include "vi.h" +#include "ea.h" +#include "bl.h" + +#include "arkdb.h" +#include "ark.h" +#include "arp.h" +#include "capiblock.h" + +#include + +#include +#include + +KV_Trace_t arkdb_kv_trace; +KV_Trace_t *pAT = &arkdb_kv_trace; +uint32_t fvt_kv_inject = 0; + +void ark_persist_stats(_ARK * _arkp, ark_stats_t *pstats) +{ + int i = 0; + + pstats->kv_cnt = _arkp->pers_stats.kv_cnt; + pstats->blk_cnt = _arkp->pers_stats.blk_cnt; + pstats->byte_cnt = _arkp->pers_stats.byte_cnt; + + for (i = 0; i < _arkp->nthrds; i++) + { + pstats->kv_cnt += _arkp->poolthreads[i].poolstats.kv_cnt; + pstats->blk_cnt += _arkp->poolthreads[i].poolstats.blk_cnt; + pstats->byte_cnt += _arkp->poolthreads[i].poolstats.byte_cnt; + } +} + +void ark_persistence_calc(_ARK *_arkp) +{ + uint64_t tot_bytes = 0; + hash_t *htp = NULL; + BL *blp = NULL; + + // We need to determine the total size of the data + // that needs to be persisted. 
Items that we persist + // are: + // + // - Configuration + // - Hash Table (hash_t) + // - Block List (BL) + // - IV + + // Configuration + tot_bytes += sizeof(p_cntr_t) + sizeof(P_ARK_t); + + // Hash Table + htp = _arkp->ht; + tot_bytes += sizeof(hash_t) + (htp->n * sizeof(uint64_t)); + + // Block List + blp = _arkp->bl; + tot_bytes += sizeof(BL); + tot_bytes += sizeof(IV) + + (blp->list->words * sizeof(uint64_t)); + + // Calculate the total number of blocks needed to write out + // the persistent data. + _arkp->persblocks = divceil(tot_bytes, _arkp->bsize); + + return; +} + +int ark_persist(_ARK *_arkp) +{ + int32_t rc = 0; + uint64_t tot_bytes = 0; + uint64_t offset = 0; + hash_t *htp = NULL; + BL *blp = NULL; + char *p_data_orig = NULL; + char *p_data = NULL; + p_cntr_t *pptr = NULL; + char *dptr = NULL; + P_ARK_t pcfg; + ark_io_list_t *bl_array = NULL; + + if ( (_arkp->ea->st_type == EA_STORE_TYPE_MEMORY) || + !(_arkp->flags & ARK_KV_PERSIST_STORE) ) + { + return 0; + } + + memset(&pcfg, 0, sizeof(P_ARK_t)); + + tot_bytes = _arkp->persblocks * _arkp->bsize; + p_data_orig = am_malloc(tot_bytes); + if (p_data_orig == NULL) + { + KV_TRC_FFDC(pAT, "Out of memory allocating %"PRIu64" bytes for persistence data", tot_bytes); + return ENOMEM; + } + + p_data = ptr_align(p_data_orig); + + memset(p_data, 0, tot_bytes); + + pcfg.flags = _arkp->flags; + pcfg.size = _arkp->ea->size; + pcfg.bsize = _arkp->bsize; + pcfg.bcount = _arkp->bcount; + pcfg.blkbits = _arkp->blkbits; + pcfg.grow = _arkp->grow; + pcfg.hcount = _arkp->hcount; + pcfg.vlimit = _arkp->vlimit; + pcfg.blkused = _arkp->blkused; + pcfg.nasyncs = _arkp->nasyncs; + pcfg.basyncs = _arkp->basyncs; + pcfg.ntasks = _arkp->ntasks; + pcfg.nthrds = _arkp->nthrds; + pcfg.pblocks = _arkp->persblocks; + + ark_persist_stats(_arkp, &(pcfg.pstats)); + + pptr = (p_cntr_t *)p_data; + memcpy(pptr->p_cntr_magic, ARK_P_MAGIC, sizeof(pptr->p_cntr_magic)); + pptr->p_cntr_version = ARK_P_VERSION_1; + pptr->p_cntr_size = 
tot_bytes; + + // Record persist configuration info + offset = 0; + dptr = &pptr->p_cntr_data[offset]; + pptr->p_cntr_cfg_offset = offset; + pptr->p_cntr_cfg_size = sizeof(P_ARK_t);; + memcpy(dptr, &pcfg, pptr->p_cntr_cfg_size); + + // Record hash info + htp = _arkp->ht; + offset += pptr->p_cntr_cfg_size; + dptr = &pptr->p_cntr_data[offset]; + pptr->p_cntr_ht_offset = offset; + pptr->p_cntr_ht_size = sizeof(hash_t) + (htp->n * sizeof(uint64_t)); + memcpy(dptr, htp, pptr->p_cntr_ht_size); + + // Record block list info + blp = _arkp->bl; + offset += pptr->p_cntr_ht_size; + dptr = &pptr->p_cntr_data[offset]; + pptr->p_cntr_bl_offset = offset; + pptr->p_cntr_bl_size = sizeof(BL); + memcpy(dptr, blp, pptr->p_cntr_bl_size); + + offset += pptr->p_cntr_bl_size; + dptr = &pptr->p_cntr_data[offset]; + pptr->p_cntr_bliv_offset = offset; + pptr->p_cntr_bliv_size = sizeof(IV) + + (divup((blp->list->n * blp->list->m), 64)) * sizeof(uint64_t); + memcpy(dptr, blp->list, pptr->p_cntr_bliv_size); + + bl_array = bl_chain_blocks(_arkp->bl, 0, _arkp->persblocks); + if ( NULL == bl_array ) + { + KV_TRC_FFDC(pAT, "Out of memory allocating %"PRIu64" blocks for block list", + _arkp->persblocks); + rc = ENOMEM; + } + else + { + rc = ea_async_io(_arkp->ea, ARK_EA_WRITE, (void *)p_data, + bl_array, _arkp->persblocks, _arkp->nthrds); + am_free(bl_array); + } + + am_free(p_data_orig); + return rc; +} + +int ark_check_persistence(_ARK *_arkp, uint64_t flags) +{ + int32_t rc = -1; + char *p_data_orig = NULL; + char *p_data = NULL; + ark_io_list_t *bl_array = NULL; + p_cntr_t *pptr = NULL; + P_ARK_t *pcfg = NULL; + hash_t *htp = NULL; + IV *ivp = NULL; + BL *blp = NULL; + + // Ignore the persistence data and load from scratch + if ( (!(flags & ARK_KV_PERSIST_LOAD)) || (flags & ARK_KV_VIRTUAL_LUN) ) + { + return -1; + } + + p_data_orig = am_malloc(_arkp->bsize); + if (p_data_orig == NULL) + { + KV_TRC_FFDC(pAT, "Out of memory allocating %"PRIu64" bytes for the first\ + persistence block", 
_arkp->bsize); + rc = ENOMEM; + } + else + { + p_data = ptr_align(p_data_orig); + bl_array = bl_chain_no_bl(0, 1); + rc = ea_async_io(_arkp->ea, ARK_EA_READ, (void *)p_data, + bl_array, 1, 16); + am_free(bl_array); + } + + if (rc == 0) + { + // We've read the first block. We check to see if + // persistence data is present and if so, then + // read the rest of the data from the flash. + pptr = (p_cntr_t *)p_data; + _arkp->persdata = p_data_orig; + if ( memcmp(pptr->p_cntr_magic, ARK_P_MAGIC, + sizeof(pptr->p_cntr_magic)) != 0) + { + KV_TRC_FFDC(pAT, "No magic number found in persistence data: %d", EINVAL); + // The magic number does not match so data is either + // not present or is corrupted. + rc = -1; + } + else + { + // Now we check version and the first persistence data + // needs to be the ARK_PERSIST_CONFIG block + if ( (pptr->p_cntr_version != ARK_P_VERSION_1) ) + { + KV_TRC_FFDC(pAT, "Invalid / unsupported version: %"PRIu64"", + pptr->p_cntr_version); + rc = -1; + } + else + { + // Read in the rest of the persistence data + pcfg = (P_ARK_t *)&(pptr->p_cntr_data[pptr->p_cntr_cfg_offset]); + + // TODO: Set the size of the KV store based on the + // saved size. 
Temporary fix while dealing with + // virutal LUNs + _arkp->persblocks = pcfg->pblocks; + + if (pcfg->pblocks > 1) + { + p_data_orig = am_realloc(p_data_orig, (pcfg->pblocks * _arkp->bsize)); + if (p_data_orig == NULL) + { + KV_TRC_FFDC(pAT, "Out of memory allocating %"PRIu64" bytes for full\ + persistence block", (pcfg->pblocks * _arkp->bsize)); + rc = ENOMEM; + } + else + { + p_data = ptr_align(p_data_orig); + bl_array = bl_chain_no_bl(0, pcfg->pblocks); + if (bl_array == NULL) + { + KV_TRC_FFDC(pAT, "Out of memory allocating %"PRIu64" blocks for\ + full persistence data", pcfg->pblocks); + rc = ENOMEM; + } + } + + // We are still good to read the rest of the data + // from the flash + if (rc == 0) + { + rc = ea_async_io(_arkp->ea, ARK_EA_READ, (void *)p_data, + bl_array, pcfg->pblocks, 16); + am_free(bl_array); + pptr = (p_cntr_t *)p_data; + _arkp->persdata = p_data_orig; + } + } + } + } + } + + // If rc == 0, that means we have persistence data + if (rc == 0) + { + _arkp->size = pcfg->size; + _arkp->flags = flags; + _arkp->bsize = pcfg->bsize; + _arkp->bcount = pcfg->bcount; + _arkp->blkbits = pcfg->blkbits; + _arkp->grow = pcfg->grow; + _arkp->hcount = pcfg->hcount; + _arkp->vlimit = pcfg->vlimit; + _arkp->blkused = pcfg->blkused; + _arkp->nasyncs = pcfg->nasyncs; + _arkp->basyncs = pcfg->basyncs; + _arkp->ntasks = pcfg->ntasks; + _arkp->nthrds = pcfg->nthrds; + _arkp->pers_stats.kv_cnt = pcfg->pstats.kv_cnt; + _arkp->pers_stats.blk_cnt = pcfg->pstats.blk_cnt; + _arkp->pers_stats.byte_cnt = pcfg->pstats.byte_cnt; + + htp = (hash_t *)(&pptr->p_cntr_data[pptr->p_cntr_ht_offset]); + _arkp->ht = hash_new(htp->n); + memcpy(&_arkp->ht->h[0], &htp->h[0], (htp->n * sizeof(uint64_t))); + + blp = (BL *)(&pptr->p_cntr_data[pptr->p_cntr_bl_offset]); + _arkp->bl = bl_new(blp->n, blp->w); + _arkp->bl->count = blp->count; + _arkp->bl->head = blp->head; + _arkp->bl->hold = blp->hold; + + ivp = (IV *)(&pptr->p_cntr_data[pptr->p_cntr_bliv_offset]); + 
memcpy(&(_arkp->bl->list->data[0]), &(ivp->data[0]), + ivp->words * sizeof(uint64_t)); + } + + if (p_data_orig != NULL) + { + am_free(p_data_orig); + } + + if ( rc == 0 ) + { + _arkp->persload = 1; + } + + return rc; +} + +int ark_create_verbose(char *path, ARK **arkret, + uint64_t size, uint64_t bsize, uint64_t hcount, + int nthrds, int nqueue, int basyncs, uint64_t flags) { + int rc = 0; + int p_rc = 0; + uint64_t bcount = 0; + uint64_t x = 0; + int i = 0; + int tnum = 0; + int rnum = 0; + scb_t *scbp = NULL; + + KV_TRC_OPEN(pAT, "arkdb"); + + if (NULL == arkret) + { + KV_TRC_FFDC(pAT, "Incorrect value for ARK control block: rc=EINVAL"); + rc = EINVAL; + goto ark_create_ark_err; + } + + if ( (flags & (ARK_KV_PERSIST_LOAD|ARK_KV_PERSIST_STORE)) && + (flags & ARK_KV_VIRTUAL_LUN) ) + { + KV_TRC_FFDC(pAT, "Invalid persistence combination with ARK flags: %016lx", flags); + rc = EINVAL; + goto ark_create_ark_err; + } + + _ARK *ark = am_malloc(sizeof(_ARK)); + if (ark == NULL) { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocating ARK control structure for %"PRIu64"", sizeof(_ARK)); + goto ark_create_ark_err; + } + + KV_TRC(pAT, "%p path %s size %ld bsize %ld hcount %ld \ + nthrds %d nqueue %d", + ark, path, size, bsize, hcount, + nthrds, nqueue); + + ark->bsize = bsize; + ark->rthread = 0; + ark->persload = 0; + + // Create the KV storage, whether that will be memory based + // or flash + ark->ea = ea_new(path, ark->bsize, basyncs, &size, &bcount, + (flags & ARK_KV_VIRTUAL_LUN)); + if (ark->ea == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "KV storage initialization failed: %d", rc); + goto ark_create_ea_err; + } + + // Now that the "connection" to the store has been established + // we need to check to see if data was persisted from a previous + // instantiation of the KV store. + p_rc = ark_check_persistence(ark, flags); + if (p_rc > 0) + { + // We ran into an error while trying to read from + // the store. 
+ rc = p_rc; + KV_TRC_FFDC(pAT, "Persistence check failed: %d", rc); + goto ark_create_persist_err; + } + else if (p_rc == -1) + { + // There was no persistence data, so we just build off + // of what was passed into the API. + + ark->size = size; + ark->bcount = bcount; + ark->hcount = hcount; + ark->vlimit = ARK_VERBOSE_VLIMIT_DEF; + ark->blkbits = ARK_VERBOSE_BLKBITS_DEF; + ark->grow = ARK_VERBOSE_GROW_DEF; + ark->rthread = 0; + ark->flags = flags; + ark->astart = 0; + ark->blkused = 1; + ark->ark_exit = 0; + ark->nactive = 0; + ark->nasyncs = ((nqueue <= 0) ? ARK_MAX_ASYNC_OPS : nqueue); + ark->basyncs = basyncs; + ark->pers_stats.kv_cnt = 0; + ark->pers_stats.blk_cnt = 0; + ark->pers_stats.byte_cnt = 0; + ark->ntasks = ARK_MAX_TASK_OPS; + ark->pcmd = PT_IDLE; + + // We need to create at least 1 pool thread since SYNC ops + // are scheduled as ASYNC ops, with the caller waiting for + // the result before returning. + nthrds = ((nthrds <= 0) ? 4 : nthrds); + ark->nthrds = nthrds; + + // Create the requests and tag control blocks and queues. + x = ark->hcount / ark->nthrds; + ark->npart = x + (ark->hcount % ark->nthrds ? 
1 : 0); + + // Create the hash table + ark->ht = hash_new(ark->hcount); + if (ark->ht == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "Hash initialization failed: %d", rc); + goto ark_create_ht_err; + } + + // Create the block list + ark->bl = bl_new(ark->bcount, ark->blkbits); + if (ark->bl == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "Block list initialization failed: %d", rc); + goto ark_create_bl_err; + } + + if (flags & ARK_KV_PERSIST_STORE) + { + ark_persistence_calc(ark); + bl_adjust(ark->bl, ark->persblocks); + } + } + + rc = pthread_mutex_init(&ark->mainmutex,NULL); + if (rc != 0) + { + KV_TRC_FFDC(pAT, "pthread_mutex_init for main mutex failed: %d", rc); + goto ark_create_pth_mutex_err; + } + + ark->rtags = tag_new(ark->nasyncs); + if ( NULL == ark->rtags ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Tag initialization for requests failed: %d", rc); + goto ark_create_rtag_err; + } + + ark->ttags = tag_new(ark->ntasks); + if ( NULL == ark->ttags ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Tag initialization for tasks failed: %d", rc); + goto ark_create_ttag_err; + } + + ark->rcbs = am_malloc(ark->nasyncs * sizeof(rcb_t)); + if ( NULL == ark->rcbs ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocation of %"PRIu64" bytes for request control blocks", (ark->nasyncs * sizeof(rcb_t))); + goto ark_create_rcbs_err; + } + + ark->tcbs = am_malloc(ark->ntasks * sizeof(tcb_t)); + if ( NULL == ark->tcbs ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocation of %"PRIu64" bytes for task control blocks", (ark->ntasks * sizeof(rcb_t))); + goto ark_create_tcbs_err; + } + + ark->iocbs = am_malloc(ark->ntasks * sizeof(iocb_t)); + if ( NULL == ark->iocbs ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocation of %"PRIu64" bytes for io control blocks", (ark->ntasks * sizeof(iocb_t))); + goto ark_create_iocbs_err; + } + + ark->poolthreads = am_malloc(ark->nthrds * sizeof(scb_t)); + if ( NULL == ark->poolthreads ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out 
of memory allocation of %"PRIu64" bytes for server thread control blocks", (ark->nthrds * sizeof(scb_t))); + goto ark_create_poolthreads_err; + } + + for ( rnum = 0; rnum < ark->nasyncs ; rnum++ ) + { + ark->rcbs[rnum].stat = A_NULL; + pthread_cond_init(&(ark->rcbs[rnum].acond), NULL); + pthread_mutex_init(&(ark->rcbs[rnum].alock), NULL); + } + + for ( tnum = 0; tnum < ark->ntasks; tnum++ ) + { + ark->tcbs[tnum].inb = bt_new(0, ark->vlimit, sizeof(uint64_t), + &(ark->tcbs[tnum].inblen), + &(ark->tcbs[tnum].inb_orig)); + if (ark->tcbs[tnum].inb == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "Bucket allocation for inbuffer failed: %d", rc); + goto ark_create_taskloop_err; + } + + ark->tcbs[tnum].oub = bt_new(0, ark->vlimit, sizeof(uint64_t), + &(ark->tcbs[tnum].oublen), + &(ark->tcbs[tnum].oub_orig)); + if (ark->tcbs[tnum].oub == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "Bucket allocation for outbuffer failed: %d", rc); + goto ark_create_taskloop_err; + } + + //ark->tcbs[tnum].vbsize = bsize * 1024; + ark->tcbs[tnum].vbsize = bsize * 256; + ark->tcbs[tnum].vb_orig = am_malloc(ark->tcbs[tnum].vbsize); + if (ark->tcbs[tnum].vb_orig == NULL) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocation for %"PRIu64" bytes for variable size buffer", (bsize * 1024)); + goto ark_create_taskloop_err; + } + ark->tcbs[tnum].vb = ptr_align(ark->tcbs[tnum].vb_orig); + } + + *arkret = (void *)ark; + + ark->pts = (PT *)am_malloc(sizeof(PT) * ark->nthrds); + if ( ark->pts == NULL ) + { + rc = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory allocation for %"PRIu64" bytes for server thread data", (sizeof(PT) * ark->nthrds)); + goto ark_create_taskloop_err; + } + + for (i = 0; i < ark->nthrds; i++) { + PT *pt = &(ark->pts[i]); + scbp = &(ark->poolthreads[i]); + + memset(scbp, 0, sizeof(scb_t)); + + // Start off the random start point for this thread + // at -1, to show that it has not been part of a + // ark_random call. 
+ scbp->rlast = -1; + scbp->holds = 0; + scbp->poolstate = PT_RUN; + + scbp->poolstats.io_cnt = 0; + scbp->poolstats.ops_cnt = 0; + scbp->poolstats.kv_cnt = 0; + scbp->poolstats.blk_cnt = 0; + scbp->poolstats.byte_cnt = 0; + + pthread_mutex_init(&(scbp->poolmutex), NULL); + pthread_cond_init(&(scbp->poolcond), NULL); + + scbp->rqueue = queue_new(ark->nasyncs); + scbp->tqueue = queue_new(ark->ntasks); + scbp->ioqueue = queue_new(ark->ntasks); + + pt->id = i; + pt->ark = ark; + rc = pthread_create(&(scbp->pooltid), NULL, pool_function, pt); + if (rc != 0) + { + KV_TRC_FFDC(pAT, "pthread_create of server thread failed: %d", rc); + goto ark_create_poolloop_err; + } + } + +#if 0 + while (ark->nactive < ark->nthrds) { + usleep(1); + //printf("Create waiting %d/%d\n", ark->nactive, ark->nthrds); + } +#endif + + ark->pcmd = PT_RUN; + + goto ark_create_return; + +ark_create_poolloop_err: + + for (; i >= 0; i--) + { + scbp = &(ark->poolthreads[i]); + + if (scbp->pooltid != 0) + { + queue_lock(scbp->rqueue); + queue_wakeup(scbp->rqueue); + queue_unlock(scbp->rqueue); + pthread_join(scbp->pooltid, NULL); + + pthread_mutex_destroy(&(scbp->poolmutex)); + pthread_cond_destroy(&(scbp->poolcond)); + + if ( scbp->rqueue != NULL ) + { + queue_free(scbp->rqueue); + } + + if ( scbp->tqueue != NULL ) + { + queue_free(scbp->tqueue); + } + + if ( scbp->ioqueue != NULL ) + { + queue_free(scbp->ioqueue); + } + } + } + + if ( ark->pts != NULL ) + { + am_free(ark->pts); + } + +ark_create_taskloop_err: + for ( tnum = 0; tnum < ark->ntasks; tnum++ ) + { + if (ark->tcbs[tnum].inb) + { + bt_delete(ark->tcbs[tnum].inb); + } + + if (ark->tcbs[tnum].oub) + { + bt_delete(ark->tcbs[tnum].oub); + } + + if (ark->tcbs[tnum].vb_orig) + { + am_free(ark->tcbs[tnum].vb_orig); + } + } + + for (rnum = 0; rnum < ark->nasyncs; rnum++) + { + pthread_cond_destroy(&(ark->rcbs[rnum].acond)); + pthread_mutex_destroy(&(ark->rcbs[rnum].alock)); + } + + if ( ark->poolthreads != NULL ) + { + am_free(ark->poolthreads); + 
} + +ark_create_poolthreads_err: + if (ark->iocbs) + { + am_free(ark->iocbs); + } + +ark_create_iocbs_err: + if (ark->tcbs) + { + am_free(ark->tcbs); + } + +ark_create_tcbs_err: + if (ark->rcbs) + { + am_free(ark->rcbs); + } + +ark_create_rcbs_err: + if (ark->ttags) + { + tag_free(ark->ttags); + } + +ark_create_ttag_err: + if (ark->rtags) + { + tag_free(ark->rtags); + } + +ark_create_rtag_err: + pthread_mutex_destroy(&ark->mainmutex); + +ark_create_pth_mutex_err: + bl_delete(ark->bl); + +ark_create_bl_err: + hash_free(ark->ht); + +ark_create_ht_err: +ark_create_persist_err: + ea_delete(ark->ea); + +ark_create_ea_err: + am_free(ark); + *arkret = NULL; + +ark_create_ark_err: + KV_TRC_CLOSE(pAT); + +ark_create_return: + return rc; +} + +int ark_create(char *path, ARK **arkret, uint64_t flags) +{ + return ark_create_verbose(path, arkret, + ARK_VERBOSE_SIZE_DEF, + ARK_VERBOSE_BSIZE_DEF, + ARK_VERBOSE_HASH_DEF, + ARK_VERBOSE_NTHRDS_DEF, + ARK_MAX_ASYNC_OPS, + ARK_EA_BLK_ASYNC_CMDS, + flags); +} + +int ark_delete(ARK *ark) { + int rc = 0; + int i = 0; + _ARK *_arkp = (_ARK *)ark; + scb_t *scbp = NULL; + + if (NULL == ark) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "Invalid ARK control block parameter: %d", rc); + goto ark_delete_ark_err; + } + + // Wait for all active threads to exit + for (i = 0; i < _arkp->nthrds; i++) + { + scbp = &(_arkp->poolthreads[i]); + scbp->poolstate = PT_EXIT; + + queue_lock(scbp->rqueue); + queue_wakeup(scbp->rqueue); + queue_unlock(scbp->rqueue); + + pthread_join(scbp->pooltid, NULL); + + queue_free(scbp->rqueue); + queue_free(scbp->tqueue); + queue_free(scbp->ioqueue); + + pthread_mutex_destroy(&(scbp->poolmutex)); + pthread_cond_destroy(&(scbp->poolcond)); + KV_TRC(pAT, "thread %d joined", i); + } + + if (_arkp->poolthreads) am_free(_arkp->poolthreads); + + if (_arkp->pts) am_free(_arkp->pts); + + for ( i = 0; i < _arkp->nasyncs ; i++ ) + { + pthread_cond_destroy(&(_arkp->rcbs[i].acond)); + pthread_mutex_destroy(&(_arkp->rcbs[i].alock)); + } + + 
for ( i = 0; i < _arkp->ntasks; i++ ) + { + + bt_delete(_arkp->tcbs[i].inb); + bt_delete(_arkp->tcbs[i].oub); + am_free(_arkp->tcbs[i].vb_orig); + } + + if (_arkp->iocbs) + { + am_free(_arkp->iocbs); + } + + if (_arkp->tcbs) + { + am_free(_arkp->tcbs); + } + + if (_arkp->rcbs) + { + am_free(_arkp->rcbs); + } + + if (_arkp->ttags) + { + tag_free(_arkp->ttags); + } + + if (_arkp->rtags) + { + tag_free(_arkp->rtags); + } + + if (!(_arkp->flags & ARK_KV_VIRTUAL_LUN)) + { + rc = ark_persist(_arkp); + if ( rc != 0 ) + { + KV_TRC_FFDC(pAT, "FFDC: ark_persist failed: %d", rc); + } + } + + pthread_mutex_destroy(&_arkp->mainmutex); + + (void)ea_delete(_arkp->ea); + hash_free(_arkp->ht); + bl_delete(_arkp->bl); + KV_TRC(pAT, "ark_delete done %p", _arkp); + am_free(_arkp); + +ark_delete_ark_err: + KV_TRC_CLOSE(pAT); + return rc; +} + +int ark_connect_verbose(ARC **arc, ARK *ark, int nasync) +{ + int rc = 0; + if ((arc == NULL) || (ark == NULL)) + { + rc = EINVAL; + } + else + { + *arc = ark; + } + + return rc; +} + +int ark_connect(ARC **arc, ARK *ark) +{ + return ark_connect_verbose(arc, ark, 128); +} + +int ark_disconnect(ARC *arc) +{ + int rc = 0; + + if (arc == NULL) + { + rc = EINVAL; + } + + return rc; +} + +// if successful then returns vlen else returns negative number error code +int ark_set(ARK *ark, uint64_t klen, + void *key, uint64_t vlen, void *val, int64_t *rval) { + int rc = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((_arkp == NULL) || klen < 0 || vlen < 0 || + ((klen > 0) && (key == NULL)) || + ((vlen > 0) && (val == NULL)) || + (rval == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ark %p, klen %"PRIu64",\ + key %p, vlen %"PRIu64", val %p, rval %p", ark, klen, key, vlen, val, rval); + rc = EINVAL; + } + else + { + *rval = -1; + tag = ark_set_async_tag(_arkp, klen, key, vlen, val); + if (tag < 0) + { + rc = EAGAIN; + KV_TRC_FFDC(pAT, "ark_set_async_tag failed rc = %d", rc); + } + else + { + // Will wait here for 
command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + if (errcode != 0) + { + KV_TRC_FFDC(pAT, "ark_wait_tag failed rc = %d", errcode); + rc = errcode; + } + + *rval = res; + } + } + } + + return rc; +} + + +// if success returns the size of value +int ark_exists(ARK *ark, uint64_t klen, void *key, int64_t *rval) { + int rc = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((_arkp == NULL) || klen < 0 || + ((klen > 0) && (key == NULL)) || + (rval == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ark %p, klen %"PRIu64",\ + key %p, rval %p", ark, klen, key, rval); + rc = EINVAL; + } + else + { + *rval = -1; + tag = ark_exists_async_tag(_arkp, klen, key); + if (tag < 0) + { + KV_TRC_FFDC(pAT, "ark_set_async_tag failed rc = %d", rc); + rc = EAGAIN; + } + else + { + // Will wait here for command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + if (errcode != 0) + { + KV_TRC_FFDC(pAT, "ark_wait_tag failed rc = %d", errcode); + rc = errcode; + } + + *rval = res; + } + } + } + + return rc; +} + +// if successful returns length of value +// vbuf is filled with value starting at voff bytes in, +// at most vbuflen bytes returned +int ark_get(ARK *ark, uint64_t klen, void *key, + uint64_t vbuflen, void *vbuf, uint64_t voff, int64_t *rval) { + + int rc = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((_arkp == NULL) || + ((klen > 0) && (key == NULL)) || + ((vbuflen > 0) && (vbuf == NULL)) || + (rval == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ark %p, klen %"PRIu64", key %p, \ +vbuflen %"PRIu64", vbuf %p, rval %p", + _arkp, klen, key, vbuflen, vbuf, rval); + rc = EINVAL; + } + else + { + *rval = -1; + tag = ark_get_async_tag(_arkp, klen, key, vbuflen, vbuf, voff); + if (tag < 0) + { + KV_TRC_FFDC(pAT, "ark_get_async_tag failed rc = %d", rc); + rc = EAGAIN; + } + else + { + // Will wait here for command to complete + rc = 
ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + if (errcode != 0) + { + KV_TRC_FFDC(pAT, "ark_wait_tag failed rc = %d", errcode); + rc = errcode; + } + + *rval = res; + } + } + } + + return rc; +} + +// if success returns size of value deleted +int ark_del(ARK *ark, uint64_t klen, void *key, int64_t *rval) { + int rc = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((_arkp == NULL) || + ((klen > 0) && (key == NULL)) || + (rval == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ark %p, klen %"PRIu64", key %p, rval %p", + _arkp, klen, key, rval); + rc = EINVAL; + } + else + { + *rval = -1; + tag = ark_del_async_tag(_arkp, klen, key); + if (tag < 0) + { + KV_TRC_FFDC(pAT, "ark_del_async_tag failed rc = %d", rc); + rc = EAGAIN; + } + else + { + // Will wait here for command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + if (errcode != 0) + { + KV_TRC_FFDC(pAT, "ark_wait_tag failed rc = %d", errcode); + rc = errcode; + } + + *rval = res; + } + } + } + + return rc; +} + +int ark_random(ARK *ark, uint64_t kbuflen, uint64_t *klen, void *kbuf) +{ + uint64_t i; + int32_t rc = 0; + int32_t done = 0; + int32_t ptid = -1; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + if (_arkp == NULL || 0 >= kbuflen || NULL == klen || NULL == kbuf) + { + KV_TRC_FFDC(pAT, "rc = EINVAL ark %p, kbuflen %"PRIu64", klen %p, kbuf %p", _arkp, kbuflen, klen, kbuf); + rc = EINVAL; + } + else + { + *klen = -1; + + ptid = _arkp->rthread; + + // Because the keys are stored in a hash table, their order + // by nature is random. Therefore, let's just pick a pool + // thread and see if it has any k/v pairs that it is monitoring + // and return those. THen move on to the next pool thread + // once done with the first thread...and so on. 
+ for (i = 0; (i <= _arkp->nthrds) && (!done); i++) + { + tag = ark_rand_async_tag(_arkp, kbuflen, kbuf, ptid); + if (tag < 0) + { + KV_TRC_FFDC(pAT, "ark_rand_async_tag failed rc = %d", rc); + rc = EAGAIN; + done = 1; + } + else + { + // Will wait here for command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + + // If EAGAIN is returned, we need to try the next + // pool thread and see if it has any keys. All + // other errors (or success) we stop looking. + if (errcode != EAGAIN) + { + *klen = res; + done = 1; + + // Remember what thread we left off on + _arkp->rthread = ptid; + } + } + } + + ptid++; + if (ptid == _arkp->nthrds) + { + // Loop back around to pool thread 0 and continue looking. + ptid = 0; + } + } + + // If done is not set, that means we didn't find a single + // key/value pair. + if (!done) + { + rc = ENOENT; + KV_TRC_FFDC(pAT, "No more key/value pairs rc = %d", ENOENT); + } + } + + return rc; +} + +ARI *ark_first(ARK *ark, uint64_t kbuflen, int64_t *klen, void *kbuf) { + _ARI *_arip = NULL; + uint64_t i; + int32_t rc = 0; + int32_t done = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = (_ARK *)ark; + + errno = 0; + + if ((_arkp == NULL) || + (klen == NULL) || + (kbuf == NULL) || + (kbuflen == 0)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ark %p kbuflen %ld klen %p kbuf %p", + _arkp, kbuflen, klen, kbuf); + errno = EINVAL; + } + else + { + _arip = am_malloc(sizeof(_ARI)); + if (NULL == _arip) + { + KV_TRC_FFDC(pAT, "Out of memory on allocation of %"PRIu64" bytes for random index control block", sizeof(_ARI)); + *klen = -1; + errno = ENOMEM; + } + else + { + *klen = -1; + + // Start with the first thread. We start with the first pool + // thread and get it's first key. If by chance the first thread + // does not have any key's in it's control, we move on to the next + // pool thread. 
+ for (i = 0; (i < _arkp->nthrds) && (!done); i++) + { + tag = ark_first_async_tag(_arkp, kbuflen, kbuf, _arip, i); + if (tag < 0) + { + am_free(_arip); + _arip = NULL; + rc = EAGAIN; + done = 1; + KV_TRC_FFDC(pAT, "ark_first_async_tag failed rc = %d", rc); + } + else + { + // Will wait here for command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + + // If EAGAIN is returned, we need to try the next + // pool thread and see if it has any keys. All + // other errors (or success) we stop looking. + if (errcode != EAGAIN) + { + *klen = res; + done = 1; + + // Remember what thread we left off on + _arip->ithread = i; + } + } + } + } + + // If done is not set, that means we didn't find a single + // key/value pair. + if (!done) + { + am_free(_arip); + _arip = NULL; + rc = ENOENT; + KV_TRC_FFDC(pAT, "No more key/value pairs in the store: rc = %d", rc); + } + else + { + if (rc) + { + am_free(_arip); + _arip = NULL; + } + } + } + } + + return (ARI *)_arip; +} + +int ark_next(ARI *iter, uint64_t kbuflen, int64_t *klen, void *kbuf) +{ + _ARI *_arip = (_ARI *)iter; + uint64_t i; + int32_t rc = 0; + int32_t done = 0; + int errcode = 0; + int tag = 0; + int64_t res = 0; + _ARK *_arkp = NULL; + + if ((_arip == NULL) || + (_arip->ark == NULL) || + (klen == NULL) || + (kbuf == NULL) || + (kbuflen == 0)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: ari %p kbuflen %ld klen %p kbuf %p", + _arip, kbuflen, klen, kbuf); + rc = EINVAL; + } + else + { + _arkp = _arip->ark; + *klen = -1; + + // Start with the thread we left off on last time and then loop + // to the end + for (i = _arip->ithread; (i < _arkp->nthrds) && (!done); i++) + { + tag = ark_next_async_tag(_arkp, kbuflen, kbuf, _arip, i); + if (tag < 0) + { + rc = EAGAIN; + KV_TRC_FFDC(pAT, "ark_next_async_tag failed rc = %d", rc); + done = 1; + } + else + { + // Will wait here for command to complete + rc = ark_wait_tag(_arkp, tag, &errcode, &res); + if (rc == 0) + { + + // If EAGAIN is returned, we need 
to try the next + // pool thread and see if it has any keys. All + // other errors (or success) we stop looking. + if (errcode != EAGAIN) + { + *klen = res; + done = 1; + + // Remember what thread we left off on + _arip->ithread = i; + } + } + } + } + + // If done is not set, that means we didn't find a single + // key/value pair. + if (!done) + { + am_free(_arip); + _arip = NULL; + rc = ENOENT; + KV_TRC_FFDC(pAT, "No more key/value pairs in the store: rc = %d", rc); + } + } + + return rc; +} + +int ark_null_async_cb(ARK *ark, uint64_t klen, void *key, + void (*cb)(int errcode, uint64_t dt, int64_t res), uint64_t dt) { + if (NULL == ark || cb == NULL) + { + KV_TRC_FFDC(pAT, "FFDC EINVAL, ark %p cb %p", ark, cb); + return EINVAL; + } + else + { + _ARK *_arkp = (_ARK *)ark; + return ark_enq_cmd(K_NULL,_arkp,klen,key,0,0,0,cb,dt, -1, NULL); + } +} +int ark_set_async_cb(ARK *ark, uint64_t klen, void *key, uint64_t vlen, void *val, + void (*cb)(int errcode, uint64_t dt, int64_t res), uint64_t dt) { + if (NULL == ark || ((vlen > 0) && (val == NULL)) || + (cb == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: vlen %"PRIu64", val %p, cb %p", + vlen, val, cb); + return EINVAL; + } + else + { + _ARK *_arkp = (_ARK *)ark; + return ark_enq_cmd(K_SET, _arkp, klen,key,vlen,val,0,cb,dt, -1, NULL); + } +} +int ark_get_async_cb(ARK *ark, uint64_t klen,void *key,uint64_t vbuflen,void *vbuf,uint64_t voff, + void (*cb)(int errcode, uint64_t dt, int64_t res), uint64_t dt) { + if (NULL == ark || ((vbuflen > 0) && (vbuf == NULL))|| + (cb == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: vbuflen %"PRIu64", vbuf %p, cb %p", + vbuflen, vbuf, cb); + return EINVAL; + } + else + { + _ARK *_arkp = (_ARK *)ark; + return ark_enq_cmd(K_GET, _arkp, klen,key,vbuflen,vbuf,voff,cb,dt, -1, NULL); + } +} +int ark_del_async_cb(ARK *ark, uint64_t klen, void *key, + void (*cb)(int errcode, uint64_t dt, int64_t res), uint64_t dt) { + if (NULL == ark || cb == NULL) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: cb %p", cb); 
+ return EINVAL; + } + else + { + _ARK *_arkp = (_ARK *)ark; + return ark_enq_cmd(K_DEL, _arkp, klen,key,0,0,0,cb,dt, -1, NULL); + } +} +int ark_exists_async_cb(ARK *ark, uint64_t klen, void *key, + void (*cb)(int errcode, uint64_t dt, int64_t res), uint64_t dt) { + if (NULL == ark || cb == NULL) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: cb %p", cb); + return EINVAL; + } + else + { + _ARK *_arkp = (_ARK *)ark; + return ark_enq_cmd(K_EXI, _arkp, klen,key,0,0,0,cb,dt, -1, NULL); + } +} + + +int ark_null_async_tag(_ARK *_arkp, uint64_t klen, void *key) { + int tag = -1; + (void)ark_enq_cmd(K_NULL, _arkp, klen,key,0,0,0,NULL,0, -1, &tag); + return tag; +} +int ark_set_async_tag(_ARK *_arkp, uint64_t klen, void *key, uint64_t vlen, void *val) { + int tag = -1; + (void)ark_enq_cmd(K_SET, _arkp, klen,key,vlen,val,0,NULL,0, -1, &tag); + return tag; +} +int ark_get_async_tag(_ARK *_arkp, uint64_t klen,void *key,uint64_t vbuflen,void *vbuf,uint64_t voff) { + int tag = -1; + (void)ark_enq_cmd(K_GET, _arkp, klen,key,vbuflen,vbuf,voff,NULL,0, -1, &tag); + return tag; +} +int ark_del_async_tag(_ARK *_arkp, uint64_t klen, void *key) { + int tag = -1; + ark_enq_cmd(K_DEL, _arkp, klen,key,0,0,0,NULL,0, -1, &tag); + return tag; +} +int ark_exists_async_tag(_ARK *_arkp, uint64_t klen, void *key) { + int tag = -1; + (void)ark_enq_cmd(K_EXI, _arkp, klen,key,0,0,0,NULL,0, -1, &tag); + return tag; +} + +int ark_rand_async_tag(_ARK *_arkp, uint64_t klen, void *key, int32_t ptid) { + int tag = -1; + (void)ark_enq_cmd(K_RAND, _arkp, klen,key,0,NULL,0,NULL,0, ptid, &tag); + return tag; +} + +int ark_first_async_tag(_ARK *_arkp, uint64_t klen, void *key, _ARI *_arip, int32_t ptid) { + int tag; + (void)ark_enq_cmd(K_FIRST, _arkp, klen, key, 0, (void *)_arip, 0, NULL,0, ptid, &tag); + return tag; +} + +int ark_next_async_tag(_ARK *_arkp, uint64_t klen, void *key, _ARI *_arip, int32_t ptid) { + int tag; + (void)ark_enq_cmd(K_NEXT, _arkp, klen, key, 0, (void *)_arip, 0, NULL,0, ptid, &tag); + return 
tag; +} + +int ark_count(ARK *ark, int64_t *count) +{ + uint32_t i; + int32_t rc = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((_arkp == NULL) || (count == NULL)) + { + KV_TRC_FFDC(pAT, "rc = EINVAL ark %p, count %p", _arkp, count); + rc = EINVAL; + } + else + { + *count = _arkp->pers_stats.kv_cnt; + + for (i = 0; i < _arkp->nthrds; i++) + { + *count += _arkp->poolthreads[i].poolstats.kv_cnt; + } + } + + return rc; +} + +int ark_flush(ARK *ark) +{ + int rc = 0; + int i = 0; + _ARK *_arkp = (_ARK *)ark; + hash_t *ht = NULL; + BL *bl = NULL; + + if (ark == NULL) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "rc = %d", rc); + goto ark_flush_err; + } + + // recreate the hash and block lists first + ht = hash_new(_arkp->hcount); + if (ht == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "rc = %d", rc); + goto ark_flush_err; + } + + bl = bl_new(_arkp->bcount, _arkp->blkbits); + if (bl == NULL) + { + rc = errno; + KV_TRC_FFDC(pAT, "rc = %d", rc); + goto ark_flush_err; + } + + // If we've made it here then we delete the old + // hash and block list structures and set the new ones + hash_free(_arkp->ht); + bl_delete(_arkp->bl); + + _arkp->ht = ht; + _arkp->bl = bl; + + // We need to reset the counts for each pool thread + for (i = 0; i < _arkp->nthrds; i++) + { + _arkp->poolthreads[i].poolstats.blk_cnt = 0; + _arkp->poolthreads[i].poolstats.kv_cnt = 0; + _arkp->poolthreads[i].poolstats.byte_cnt = 0; + } + +ark_flush_err: + if (rc) + { + // If we ran into an error, delete the newly + // created hash and block lists structures + if (ht) + { + hash_free(ht); + } + + if (bl) + { + bl_delete(bl); + } + } + + return rc; +} + +pid_t ark_fork(ARK *ark) +{ + _ARK *_arkp = (_ARK *)ark; + + // Let's start with an error just incase we don't make it + // to the fork() system call and the -1 will tell the + // caller an error was encountered and a child process + // was not created. 
+ pid_t cpid = -1; + + if (ark == NULL) + { + KV_TRC_FFDC(pAT, "rc = %s", "EINVAL"); + return cpid; + } + + // Tell the store to hold all "to be freed" blocks. This + // will be undid in ark_fork_done() or in the error + // case of fork below. + bl_hold(_arkp->bl); + + cpid = fork(); + + switch (cpid) + { + // Ran into an error, perform any cleanup before returning + case -1 : + bl_release(_arkp->bl); + break; + + // This is the child process. + case 0 : + { + // Need to do error checking for this block + int i; + + _arkp->pcmd = PT_RUN; + _arkp->ark_exit = 0; + + for (i = 0; i < _arkp->nthrds; i++) { + PT *pt = am_malloc(sizeof(PT)); + pt->id = i; + pt->ark = _arkp; + + _arkp->poolthreads[i].poolstate = PT_OFF; + pthread_mutex_init(&(_arkp->poolthreads[i].poolmutex),NULL); + pt->id = i; + pt->ark = _arkp; + pthread_create(&(_arkp->poolthreads[i].pooltid), NULL, pool_function, pt); + } + + if (_arkp->ea->st_type != EA_STORE_TYPE_MEMORY) + { + int c_rc = 0; + + c_rc = cblk_clone_after_fork(_arkp->ea->st_flash, O_RDONLY, 0); + + // If we encountered an error, force the child to + // exit with a non-zero status code + if (c_rc != 0) + { + KV_TRC_FFDC(pAT, "FFDC, rc = %d", c_rc); + _exit(c_rc); + } + } + break; + } + + // Parent process will go here, with cpid being the pid + // of the child process. 
+ default : + break; + } + + return cpid; +} + +int ark_fork_done(ARK *ark) +{ + int rc = 0; + _ARK *_arkp = (_ARK *)ark; + + if (ark == NULL) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "FFDC, rc = %s", "EINVAL"); + return rc; + } + + bl_release(_arkp->bl); + + return rc; +} + +int ark_inuse(ARK *ark, uint64_t *size) { + + _ARK *_arkp = (_ARK *)ark; + int rc = 0; + uint64_t i; + + if ((ark == NULL) || (size == NULL)) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "rc = %d", rc); + } + else + { + *size = _arkp->pers_stats.blk_cnt; + for (i = 0; i < _arkp->nthrds; i++) + { + *size += _arkp->poolthreads[i].poolstats.blk_cnt; + } + + *size = *size * _arkp->bsize; + } + + return rc; +} + +int ark_allocated(ARK *ark, uint64_t *size) +{ + int rc = 0; + _ARK *_arkp = (_ARK *)ark; + + if ((ark == NULL) || (size == NULL)) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "rc = %d", rc); + } + else + { + *size = _arkp->ea->size; + } + + return rc; +} + +int ark_actual(ARK *ark, uint64_t *size) +{ + int32_t rc = 0; + _ARK *_arkp = (_ARK *)ark; + uint64_t i; + + if ((ark == NULL) || (size == NULL)) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "rc = %d", rc); + } + else + { + *size = _arkp->pers_stats.byte_cnt; + for (i = 0; i < _arkp->nthrds; i++) + { + *size += _arkp->poolthreads[i].poolstats.byte_cnt; + } + } + + return rc; +} + +int ark_stats(ARK *ark, uint64_t *ops, uint64_t *ios) +{ + int rc = 0; + int i = 0; + _ARK *_arkp = (_ARK *)ark; + + if ( (ark == NULL) || (ops == NULL) || (ios == NULL) ) + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "rc = %d", rc); + } + else + { + *ops = 0; + *ios = 0; + + // Go through the pool threads and collect each + // threads counts + for (i = 0; i < _arkp->nthrds; i++) + { + *ops += _arkp->poolthreads[i].poolstats.ops_cnt; + *ios += _arkp->poolthreads[i].poolstats.io_cnt; + } + } + + return rc; +} diff --git a/src/kv/arkdb.h b/src/kv/arkdb.h new file mode 100644 index 00000000..6df975dd --- /dev/null +++ b/src/kv/arkdb.h @@ -0,0 +1,500 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is 
an automatically generated prolog. */ +/* */ +/* $Source: src/kv/arkdb.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __ARKDB_H__ +#define __ARKDB_H__ + +#include +#include +#include +#include + +typedef uint64_t ARK; +#define ARC ARK +typedef uint64_t ARI; + +// Bits that can be set in the flags parameter of ark_create +// and ark_create_verbose + +// By default, a physical LUN will be used for the ARK +// store. To use a virtual LUN on the provided device +// use the following flag. +#define ARK_KV_VIRTUAL_LUN 0x0000000000000001 + +// Will this store have persistence enabled. By +// default persistence is not done. +#define ARK_KV_PERSIST_STORE 0x0000000000000002 + +// Ignore persistence data (if present) and start +// from scratch +#define ARK_KV_PERSIST_LOAD 0x0000000000000004 + + +int ark_create_verbose(char *path, ARK **ark, + uint64_t size, uint64_t bsize, + uint64_t hcount, int nthreads, int nqueue, + int basyncs, uint64_t flags); + +/** + * @brief Create a key value store instance + * + * The ark_create API will create a key value store instance on the + * host system. + * + * The path parameter can be used to specify a specific CAPI adapter. 
+ * If the path parameter is not a CAPI adapter, the API will assume it + * is a file to be used for the key value store. If path is NULL, + * memory will be used for the key value store. + * + * Upon successful completion, the handle parameter will represent the + * newly created key value store instance to be used for future + * API calls. + * + * @param path Allows the user to specify a specific CAPI adapter + * a file or memory for the key value store + * @param ark Handle representing the key value store + * + * @param flags Bit field to hold configuration options for + * the ARK instance that is to be created. + * + * @return Upon successful completion, the ark_create API will return + * 0, and the handle parameter will point to the newly created + * key value store instance. If unsuccessful, the ark_create + * API will return a non-zero error code: + * + * @retval EINVAL Invalid value for one of the parameters + * @retval ENOMEM Not enough memory + * @retval ENOSPC Not enough flash storage + * @retval ENOTREADY System not ready for key value store support + */ +int ark_create(char *path, ARK **ark, uint64_t flags); + +/** + * @brief Delete a key value store instance + * + * The ark_delete API will delete a key value store instance specified + * by the ark parameter, on the host system. Upon successful + * completion all associated in memory and storage resources will + * be released at this time. + * + * @param ark Handle representing the key value store + * + * @return Upon successful completion, the ark_delete API will clean + * and remove all resources associated with the key value + * store instance and return 0. If unsuccessful, the + * ark_delete API will return a non-zero error code. 
+ * + * @retval EINVAL Invalid value for one of the parameters + */ +int ark_delete(ARK *ark); + +int ark_connect_verbose(ARC **arc, ARK *ark, int nasync); + +int ark_connect(ARC **arc, ARK *ark); + +int ark_disconnect(ARC *arc); + +/** + * @brief Write a key/value pair + * + * The ark_set API will store the key, key, and value, val, into + * the store for the key/value store instance represented by the + * ark parameter. The API, ark_set_async_cb, will behave in the same + * manner, but in an asynchronous fashion, where the API immediately + * returns to the caller and the actual operation is scheduled to + * run. After the operation is executed, the callback function will + * be called to notify the caller of completion. + * + * If the key is present, the store value will be replaced with the + * val value. + * + * Upon successful completion, the key/value pair will be present in + * the store and the number of bytes written will be returned to + * the caller through the res parameter. + * + * @param ark Handle representing the key/value store instance + * @param klen Length of the key in bytes + * @param key Key + * @param vlen Length of the value in bytes + * @param val Value + * @param res Upon success, number of bytes written to the store + * @param callback Function to call upon completion of the I/O operation + * @param dt 64bit value to tag an asynchronous API call. + * + * @return Upon successful completion, the ark_set and ark_set_async_cb + * API will write the key/value in the store associated with the + * key/value store instance and return the number of bytes written. + * The return of ark_set will indicate the status of the operation. The + * ark_set_async_cb API return will indicate whether the asynchronous + * operation was accepted or rejected. The true status will be stored + * in the errocde parameter when the callback function is executed. + * if unsuccessful, the ark_set and ark_set_async_cb API will return + * a non-zero error code. 
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ * @retval ENOSPC Not enough space left in key/value store
+ */
+int ark_set_async_cb(ARK *ark, uint64_t klen, void *key, uint64_t vlen,
+ void *val,
+ void (*cb)(int errcode, uint64_t dt, int64_t res),
+ uint64_t dt);
+
+int ark_set(ARK *ark, uint64_t klen, void *key, uint64_t vlen,
+ void *val, int64_t *res);
+
+/**
+ * @brief Retrieve a value for a given key
+ *
+ * The ark_get API will query the key/value store associated with the
+ * ark parameter for the given key, key. If found, the key's value will
+ * be returned in the vbuf parameter with at most vbuflen bytes written
+ * starting at the offset, voff, in the key's value. The API,
+ * ark_get_async_cb, will behave in the same manner, but in an
+ * asynchronous fashion, where the API immediately returns to the
+ * caller and the actual operation is scheduled to run.
+ * After the operation is executed, the callback
+ * function will be called to notify the caller of completion.
+ *
+ *
+ * Upon successful completion, the length of the key's value will
+ * be stored in the res parameter.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param klen Length of the key in bytes
+ * @param key Key
+ * @param vbuflen Length of the buffer, vbuf
+ * @param vbuf Buffer to store the key's value
+ * @param voff Offset into the value to start reading.
+ * @param res Upon success, size of the key value
+ * @param callback Function to call upon completion of the I/O operation
+ * @param dt 64bit value to tag an asynchronous API call.
+ *
+ * @return Upon successful completion, the ark_get and ark_get_async_cb
+ * API will return 0. The return of ark_get will indicate the status
+ * of the operation. The ark_get_async_cb API return will indicate
+ * whether the asynchronous operation was accepted or rejected. The
+ * true status of the asynchronous API will be stored in the errcode
+ * parameter of the callback function.
If unsuccessful, the ark_get + * and ark_get_async_cb API will return a non-zero error code + * + * @retval EINVAL Invalid value for one of the parameters + * @retval ENOSPC Not enough space left in key/value store + */ +int ark_get_async_cb(ARK *ark, uint64_t klen, void *key, uint64_t vbuflen, + void *vbuf, uint64_t voff, + void (*cb)(int errcode, uint64_t dt,int64_t res), + uint64_t dt); +int ark_get(ARK *ark, uint64_t klen, void *key, uint64_t vbuflen, + void *vbuf, uint64_t voff, int64_t *res); + +/** + * @brief Delete the value associated with a given key + * + * The ark_del API will query the key/value store associated with the + * handle parameter for the given key, key, and if found, will delete + * the value. The API, ark_del_async_cb, will behave in the same + * manner, but in an asynchronous fashion, where the API immediately + * returns to the caller and the actual operation is scheduled to run. + * After the operation is executed, the callback function will be + * called to notify the caller of completion. + * + * Upon successful completion, the length of the key's value will + * be stored in the res parameter. + * + * @param ark Handle representing the key/value store instance + * @param klen Length of the key in bytes + * @param key Key + * @param res Upon success, size of the key value + * @param callback Function to call upon completion of the I/O operation + * @param dt 64bit value to tag an asynchronous API call. + * + * @return Upon successful completion, the ark_del and ark_del_async_cb + * API will return 0. THe return of ark_del will indicate the status + * of the operation. The ark_del_async_cb API return will indicate + * whether the asynchronous operation was accepted or rejected. THe + * true status will be returned in the errcode parameter when the + * callback function is executed. 
If unsuccessful, the ark_del and
+ * ark_del_async_cb API will return a non-zero error code:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ * @retval ENOSPC Not enough space left in key/value store
+ */
+int ark_del_async_cb(ARK *ark, uint64_t klen, void *key,
+ void (*cb)(int errcode, uint64_t dt,int64_t res),
+ uint64_t dt);
+int ark_del(ARK *ark, uint64_t klen, void *key, int64_t *res);
+
+/**
+ * @brief Query the key/value store to see if a given key is present
+ *
+ * The ark_exists API will query the key/value store associated with
+ * the ark parameter for the given key, key, and if found,
+ * return the size of the value in bytes in the res parameter. The
+ * key and its value will not be altered. The API,
+ * ark_exists_async_cb, will behave in the same manner, but in an
+ * asynchronous fashion, where the API immediately returns to the
+ * caller and the actual operation is scheduled to run. After the
+ * operation is executed, the callback function will be called
+ * to notify the caller of completion.
+ *
+ * Upon successful completion, the length of the key's value will
+ * be stored in the res parameter.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param klen Length of the key in bytes
+ * @param key Key
+ * @param res Upon success, size of the key value
+ * @param callback Function to call upon completion of the I/O operation
+ * @param dt 64bit value to tag an asynchronous API call.
+ *
+ * @return Upon successful completion, the ark_exists and
+ * ark_exists_async_cb API will return 0. The return of ark_exists
+ * will indicate the status of the operation. The ark_exists_async_cb
+ * API return will indicate whether the asynchronous operation was
+ * accepted or rejected. The true status will be returned in the
+ * errcode parameter when the callback function is executed.
If + * unsuccessful, the ark_exists and ark_exists_async_cb API will + * return a non-zero error code: + * + * @retval EINVAL Invalid value for one of the parameters + * @retval ENOSPC Not enough space left in key/value store + */ +int ark_exists_async_cb(ARK *ark, uint64_t klen, void *key, + void (*cb)(int errcode, uint64_t dt, int64_t res), + uint64_t dt); +int ark_exists(ARK *ark, uint64_t klen, void *key, int64_t *res); + + +/** + * @brief Return the first key and handle to iterate through the store + * + * The ark_first API will return the first key found in the store + * in the store in the buffer, kbuf, and the size of the key in klen, + * as long as the size is less than the size of the kbuf, kbuflen. + * + * + * Upon successful completion, an iterator handle will be returned + * to the caller to be used to retrieve the next key in the store + * by calling the ark_next API. + * + * @param ark Handle representing the key/value store instance + * @param kbuflen Length of the kbuf parameter + * @param klen Size of the key returned in kbuf + * @param kbuf Buffer to hold the key + * + * @return Upon successful completion, the ark_first API will return + * a handle to be used to iterate through the store on subsequent + * calls using the ark_next API. If unsuccessful, the ark_first API + * will return NULL with errno set to one of the following: + * + * @retval EINVAL Invalid value for one of the parameters + * @retval ENOSPC Not enough space left in key/value store + */ +ARI *ark_first(ARK *ark, uint64_t kbuflen, int64_t *klen, void *kbuf); + +/** + * @brief Return the next key in the store + * + * The ark_next API will return the next key found in the store based + * on the iterator handle, iter, in the buffer, kbuf, and the size + * of the key in klen, as long as the size is less than the size of + * the kbuf, kbuflen. 
+ *
+ * Upon successful completion, a handle will be returned to the caller
+ * to be used to retrieve the next key in the store by calling the
+ * ark_next API. If the end of the store is reached, a NULL value
+ * is returned and errno set to ENOENT. Because of the dynamic nature
+ * of the store, some recently written keys may not be returned.
+ *
+ * @param iter Iterator handle where to begin search in store
+ * @param kbuflen Length of the kbuf parameter
+ * @param klen Size of the key returned in kbuf
+ * @param kbuf Buffer to hold the key
+ *
+ * @return Upon successful completion, the ark_next API will return a
+ * handle to be used to iterate through the store on subsequent calls
+ * using the ark_next API. If unsuccessful, the ark_next API will
+ * return a non-zero value of one of the following errors:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ * @retval ENOSPC Not enough space left in key/value store
+ */
+int ark_next(ARI *iter, uint64_t kbuflen, int64_t *klen, void *kbuf);
+
+/**
+ * @brief Return the number of bytes allocated in the store
+ *
+ * The ark_allocated API will return the number of bytes allocated
+ * in the store in the size parameter.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param size Will hold the size of the store in bytes
+ *
+ * @return Upon successful completion, the ark_allocated API will return
+ * zero. If unsuccessful, the ark_allocated API will return one
+ * of the following error codes:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ * @retval ENOSPC Not enough space left in key/value store
+ */
+int ark_allocated(ARK *ark, uint64_t *size);
+
+/**
+ * @brief Return the number of bytes in use in the store
+ *
+ * The ark_inuse API will return the number of bytes in use in the
+ * store in the size parameter.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param size Will hold the size of the number of blocks in use.
Size
+ * will be in bytes
+ *
+ * @return Upon successful completion, the ark_inuse API will return
+ * zero. If unsuccessful, the ark_inuse API will return one
+ * of the following error codes:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ */
+int ark_inuse(ARK *ark, uint64_t *size);
+
+/**
+ * @brief Return the actual number of bytes in use in the store
+ *
+ * The ark_actual API will return the actual number of bytes in use
+ * in the store in the size parameter. This differs from the ark_inuse
+ * API as this takes into account the actual sizes of the individual
+ * keys and their values instead of generic allocations based on blocks
+ * to store these values.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param size Will hold the actual number of bytes in use in the store
+ *
+ * @return Upon successful completion, the ark_actual API will return
+ * zero. If unsuccessful, the ark_actual API will return one of
+ * the following error codes:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ */
+int ark_actual(ARK *ark, uint64_t *size);
+
+/**
+ * @brief Return the count of the number of keys in the key/value store
+ *
+ * The ark_count API will return the total number of keys in the store
+ * based on the handle, ark, and store the result in the count
+ * parameter.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param count Number of keys found in the key/value store
+ *
+ * @return Upon successful completion, ark_count will return zero.
+ *
+ * Otherwise, a non-zero error code will be returned:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ */
+int ark_count(ARK *ark, int64_t *count);
+
+/**
+ * @brief Return a random key from the key/value store
+ *
+ * The ark_random API will return a random key found in the store
+ * based on the handle, ark, in the buffer, kbuf, and the size of
+ * the key in klen, as long as the size is less than the size of
+ * the kbuf, kbuflen.
+ *
+ * @param ark Handle representing the key/value store instance
+ * @param kbuflen Length of the kbuf parameter
+ * @param klen Size of the key returned in kbuf
+ * @param kbuf Buffer to hold the key
+ *
+ * @return Upon successful completion, ark_random will return zero. Otherwise,
+ * ark_random will return the following error codes:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ */
+int ark_random(ARK *ark, uint64_t kbuflen, uint64_t *klen, void *kbuf);
+
+/**
+ * @brief Flush the contents of the key/value store
+ *
+ * The ark_flush API will flush the contents of the key/value store
+ * represented by the ark parameter.
+ *
+ * Note, this will not actually write or clear data off the flash device
+ *
+ * @param ark Handle representing the key/value store instance
+ *
+ * @return Upon successful completion, the ark_flush API will return
+ * zero. If unsuccessful, the ark_flush API will return one
+ * of the following error codes:
+ *
+ * @retval EINVAL Invalid value for one of the parameters
+ */
+int ark_flush(ARK *ark);
+
+/**
+ * @brief Fork a key/value store for archiving purposes
+ *
+ * The ark_fork and ark_fork_done API's are to be called by the parent
+ * key/value store process to prepare the key/value store to be forked,
+ * fork the child process, and to perform any cleanup once it has been
+ * detected the child process has exited.
+ * + * The ark_fork API will fork a child process and upon return, will + * return the process ID of the child in the parent process, and + * 0 in the child process. Once the parent detects the child has + * exited, a call to ark_fork_done will be needed to clean up any + * state from the ark_fork call. + * + * @param ark Handle representing the key/value store instance + * + * @return Upon successful completion, ark_fork and ark_fork_done + * will return zero, otherwise one of the following errors: + * + * @retval EINVAL Invalid value for one of the parameters + */ +pid_t ark_fork(ARK *ark); +int ark_fork_done(ARK *ark); + +/** + * @brief Return the number of key/value ops and number of block ops + * + * The ark_stats API will return the total number of key/value + * operations in the ops parameter, and number of block operations + * in the ios parameter. + * + * @param ark Handle representing the key/value store instance + * @param ops Number of key/value operations + * @param ios Number of block operations + * + * @return Upon successful completion, the ark_stats API will return + * zero. If unsuccessful, the ark_allocated API will return one + * of the following error codes: + * + * @retval EINVAL Invalid value for one of the parameters + */ +int ark_stats(ARK *ark, uint64_t *ops, uint64_t *ios); + +#endif + diff --git a/src/kv/arkdb_trace.h b/src/kv/arkdb_trace.h new file mode 100644 index 00000000..c8e0ac07 --- /dev/null +++ b/src/kv/arkdb_trace.h @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/arkdb_trace.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern KV_Trace_t *pAT; diff --git a/src/kv/arp.c b/src/kv/arp.c new file mode 100644 index 00000000..23077f64 --- /dev/null +++ b/src/kv/arp.c @@ -0,0 +1,1183 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/arp.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include + +#include +#include + +#include "ct.h" +#include "ut.h" +#include "vi.h" + +#include "arkdb.h" +#include "ark.h" +#include "arp.h" +#include "am.h" + +#include + +int ark_wait_tag(_ARK *_arkp, int tag, int *errcode, int64_t *res) +{ + int rc = 0; + rcb_t *rcbp = &(_arkp->rcbs[tag]); + + pthread_mutex_lock(&(rcbp->alock)); + while (rcbp->stat != A_COMPLETE) + { + pthread_cond_wait(&(rcbp->acond), &(rcbp->alock)); + } + + if (rcbp->stat == A_COMPLETE) + { + *res = rcbp->res; + *errcode = rcbp->rc; + rcbp->stat = A_NULL; + tag_bury(_arkp->rtags, tag); + } + else + { + rc = EINVAL; + KV_TRC_FFDC(pAT, "tag = %d rc = %d", tag, rc); + } + + pthread_mutex_unlock(&(rcbp->alock)); + + return rc; +} + +int ark_anyreturn(_ARK *_arkp, int *tag, int64_t *res) { + int i; + int astart = _arkp->astart; + int nasyncs = _arkp->nasyncs; + int itag = -1; + + for (i = 0; i < _arkp->nasyncs; i++) { + itag = (astart + i) % nasyncs; + if (_arkp->rcbs[itag].stat == A_COMPLETE) { + //KV_TRC_IO(pAT, pAT, "Found completion tag %d", itag); + *tag = itag; + *res = _arkp->rcbs[(astart+i) % nasyncs].res; + KV_TRC_IO(pAT, "arp %p res %"PRIi64"", _arkp->rcbs+itag, + _arkp->rcbs[itag].res); + _arkp->rcbs[itag].stat = A_NULL; + tag_bury(_arkp->rtags, itag); + return 0; + } + } + _arkp->astart++; + _arkp->astart %= nasyncs; + return EINVAL; +} + +int64_t ark_take_pool(_ARK *_arkp, ark_stats_t *stats, uint64_t n) +{ + int64_t rc = 0; + + pthread_mutex_lock(&_arkp->mainmutex); + + if (bl_left(_arkp->bl) < n) + { + BL *bl = NULL; + int ea_rc = 0; + int64_t cursize = _arkp->size; + int64_t atleast = n * _arkp->bsize; + int64_t newsize = cursize + atleast + (_arkp->grow * _arkp->bsize); + + uint64_t newbcnt = newsize / _arkp->bsize; + + bl = bl_resize(_arkp->bl, newbcnt, _arkp->bl->w); + + if (bl == NULL) { + rc = -1; + } + else + { + ea_rc = ea_resize(_arkp->ea, _arkp->bsize, newbcnt); + if (ea_rc) { + rc = -1; + } else { + 
_arkp->size = newsize; + _arkp->bl = bl; + } + } + } + + if (rc != -1) + { + uint64_t blk = bl_take(_arkp->bl, n); + if (blk > 0) { + _arkp->blkused += n; + rc = blk; + } + } + else + { + KV_TRC_FFDC(pAT, "Failed to obtain blocks requested = %"PRIu64"", n); + } + + pthread_mutex_unlock(&_arkp->mainmutex); + + if (rc != -1) + { + stats->blk_cnt += n; + } + + return rc; +} + +void ark_drop_pool(_ARK *_arkp, ark_stats_t *stats, uint64_t blk) { + + pthread_mutex_lock(&_arkp->mainmutex); + int now = bl_drop(_arkp->bl, blk); + _arkp->blkused -= now; + pthread_mutex_unlock(&_arkp->mainmutex); + stats->blk_cnt -= now; + KV_TRC_IO(pAT, "blkused %ld new %d", _arkp->blkused, now); +} + + +int ark_enq_cmd(int cmd, _ARK *_arkp, uint64_t klen, void *key, + uint64_t vlen,void *val,uint64_t voff, + void (*cb)(int errcode, uint64_t dt,int64_t res), + uint64_t dt, int pthr, int *ptag) +{ + int32_t rtag = -1; + int rc = 0; + int pt = 0; + rcb_t *rcbp = NULL; + + if ( (_arkp == NULL) || ((klen > 0) && (key == NULL))) + { + KV_TRC_FFDC(pAT, "rc = EINVAL: cmd %d, ark %p, \ + key %p, klen %"PRIu64"", cmd, _arkp, key, klen); + rc = EINVAL; + goto ark_enq_cmd_err; + } + + rc = tag_unbury(_arkp->rtags, &rtag); + if (rc == 0) + { + rcbp = &(_arkp->rcbs[rtag]); + rcbp->ark = _arkp; + rcbp->cmd = cmd; + rcbp->rtag = rtag; + rcbp->ttag = -1; + rcbp->stat = A_INIT; + rcbp->klen = klen; + rcbp->key = key; + rcbp->vlen = vlen; + rcbp->val = val; + rcbp->voff = voff; + rcbp->cb = cb; + rcbp->dt = dt; + rcbp->rc = 0; + rcbp->hold = -1; + rcbp->res = -1; + + // If the caller has asked to run on a specific thread, then + // do so. 
Otherwise, use the key to find which thread + // will handle command + if (pthr == -1) + { + rcbp->pos = hash_pos(_arkp->ht, key, klen); + rcbp->sthrd = rcbp->pos / _arkp->npart; + pt = rcbp->pos / _arkp->npart; + } + else + { + rcbp->sthrd = pthr; + pt = pthr; + } + + KV_TRC_IO(pAT, "%s%p enqueueing on P%d%s", C_Yellow, _arkp, rcbp->sthrd, C_Reset); + + queue_lock(_arkp->poolthreads[pt].rqueue); + + (void)queue_enq_unsafe(_arkp->poolthreads[pt].rqueue, rtag); + + queue_unlock(_arkp->poolthreads[pt].rqueue); + + } + else + { + KV_TRC_DBG(pAT, "NO_TAG: %"PRIu64"", dt); + } + +ark_enq_cmd_err: + + // If the caller is interested in the tag, give it back to + // them. If they aren't, no need to set it + if (ptag != NULL) + { + *ptag = rtag; + } + return rc; +} + +int ark_rand_pool(_ARK *_arkp, int id, tcb_t *tcbp) { + // find the hashtable position + rcb_t *rcbp = &(_arkp->rcbs[tcbp->rtag]); + int32_t found = 0; + int32_t i = 0; + int32_t ea_rc = 0; + int32_t state = ARK_CMD_DONE; + uint8_t *buf = NULL; + uint8_t *pos = NULL; + uint64_t hblk; + uint64_t pkl; + uint64_t pvl; + uint64_t btsize; + int64_t blen = 0; + void *kbuf = rcbp->key; + BT *bt = NULL; + BT *bt_orig = NULL; + ark_io_list_t *bl_array = NULL; + scb_t *scbp = &(_arkp->poolthreads[id]); + + // Check to see if this thead has any keys to begin with. + // If it doesn't reset the rlast field and return immediately. 
+ if (scbp->poolstats.kv_cnt == 0) + { + rcbp->res = -1; + scbp->rlast = -1; + rcbp->rc = EAGAIN; + goto ark_rand_pool_err; + } + + // Check to see if we start off at the beginning + // of the thread's monitored hash buckets or + // if we need to pick up where we left off last time + i = scbp->rlast; + if ( i == -1 ) + { + i = id; + } + + // Now that we have the starting point, loop + // through the buckets for this thread and find + // the first bucket with blocks + for (; (i < _arkp->hcount) && (!found); i += _arkp->nthrds) + { + + hblk = HASH_LBA(HASH_GET(_arkp->ht, i)); + + // Found an entry with blocks + if (hblk>0) + { + blen = bl_len(_arkp->bl, hblk); + bt = bt_new(divceil(blen * _arkp->bsize, + _arkp->bsize) * _arkp->bsize, + _arkp->vlimit, 8, &(btsize), &bt_orig); + if (bt == NULL) + { + rcbp->res = -1; + rcbp->rc = ENOMEM; + KV_TRC_FFDC(pAT, "Bucket create failed blen %"PRIi64"", blen); + goto ark_rand_pool_err; + } + + buf = (uint8_t *)bt; + bl_array = bl_chain(_arkp->bl, hblk, blen); + if (bl_array == NULL) + { + rcbp->res = -1; + rcbp->rc = ENOMEM; + KV_TRC_FFDC(pAT, "Can not create block list %"PRIi64"", blen); + bt_delete(bt_orig); + goto ark_rand_pool_err; + } + + ea_rc = ea_async_io(_arkp->ea, ARK_EA_READ, (void *)buf, + bl_array, blen, _arkp->nthrds); + if (ea_rc != 0) + { + rcbp->res = -1; + rcbp->rc = ea_rc; + free(bl_array); + bt_delete(bt_orig); + bt = NULL; + KV_TRC_FFDC(pAT, "IO failure for READ rc = %d", ea_rc); + goto ark_rand_pool_err; + } + + free(bl_array); + + found = 1; + } + } + + if (found) + { + // Start position at the beginning of the bucket + pos = bt->data; + + // If we've made it here, we have our bucket, whether it be + // a new bucket from a new hash table entry, or the old + // bucket that still has keys that haven't been processed. 
+ pos += vi_dec64(pos, &pkl); + pos += vi_dec64(pos, &pvl); + + // Record the true length of the key + rcbp->res = pkl; + + // Copy the key into the buffer for the passed in length + memcpy(kbuf, pos, (pkl > rcbp->klen? rcbp->klen : pkl)); + + // Delete the bucket since we are now done with it + bt_delete(bt_orig); + + // This will be the starting point on the next call to ark_random + // for this thread + scbp->rlast = i + _arkp->nthrds; + } + else + { + rcbp->res = -1; + + // We didn't find a key here, so return EAGAIN so that + // ark_random can determine if it wants to try on the + // next pool thread. Also reset the start point + // for this thread + scbp->rlast = -1; + rcbp->rc = EAGAIN; + } + +ark_rand_pool_err: + return state; +} + +// arp->res returns the length of the key +// arp->val is for the ARI structure...both for +// passing in and returning. +int ark_first_pool(_ARK *_arkp, int id, tcb_t *tcbp) { + rcb_t *rcbp = &(_arkp->rcbs[tcbp->rtag]); + _ARI *_arip = NULL; + uint64_t i; + int32_t rc = 0; + int32_t state = ARK_CMD_DONE; + int32_t found = 0; + uint8_t *buf = NULL; + uint8_t *pos = NULL; + uint64_t hblk; + uint64_t pkl; + uint64_t pvl; + int64_t blen = 0; + void *kbuf = rcbp->key; + ark_io_list_t *bl_array = NULL; + + _arip = (_ARI *)rcbp->val; + _arip->ark = _arkp; + _arip->bt = NULL; + _arip->hpos = 0; + _arip->key = 0; + _arip->pos = NULL; + + // Now that we have the starting point, loop + // through the buckets for this thread and find + // the first bucket with blocks + for (i = id; (i < _arkp->hcount) && (!found); i += _arkp->nthrds) + { + hblk = HASH_LBA(HASH_GET(_arkp->ht, i)); + + // Found an entry with blocks + if (hblk>0) + { + // Remember the hash table entry. Will start from + // this point. 
+ _arip->hpos = i; + + // Allocate a new bucket + blen = bl_len(_arkp->bl, hblk); + _arip->bt = bt_new(divceil(blen * _arkp->bsize, + _arkp->bsize) * _arkp->bsize, + _arkp->vlimit, 8, &(_arip->btsize), + &(_arip->bt_orig)); + if (_arip->bt == NULL) + { + rcbp->rc = ENOMEM; + rcbp->res = -1; + goto ark_first_pool_err; + } + + buf = (uint8_t *)_arip->bt; + + bl_array = bl_chain(_arkp->bl, hblk, blen); + if (bl_array == NULL) + { + rcbp->rc = ENOMEM; + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + rcbp->res = -1; + goto ark_first_pool_err; + } + + // Iterate over all the blocks and read them from + // the storage into the bucket. + rc = ea_async_io(_arkp->ea, ARK_EA_READ, (void *)buf, + bl_array, blen, _arkp->nthrds); + if (rc != 0) + { + KV_TRC_FFDC(pAT, "FFDC, errno = %d", errno); + free(bl_array); + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + rcbp->rc = rc; + rcbp->res = -1; + goto ark_first_pool_err; + } + + free(bl_array); + + found = 1; + } + } + + // If bt == NULL, that either means we didn't find a + // hash bucket with a key or we ran into an error. + if (_arip->bt == NULL) { + if (rc == 0) + { + // If rc == 0, that means we didn't find a hash + // table. Set the error EAGAIN so the caller + // can retry on a different thread. + rcbp->rc = EAGAIN; + } + rcbp->res = -1; + goto ark_first_pool_err; + } + + // Look for the first key in the bucket + pos = _arip->bt->data; + pos += vi_dec64(pos, &pkl); + pos += vi_dec64(pos, &pvl); + + // Record the true length of the key + rcbp->res = pkl; + + // Copy the key into the buffer for the passed in length + memcpy(kbuf, pos, (pkl > rcbp->klen) ? rcbp->klen : pkl); + + // Are we done with this bucket or do we remember which key we + // left off on for next call. + if (_arip->bt->cnt > 1) + { + _arip->key++; + } + else + { + // If there are no more keys in this hash bucket + // then jump to the next monitored hash bucket for this thread. 
+ _arip->hpos += _arkp->nthrds; + _arip->key = 0; + } + +ark_first_pool_err: + + if (_arip->bt != NULL) + { + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + } + + return state; +} + +// arp->res returns the length of the key +// arp->val is for the ARI structure...both for +// passing in and returning. +int ark_next_pool(_ARK *_arkp, int id, tcb_t *tcbp) { + rcb_t *rcbp = &(_arkp->rcbs[tcbp->rtag]); + _ARI *_arip = (_ARI *)rcbp->val; + uint64_t i; + int32_t rc = 0; + int32_t state = ARK_CMD_DONE; + int32_t found = 0; + int32_t kcnt = 0; + uint8_t *buf = NULL; + uint8_t *pos = NULL; + uint64_t hblk; + uint64_t pkl; + uint64_t pvl; + int64_t blen = 0; + void *kbuf = rcbp->key; + ark_io_list_t *bl_array = NULL; + + rcbp->res = -1; + + // We can be in a few different scenarios when we enter + // here. + // + // 1. We are starting out with a new thread, so we + // start from the beginning of it's monitored + // hash buckets. + // _arip->key == 0; + // 2. We are back with the same thread as before, but + // we are starting a new hash bucket. + // _arip->key == 0; + // 3. We are back with the same thread as before, but + // we are in the middle of a hash bucket. + // _arip->key > 0; + // + // Previously, we would cache the hash bucket in the ARI + // structure. But, because the ark library now supports + // multi-threading, the hash bucket can change inbetween + // calls to ark_first and ark_next and ark_next. So + // now we must fetch the hash bucket on each invocation. + // + + // Look for the first hash table entry that has blocks + // in it. If hpos is zero, we are just starting out + // with a new pool thread, so start with that pool + // thread's first hash bucket. + i = _arip->hpos; + if (_arip->hpos == 0) + { + i = id; + } + + for (; (i < _arkp->hcount) && (!found); i += _arkp->nthrds) { + hblk = HASH_LBA(HASH_GET(_arkp->ht, i)); + + // Found an entry with blocks + if (hblk>0) + { + // Remember the hash table entry. 
Will start from + // this point next time. + _arip->hpos = i; + + // Allocate a new bucket + blen = bl_len(_arkp->bl, hblk); + _arip->bt = bt_new(divceil(blen * _arkp->bsize, + _arkp->bsize) * _arkp->bsize, + _arkp->vlimit, 8, &(_arip->btsize), + &(_arip->bt_orig)); + if (_arip->bt == NULL) + { + rcbp->rc = ENOMEM; + goto ark_next_pool_err; + } + + buf = (uint8_t *)_arip->bt; + bl_array = bl_chain(_arkp->bl, hblk, blen); + if (bl_array == NULL) + { + rcbp->rc = ENOMEM; + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + goto ark_next_pool_err; + } + + rc = ea_async_io(_arkp->ea, ARK_EA_READ, (void *)buf, + bl_array, blen, _arkp->nthrds); + if (rc != 0) + { + free(bl_array); + KV_TRC_FFDC(pAT, "FFDC, ENOENT errno = %d", errno); + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + rcbp->rc = rc; + goto ark_next_pool_err; + } + + free(bl_array); + + // The hash bucket we just read in, does it have + // enough keys that we can get the "_arip->key" + // entry? If not, then release this hash bucket + // and look to the next one + if (_arip->key < _arip->bt->cnt) + { + found = 1; + } + else + { + // This hash bucket has changed and no longer + // contains the same amount of keys. Move + // to the next bucket and start with the first (0) + // key. + bt_delete(_arip->bt_orig); + _arip->bt_orig = NULL; + _arip->bt = NULL; + _arip->key = 0; + } + } + else + { + // This could have been the bucket we left off on + // last time...if so, then we need to reset + // _arip->key to start at the beginning of + // the next valid hash bucket. + _arip->key = 0; + } + } + + // If bt is NULL, then we either hit an error above, or + // we didn't find a key. + if (_arip->bt == NULL) { + + // If rc == 0, that means we didn't find a key. Set + // rc to EAGAIN so that the caller can try with the next + // thread. 
+ if (rc == 0) + { + _arip->key = 0; + _arip->hpos = 0; + rcbp->rc = EAGAIN; + } + rcbp->res = -1; + goto ark_next_pool_err; + } + + // We have our bucket and now we need to find the next key. + pos = _arip->bt->data; + + kcnt = 0; + do + { + pos += vi_dec64(pos, &pkl); + pos += vi_dec64(pos, &pvl); + + // Have you found our key place in the bucket? + if (kcnt == _arip->key) + { + break; + } + else + { + // Move to the next key/value pair in the bucket. + // By here, we have made sure that we have enough keys + // to find the _arip->key entry because of the check above. + pos += (pkl + (pvl > _arip->bt->max ? _arip->bt->def : pvl)); + kcnt++; + } + } while (1); + + // Record the true length of the key + rcbp->res = pkl; + + // Copy the key into the buffer for the passed in length + memcpy(kbuf, pos, (pkl > rcbp->klen) ? rcbp->klen : pkl); + + // Are we done with this bucket or do we remember which key we + // left off on for next call. + _arip->key++; + if (_arip->key == _arip->bt->cnt) + { + // If there are no more keys in this hash bucket + // then jump to the next monitored hash bucket for this thread. 
/*
 * cleanup_task_memory - shrink a task control block's scratch buffers
 * back toward their baseline sizes once a command completes.
 *
 * Large SET/GET operations can grow the in-bucket (inb), out-bucket
 * (oub) and variable-value (vb) buffers well beyond one block; this
 * returns that memory so idle tasks do not pin a large footprint.
 *
 * NOTE(review): this calls bt_delete(tcbp->inb)/bt_delete(tcbp->oub),
 * while sibling code (ark_next_pool) deletes the *_orig pointer
 * returned by bt_new.  Confirm bt_delete accepts the aligned pointer,
 * otherwise the unaligned allocation may leak here.
 *
 * NOTE(review): if bt_new fails, inb/oub are left NULL -- presumably a
 * later bt_growif re-allocates them; verify that path tolerates NULL.
 */
void
cleanup_task_memory(_ARK *_arkp, tcb_t *tcbp)
{

  // Shrink the in-bucket buffer if it grew past one block.
  if ((tcbp->inb != NULL) && (tcbp->inblen > _arkp->bsize))
  {
    //printf("Reducing in bucket\n");
    bt_delete(tcbp->inb);
    tcbp->inb = bt_new(0, _arkp->vlimit, sizeof(uint64_t),
                       &(tcbp->inblen),
                       &(tcbp->inb_orig));
  }

  // Shrink the out-bucket buffer if it grew past one block.
  if ((tcbp->oub != NULL) && (tcbp->oublen > _arkp->bsize))
  {
    //printf("Reducing out bucket\n");
    bt_delete(tcbp->oub);
    tcbp->oub = bt_new(0, _arkp->vlimit, sizeof(uint64_t),
                       &(tcbp->oublen),
                       &(tcbp->oub_orig));
  }

  // Shrink the variable-value buffer back to the 256-block baseline.
  if ((tcbp->vb != NULL) && (tcbp->vbsize > (_arkp->bsize * 256)))
  {
    //printf("Reducing variable buffer\n");
    am_free(tcbp->vb_orig);
    tcbp->vbsize = _arkp->bsize * 256;
    tcbp->vb_orig = am_malloc(tcbp->vbsize);
    if (tcbp->vb_orig != NULL)
    {
      tcbp->vb = ptr_align(tcbp->vb_orig);
    }
    else
    {
      // Clear out the vbsize. When this buffer is used
      // for a command, the command will fail gracefully
      tcbp->vbsize = 0;
      tcbp->vb = NULL;
    }
  }
}
When this buffer is used + // for a command, the command will fail gracefully + tcbp->vbsize = 0; + tcbp->vb = NULL; + } + } +} + +int init_task_state(_ARK *_arkp, tcb_t *tcbp) +{ + rcb_t *rcbp = &(_arkp->rcbs[tcbp->rtag]); + int init_state = 0; + + switch (rcbp->cmd) + { + case K_GET : + { + init_state = ARK_GET_START; + break; + } + case K_SET : + { + init_state = ARK_SET_START; + break; + } + case K_DEL : + { + init_state = ARK_DEL_START; + break; + } + case K_EXI : + { + init_state = ARK_EXIST_START; + break; + } + case K_RAND : + { + init_state = ARK_RAND_START; + break; + } + case K_FIRST : + { + init_state = ARK_FIRST_START; + break; + } + case K_NEXT : + { + init_state = ARK_NEXT_START; + break; + } + default : + // we should never get here + break; + } + + return init_state; +} + +void *pool_function(void *arg) { + PT *pt = (PT*)arg; + int id = pt->id; + _ARK *_arkp = pt->ark; + scb_t *scbp = &(_arkp->poolthreads[id]); + rcb_t *iorcbp = NULL; + rcb_t *rcbp = NULL; + rcb_t *rcbtmp = NULL; + rcb_t *ractp = NULL; + tcb_t *iotcbp = NULL; + tcb_t *tcbp = NULL; + tcb_t *tactp = NULL; + iocb_t *iocbp = NULL; + queue_t *rq = scbp->rqueue; + queue_t *tq = scbp->tqueue; + queue_t *ioq = scbp->ioqueue; + int32_t io_rc = 0; + int32_t iocount = 0; + int32_t i = 0; + int32_t reqrc = EAGAIN; + int32_t tskrc = EAGAIN; + int32_t io_status = 0; + int32_t tskact = 0; + int32_t reqact = 0; + int32_t iotask = 0; + int32_t reqtag = -1; + int32_t tsktag = 0; + uint64_t hval = 0; + uint64_t hlba = 0; + +#if 0 + struct timespec timewait; + struct timeval now; + +#define ARK_KV_GC_TIME_SEC 120 + gettimeofday(&now, NULL); + timewait.tv_sec = now.tv_sec + ARK_KV_GC_TIME_SEC; + timewait.tv_nsec = now.tv_usec * 1000UL; + _arkp->poolthreads[id].dogc = 1; +#endif + + KV_TRC(pAT, "id %d started, ark->nactive %d", id, _arkp->nactive); + + // Run until the thread state is EXIT or the global + // state, ark_exit, is not set showing we are shutting + // down the ark db. 
/*
 * pool_function - main loop of one worker ("pool") thread.
 *
 * arg is a PT* carrying the thread's id and the ark instance.  Each
 * iteration performs three phases, in order:
 *
 *   1. Harvest completed asynchronous IOs from this thread's private
 *      IO queue (ioq) and advance their tasks to the next state.
 *   2. Drain the request queue (rq): bind each new request to a free
 *      task control block, or chain it onto the "hold" list of the
 *      task that currently locks the same hash position.
 *   3. Run every runnable task on the task queue (tq) one state-machine
 *      step; on ARK_CMD_DONE, complete the request (callback or
 *      cond-broadcast) and either recycle the task for a held request
 *      or bury its tag.
 *
 * Runs until scbp->poolstate is set to PT_EXIT.  Always returns NULL.
 *
 * NOTE(review): ioq and tq are only touched by this owning thread, so
 * the *_unsafe queue operations are used without locking; only rq is
 * locked.  Confirm no other thread enqueues to ioq/tq.
 */
void *pool_function(void *arg) {
  PT      *pt        = (PT*)arg;
  int      id        = pt->id;
  _ARK    *_arkp     = pt->ark;
  scb_t   *scbp      = &(_arkp->poolthreads[id]);
  rcb_t   *iorcbp    = NULL;
  rcb_t   *rcbp      = NULL;
  rcb_t   *rcbtmp    = NULL;
  rcb_t   *ractp     = NULL;
  tcb_t   *iotcbp    = NULL;
  tcb_t   *tcbp      = NULL;
  tcb_t   *tactp     = NULL;
  iocb_t  *iocbp     = NULL;
  queue_t *rq        = scbp->rqueue;
  queue_t *tq        = scbp->tqueue;
  queue_t *ioq       = scbp->ioqueue;
  int32_t  io_rc     = 0;
  int32_t  iocount   = 0;
  int32_t  i         = 0;
  int32_t  reqrc     = EAGAIN;
  int32_t  tskrc     = EAGAIN;
  int32_t  io_status = 0;
  int32_t  tskact    = 0;
  int32_t  reqact    = 0;
  int32_t  iotask    = 0;
  int32_t  reqtag    = -1;
  int32_t  tsktag    = 0;
  uint64_t hval      = 0;
  uint64_t hlba      = 0;

#if 0
  struct timespec timewait;
  struct timeval now;

#define ARK_KV_GC_TIME_SEC 120
  gettimeofday(&now, NULL);
  timewait.tv_sec = now.tv_sec + ARK_KV_GC_TIME_SEC;
  timewait.tv_nsec = now.tv_usec * 1000UL;
  _arkp->poolthreads[id].dogc = 1;
#endif

  KV_TRC(pAT, "id %d started, ark->nactive %d", id, _arkp->nactive);

  // Run until the thread state is EXIT or the global
  // state, ark_exit, is not set showing we are shutting
  // down the ark db.
  while ((scbp->poolstate != PT_EXIT))
  {

    // First we check to see if there are any outstanding
    // I/O's that need to be harvested.  Because the IO
    // queue is only manipulated by the owning thread, we
    // don't need to worry about any serialization issues here
    iocount = queue_count(ioq);
    for (i = 0; i < iocount; i++)
    {
      queue_deq_unsafe(ioq, &iotask);
      iocbp  = &(_arkp->iocbs[iotask]);
      iotcbp = &(_arkp->tcbs[iotask]);
      iorcbp = &(_arkp->rcbs[iotcbp->rtag]);

      // Call out to IO completion routine
      io_status = ea_async_io_harvest(_arkp, id, iotcbp, iocbp);
      if (io_status > 0 )
      {
        // IO fully complete: advance to the state recorded in io_done.
        iotcbp->state = iocbp->io_done;
        (void)queue_enq_unsafe(tq, iotask);
      }
      else if (io_status == 0)
      {
        // Partial completion: re-issue the remainder of the IO.
        io_rc = ea_async_io_mod(_arkp, iocbp->op,
                    (void *)iocbp->addr, iocbp->blist, iocbp->nblks,
                    iocbp->start, iotask, iocbp->io_done);
        if ( io_rc < 0 )
        {
          // Re-issue failed: fail the request with -io_rc as errno.
          iorcbp->res = -1;
          iorcbp->rc = -io_rc;
          iotcbp->state = ARK_CMD_DONE;
          (void)queue_enq_unsafe(tq, iotask);
        }
        else if (io_rc == 0)
        {
          // Re-issued asynchronously: harvest again next iteration.
          iotcbp->state = ARK_IO_HARVEST;
          (void)queue_enq_unsafe(ioq, iotask);
        }
        else
        {
          // Completed synchronously on re-issue.
          iotcbp->state = iocbp->io_done;
          (void)queue_enq_unsafe(tq, iotask);
        }
      }
      else
      {
        // Harvest reported a hard error.
        iotcbp->state = ARK_CMD_DONE;
        iorcbp->rc = -(io_status);
        (void)queue_enq_unsafe(tq, iotask);
      }
    }

    // Now we check the request queue and try to pull off
    // as many requests as possible and queue them up
    // in the task queue
    queue_lock(rq);

    if ( (queue_empty(rq)) && (reqtag == -1) )
    {
      if ( queue_empty(ioq) && queue_empty(tq) )
      {
        // We have reached a point where there is absolutely
        // no work for this worker thread to do.  So we
        // go to sleep waiting for new requests to come in
        queue_wait(rq);

#if 0
        // If we've come out of here, it means work has been
        // placed on the request queue.
        p_rc = pthread_cond_timedwait(&(_arkp->poolthreads[id].poolcond),
                                      &(_arkp->poolthreads[id].poolmutex),
                                      &timewait);
        // Every ARK_KV_GC_TIME_SEC, check to see if we need
        // to reduce the size of the vb buffer back to its original
        // size.  This is to help keep memory footprint down
        if ( (p_rc == ETIMEDOUT) && (_arkp->poolthreads[id].dogc) )
        {
          gettimeofday(&now, NULL);
          timewait.tv_sec = now.tv_sec + ARK_KV_GC_TIME_SEC;
          timewait.tv_nsec = now.tv_usec * 1000UL;

          if (_arkp->poolthreads[id].vbsize > (_arkp->bsize * 1024))
          {
            am_free(_arkp->poolthreads[id].vb_orig);

            _arkp->poolthreads[id].vbsize = _arkp->bsize * 1024;
            _arkp->poolthreads[id].vb_orig =
                      am_malloc(_arkp->poolthreads[id].vbsize);
            if (_arkp->poolthreads[id].vb_orig != NULL)
            {
              _arkp->poolthreads[id].vb =
                      ptr_align(_arkp->poolthreads[id].vb_orig);
            }
            else
            {
              // Clear out the vbsize.  When this buffer is used
              // for a command, the command will fail gracefully
              _arkp->poolthreads[id].vbsize = 0;
              _arkp->poolthreads[id].vb = NULL;
            }
          }
        }
#endif
      }
    }

    // Pull requests while there are requests waiting (reqrc == EAGAIN)
    // or the last dequeued request still needs a task (reqrc == 0).
    while (((reqrc == EAGAIN) && !(queue_empty(rq))) ||
           ((reqrc == 0) && (!tag_empty(_arkp->ttags))))
    {
      if (reqrc == EAGAIN)
      {
        reqrc = queue_deq_unsafe(rq, &reqtag);
        if ( reqrc == 0 )
        {
          rcbp = &(_arkp->rcbs[reqtag]);
          rcbp->rtag = reqtag;
          rcbp->ttag = -1;
          rcbp->hold = -1;
        }
      }

      if (reqrc == 0)
      {
        hval = HASH_GET(_arkp->ht, rcbp->pos);
        if (HASH_LCK(hval))
        {
          // Hash position is locked by a running task: append this
          // request to the end of that task's hold chain so it runs
          // after the current owner completes.
          tsktag = HASH_TAG(hval);
          tcbp   = &(_arkp->tcbs[tsktag]);
          rcbtmp = &(_arkp->rcbs[tcbp->rtag]);
          while (rcbtmp->hold != -1)
          {
            rcbtmp = &(_arkp->rcbs[rcbtmp->hold]);
          }
          scbp->holds++;
          rcbtmp->hold = reqtag;
          reqrc  = EAGAIN;
          reqtag = -1;
          rcbp   = NULL;
        }
        else
        {
          // Hash position is free: grab a task tag if one is available.
          tskrc = tag_unbury(_arkp->ttags, &tsktag);
          if (tskrc == 0)
          {
            tcbp = &(_arkp->tcbs[tsktag]);
          }
          else
          {
            tcbp = NULL;
          }

          if (tcbp)
          {
            // Bind request and task together, mark the hash position
            // locked with this task's tag, and queue the task to run.
            tcbp->rtag    = reqtag;
            rcbp->ttag    = tsktag;
            rcbp->hold    = -1;
            tcbp->state   = init_task_state(_arkp, tcbp);
            tcbp->sthrd   = rcbp->sthrd;
            tcbp->ttag    = tsktag;
            tcbp->new_key = 0;
            hlba = HASH_LBA(hval);
            HASH_SET(_arkp->ht, rcbp->pos, HASH_MAKE(1, tsktag, hlba));
            (void)queue_enq_unsafe(tq, tsktag);
            reqtag = -1;
            reqrc  = EAGAIN;
            tskrc  = EAGAIN;
            rcbp   = NULL;
            tcbp   = NULL;
          }
        }
      }
    }

    queue_unlock(rq);

    // Run each queued task one step of its state machine.
    while(!queue_empty(tq))
    {
      (void)queue_deq_unsafe(tq, &tskact);
      tactp  = &(_arkp->tcbs[tskact]);
      reqact = tactp->rtag;
      ractp  = &(_arkp->rcbs[reqact]);

      switch (tactp->state)
      {
        case ARK_SET_START :
        {
          scbp->poolstats.ops_cnt++;
          tactp->state = ark_set_start(_arkp, id, tactp);
          break;
        }
        case ARK_SET_PROCESS_INB :
        {
          tactp->state = ark_set_process_inb(_arkp, id, tactp);
          break;
        }
        case ARK_SET_WRITE :
        {
          tactp->state = ark_set_write(_arkp, id, tactp);
          break;
        }
        case ARK_SET_FINISH :
        {
          tactp->state = ark_set_finish(_arkp, id, tactp);
          break;
        }
        case ARK_GET_START :
        {
          scbp->poolstats.ops_cnt++;
          tactp->state = ark_get_start(_arkp, id, tactp);
          break;
        }
        case ARK_GET_PROCESS :
        {
          tactp->state = ark_get_process(_arkp, id, tactp);
          break;
        }
        case ARK_GET_FINISH :
        {
          tactp->state = ark_get_finish(_arkp, id, tactp);
          break;
        }
        case ARK_DEL_START :
        {
          scbp->poolstats.ops_cnt++;
          tactp->state = ark_del_start(_arkp, id, tactp);
          break;
        }
        case ARK_DEL_PROCESS :
        {
          tactp->state = ark_del_process(_arkp, id, tactp);
          break;
        }
        case ARK_DEL_FINISH :
        {
          tactp->state = ark_del_finish(_arkp, id, tactp);
          break;
        }
        case ARK_EXIST_START :
        {
          scbp->poolstats.ops_cnt++;
          tactp->state = ark_exist_start(_arkp, id, tactp);
          break;
        }
        case ARK_EXIST_FINISH :
        {
          tactp->state = ark_exist_finish(_arkp, id, tactp);
          break;
        }
        case ARK_RAND_START :
        {
          // NOTE(review): unlike the other *_START cases this does not
          // bump ops_cnt -- confirm whether that is intentional.
          tactp->state = ark_rand_pool(_arkp, id, tactp);
          break;
        }
        case ARK_FIRST_START :
        {
          tactp->state = ark_first_pool(_arkp, id, tactp);
          break;
        }
        case ARK_NEXT_START :
        {
          tactp->state = ark_next_pool(_arkp, id, tactp);
          break;
        }
        default :
        {
          // The only state left is ARK_CMD_DONE, so we
          // just break out and end the task
          break;
        }
      }

      if (tactp->state == ARK_CMD_DONE)
      {
        if (ractp->hold == -1)
        {
          // No request waiting on this hash position: unlock it and
          // complete the request (async callback or synchronous wake).
          hlba = HASH_LBA(HASH_GET(_arkp->ht, ractp->pos));
          HASH_SET(_arkp->ht, ractp->pos, HASH_MAKE(0, 0, hlba));
          if ( ractp->cb != NULL )
          {
            (ractp->cb)(ractp->rc, ractp->dt, ractp->res);
            ractp->stat = A_NULL;
            (void)tag_bury(_arkp->rtags, reqact);
          }
          else
          {
            pthread_mutex_lock(&(ractp->alock));
            ractp->stat = A_COMPLETE;
            pthread_cond_broadcast(&(ractp->acond));
            pthread_mutex_unlock(&(ractp->alock));
          }
          cleanup_task_memory(_arkp, tactp);
          (void)tag_bury(_arkp->ttags, tskact);
        }
        else
        {
          // A held request is waiting on the same hash position:
          // complete the current request, then recycle this task
          // control block to run the held request immediately.
          tactp->rtag = ractp->hold;
          ractp->hold = -1;
          if ( ractp->cb != NULL)
          {
            (ractp->cb)(ractp->rc, ractp->dt, ractp->res);
            ractp->stat = A_NULL;
            (void)tag_bury(_arkp->rtags, reqact);
          }
          else
          {
            pthread_mutex_lock(&(ractp->alock));
            ractp->stat = A_COMPLETE;
            pthread_cond_broadcast(&(ractp->acond));
            pthread_mutex_unlock(&(ractp->alock));
          }

          scbp->holds--;
          ractp = &(_arkp->rcbs[tactp->rtag]);
          ractp->ttag = tactp->ttag;
          tactp->rtag = ractp->rtag;
          tactp->state = init_task_state(_arkp, tactp);
          (void)queue_enq_unsafe(tq, tskact);
        }
      }
      else
      {
        // Task issued an IO; park it until the IO is harvested.
        (void)queue_enq_unsafe(ioq, tskact);
      }
    }
  }
  KV_TRC(pAT, "pool thread %d exiting, nactive %d", id, _arkp->nactive);
  return NULL;
}
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef __ARP_H__ +#define __ARP_H__ + + +void *pool_function(void *arg); + +int ark_enq_cmd(int cmd, _ARK *_arkp,uint64_t klen,void *key,uint64_t vbuflen,void *vbuf,uint64_t voff, + void (*cb)(int errcode, uint64_t dt,int64_t res), uint64_t dt, int32_t pthr, int *ptag); + +int ark_wait_tag(_ARK *_arkp, int tag, int *errcode, int64_t *res); + +int ark_set_finish(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_set_write(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_set_process(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_set_process_inb(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_set_start(_ARK *_arkp, int tid, tcb_t *tcbp); + +int ark_get_start(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_get_finish(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_get_process(_ARK *_arkp, int tid, tcb_t *tcbp); + +int ark_del_start(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_del_process(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_del_finish(_ARK *_arkp, int32_t tid, tcb_t *tcbp); + +int ark_exist_start(_ARK *_arkp, int tid, tcb_t *tcbp); +int ark_exist_finish(_ARK *_arkp, int tid, tcb_t *tcbp); + +extern int ea_async_io_schedule(_ARK *_arkp, int32_t tid, tcb_t *tcbp, iocb_t *iocbp); +extern int ea_async_io_harvest(_ARK *_arkp, int32_t tid, tcb_t *tcbp, iocb_t *iocbp); + + +#endif diff --git a/src/kv/arp_del.c b/src/kv/arp_del.c new file mode 100644 
// if success returns size of value deleted
/*
 * ark_del_start - first state of the DEL state machine: locate the hash
 * bucket for the key and schedule a READ of it into the task's
 * in-buffer.  Returns the next task state (ARK_IO_HARVEST if the read
 * is in flight, ARK_CMD_DONE on error, or chains straight into
 * ark_del_process if the IO completed synchronously).
 */
int ark_del_start(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array = NULL;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;

  // Acquire the block that contains the hash entry
  // control information.
  tcbp->hblk = HASH_LBA(HASH_GET(_arkp->ht, rcbp->pos));

  // If there is no block, that means there are no
  // entries in the hash entry, which means the
  // key in question is not in the store.
  if ( tcbp->hblk == 0 )
  {
    KV_TRC_FFDC(pAT, "rc = ENOENT: key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->res = -1;
    rcbp->rc = ENOENT;
    state = ARK_CMD_DONE;
    goto ark_del_start_err;
  }

  // Check to see if we need to grow the in buffer
  // to hold the hash entry
  tcbp->blen = bl_len(_arkp->bl, tcbp->hblk);
  rc = bt_growif(&(tcbp->inb), &(tcbp->inb_orig), &(tcbp->inblen),
                 (tcbp->blen * _arkp->bsize));
  if (rc != 0)
  {
    rcbp->res = -1;
    rcbp->rc = rc;
    state = ARK_CMD_DONE;
    goto ark_del_start_err;
  }

  // Create a list of blocks that will be read in
  bl_array = bl_chain(_arkp->bl, tcbp->hblk, tcbp->blen);
  if (bl_array == NULL)
  {
    rcbp->rc = ENOMEM;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_del_start_err;
  }

  tcbp->old_btsize = tcbp->inb->len;
  scbp->poolstats.io_cnt += tcbp->blen;

  // Schedule the IO to read the hash entry from storage.
  // NOTE(review): bl_array is not freed here; presumably
  // ea_async_io_mod takes ownership of it -- confirm.
  rc = ea_async_io_mod(_arkp, ARK_EA_READ, (void *)tcbp->inb, bl_array,
                       tcbp->blen, 0, tcbp->ttag, ARK_DEL_PROCESS);
  if (rc < 0)
  {
    rcbp->rc = -rc;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_del_start_err;
  }
  else if (rc == 0)
  {
    state = ARK_IO_HARVEST;
  }
  else
  {
    state = ark_del_process(_arkp, tid, tcbp);
  }

ark_del_start_err:

  return state;
}

/*
 * ark_del_process - second state of the DEL state machine: the hash
 * bucket is now in the in-buffer.  Remove the key/value, free its
 * blocks, and either rewrite the shrunken bucket (-> ARK_DEL_FINISH)
 * or, if the bucket is now empty, clear the hash entry and finish.
 */
int ark_del_process(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array = NULL;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;
  uint64_t       blkcnt   = 0;
  uint64_t       oldvlen  = 0;
  int64_t        dblk     = 0;

  rc = bt_growif(&(tcbp->oub), &(tcbp->oub_orig), &(tcbp->oublen),
                 (tcbp->blen * _arkp->bsize));
  if (rc != 0)
  {
    rcbp->res = -1;
    rcbp->rc = rc;
    state = ARK_CMD_DONE;
    goto ark_del_process_err;
  }

  // Delete the key and value from the hash entry
  // and save off the length of the value
  rcbp->res = bt_del_def(tcbp->oub, tcbp->inb, rcbp->klen,
                         rcbp->key, (uint8_t*)&dblk, &oldvlen);

  if (rcbp->res >= 0)
  {
    // Return the blocks of the hash entry back to
    // the free list
    ark_drop_pool(_arkp, &(scbp->poolstats), tcbp->hblk);
    if (dblk > 0)
    {
      // Return the blocks used to store the value if it
      // wasn't stored in the hash entry
      ark_drop_pool(_arkp, &(scbp->poolstats), dblk);
    }

    // Are there entries left in the hash bucket?
    if (tcbp->oub->cnt > 0)
    {

      // Determine how many blocks will be needed for the
      // out buffer and then get them from the free
      // block list
      blkcnt = divceil(tcbp->oub->len, _arkp->bsize);
      tcbp->nblk = ark_take_pool(_arkp, &(scbp->poolstats), blkcnt);
      if (tcbp->nblk == -1)
      {
        rcbp->rc = ENOSPC;
        rcbp->res = -1;
        state = ARK_CMD_DONE;
        goto ark_del_process_err;
      }

      // Create a list of the blocks to write.
      bl_array = bl_chain(_arkp->bl, tcbp->nblk, blkcnt);
      if (bl_array == NULL)
      {
        rcbp->rc = ENOMEM;
        rcbp->res = -1;
        state = ARK_CMD_DONE;
        goto ark_del_process_err;
      }

      scbp->poolstats.io_cnt += blkcnt;
      scbp->poolstats.byte_cnt -= (tcbp->old_btsize + oldvlen);
      scbp->poolstats.byte_cnt += tcbp->oub->len;

      // Schedule the WRITE IO of the updated hash entry.
      rc = ea_async_io_mod(_arkp, ARK_EA_WRITE, (void *)tcbp->oub,
                  bl_array, blkcnt, 0, tcbp->ttag, ARK_DEL_FINISH);
      if (rc < 0)
      {
        rcbp->rc = -rc;
        rcbp->res = -1;
        state = ARK_CMD_DONE;
        goto ark_del_process_err;
      }
      else if (rc == 0)
      {
        state = ARK_IO_HARVEST;
      }
      else
      {
        state = ark_del_finish(_arkp, tid, tcbp);
      }
    }
    else
    {
      scbp->poolstats.byte_cnt -= (tcbp->old_btsize + oldvlen);
      scbp->poolstats.byte_cnt += tcbp->oub->len;
      scbp->poolstats.kv_cnt--;

      // Nothing left in this hash entry, so let's clear out
      // the hash entry control block to show there is no
      // data in the store for this hash entry
      HASH_SET(_arkp->ht, rcbp->pos, HASH_MAKE(1, tcbp->ttag, 0));

      rcbp->rc = 0;
      state = ARK_CMD_DONE;
    }
  }
  else
  {
    // Key not present in the bucket.
    KV_TRC_FFDC(pAT, "rc = ENOENT: key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->rc = ENOENT;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
  }

ark_del_process_err:

  return state;
}

/*
 * ark_del_finish - final state of the DEL state machine after the
 * rewritten bucket has been written: point the hash entry at the new
 * block chain and account for the removed key/value pair.
 */
int ark_del_finish(_ARK *_arkp, int32_t tid, tcb_t *tcbp)
{
  int32_t state = ARK_CMD_DONE;
  rcb_t  *rcbp  = &(_arkp->rcbs[tcbp->rtag]);
  scb_t  *scbp  = &(_arkp->poolthreads[tid]);

  scbp->poolstats.kv_cnt--;

  // Update the starting hash entry block with the
  // new block info.
  HASH_SET(_arkp->ht, rcbp->pos, HASH_MAKE(1, tcbp->ttag, tcbp->nblk));

  return state;
}
// if successful returns length of value
/*
 * ark_exist_start - first state of the EXISTS state machine: schedule a
 * READ of the key's hash bucket into the in-buffer.  Returns the next
 * task state (ARK_IO_HARVEST while the read is in flight, ARK_CMD_DONE
 * on error, or chains into ark_exist_finish on synchronous completion).
 */
int ark_exist_start(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array = NULL;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;

  // Now that we have the hash entry, get the block
  // that holds the control information for the entry.
  tcbp->hblk = HASH_LBA(HASH_GET(_arkp->ht, rcbp->pos));

  // If there is no control block for this hash
  // entry, then the key is not present in the hash.
  // Set the error
  if ( tcbp->hblk == 0 )
  {
    KV_TRC_FFDC(pAT, "rc = ENOENT key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->res = -1;
    rcbp->rc = ENOENT;
    state = ARK_CMD_DONE;
    goto ark_exist_start_err;
  }

  // Set up the in-buffer to read in the hash bucket
  // that contains the key
  tcbp->blen = bl_len(_arkp->bl, tcbp->hblk);
  rc = bt_growif(&(tcbp->inb), &(tcbp->inb_orig), &(tcbp->inblen),
                 (tcbp->blen * _arkp->bsize));
  if (rc != 0)
  {
    rcbp->res = -1;
    rcbp->rc = rc;
    state = ARK_CMD_DONE;
    goto ark_exist_start_err;
  }

  // Create a chain of blocks to be passed to be read
  bl_array = bl_chain(_arkp->bl, tcbp->hblk, tcbp->blen);
  if (bl_array == NULL)
  {
    rcbp->rc = ENOMEM;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_exist_start_err;
  }

  scbp->poolstats.io_cnt += tcbp->blen;

  rc = ea_async_io_mod(_arkp, ARK_EA_READ, (void *)tcbp->inb, bl_array,
                       tcbp->blen, 0, tcbp->ttag, ARK_EXIST_FINISH);
  if (rc < 0)
  {
    rcbp->rc = -rc;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_exist_start_err;
  }
  else if (rc == 0)
  {
    state = ARK_IO_HARVEST;
  }
  else
  {
    state = ark_exist_finish(_arkp, tid, tcbp);
  }

ark_exist_start_err:

  return state;
}

/*
 * ark_exist_finish - final state of the EXISTS state machine: the hash
 * bucket is in the in-buffer; look the key up.  On success rcbp->res is
 * bt_exists' return (the value length per the ark_exist_start header
 * comment); on BT_FAIL the request fails with ENOENT.
 */
int ark_exist_finish(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  int32_t state = ARK_CMD_DONE;
  rcb_t  *rcbp  = &(_arkp->rcbs[tcbp->rtag]);

  // Find the key position in the read in bucket
  rcbp->res = bt_exists(tcbp->inb, rcbp->klen, rcbp->key);
  if (rcbp->res == BT_FAIL)
  {
    KV_TRC_FFDC(pAT, "rc = ENOENT key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->rc = ENOENT;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
  }

  return state;
}
// if successful returns length of value
/*
 * ark_get_start - first state of the GET state machine: schedule a READ
 * of the key's hash bucket into the in-buffer.  Returns the next task
 * state (ARK_IO_HARVEST while the read is in flight, ARK_CMD_DONE on
 * error, or chains into ark_get_process on synchronous completion).
 */
int ark_get_start(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array = NULL;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;

  // Now that we have the hash entry, get the block
  // that holds the control information for the entry.
  tcbp->hblk = HASH_LBA(HASH_GET(_arkp->ht, rcbp->pos));

  // If there is no control block for this hash
  // entry, then the key is not present in the hash.
  // Set the error
  if ( tcbp->hblk == 0 )
  {
    KV_TRC_FFDC(pAT, "rc = ENOENT key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->res = -1;
    rcbp->rc = ENOENT;
    state = ARK_CMD_DONE;
    goto ark_get_start_err;
  }

  // Set up the in-buffer to read in the hash bucket
  // that contains the key
  tcbp->blen = bl_len(_arkp->bl, tcbp->hblk);
  rc = bt_growif(&(tcbp->inb), &(tcbp->inb_orig), &(tcbp->inblen),
                 (tcbp->blen * _arkp->bsize));
  if (rc != 0)
  {
    rcbp->res = -1;
    rcbp->rc = rc;
    state = ARK_CMD_DONE;
    goto ark_get_start_err;
  }

  // Create a chain of blocks to be passed to be read
  bl_array = bl_chain(_arkp->bl, tcbp->hblk, tcbp->blen);
  if (bl_array == NULL)
  {
    rcbp->rc = ENOMEM;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_get_start_err;
  }

  scbp->poolstats.io_cnt += tcbp->blen;

  rc = ea_async_io_mod(_arkp, ARK_EA_READ, (void *)tcbp->inb, bl_array,
                       tcbp->blen, 0, tcbp->ttag, ARK_GET_PROCESS);
  if (rc < 0)
  {
    rcbp->rc = -rc;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_get_start_err;
  }
  else if (rc == 0)
  {
    state = ARK_IO_HARVEST;
  }
  else
  {
    state = ark_get_process(_arkp, tid, tcbp);
  }

ark_get_start_err:

  return state;
}

/*
 * ark_get_process - second state of the GET state machine: the hash
 * bucket is in the in-buffer.  If the value was stored inline it is
 * already in the variable buffer and is copied straight to the caller;
 * otherwise the bucket held the value's starting block number, so grow
 * the variable buffer if needed and schedule a READ of the value
 * (-> ARK_GET_FINISH).
 */
int ark_get_process(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp       = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp       = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array   = NULL;
  uint8_t       *new_vb     = NULL;
  int32_t        rc         = 0;
  int32_t        state      = ARK_CMD_DONE;
  uint64_t       vblk       = 0;
  uint64_t       new_vbsize = 0;

  // Find the key position in the read in bucket
  tcbp->vvlen = bt_get(tcbp->inb, rcbp->klen, rcbp->key, tcbp->vb);
  if (tcbp->vvlen >= 0)
  {
    // The key was found in the bucket.  Now check to see
    // if the length of the value requires us to read into
    // the variable buffer.
    if (tcbp->vvlen > _arkp->vlimit)
    {
      // Value is out-of-line: vb currently holds its starting block.
      vblk = *((uint64_t *)(tcbp->vb));

      // Determine the number of blocks needed for
      // the value
      tcbp->blen = divceil(tcbp->vvlen, _arkp->bsize);

      // Check to see if the current size of the variable
      // buffer is big enough to hold the value.
      if (tcbp->vvlen > tcbp->vbsize) {

        new_vbsize = (tcbp->blen * _arkp->bsize);
        new_vb = am_realloc(tcbp->vb_orig, new_vbsize);
        if ( new_vb == NULL )
        {
          rcbp->rc = ENOMEM;
          rcbp->res = -1;
          state = ARK_CMD_DONE;
          goto ark_get_process_err;
        }

        // The realloc succeeded.  Set the new size, original
        // variable buffer, and adjusted variable buffer
        tcbp->vbsize = new_vbsize;
        tcbp->vb_orig = new_vb;
        tcbp->vb = ptr_align(tcbp->vb_orig);
      }

      // Create the block chain to be used for the IO
      bl_array = bl_chain(_arkp->bl, vblk, tcbp->blen);
      if (bl_array == NULL)
      {
        rcbp->rc = ENOMEM;
        rcbp->res = -1;
        state = ARK_CMD_DONE;
        goto ark_get_process_err;
      }

      scbp->poolstats.io_cnt += tcbp->blen;

      // Schedule the READ of the key's value into the
      // variable buffer.
      rc = ea_async_io_mod(_arkp, ARK_EA_READ, (void *)tcbp->vb,
                  bl_array, tcbp->blen, 0, tcbp->ttag, ARK_GET_FINISH);
      if (rc < 0)
      {
        rcbp->rc = -rc;
        rcbp->res = -1;
        state = ARK_CMD_DONE;
        goto ark_get_process_err;
      }
      else if (rc == 0)
      {
        state = ARK_IO_HARVEST;
      }
      else
      {
        state = ark_get_finish(_arkp, tid, tcbp);
      }
    }
    else
    {
      // Copy the value into the buffer passed in.
      // NOTE(review): this copies the full vvlen bytes and ignores
      // rcbp->voff/rcbp->vlen, unlike ark_get_finish -- confirm
      // callers guarantee rcbp->val can hold an inline value.
      memcpy(rcbp->val, tcbp->vb, tcbp->vvlen);
      rcbp->res = tcbp->vvlen;
      rcbp->rc = 0;
      state = ARK_CMD_DONE;
    }
  }
  else
  {
    // Key not found.  NOTE(review): trace text says EINVAL but the
    // returned errno is ENOENT -- the trace message looks stale.
    KV_TRC_FFDC(pAT, "rc = EINVAL: key %p, klen %"PRIu64"",
                rcbp->key, rcbp->klen);
    rcbp->rc = ENOENT;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
  }

ark_get_process_err:

  return state;
}
+ memcpy(rcbp->val, tcbp->vb, tcbp->vvlen); + rcbp->res = tcbp->vvlen; + rcbp->rc = 0; + state = ARK_CMD_DONE; + } + } + else + { + KV_TRC_FFDC(pAT, "rc = EINVAL: key %p, klen %"PRIu64"", + rcbp->key, rcbp->klen); + rcbp->rc = ENOENT; + rcbp->res = -1; + state = ARK_CMD_DONE; + } + +ark_get_process_err: + + return state; +} + +int ark_get_finish(_ARK *_arkp, int tid, tcb_t *tcbp) +{ + int32_t state = ARK_CMD_DONE; + rcb_t *rcbp = &(_arkp->rcbs[tcbp->rtag]); + + // We've read in the variable buffer. Now we copy it + // into the passed in buffer. + if ((rcbp->voff + rcbp->vlen) <= tcbp->vvlen) + { + memcpy(rcbp->val, (tcbp->vb + rcbp->voff), rcbp->vlen); + } + else + { + memcpy(rcbp->val, (tcbp->vb + rcbp->voff), (tcbp->vvlen - rcbp->voff)); + } + rcbp->res = tcbp->vvlen; + + return state; +} + diff --git a/src/kv/arp_set.c b/src/kv/arp_set.c new file mode 100644 index 00000000..a91ed09c --- /dev/null +++ b/src/kv/arp_set.c @@ -0,0 +1,342 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/arp_set.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
// This is the entry into ark_set.  The task has
// been pulled off the task queue.
/*
 * ark_set_start - first state of the SET state machine.  Stages the
 * value (inline or in newly reserved blocks via the per-task variable
 * buffer) and, unless this is the first key in its hash bucket,
 * schedules a READ of the existing bucket (-> ARK_SET_PROCESS_INB).
 * Returns the next task state, ARK_CMD_DONE on error.
 */
int ark_set_start(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp       = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp       = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array   = NULL;
  uint64_t       vlen       = rcbp->vlen;
  uint64_t       new_vbsize = 0;
  int32_t        rc         = 0;
  int32_t        state      = ARK_CMD_DONE;
  uint8_t       *new_vb     = NULL;

  // Let's check to see if the value is larger than
  // what can fit inline in the bucket.
  if ( vlen > _arkp->vlimit )
  {
    // Determine the number of blocks needed to hold the value
    tcbp->vblkcnt = divceil(vlen, _arkp->bsize);

    // Ok, the value will not fit inline in the hash bucket.
    // Let's see if the per task value buffer is currently
    // large enough to hold this value.  If not, we will
    // need to grow it.
    if ( vlen > tcbp->vbsize )
    {
      // Calculate the new size of the value buffer.  The
      // size will be based on the block size, rounded up
      // to ensure enough space to hold the value.  We record
      // the size in a new variable and wait to make sure
      // the realloc succeeds before updating vbsize
      new_vbsize = tcbp->vblkcnt * _arkp->bsize;
      new_vb = am_realloc(tcbp->vb_orig, new_vbsize);
      if ( NULL == new_vb )
      {
        rcbp->res = -1;
        rcbp->rc = ENOMEM;
        state = ARK_CMD_DONE;
        goto ark_set_start_err;
      }
      tcbp->vbsize = new_vbsize;
      tcbp->vb_orig = new_vb;
      tcbp->vb = ptr_align(tcbp->vb_orig);
    }

    // Stage the caller's value in the task's variable buffer so the
    // asynchronous write does not depend on the caller's memory.
    memcpy(tcbp->vb, rcbp->val, vlen);

    // Get/Reserve the required number of blocks.  Pass along
    // the per server thread pool stats so that they can
    // be updated
    tcbp->vblk = ark_take_pool(_arkp, &(scbp->poolstats), tcbp->vblkcnt);
    if ( -1 == tcbp->vblk )
    {
      rcbp->res = -1;
      rcbp->rc = ENOSPC;
      state = ARK_CMD_DONE;
      goto ark_set_start_err;
    }

    // Out-of-line value: the bucket entry will store the start block.
    tcbp->vval = (uint8_t *)&(tcbp->vblk);
  }
  else
  {
    // Inline value: the bucket entry stores the value bytes directly.
    tcbp->vval = (uint8_t *)rcbp->val;
  }

  tcbp->hblk = HASH_LBA(HASH_GET(_arkp->ht, rcbp->pos));

  if ( 0 == tcbp->hblk )
  {
    // This is going to be the first key in this bucket.
    // Initialize the bucket data/header
    bt_init(tcbp->inb);

    // Call to the next phase of the SET command since
    // we do not need to read in the hash bucket since
    // this will be the first key in it.
    state = ark_set_process(_arkp, tid, tcbp);
  }
  else
  {
    // Look to see how many blocks are currently in use
    // for this hash bucket.
    tcbp->blen = bl_len(_arkp->bl, tcbp->hblk);

    // Now that we have the number of blocks, check to see
    // if we need to grow the in-buffer.  If so, then
    // grow the in-buffer to the appropriate size.
    rc = bt_growif(&(tcbp->inb), &(tcbp->inb_orig), &(tcbp->inblen),
                   (tcbp->blen * _arkp->bsize));
    if ( 0 != rc )
    {
      rcbp->res = -1;
      rcbp->rc = rc;
      state = ARK_CMD_DONE;
      goto ark_set_start_err;
    }

    // Create a list of the blocks that will be read in.
    // We do this upfront so we only have to access the block
    // list once.
    bl_array = bl_chain(_arkp->bl, tcbp->hblk, tcbp->blen);
    if ( NULL == bl_array )
    {
      rcbp->res = -1;
      rcbp->rc = ENOMEM;
      state = ARK_CMD_DONE;
      goto ark_set_start_err;
    }

    // Here is where we schedule the IO for the hash bucket.
    // This will not wait for the IO to complete, it will
    // instead issue the asynchronous IO and then set up
    // the task and queue it so that the IO completion can be
    // done later.
    rc = ea_async_io_mod(_arkp, ARK_EA_READ,
                         (void *)tcbp->inb, bl_array, tcbp->blen,
                         0, tcbp->ttag, ARK_SET_PROCESS_INB);

    // Upon successful return, the IO has been issued and the TASK
    // block has been set up so that IO completion can be checked
    // later.
    if ( rc < 0 )
    {
      rcbp->res = -1;
      rcbp->rc = -rc;
      state = ARK_CMD_DONE;
      goto ark_set_start_err;
    }
    else if (rc == 0)
    {
      state = ARK_IO_HARVEST;
    }
    else
    {
      state = ark_set_process_inb(_arkp, tid, tcbp);
    }
  }

ark_set_start_err:

  return state;
}

/*
 * ark_set_process_inb - intermediate SET state after the existing hash
 * bucket has been read: return the old bucket's blocks to the free
 * list (the updated bucket is rewritten to fresh blocks later) and
 * continue with ark_set_process.
 */
int ark_set_process_inb(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t  *scbp  = &(_arkp->poolthreads[tid]);
  int32_t state = ARK_CMD_DONE;

  ark_drop_pool(_arkp, &(scbp->poolstats), tcbp->hblk);

  state = ark_set_process(_arkp, tid, tcbp);

  return state;
}

/*
 * ark_set_process - insert/update the key in the bucket (in-buffer ->
 * out-buffer via bt_set) and, for out-of-line values, schedule the
 * WRITE of the staged value blocks (-> ARK_SET_WRITE).  Inline values
 * skip straight to ark_set_write.
 */
int ark_set_process(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  ark_io_list_t *bl_array = NULL;
  uint64_t       oldvlen  = 0;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;

  // Let's see if we need to grow the out buffer.
  // Worst case growth for one insert is key + inline value + headers,
  // rounded up to a whole block.
  tcbp->old_btsize = tcbp->inb->len;
  rc = bt_growif(&(tcbp->oub), &(tcbp->oub_orig), &(tcbp->oublen),
                 divceil((tcbp->blen * _arkp->bsize) +
                         (rcbp->klen + _arkp->vlimit + 16),
                         _arkp->bsize) * _arkp->bsize);
  if (rc != 0)
  {
    rcbp->res = -1;
    rcbp->rc = rc;
    state = ARK_CMD_DONE;
    goto ark_set_process_err;
  }

  // modify bucket; new_key records whether this insert added a key
  // (vs. replacing an existing one) for kv_cnt accounting at finish.
  tcbp->new_key = bt_set(tcbp->oub, tcbp->inb, rcbp->klen, rcbp->key,
                         rcbp->vlen, tcbp->vval, &oldvlen);

  // write the value to the value allocated blocks
  if (rcbp->vlen > _arkp->vlimit) {

    bl_array = bl_chain(_arkp->bl, tcbp->vblk, tcbp->vblkcnt);
    if (bl_array == NULL)
    {
      rcbp->rc = ENOMEM;
      rcbp->res = -1;
      state = ARK_CMD_DONE;
      goto ark_set_process_err;
    }

    scbp->poolstats.byte_cnt -= oldvlen;
    scbp->poolstats.byte_cnt += rcbp->vlen;
    scbp->poolstats.io_cnt += tcbp->vblkcnt;

    rc = ea_async_io_mod(_arkp, ARK_EA_WRITE, (void *)tcbp->vb,
                bl_array, tcbp->vblkcnt, 0, tcbp->ttag, ARK_SET_WRITE);
    if (rc < 0)
    {
      rcbp->rc = -rc;
      rcbp->res = -1;
      state = ARK_CMD_DONE;
      goto ark_set_process_err;
    }
    else if (rc == 0)
    {
      state = ARK_IO_HARVEST;
    }
    else
    {
      state = ark_set_write(_arkp, tid, tcbp);
    }
  }
  else
  {
    state = ark_set_write(_arkp, tid, tcbp);
  }

ark_set_process_err:

  return state;
}

/*
 * ark_set_write - reserve fresh blocks for the updated bucket
 * (out-buffer) and schedule its WRITE (-> ARK_SET_FINISH).
 */
int ark_set_write(_ARK *_arkp, int tid, tcb_t *tcbp)
{
  rcb_t         *rcbp     = &(_arkp->rcbs[tcbp->rtag]);
  scb_t         *scbp     = &(_arkp->poolthreads[tid]);
  ark_io_list_t *bl_array = NULL;
  uint64_t       blkcnt   = 0;
  int32_t        rc       = 0;
  int32_t        state    = ARK_CMD_DONE;

  // write obuf to new blocks
  blkcnt = divceil(tcbp->oub->len, _arkp->bsize);
  tcbp->nblk = ark_take_pool(_arkp, &(scbp->poolstats), blkcnt);
  if (tcbp->nblk == -1)
  {
    rcbp->rc = ENOSPC;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_set_write_err;
  }

  bl_array = bl_chain(_arkp->bl, tcbp->nblk, blkcnt);
  if (bl_array == NULL)
  {
    rcbp->rc = ENOMEM;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_set_write_err;
  }

  scbp->poolstats.byte_cnt -= tcbp->old_btsize;
  scbp->poolstats.byte_cnt += tcbp->oub->len;
  scbp->poolstats.io_cnt += blkcnt;

  rc = ea_async_io_mod(_arkp, ARK_EA_WRITE, (void *)tcbp->oub,
                       bl_array, blkcnt, 0, tcbp->ttag, ARK_SET_FINISH);
  if (rc < 0)
  {
    rcbp->rc = -rc;
    rcbp->res = -1;
    state = ARK_CMD_DONE;
    goto ark_set_write_err;
  }
  else if (rc == 0)
  {
    state = ARK_IO_HARVEST;
  }
  else
  {
    state = ark_set_finish(_arkp, tid, tcbp);
  }

ark_set_write_err:

  return state;
}
+ + return state; +} diff --git a/src/kv/bl.c b/src/kv/bl.c new file mode 100644 index 00000000..0a3c676e --- /dev/null +++ b/src/kv/bl.c @@ -0,0 +1,355 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include "am.h" +#include "ea.h" +#include "bl.h" +#include + +// create a chain 0..n-1 +BL *bl_new(int64_t n, int w) { + + BL *bl = am_malloc(sizeof(BL)); + if (bl == NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "n %ld w %d, rc = %d", n, w, errno); + goto exception; + } + + IV *iv = bl->list = iv_new(n,w); + if (NULL == bl->list) + { + KV_TRC_FFDC(pAT, "n %ld w %d, iv_new() failed", n, w); + am_free(bl); + bl = NULL; + goto exception; + } + + uint64_t i; + iv_set(iv,0,0); + for (i=1; in = n; + bl->count = n-1; + bl->head = 1; + bl->hold = -1; + bl->w = w; + pthread_rwlock_init(&(bl->iv_rwlock), NULL); + + KV_TRC(pAT, "bl %p iv %p n %ld w %d", bl, iv, n, w); + +exception: + return bl; +} + +void bl_adjust(BL *bl, uint64_t blks) +{ +#if 0 + uint64_t i; + for (i = 1; i <= blks; i++) + { + iv_set(bl->list, i, 0); + } +#endif + bl->head = blks + 1; + bl->count -= blks; +} + +void bl_delete(BL *bl) { + KV_TRC(pAT, "bl %p", bl); + iv_delete(bl->list); + pthread_rwlock_destroy(&(bl->iv_rwlock)); + am_free(bl); +} + +BL *bl_resize(BL *bl, int64_t n, int w) { + int64_t i; + if (w!=bl->w) { + KV_TRC_FFDC(pAT, "Sizes do not match bl %p n %ld w %d", bl, n, w); + return NULL; + } + + int64_t delta = n - bl->n; + if (delta == 0) + { + KV_TRC_FFDC(pAT, "No size difference bl %p n %ld w %d", bl, n, w); + return bl; + } + + pthread_rwlock_wrlock(&(bl->iv_rwlock)); + + bl->list = iv_resize(bl->list, n, w); + if (bl->list) { + for(i=bl->n; ilist,i,i+1); + } + iv_set(bl->list,n-1, bl->head); + bl->head = bl->n; + bl->n = n; + bl->count += delta; + } else { + KV_TRC_FFDC(pAT, "bl %p n %ld w %d", bl, n, w); + bl = NULL; + } + + pthread_rwlock_unlock(&(bl->iv_rwlock)); + + KV_TRC(pAT, "bl %p", bl); + return bl; +} + +int64_t bl_left(BL *bl) { + return bl->count; +} + +// take returns a root to a chain of n blocks +int64_t bl_take(BL *bl, int64_t n) { + if (n > bl->count) { + KV_TRC_FFDC(pAT, "Not enough free 
blocks bl %p n %ld bl->count %ld", bl, n, bl->count); + return -1; + } + if (n==0) { + KV_TRC_FFDC(pAT, "Zero block request bl %p n %ld", bl, n); + return 0; + } + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + + int64_t hd = bl->head; + int64_t tl = bl->head; + int64_t m = n - 1; + while (m>0) { + tl = iv_get(bl->list,tl); + m--; + } + bl->head= iv_get(bl->list,tl); + iv_set(bl->list,tl,0); + bl->count -= n; + + pthread_rwlock_unlock(&(bl->iv_rwlock)); + + KV_TRC_DBG(pAT, "bl %p n %ld bl->count %ld bl->head %ld tl %ld m %ld", + bl, n, bl->count, bl->head, tl, m); + return hd; +} + +int64_t bl_end(BL *bl, int64_t b) { + int64_t i = b; + int64_t ret = 0; + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + + if (i>=0) { + while ((ret = iv_get(bl->list,i)) > 0) i = ret; + } + + pthread_rwlock_unlock(&(bl->iv_rwlock)); + return i; +} + +// return a chain from a root +int64_t bl_drop(BL *bl, int64_t b) { + int64_t i = bl_end(bl,b); + int64_t n = bl_len(bl,b); + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + if (bl->hold==-1) { + iv_set(bl->list,i,bl->head); + //bl->chain[i] = bl->head; + bl->head = b; + bl->count += n; + } else if (bl->hold == 0) { + bl->hold = b; + } else { + iv_set(bl->list, i, bl->hold); + //bl->chain[i] = bl->hold; + bl->hold = b; + } + pthread_rwlock_unlock(&(bl->iv_rwlock)); + KV_TRC_DBG(pAT, "bl %p b %ld bl->count %ld bl->hold %ld bl->head %ld", + bl, b, bl->count, bl->hold, bl->head); + return n; +} + +// cause drops to be held until released to free pool +void bl_hold (BL *bl) { + if (bl->hold == -1) bl->hold = 0; +} + +// release any held blocks to the free list +void bl_release(BL *bl) { + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + if (0hold) { + int64_t i = bl_end(bl,bl->hold); + int64_t n = bl_len(bl,bl->hold); + + if (n>0) { + iv_set(bl->list, i, bl->head); + bl->head = bl->hold; + bl->count += n; + } + } + bl->hold = -1; + pthread_rwlock_unlock(&(bl->iv_rwlock)); +} + + +// the length of a chain +int64_t bl_len(BL *bl, int64_t b) { + int64_t n = 
0; + int64_t i = b; + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + while (0 < i) { + n++; + i = iv_get(bl->list,i); + } + pthread_rwlock_unlock(&(bl->iv_rwlock)); + return n; +} +// the next block in a chain +int64_t bl_next(BL *bl, int64_t b) { + uint64_t blk; + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + blk = iv_get(bl->list, b); + pthread_rwlock_unlock(&(bl->iv_rwlock)); + + return blk; +} + +ark_io_list_t *bl_chain(BL *bl, int64_t b, int64_t len) +{ + ark_io_list_t *bl_array = NULL; + int i = 0; + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + + if (bl != NULL) + { + bl_array = (ark_io_list_t *)am_malloc(sizeof(ark_io_list_t) * len); + if (bl_array != NULL) + { + while (0 < b) { + bl_array[i].blkno = b; + bl_array[i].a_tag = -1; + b = iv_get(bl->list, b); + i++; + } + } + } + + pthread_rwlock_unlock(&(bl->iv_rwlock)); + + return bl_array; +} + +ark_io_list_t *bl_chain_blocks(BL *bl, int64_t start, int64_t len) +{ + ark_io_list_t *bl_array = NULL; + int i = 0; + + pthread_rwlock_rdlock(&(bl->iv_rwlock)); + + if (bl != NULL) + { + bl_array = (ark_io_list_t *)am_malloc(sizeof(ark_io_list_t) * len); + if (bl_array != NULL) + { + for (i = 0; i < len; i++) + { + bl_array[i].blkno = start + i; + bl_array[i].a_tag = -1; + } + } + } + + pthread_rwlock_unlock(&(bl->iv_rwlock)); + + return bl_array; +} + +ark_io_list_t *bl_chain_no_bl(int64_t start, int64_t len) +{ + ark_io_list_t *bl_array = NULL; + int i = 0; + + bl_array = (ark_io_list_t *)am_malloc(sizeof(ark_io_list_t) * len); + if (bl_array != NULL) + { + for (i = 0; i < len; i++) + { + bl_array[i].blkno = start + i; + bl_array[i].a_tag = -1; + } + } + + return bl_array; +} + +/* int64_t bl_cnt(BL *bl, int64_t b, int64_t i) { */ +/* int64_t cnt = 0; */ +/* while (b >= 0) { */ +/* if (b==i) cnt++; */ +/* b = iv_get(bl->list, b); */ +/* // b = bl->chain[b]; */ +/* } */ +/* return cnt; */ +/* } */ + +void bl_dot(BL *bl, int i, int *bcnt, int ccnt, int64_t *chains) { + char f[256]; + sprintf(f,"bl%03d.dot", i); + FILE *F 
= fopen(f,"w"); + + fprintf(F,"digraph G {\n"); + fprintf(F," head [shape=Mdiamond];\n"); + fprintf(F," chain [shape=Mdiamond];\n"); + + int64_t b = bl->head; + if (0<=b) fprintf(F," head -> b%"PRIi64"_%d;\n", b, bcnt[b]); + while (0<=b) { + if (0list,b)) fprintf(F," b%"PRIi64"_%d -> b%"PRIi64"_%d;\n", + b, bcnt[b], iv_get(bl->list,b), bcnt[iv_get(bl->list,b)]); + b = iv_get(bl->list,b); + } + + for(i=i; i b%"PRIi64"_%d;\n", b, bcnt[b]); + while (0<=b) { + int64_t bn = iv_get(bl->list,b); + if (0<=bn) fprintf(F," b%"PRIi64"_%d -> b%"PRIi64"_%d;\n", b, bcnt[b], bn, bcnt[bn]); + b = bn; + } + + } + + fprintf(F,"}\n"); + fclose(F); +} diff --git a/src/kv/bl.h b/src/kv/bl.h new file mode 100644 index 00000000..85a6d5e9 --- /dev/null +++ b/src/kv/bl.h @@ -0,0 +1,90 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bl.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __BL_H__ +#define __BL_H__ + +#include +#include +#include "iv.h" + +typedef struct ark_io_list +{ + int64_t blkno; + int a_tag; +} ark_io_list_t; + +typedef struct _bl { + int64_t n; + int64_t w; + int64_t head; + int64_t count; + int64_t hold; + pthread_rwlock_t iv_rwlock; + IV *list; + //int64_t chain[]; +} BL; + +// create a chain 0..n-1 +BL *bl_new(int64_t n,int w); +void bl_adjust(BL *bl, uint64_t blks); +BL *bl_resize(BL *bl, int64_t n, int w); +void bl_delete(BL *bl); + +// take returns a root to a chain of n blocks, neagative is error +int64_t bl_take(BL *bl, int64_t n); + +// put a chain back, return the length of returned chain, negative is error +int64_t bl_drop(BL *bl, int64_t b); + +// number of items left +int64_t bl_left(BL *bl); + +// the length of a chain +int64_t bl_len(BL *bl, int64_t b); + +// the next block in a chain +int64_t bl_next(BL *bl, int64_t); + +// count occurrences of i in a chain rooted at b +int64_t bl_cnt(BL *bl, int64_t b, int64_t i); + +// cause drops to be held until released to free pool +void bl_hold (BL *bl); + +// release any held blocks to the free list +void bl_release(BL *bl); + +// Return an array of linked blocks starting at b +ark_io_list_t *bl_chain(BL *bl, int64_t b, int64_t len); + +ark_io_list_t *bl_chain_blocks(BL *bl, int64_t start, int64_t len); + +ark_io_list_t *bl_chain_no_bl(int64_t start, int64_t len); + +// generate a graph of the blocks +void bl_dot(BL *bl, int n, int *bcnt, int ccnt, int64_t *chain); + + +#endif diff --git a/src/kv/bt.c b/src/kv/bt.c new file mode 100644 index 00000000..3d041ea6 --- /dev/null +++ b/src/kv/bt.c @@ -0,0 +1,359 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bt.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include + +#include "am.h" +#include "vi.h" +#include "bt.h" + +#include + +BT *bt_new(uint64_t dlen, uint64_t vmx, uint64_t vdf, uint64_t *btsize, BT **bt_orig) { + BT *bt = NULL; + + *bt_orig = am_malloc(sizeof(BT) + dlen); + if (*bt_orig == NULL) { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory dlen %ld vmx %ld vdf %ld, errno = %d", + dlen, vmx, vdf, errno); + if (btsize != NULL) + { + *btsize = 0; + } + } + else + { + bt = (BT *)ptr_align(*bt_orig); + bt->len = sizeof(BT); + bt->cnt = 0; + bt->max = vmx; + bt->def = vdf; + bt->dlen = dlen; + if (btsize != NULL) + { + *btsize = sizeof(BT) + dlen; + } + } + KV_TRC(pAT, "bt %p dlen %ld vmx %ld vdf %ld", + bt, dlen, vmx, vdf); + return bt; +} + +int bt_growif(BT **bt, BT **bt_orig, uint64_t *lim, uint64_t size) { + BT *btret = NULL; + int rc = 0; + + // We need to include the size of the BT header struct + // This is a bit of a hack, but we know that a + // struct BT will fit within a block size of 4K + uint64_t newsize = (size + 4096); + + // If the sie of bt is already sufficient, do nothing + // and just return success. 
+ if (newsize > *lim) { + btret = am_realloc(*bt_orig, newsize); + if (btret == NULL) + { + // An error was encountered with realloc + // Do not lose the original bt, just retur + // the error + rc = errno; + } + else + { + // Realloc succeeded, set bt to the new + // space + *bt_orig = btret; + *bt = (BT *)ptr_align(btret); + (*bt)->dlen = newsize - sizeof(BT); + *lim = newsize; + } + } + + return rc; +} + +int bt_init(BT *bt) { + if (bt) { + bt->len = sizeof(BT); + bt->cnt = 0; + return BT_OK; + } else { + errno = EINVAL; + KV_TRC_FFDC(pAT, "bt %p, errno = %d", bt, errno); + return BT_FAIL; + } +} + + +void bt_delete(BT *bt) { + KV_TRC(pAT, "bt %p", bt); + am_free(bt); +} + +int64_t bt_exists(BT *bt, uint64_t kl, uint8_t *k) { + uint64_t i; + //uint64_t off = 0; + uint8_t *buf = bt->data; + for(i=0; icnt; i++) { + uint64_t klen = 0; + uint64_t vlen = 0; + //uint64_t bytes = 0; + buf += vi_dec64(buf, &klen); + buf += vi_dec64(buf, &vlen); + if (kl==klen && memcmp(buf, k, klen)==0) { + return vlen; + } + buf += klen + (vlen > bt->max ? bt->def : vlen); + } + errno = EINVAL; + KV_TRC_FFDC(pAT, "bt %p kl %ld, k %p, rc = %d", bt, kl, k, errno); + return BT_FAIL; +} + +int bt_set(BT *bto,BT *bti, uint64_t kl, uint8_t *k, uint64_t vl, uint8_t *v, uint64_t *ovlen) { + uint8_t *src = bti->data; + uint8_t *dst = bto->data; + uint64_t bytes = 0; + dst += vi_enc64(kl, dst); + dst += vi_enc64(vl, dst); + memcpy(dst, k, kl); + dst += kl; + bytes = vl > bti->max ? bti->def : vl; + memcpy(dst, v, bytes); + dst += bytes; + int add = 1; + uint64_t pkl; + uint64_t pvl; + int64_t i; + // uint64_t inc = 0; + for(i=0 ; icnt; i++) { + src += vi_dec64(src, &pkl); + src += vi_dec64(src, &pvl); + if (pkl==kl && memcmp(src,k,kl)==0) { + add = 0; + *ovlen = pvl; + src += (pkl + (pvl > bti->max ? bti->def : pvl)); + bytes = bti->len - sizeof(BT) - (src - bti->data); + memcpy(dst, src, bytes); + dst += bytes; + i=bti->cnt; + } else { + bytes = pkl + (pvl>bti->max ? 
bti->def : pvl); + dst += vi_enc64(pkl,dst); + dst += vi_enc64(pvl,dst); + memcpy(dst,src, bytes); + src += bytes; + dst += bytes; + } + } + bto->len = sizeof(BT) + (dst - bto->data); + bto->cnt = bti->cnt + add; + bto->def = bti->def; + bto->max = bti->max; + return add; +} + +int64_t bt_del(BT *bto, BT *bti, uint64_t kl, uint8_t *k) { + uint8_t *src = bti->data; + uint8_t *dst = bto->data; + uint64_t pkl; + uint64_t pvl; + int64_t i; + int64_t ret = -1; + int del = 0; + uint64_t bytes = 0; + + // uint64_t inc = 0; + for(i=0 ; icnt; i++) { + src += vi_dec64(src, &pkl); + src += vi_dec64(src, &pvl); + if (pkl==kl && memcmp(src,k,kl)==0) { + del = 1; + src += (pkl + (pvl > bti->max ? bti->def : pvl)); + bytes = bti->len - sizeof(BT) - (src - bti->data); + memcpy(dst, src, bytes); + dst += bytes; + ret = pvl; + i=bti->cnt; + } else { + bytes = pkl + (pvl>bti->max ? bti->def : pvl); + dst += vi_enc64(pkl,dst); + dst += vi_enc64(pvl,dst); + memcpy(dst,src, bytes); + src += bytes; + dst += bytes; + } + } + bto->len = sizeof(BT) + (dst - bto->data); + bto->cnt = bti->cnt -del; + bto->def = bti->def; + bto->max = bti->max; + + if ( ret == -1 ) + { + errno = EINVAL; + KV_TRC_FFDC(pAT, "Key not found bto %p bti %p, kl %ld k %p, rc = %d", + bto, bti, kl, k, errno); + } + return ret; +} +// same as bt_del except returns the val if vlen > vlimit as this ref may be used to +// to delete other things ref will be filled with def bytes +// this could replace bt_del at the cost of the extra parameter +int64_t bt_del_def(BT *bto, BT *bti, uint64_t kl, uint8_t *k, uint8_t *ref, uint64_t *ovlen) { + uint8_t *src = bti->data; + uint8_t *dst = bto->data; + uint64_t pkl; + uint64_t pvl; + int64_t i; + int64_t ret = -1; + int del = 0; + uint64_t bytes = 0; + // uint64_t inc = 0; + for(i=0 ; icnt; i++) { + src += vi_dec64(src, &pkl); + src += vi_dec64(src, &pvl); + if (pkl==kl && memcmp(src,k,kl)==0) { + if (ref) { + if (pvl > bti->max) + memcpy(ref, src + pkl, bti->def); + else + 
memset(ref, 0x00, bti->def); + } + *ovlen = pvl; + del = 1; + src += (pkl + (pvl > bti->max ? bti->def : pvl)); + bytes = bti->len - ((uint64_t)src - (uint64_t)bti); + memcpy(dst, src, bytes); + dst += bytes; + ret = pvl; + i=bti->cnt; + } else { + bytes = pkl + (pvl>bti->max ? bti->def : pvl); + dst += vi_enc64(pkl,dst); + dst += vi_enc64(pvl,dst); + memcpy(dst,src, bytes); + src += bytes; + dst += bytes; + } + } + bto->len = sizeof(BT) + (dst - bto->data); + bto->cnt = bti->cnt - del; + bto->def = bti->def; + bto->max = bti->max; + + if ( ret == -1 ) + { + errno = EINVAL; + KV_TRC_FFDC(pAT, "Key not found bto %p bti %p, kl %ld k %p ref %p, rc = %d", + bto, bti, kl, k, ref, errno); + } + + return ret; +} + + + + +int64_t bt_get(BT *bti, uint64_t kl, uint8_t *k, uint8_t *v) { + uint8_t *src = bti->data; + uint64_t pkl; + uint64_t pvl; + int64_t i; + // uint64_t inc = 0; + for(i=0 ; icnt; i++) { + src += vi_dec64(src, &pkl); + src += vi_dec64(src, &pvl); + if (pkl==kl && memcmp(src,k,kl)==0) { + src += kl; + memcpy(v,src,(pvl>bti->max ? bti->def : pvl)); + return pvl; + } else { + src += pkl; + src += (pvl>bti->max ? bti->def : pvl); + } + } + + errno = EINVAL; + KV_TRC_FFDC(pAT, "bti %p, kl %ld k %p, rc = %d", + bti, kl, k, errno); + return -1; +} + + + +void bt_dump(BT *bt) { + uint64_t kl; + uint64_t vl; + uint64_t i,j; + printf(" ----\n Bkt: %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64"\n", bt->len, bt->cnt, bt->max, bt->def); + if (bt->cnt==0) { + printf(" --empty--\n"); + } else { + uint8_t *buf = bt->data; + for(i=0; icnt; i++) { + buf += vi_dec64(buf, &kl); + buf += vi_dec64(buf, &vl); + printf(" %"PRIu64": %"PRIu64" %"PRIu64" '", i,kl, vl); + for(j=0; j'"); + for(j=0; j<(vl>bt->max ? 
bt->def : vl); j++) printf("%02x",buf[j]); + buf += vl; + printf("'\n"); + } + printf(" ----\n"); + } +} + +void bt_cstr(BT *bt) { + uint64_t kl; + uint64_t vl; + uint64_t i,j; + printf(" ----\n Bkt: %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64"\n", bt->len, bt->cnt, bt->max, bt->def); + if (bt->cnt==0) { + printf(" --empty--\n"); + } else { + uint8_t *buf = bt->data; + for(i=0; icnt; i++) { + buf += vi_dec64(buf, &kl); + buf += vi_dec64(buf, &vl); + printf(" %"PRIu64": %"PRIu64" %"PRIu64" '", i,kl, vl); + for(j=0; j'"); + for(j=0; j<(vl>bt->max ? bt->def : vl); j++) printf("%c",buf[j]); + buf += vl; + printf("'\n"); + } + printf(" ----\n"); + } +} diff --git a/src/kv/bt.h b/src/kv/bt.h new file mode 100644 index 00000000..4da6493f --- /dev/null +++ b/src/kv/bt.h @@ -0,0 +1,68 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bt.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __BT_H__ +#define __BT_H__ + +typedef struct _bt { + uint64_t len; + uint64_t cnt; + uint64_t max; + uint64_t def; + uint64_t dlen; + uint8_t data[]; +} BT; + +typedef BT * BTP; + +// data is layed out as +// kl0 vl0 k0... v0... kl1 vl1 k1... v1... 
etc. +// when vl > max then the length of the v is really def so +// only def bytes are present but the get/set/delroutines report vl on success +// since there is a max on val sizes entire vals are returned in a buffer the user has made sufficiently large + +#define BT_OK 0 +#define BT_FAIL -1 + +BT *bt_new(uint64_t dlen, uint64_t vmx, uint64_t vdf, uint64_t *btsize, BT **bt_orig); +int bt_growif(BT **bt, BT **bt_orig, uint64_t *lim, uint64_t size); +void bt_delete(BT *bt); +int bt_init(BT *bt); + +int64_t bt_exists(BT *bt, uint64_t kl, uint8_t *k); + +int bt_set(BT *bto, BT *bti, uint64_t kl, uint8_t *k, uint64_t vl, uint8_t *v, uint64_t *ovlen); + +int64_t bt_get(BT *bti, uint64_t kl, uint8_t *k, uint8_t *v); + +int64_t bt_del(BT *bto, BT *bti, uint64_t kl, uint8_t *k); + +int64_t bt_del_def(BT *bto, BT *bti, uint64_t kl, uint8_t *k, uint8_t *ref, uint64_t *ovlen); + +void bt_dump(BT *bt); +void bt_cstr(BT *bt); + +// NYI + +#endif diff --git a/src/kv/bv.c b/src/kv/bv.c new file mode 100644 index 00000000..b088ca2e --- /dev/null +++ b/src/kv/bv.c @@ -0,0 +1,133 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bv.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include + +#include "am.h" +#include "ut.h" +#include "bv.h" + +#define BPW 8 +#define WRD uint8_t +#define SHF 3 +#define MSK 7 + +/* #define BPW 16 */ +/* #define WRD uint16_t */ +/* #define SHF 4 */ +/* #define MSK 15 */ + +/* #define BPW 32 */ +/* #define WRD uint32_t */ +/* #define SHF 5 */ +/* #define MSK 31 */ + +/* #define BPW 64 */ +/* #define WRD uint64_t */ +/* #define SHF 6 */ +/* #define MSK 63 */ + +#define BVINLINE // inline +#define BVSAFE // if (i<0 & i>=bv->n) exit(987); + +BV *bv_new(uint64_t n) { + uint64_t nw = divup(n, BPW); + BV *p = am_malloc(sizeof(BV) + nw * sizeof(WRD)); + if (p == NULL) + { + errno = ENOMEM; + } + else + { + memset(p->bits,0x00, nw * sizeof(WRD)); + p->n = n; + p->nw = nw; + } + return p; +} +void bv_delete(BV *bv) { + am_free(bv); +} + +BVINLINE +int bv_get(BV *bv, uint64_t i) { + BVSAFE + WRD *v = (WRD*)(bv->bits); + uint64_t w = i>>SHF; + int b = i & MSK; + return 1 & (v[w]>>b); +} +BVINLINE +void bv_set(BV *bv, uint64_t i) { + BVSAFE + WRD *v = (WRD*) (bv->bits); + uint64_t w = i>>SHF; + int b = i & MSK; + WRD m = 1; + m <<= b; + v[w] |= m; +} +BVINLINE +void bv_clr(BV *bv, uint64_t i) { + BVSAFE + WRD *v = (WRD*) (bv->bits); + uint64_t w = i>>SHF; + int b = i & MSK; + WRD m = 1; + m <<= b; + m ^= -1; + v[w] &= m; +} + +int popcount[256] = + {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 
3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + +uint64_t bv_cnt(BV *bv) { + uint64_t cnt = 0; + WRD *v = (WRD*) (bv->bits); + int i; + if (BPW==8) { + for (i=0; inw; i++) cnt += popcount[v[i]]; + } else if (BPW==16) { + for (i=0; inw; i++) cnt += (popcount[v[i] & MSK] + popcount[v[i] >> (BPW/2)]); + } else if (BPW==64) { + cnt = -2; + } else { + cnt = -BPW; + } + return cnt; +} + +int bv_bpw() { return BPW; } diff --git a/src/kv/bv.h b/src/kv/bv.h new file mode 100644 index 00000000..3d57261d --- /dev/null +++ b/src/kv/bv.h @@ -0,0 +1,45 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/bv.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __BV_H__ +#define __BV_H__ +#include + + +typedef struct _bv { + uint64_t n; + uint64_t nw; + char bits[]; +} BV; + +BV *bv_new(uint64_t n); +void bv_delete(BV *bv); + +void bv_set(BV *bv, uint64_t i); +void bv_clr(BV *bv, uint64_t i); +int bv_get(BV *bv, uint64_t i); + +uint64_t bv_cnt(BV *bv); + +#endif diff --git a/src/kv/cl.c b/src/kv/cl.c new file mode 100644 index 00000000..b8ae3b86 --- /dev/null +++ b/src/kv/cl.c @@ -0,0 +1,179 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/cl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include + +#include "cl.h" + +char *ardes[] = {"","","","","","", ""}; + +int cl_parse(int argc, char **argv, CL *ar, char **anon, int echo) { + int i, j; + int acnt = 0; + int arn = 0; + int amax = 0; + while (ar[arn].str!=NULL) arn++; + while (anon[amax]!=NULL) amax++; + //printf("arn = %d, amax = %d\n", arn, amax); + for (i=1; i>>> %s\n", argv[i]); + found = 1; + switch (ar[j].typ) { + case AR_INT: + case AR_INT32: { + if (++i==argc) goto help_exit; + *((int*)ar[j].val) = atoi(argv[i]); + break; + } + case AR_INT64: { + if (++i==argc) goto help_exit; + *((int64_t*)ar[j].val) = atoll(argv[i]); + break; + } + case AR_FLT: { + if (++i==argc) goto help_exit; + *((float*)ar[j].val) = atof(argv[i]); + break; + } + case AR_DBL: { + if (++i==argc) goto help_exit; + *((double*)ar[j].val) = strtod(argv[i],0); + break; + } + case AR_STR: { + if (++i==argc) goto help_exit; + *((char**)ar[j].val) = argv[i]; + break; + } + case AR_FLG: { + *((int*)ar[j].val) -= 1; + break; + } + default : goto help_exit; + } + } + } + if (!found) { + if (argv[i][0]=='-') + goto help_exit; + else { + if (acnt < amax) { + //printf("###### %s\n", argv[i]); + found = 1; + anon[acnt++] = argv[i]; + } else { + goto help_exit; + } + } // end anonymous + } + } + if (echo) { + printf(" Running: %s with arguments\n", argv[0]); + j = 0; + for(j=0; j : %s\n", ar[j].str, (*(int*)ar[j].val) ? "true" : "false" , ar[j].des); + break; + } + } + } + if (acnt==0) acnt = amax; + if (acnt) printf(" Anonymous arguments:\n"); + for(j=0; j\n", argv[0]); + for(j=0; j ... 
/*
 * csv_parse - split a line of double-quoted CSV fields in place.
 *
 * Scans buf for fields delimited by '"' characters.  Each closing
 * quote is overwritten with NUL so val[k] is a usable C string
 * pointing just past the k-th opening quote; len[k] receives the
 * field length.  Text outside quotes (commas, spaces) is ignored.
 *
 * buf  - line to parse; MODIFIED in place (quotes become NULs)
 * val  - out: field start pointers, capacity n
 * len  - out: field lengths, capacity n
 * n    - capacity of val[]/len[]
 *
 * Returns the number of fields stored (<= n).
 *
 * Fix: the original ignored n entirely, so a line with more than n
 * quoted fields wrote past the end of val[]/len[].  Scanning now
 * stops once n fields have been captured.
 */
int csv_parse(char *buf, char **val, int *len, int n) {
    int m = 0;        /* fields completed so far         */
    int i = 0;        /* scan position in buf            */
    int cnt = 0;      /* length of field being scanned   */
    int instring = 0; /* currently inside a quoted field */

    while (buf[i] != 0 && m < n) {   /* bound added: never write past val[n-1] */
        int mark = buf[i] == '"';
        if (instring) {
            if (mark) {
                buf[i] = 0;          /* terminate the field in place */
                len[m] = cnt;
                m++;
                instring = 0;
            } else {
                cnt++;
            }
        } else if (mark) {
            val[m] = buf + i + 1;    /* field starts after the opening quote */
            cnt = 0;
            instring = 1;
        }
        i++;
    }
    return m;
}
/* cl.h - tiny command-line and CSV parsing helpers (see cl.c). */
#ifndef __AR_H__
#define __AR_H__

/* Value-type codes understood by cl_parse(). */
#define AR_INT   0   /* int                              */
#define AR_FLT   1   /* float                            */
#define AR_STR   2   /* char* (points into argv)         */
#define AR_FLG   3   /* boolean flag, takes no value     */
#define AR_INT32 4   /* 32-bit integer                   */
#define AR_INT64 5   /* 64-bit integer                   */
#define AR_DBL   6   /* double                           */

/* One command-line option descriptor; a NULL .str ends the table. */
typedef struct _cl {
    char *str;  /* option string, e.g. "-n"              */
    void *val;  /* destination for the parsed value      */
    int   typ;  /* one of the AR_* codes above           */
    char *des;  /* description text for the help output  */
} CL;

/* Parse argc/argv against descriptor table ar; words that match no
 * option are collected into anon.  Non-zero echo prints the parsed
 * values.  See cl.c for the exact return/exit behavior. */
int cl_parse(int argc, char **argv, CL *ar, char **anon, int echo);

/* Split a line of double-quoted CSV fields in place; see cl.c. */
int csv_parse(char *line, char **val, int *len, int n);

#endif
/* ct.h - ANSI terminal color escape sequences. */
#ifndef __CT_H__
#define __CT_H__

/* SGR foreground color codes 30..37 (ECMA-48 / ISO 6429). */
#define C_Reset   "\x1b[0m"
#define C_Black   "\x1b[30m"
#define C_Red     "\x1b[31m"
#define C_Green   "\x1b[32m"
#define C_Yellow  "\x1b[33m"
#define C_Blue    "\x1b[34m"
#define C_Magenta "\x1b[35m"  /* fixed: 35 is magenta; it was assigned to C_Cyan */
#define C_Cyan    "\x1b[36m"  /* fixed: 36 is cyan; it was assigned to C_Magenta */
#define C_White   "\x1b[37m"

/* static: defining a non-static array in a header produces duplicate
 * symbols at link time whenever two translation units include it.
 * Array order is unchanged from the original. */
static char *c_strings[9] = {
    C_Reset, C_Black, C_Red, C_Green, C_Yellow,
    C_Blue, C_Cyan, C_Magenta, C_White
};

#endif
+ + +#define fetch_and_or(ptr,val) (__sync_fetch_and_or(ptr,val)) + + +#endif /* !_AIX */ + + +EA *ea_new(const char *path, uint64_t bsize, int basyncs, + uint64_t *size, uint64_t *bcount, uint64_t vlun) +{ + int rc = 0; + size_t plen = 0; + uint8_t *store = NULL; + EA *ea = NULL; + chunk_id_t chkid = NULL_CHUNK_ID; + chunk_ext_arg_t ext = 0; + + + + if (!(fetch_and_or(&cflsh_blk_lib_init,1))) { + + + // We need to call cblk_init once before + // we use any other cblk_ interfaces + + rc = cblk_init(NULL,0); + + if (rc) { + + KV_TRC_FFDC(pAT, "cblk_init failed path %s bsize %"PRIu64" size %"PRIu64" bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + + return ea; + } + } + + ea = am_malloc(sizeof(EA)); + if ( NULL == ea ) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory path %s bsize %"PRIu64" size %"PRIu64" bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + } + else + { + // We need to check the path parameter to see if + // we are going to use memory or a file/capi + // device (to be determined by the block layer) + if ( (NULL == path) || (strlen(path) == 0) ) + { + // Using memory for store + ea->st_type = EA_STORE_TYPE_MEMORY; + + store = malloc(*size); + if ( NULL == store ) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory for store path %s bsize %"PRIu64" size %"PRIu64" bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + am_free(ea); + ea = NULL; + } + else + { + *bcount = ((*size) / bsize); + ea->st_memory = store; + } + } + else + { + + + // Using a file. We don't care if it's an actual + // file or a CAPI device, we let block layer + // decide and we just use the chunk ID that is + // passed back from the cblk_open call. + ea->st_type = EA_STORE_TYPE_FILE; + + // Check to see if we need to create the store on a + // physical or virtual LUN. Previously, in GA1, + // we keyed off the size and if it was 0, then we + // asked for the LUN to be physical. 
Now, the user + // can specify with a flag. + if ( vlun == 0 ) + { + chkid = cblk_open((const char *)path, + basyncs, O_RDWR, ext, 0); + if ( NULL_CHUNK_ID != chkid ) + { + rc = cblk_get_size(chkid, (size_t *)bcount, 0); + if ( (rc != 0) || (*bcount == 0) ) + { + // An error was encountered, close the chunk + + // If we are here, and the call to cblk_get_size() + // was successful then that means bcount is 0 + if ( rc == 0 ) + { + rc = ENOSPC; + } + + cblk_close(chkid, 0); + chkid = NULL_CHUNK_ID; + KV_TRC_FFDC(pAT, "cblk_get_size failed path %s bsize %"PRIu64" size %"PRIu64"" + "bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + } + else + { + // Set the size to be returned + *size = *bcount * bsize; + } + } + } + else + { + chkid = cblk_open((const char *)path, basyncs, O_RDWR, ext, + CBLK_OPN_VIRT_LUN); + if ( NULL_CHUNK_ID != chkid ) + { + // A specific size was passed in so we try to set the + // size of the chunk. + *bcount = *size / bsize; + rc = cblk_set_size(chkid, (size_t)*bcount, 0); + if ( rc != 0 ) + { + // An error was encountered, close the chunk + cblk_close(chkid, 0); + chkid = NULL_CHUNK_ID; + KV_TRC_FFDC(pAT, "cblk_set_size failed path %s bsize %"PRIu64" size %"PRIu64"" + " bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + } + } + } + + if ( NULL_CHUNK_ID == chkid ) + { + printf("cblk_open failed\n"); + am_free(ea); + ea = NULL; + KV_TRC_FFDC(pAT, "cblk_open failed path %s bsize %"PRIu64" size %"PRIu64"" + " bcount %"PRIu64", errno = %d", + path, bsize, *size, *bcount, errno); + } + else + { + // Save off the chunk ID and the device name + ea->st_flash = chkid; + plen = strlen(path) + 1; + ea->st_device = (char *)am_malloc(plen); + memset(ea->st_device, 0, plen); + strncpy(ea->st_device, path, plen); + } + } + + if (ea != NULL) + { + // Fill in the EA struct + pthread_rwlock_init(&(ea->ea_rwlock), NULL); + ea->bsize = bsize; + ea->bcount = *bcount; + ea->size = *size; + } + } + + KV_TRC(pAT, "path %s 
bsize %"PRIu64" size %"PRIu64" bcount %"PRIu64"", + path, bsize, *size, *bcount); + return ea; +} + +int ea_resize(EA *ea, uint64_t bsize, uint64_t bcount) { + uint64_t size = bcount * bsize; + int rc = 0; + + ARK_SYNC_EA_WRITE(ea); + + if ( ea->st_type == EA_STORE_TYPE_MEMORY ) + { + // For an in-memory store, we simply "realloc" + // the memory. + uint8_t *store = realloc(ea->st_memory, size); + if (store) { + ea->bcount = bcount; + ea->size = size; + ea->st_memory = store; + } + else { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "Out of memory to resize ea %p bsize %lu bcount %lu, errno = %d", + ea, bsize, bcount, errno); + rc = 1; + } + } + else + { + // Call down to the block layer to set the + // new size on the store. + rc = cblk_set_size(ea->st_flash, bcount, 0); + if (rc == 0) + { + ea->bcount = bcount; + ea->size = size; + } + } + + ARK_SYNC_EA_UNLOCK(ea); + + return rc; +} + +int ea_read(EA *ea, uint64_t lba, void *dst) { + uint8_t *src = NULL; + int rc = 0; + + if ( ea->st_type == EA_STORE_TYPE_MEMORY) + { + // Read out the value from the in-memor block + src = ea->st_memory + lba * ea->bsize; + rc = ( memcpy(dst, src, ea->bsize) == NULL ? 0 : 1); + } + else + { + // Call out to the block layer and retrive a block + rc = cblk_read(ea->st_flash, dst, lba, 1, 0); + } + + return rc; +} + +int ea_write(EA *ea, uint64_t lba, void *src) { + uint8_t *dst = NULL; + int rc = 0; + + if ( ea->st_type == EA_STORE_TYPE_MEMORY) + { + // Write the value to the in-memor block + dst = ea->st_memory + lba * ea->bsize; + rc = ( memcpy(dst, src, ea->bsize) == NULL ? 
0 : 1); + } + else + { + // Send the value down to the block layer, 1 block + rc = cblk_write(ea->st_flash, src, lba, 1, 0); + } + + return rc; +} + +int ea_async_io(EA *ea, int op, void *addr, ark_io_list_t *blist, int64_t len, int nthrs) +{ + int64_t i = 0; + int64_t j = 0; + int64_t comps = 0; + int num = 0; + int max_ops = 0; + int rc = 0; + int a_rc = 0; + uint64_t status = 0; + uint8_t *p_addr = NULL; + uint8_t *m_addr = NULL; + + ARK_SYNC_EA_READ(ea); + + if ( ea->st_type == EA_STORE_TYPE_MEMORY) + { + // Loop through the block list to issue the IO + for(i = 0; i < len; i++) { + + p_addr = ((uint8_t*)addr) + (i * ea->bsize); + + // For in-memory Store, we issue the memcpy + // and wait for the return, no async here. + // Read out the value from the in-memor block + m_addr = ea->st_memory + (blist[i].blkno * ea->bsize); + + if (op == ARK_EA_READ) + { + if (FVT_KV_READ_ERROR_INJECT) + { + FVT_KV_CLEAR_READ_ERROR; rc = errno = EIO; + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + break; + } + if ( memcpy(p_addr, m_addr, ea->bsize) == NULL ) + { + rc = errno; + break; + } + } + else + { + if (FVT_KV_WRITE_ERROR_INJECT) + { + FVT_KV_CLEAR_WRITE_ERROR; rc = errno = EIO; + KV_TRC_FFDC(pAT, "WRITE_ERROR_INJECT rc = %d", EIO); + break; + } + if ( memcpy(m_addr, p_addr, ea->bsize) == NULL ) + { + rc = errno; + break; + } + } + } + } + else + { + // Because we have 4 pool threads, we want to ensure + // that at any given time, if all threads are running + // large K/V operations, we don't hang because + // we exhausted the async command slots in the block + // layer. 
So we divide up the cmd slots among + // the 4 threads and go 1 more less to be sure + max_ops = (ARK_EA_BLK_ASYNC_CMDS / nthrs) - 1; + + // Loop through the block list to issue the IO + while ((comps < len) && (rc == 0)) + { + for(i = comps, num = 0; + (i < len) && (num < max_ops); + i++, num++) + { + p_addr = ((uint8_t*)addr) + (i * ea->bsize); + + // Call out to the block layer and retrive a block + // Do an async op for a single block and tell the block + // layer to wait if there are no available command + // blocks. Upon return, we can either get an error + // (rc == -1), the data will be available (rc == number + // of blocks read), or IO has been scheduled (rc == 0). + if (op== ARK_EA_READ) + { + if (FVT_KV_READ_ERROR_INJECT) + { + FVT_KV_CLEAR_READ_ERROR; rc = errno = EIO; + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + break; + } + + KV_TRC_IO(pAT, "RD: id:%d blkno:%"PRIi64"", + ea->st_flash, + blist[i].blkno); + //printf("cblk_aread for block: %"PRIu64"\n", blist[i].blkno); + rc = cblk_aread(ea->st_flash, p_addr, blist[i].blkno, 1, + &(blist[i].a_tag), NULL,CBLK_ARW_WAIT_CMD_FLAGS); + } + else + { + if (FVT_KV_WRITE_ERROR_INJECT) + { + FVT_KV_CLEAR_WRITE_ERROR; rc = errno = EIO; + KV_TRC_FFDC(pAT, "WRITE_ERROR_INJECT rc = %d", EIO); + break; + } + + KV_TRC_IO(pAT, "WR: id:%d blkno:%"PRIi64"", + ea->st_flash, + blist[i].blkno); + rc = cblk_awrite(ea->st_flash, p_addr, blist[i].blkno, 1, + &(blist[i].a_tag), NULL,CBLK_ARW_WAIT_CMD_FLAGS); + } + + if ( rc == -1 ) + { + // Error was encountered. 
Don't issue any more IO + rc = errno; + KV_TRC_FFDC(pAT, "cblk_aread/awrite failed, IO ERROR, blkno:%"PRIi64"\ + tag:%d, errno = %d", blist[i].blkno, blist[i].a_tag, errno); + break; + } + + // Data has already been returned so we don't need to + // wait for the response below + if ( rc > 0 ) + { + blist[i].a_tag = -1; + rc = 0; + } + //_arkp->stats.io_cnt++; + } + + // For as many IOs that were performed, we loop t + // see if we need to wait for the response or the + // data has already been returned. + for (j = comps; j < i; j++) + { + + // Data has already been read + if (blist[j].a_tag == -1) + { + continue; + } + + do + { + a_rc = cblk_aresult(ea->st_flash, &(blist[j].a_tag), + &status, CBLK_ARESULT_BLOCKING); + KV_TRC_IO(pAT, "RT: id:%d blkno:%"PRIi64" status:%"PRIi64"", + ea->st_flash, + blist[i].blkno, + status); + // There was an error, check to see if we haven't + // encoutnered an error previously and if not, then + // set rc. Continue processing so that we harvest + // all outstanding responses + if (a_rc == -1) + { + if (rc == 0) + { + rc = errno; + } + } + + // If a_rc is 0, that means we got interrupted somehow + // so we need to retry the operation. + } while (a_rc == 0); + } + + // If we start another loop, start off where we finished + // in this loop. 
+ comps = i; + } + } + + ARK_SYNC_EA_UNLOCK(ea); + + return rc; +} + +int ea_delete(EA *ea) { + int rc = 0; + + if ( ea->st_type == EA_STORE_TYPE_MEMORY ) + { + KV_TRC(pAT, "ea %p ea->st_memory %p", ea, ea->st_memory); + // Simple free the block of store + free(ea->st_memory); + } + else + { + // Call to close out the chunk and free the space + // for the device name + rc = cblk_close(ea->st_flash, 0); + am_free(ea->st_device); + } + + if ( rc == 0 ) + { + KV_TRC(pAT, "ea %p", ea); + am_free(ea); + } + + return rc; +} + diff --git a/src/kv/ea.h b/src/kv/ea.h new file mode 100644 index 00000000..56d77028 --- /dev/null +++ b/src/kv/ea.h @@ -0,0 +1,102 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ea.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __EA_H__ +#define __EA_H__ + +#include +#include "ark.h" +#include "bl.h" +#include "capiblock.h" + +#define ARK_EA_READ 0 +#define ARK_EA_WRITE 1 + +#define st_memory store.memory +#define st_flash store.flash.chkid +#define st_device store.flash.device + +typedef struct flash_cntrl +{ + chunk_id_t chkid; + char *device; +} flash_cntrl_t; + +typedef union _store_id { + uint8_t *memory; + flash_cntrl_t flash; +} store_id_t; + +#define ARK_EA_BLK_ASYNC_CMDS 256 + +typedef struct _ea { + pthread_rwlock_t ea_rwlock; + uint64_t bsize; + uint64_t bcount; + uint64_t size; + store_id_t store; + +#define EA_STORE_TYPE_MEMORY 1 +#define EA_STORE_TYPE_FILE 2 +#define EA_STORE_TYPE_FLASH 3 + uint8_t st_type; +} EA; + +#define ARK_SYNC_EA_READ(_ea) \ +{ \ + if ((_ea)->st_type == EA_STORE_TYPE_MEMORY ) \ + { \ + pthread_rwlock_rdlock(&((_ea)->ea_rwlock)); \ + } \ +} + +#define ARK_SYNC_EA_WRITE(_ea) \ +{ \ + if ((_ea)->st_type == EA_STORE_TYPE_MEMORY ) \ + { \ + pthread_rwlock_wrlock(&((_ea)->ea_rwlock)); \ + } \ +} + +#define ARK_SYNC_EA_UNLOCK(_ea) \ +{ \ + if ((_ea)->st_type == EA_STORE_TYPE_MEMORY ) \ + { \ + pthread_rwlock_unlock(&((_ea)->ea_rwlock)); \ + } \ +} + +EA *ea_new(const char *path, uint64_t bsize, int basyncs, uint64_t *size, + uint64_t *bcount, uint64_t vlun); +int ea_resize(EA *ea, uint64_t bsize, uint64_t bcount); +int ea_delete(EA *ea); + +int ea_read(EA *ea, uint64_t lba, void *dst); + +int ea_write(EA *ea, uint64_t lba, void *src); + +int ea_async_io(EA *ea, int op, void *addr, ark_io_list_t *blist, int64_t len, int nthrs); + + +#endif diff --git a/src/kv/ea_mod.c b/src/kv/ea_mod.c new file mode 100644 index 00000000..77c4c279 --- /dev/null +++ b/src/kv/ea_mod.c @@ -0,0 +1,333 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/ea_mod.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include + +#include "ark.h" +#include "bl.h" +#include "ea.h" +#include "capiblock.h" +#include "am.h" + +#include +#include + +int ea_async_io_schedule(_ARK *_arkp, int32_t tid, tcb_t *tcbp, iocb_t *iocbp) +{ + EA *ea = iocbp->ea; + int32_t rc = 0; + int32_t harvest = 0; + int64_t i = 0; + uint8_t *p_addr = NULL; + uint8_t *m_addr = NULL; + + if ( EA_STORE_TYPE_MEMORY == ea->st_type ) + { + // With memory, since we will be doing a simple + // memcpy, there really isn't anything to "wait" + // for in terms of IO completion. However, to keep + // it somewhat similar in design, we will perform the + // memcpy inline here, but the "wait" IO task will + // still be scheduled. When it pops, no processing + // will be done, instead the "complete" status + // will be set and the IO will be considered + // done. + + // Loop through the block list to issue the IO + for ( i = 0; i < iocbp->nblks; i++ ) + { + p_addr = ((uint8_t *)(iocbp->addr)) + (i * ea->bsize); + + // For in-memory store, we issue the memcpy. 
There + // is no need to do any asynchronous "waiting" for + // IO completion. + m_addr = ea->st_memory + (iocbp->blist[i].blkno * ea->bsize); + + if ( ARK_EA_READ == iocbp->op ) + { + + if (FVT_KV_READ_ERROR_INJECT) + { + FVT_KV_CLEAR_READ_ERROR; + rc = -(EIO); + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + break; + } + + if ( memcpy(p_addr, m_addr, ea->bsize) == NULL ) + { + rc = -(errno); + break; + } + } + else + { + + if (FVT_KV_WRITE_ERROR_INJECT) + { + FVT_KV_CLEAR_WRITE_ERROR; + rc = -(EIO); + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + break; + } + + if ( memcpy(m_addr, p_addr, ea->bsize) == NULL ) + { + rc = -(errno); + break; + } + } + } + + if ( rc >= 0) + { + rc = iocbp->nblks; + } + + am_free(iocbp->blist); + } + else + { + // The idea here is to try and issue all the IO's + // for this operation. However, we can run into + // a situation where the block layer runs out of + // command IO buffer space and starts returning + // EAGAIN. If that happens, we need to stop issuing + // IO's and instead look to harvest the completion + // of any outstanding ops, so we schedule a TASK + // to do that. This can get a bit tricky + + for (i = iocbp->start; i < iocbp->nblks; i++) + { + p_addr = ((uint8_t *)iocbp->addr) + (i * ea->bsize); + + // Call out to the block layer and retrive a block + // Do an async op for a single block and tell the block + // layer to wait if there are no available command + // blocks. Upon return, we can either get an error + // (rc == -1), the data will be available (rc == number + // of blocks read), or IO has been scheduled (rc == 0). 
+ if ( iocbp->op == ARK_EA_READ ) + { + if (FVT_KV_READ_ERROR_INJECT) + { + FVT_KV_CLEAR_READ_ERROR; + rc = -1; + errno = EIO; + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + } + else + { + KV_TRC_IO(pAT, "RD: id:%d blkno:%"PRIi64" tag:%d", + ea->st_flash, + iocbp->blist[i].blkno, + iocbp->blist[i].a_tag); + rc = cblk_aread(ea->st_flash, p_addr, iocbp->blist[i].blkno, 1, + &(iocbp->blist[i].a_tag), NULL, + 0); + } + } + else + { + if (FVT_KV_WRITE_ERROR_INJECT) + { + FVT_KV_CLEAR_WRITE_ERROR; + rc = -1; + errno = EIO; + KV_TRC_FFDC(pAT, "READ_ERROR_INJECT rc = %d", EIO); + } + else + { + KV_TRC_IO(pAT, "WR: id:%d blkno:%"PRIi64" tag:%d", + ea->st_flash, + iocbp->blist[i].blkno, + iocbp->blist[i].a_tag); + rc = cblk_awrite(ea->st_flash, p_addr, iocbp->blist[i].blkno, 1, + &(iocbp->blist[i].a_tag), NULL, + 0); + } + } + + if ( rc == -1 ) + { + if ( errno != EAGAIN ) + { + // Something bad went wrong. We need to fail the + // IO operation and the KV command itself. We schedule + // an ARK_IO_HARVEST task and set an error in the status + // field. + //rc = -(errno); + rc = -(EIO); + } + else + { + rc = 0; + } + + break; + } + else if ( rc > 0 ) + { + // Data has already been returned so we don't need + // to harvest the completion for this block + iocbp->blist[i].a_tag = -1; + } + else + { + // IO was scheduled and we need to check back + // later for the completion. + harvest = 1; + } + } + + iocbp->new_start = i; + + if ( harvest ) + { + // Schedule the task to harvest the outstanding + // IO operations. + iocbp->io_errno = rc; + rc = 0; + } + else + { + if ( i == iocbp->nblks ) + { + // The IO has completed and we have all the data + // Schedule an ARK_IO_DONE task. 
+ rc = iocbp->nblks; + } + } + } + + return rc; +} + +int ea_async_io_harvest(_ARK *_arkp, int32_t tid, tcb_t *tcbp, iocb_t *iocbp) +{ + EA *ea = iocbp->ea; + int32_t i = 0; + int32_t rc = 0; + int32_t a_rc = 0; + uint64_t status = 0; + + // Loop through all potential async IO's waiting for + // completions + for (i = iocbp->start; i < iocbp->new_start; i++) + { + // If a_tag is -1, that means we don't need to wait + // for any data...just move on to the next one + if ( iocbp->blist[i].a_tag == -1 ) + { + continue; + } + + do + { + // Make call to harvest the IO completion. + a_rc = cblk_aresult(ea->st_flash, &(iocbp->blist[i].a_tag), + &status, CBLK_ARESULT_BLOCKING); + + KV_TRC_IO(pAT, "RT: id:%d blkno:%"PRIi64" tag:%d status:%"PRIi64"", + ea->st_flash, + iocbp->blist[i].blkno, + iocbp->blist[i].a_tag, + status); + if (a_rc == -1) + { + if (rc == 0) + { + //rc = -errno; + rc = -(EIO); + } + } + } while (a_rc == 0); + } + + // If we've harvested all IO's or if we got + // an error, then the IO itself + // is complete. Schedule the ARK_IO_DONE task. + if ( (iocbp->io_errno < 0) || (rc < 0) || (iocbp->new_start == iocbp->nblks) ) + { + if ( iocbp->io_errno < 0 ) + { + rc = iocbp->io_errno; + } + else if ( (rc == 0) && (iocbp->new_start == iocbp->nblks) ) + { + rc = iocbp->nblks; + } + am_free(iocbp->blist); + } + else + { + // We are not done so we have to go back and + // schedule some more IOs + iocbp->start = iocbp->new_start; + rc = 0; + } + + return rc; +} + +int ea_async_io_mod(_ARK *_arkp, int op, void *addr, ark_io_list_t *blist, + int64_t nblks, int start, int32_t tag, int32_t io_done) +{ + int status = 0; + iocb_t *iocbp = &(_arkp->iocbs[tag]); + tcb_t *tcbp = &(_arkp->tcbs[tag]); + + // We really shouldn't run into an error here since all we + // are doing is filling in the iocb_t structure for the + // given tag and setting the state to ARK_IO_SCHEDULE and + // queueing it up on the task queue. 
+ iocbp->ea = _arkp->ea; + iocbp->op = op; + iocbp->addr = addr; + iocbp->blist = blist; + iocbp->nblks = nblks; + iocbp->start = start; + iocbp->new_start = 0; + iocbp->io_errno = 0; + iocbp->io_done = io_done; + iocbp->tag = tag; + + // Set the state to start the IO + // tcbp->state = ARK_IO_SCHEDULE; + + ARK_SYNC_EA_READ(iocbp->ea); + + // Queue up the task now. + // (void)sq_enq(_arkp->tkq[tid], (void *)&task); + status = ea_async_io_schedule(_arkp, 0, tcbp, iocbp); + + ARK_SYNC_EA_UNLOCK(iocbp->ea); + + return status; +} + diff --git a/src/kv/hash.c b/src/kv/hash.c new file mode 100644 index 00000000..c7c8f4a5 --- /dev/null +++ b/src/kv/hash.c @@ -0,0 +1,52 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/hash.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include "am.h" +#include "hash.h" + +hash_t *hash_new(uint64_t n) { + hash_t *hash = am_malloc(sizeof(hash_t) + n * sizeof(uint64_t)); + bzero(hash, sizeof(hash_t) + n * sizeof(uint64_t)); + hash->n = n; + return hash; +} + +void hash_free(hash_t *hash) +{ + am_free(hash); + return; +} + +uint64_t hash_hash(uint8_t *buf, uint64_t n) { + uint64_t sum = 0; + uint64_t i; + for (i=0; in; +} diff --git a/src/kv/hash.h b/src/kv/hash.h new file mode 100644 index 00000000..a1b7094c --- /dev/null +++ b/src/kv/hash.h @@ -0,0 +1,53 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/hash.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
/* hash.h - fixed-size slot table plus entry packing macros. */

#ifndef __HASH_H__
#define __HASH_H__  /* fixed: was __HASHH__, which never matched the
                     * #ifndef above, so double inclusion was not
                     * actually prevented */

#include <stdint.h>  /* restored: bracketed header names were lost in the source text */

/* Table of n 64-bit slots in a flexible array member. */
typedef struct _hash {
    uint64_t n;    /* number of slots */
    uint64_t h[];  /* slot storage    */
} hash_t;

hash_t *hash_new(uint64_t n);
void hash_free(hash_t *hash);

uint64_t hash_hash(uint8_t *buf, uint64_t n);
uint64_t hash_pos(hash_t *hash, uint8_t *buf, uint64_t n);

#define HASH_GET(htb, pos)      ((htb)->h[pos])
#define HASH_SET(htb, pos, val) ((htb)->h[pos] = val)

/* Pack a lock byte (8 bits), tag (16 bits) and lba (40 bits) into a
 * single 64-bit table entry, and extract the pieces back out. */
#define HASH_MAKE(lck,tag,lba) ((((uint64_t)(lck))<<56) | (((uint64_t)(tag))<<40) | ((uint64_t)(lba)))
#define HASH_LCK(x) ((x)>>56)
#define HASH_TAG(x) (0x000000000000FFFFULL & ((x)>>40))
#define HASH_LBA(x) (0x000000FFFFFFFFFFULL & (x))

#endif
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include + +#include "am.h" +#include "bv.h" +#include "iv.h" + +#include "ht.h" + +#include + +uint64_t ht_hash(uint8_t *buf, uint64_t n) { + uint64_t sum = 0; + uint64_t i; + for (i=0; in; +} + + +HT *ht_new(uint64_t n, uint64_t m) { + HT *ht = am_malloc(sizeof(HT)); + if ( ht == NULL ) + { + KV_TRC_FFDC(pAT, "n %ld m %ld ENOMEM", n, m); + errno = ENOMEM; + } + else + { + ht->n = n; + ht->m = m; + ht->valid = bv_new(n); + ht->value = iv_new(n,m); + KV_TRC(pAT, "n %ld m %ld", n, m); + } + return ht; +} + +void ht_delete(HT *ht) { + KV_TRC(pAT, "ht %p valid %p value %p", ht, ht->valid, ht->value); + bv_delete(ht->valid); + iv_delete(ht->value); + am_free(ht); +} + + +void ht_set(HT *ht, uint64_t pos, uint64_t val) { + bv_set(ht->valid, pos); + iv_set(ht->value, pos, val); +} + +uint64_t ht_get(HT *ht, uint64_t pos) { + return iv_get(ht->value,pos); +} + +void ht_clr(HT *ht, uint64_t pos) { + KV_TRC(pAT, "valid %p pos %ld", ht->valid, pos); + bv_clr(ht->valid,pos); +} + +int ht_vldp(HT *ht, uint64_t pos) { + return bv_get(ht->valid, pos); +} + diff --git a/src/kv/ht.h b/src/kv/ht.h new file mode 100644 index 00000000..cd2e961a --- /dev/null +++ b/src/kv/ht.h @@ -0,0 +1,57 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ht.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef __HT_H__ +#define __HT_H__ + +#include + +#include "bv.h" +#include "iv.h" + + +typedef struct _ht { + uint64_t n; + uint64_t m; + BV *valid; + IV *value; +} HT; + +uint64_t ht_hash(uint8_t *buf, uint64_t n); +uint64_t ht_pos(HT *ht, uint8_t *buf, uint64_t n); + +HT *ht_new(uint64_t n, uint64_t m); +void ht_delete(HT *ht); + +void ht_set(HT *ht, uint64_t pos, uint64_t val); +uint64_t ht_get(HT *ht, uint64_t pos); + +void ht_clr(HT *ht, uint64_t pos); + +int ht_vldp(HT *ht, uint64_t pos); + +uint64_t ht_cnt(HT *ht); + +#endif diff --git a/src/kv/iv.c b/src/kv/iv.c new file mode 100644 index 00000000..df39afa6 --- /dev/null +++ b/src/kv/iv.c @@ -0,0 +1,165 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/iv.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include "am.h" + +#include "ut.h" +#include "iv.h" +#include + + +#define IVINLINE // inline +#define IVSAFE // if (i<0 & i>=bv->n) exit(987); + +IV *iv_new(uint64_t n, uint64_t m) { + + uint64_t bits = n * m; + uint64_t words = divup(bits, 64); + uint64_t bytes = sizeof(IV) + words * sizeof(uint64_t); + + IV *iv = am_malloc(bytes); + if (iv == NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC: n %"PRIu64" m %"PRIu64", errno = %d", n, m, errno); + } + else + { + memset(iv,0x00, bytes); + iv->n = n; + iv->m = m; + iv->bits = bits; + iv->words = words; + iv->mask = 1; + iv->mask <<= m; + iv->mask -= 1; + iv->bar = 64 - m; + } + + KV_TRC(pAT, "iv %p n %"PRIu64" m %"PRIu64"", iv, n, m); + return iv; +} + +IV *iv_resize(IV *piv, uint64_t n, uint64_t m) { + + uint64_t bits = n * m; + uint64_t words = divup(bits, 64); + uint64_t bytes = sizeof(IV) + words * sizeof(uint64_t); + + IV *iv = am_realloc(piv,bytes); + if (iv == NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC: iv %p n %"PRIu64" m %"PRIu64", errno = %d", piv, n, m, errno); + } + else + { + iv->n = n; + iv->m = m; + iv->bits = bits; + iv->words = words; + iv->mask = 1; + iv->mask <<= m; + iv->mask -= 1; + iv->bar = 64 - m; + } + + KV_TRC_DBG(pAT, "iv %p n %"PRIu64" m %"PRIu64"", piv, n, m); + return iv; +} + +void iv_set(IV *iv, uint64_t i, uint64_t v) { + uint64_t pos = i * iv->m; + uint64_t w = pos >> 6; + uint64_t b = pos & 63; + uint64_t shift; + uint64_t msk0; + uint64_t msk1; + uint64_t val; + + + v &= iv->mask; + if (b <= iv->bar) { + shift = iv->bar - b; + msk1 = iv->mask << shift; + msk0 = ~msk1; + val = v << shift; + val |= (iv->data[w] & msk0); + iv->data[w] = val; + } else { + shift = b - iv->bar; + msk1 = iv->mask >> shift; + msk0 = ~msk1; + val = v >> shift; + val |= (iv->data[w] & msk0); + iv->data[w] 
= val; + shift = 64 - (b - iv->bar); + msk1 = iv->mask << shift; + msk0 = ~msk1; + val = v << shift; + val |= (iv->data[w+1] & msk0); + iv->data[w+1] = val; + } +} + +uint64_t iv_get(IV *iv, uint64_t i) { + uint64_t bp = i * iv->m; + uint64_t w = bp >> 6; + uint64_t b = bp & 63; + uint64_t shift; + uint64_t val0; + uint64_t val1; + uint64_t msk0; + uint64_t msk1; + uint64_t val; + if (b <= iv->bar) { + shift = iv->bar - b; + val = iv->mask & (iv->data[w] >> shift); + } else { + shift = b - iv->bar; + msk0 = iv->mask>>shift; + msk1 = iv->mask >> (iv->m-shift); + val0 = (msk0 & iv->data[w]) << shift; + val1 = msk1 & (iv->data[w+1] >> (64-shift)); + val = val0 | val1; + } + return val; +} + +/* void iv_resize(IV *iv, uint64_t n) { */ +/* uint64_t bits = n * iv->m; */ +/* uint64_t words = divup(bits, 64); */ +/* uint64_t bytes = sizeof(IV) + words * sizeof(uint64_t); */ +/* IV *iv = realloc(bytes); */ + +void iv_delete(IV *iv) { + KV_TRC(pAT, "iv %p", iv); + am_free(iv); +} + diff --git a/src/kv/iv.h b/src/kv/iv.h new file mode 100644 index 00000000..24bbf8f2 --- /dev/null +++ b/src/kv/iv.h @@ -0,0 +1,48 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/iv.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
/* src/kv/iv.h — packed m-bit integer vector interface.
 * Apache-2.0 (full IBM prolog retained in repo). */
#ifndef __IV_H__
#define __IV_H__
#include <stdint.h>  /* NOTE(review): original #include target lost in extraction — confirm */

/* n elements of m bits each, packed into `words` 64-bit words. */
typedef struct _iv {
    uint64_t n;       /* element count */
    uint64_t m;       /* element width in bits */
    uint64_t bits;    /* n * m */
    uint64_t words;   /* ceil(bits / 64) */
    uint64_t bar;     /* 64 - m: last in-word offset where an element fits whole */
    uint64_t mask;    /* (1 << m) - 1 */
    uint64_t data[];  /* packed storage (C99 flexible array member) */
} IV;

IV  *iv_new(uint64_t n, uint64_t m);              /* NULL + errno=ENOMEM on failure */
IV  *iv_resize(IV *iv, uint64_t n, uint64_t m);   /* NULL on failure; old iv stays valid */
void iv_delete(IV *iv);

void     iv_set(IV *iv, uint64_t i, uint64_t v);  /* v truncated to m bits */
uint64_t iv_get(IV *iv, uint64_t i);

#endif

/* ---- src/kv/kv_trace.h begins (Apache-2.0; IBM prolog retained in repo) ---- */
/* src/kv/kv_trace.h — Apache-2.0 (full IBM prolog retained in repo). */
/**
 *******************************************************************************
 * \file
 * \brief
 *   definitions for a kv trace facility
 * \details
 *   export KV_CFLAGS=-DKV_TRC_DISABLE before building to disable the trace
 *   export KV_TRC_VERBOSITY=4 for maximum tracing
 *
 *   OFF           KV_TRC_VERBOSITY == 0
 *   KV_TRC_FFDC   KV_TRC_VERBOSITY >= 1
 *   KV_TRC        KV_TRC_VERBOSITY >= 2
 *   KV_TRC_IO     KV_TRC_VERBOSITY >= 3
 *   KV_TRC_DBG    KV_TRC_VERBOSITY >= 4
 *   (doc fix: the original table said "<=", but the macros below fire when
 *    the configured verbosity is >= the level)
 * \ingroup
 ******************************************************************************/
#ifndef _H_KV_TRACE_LOG
#define _H_KV_TRACE_LOG

/* NOTE(review): the original ten #include targets were lost in extraction;
 * reconstructed from the names used below — confirm against the repo copy. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/timeb.h>

#define B_EYE 0xABCDBEEF
#define E_EYE 0xDCBAFEED
#define STRLEN 256

/* Corruption check: both eye-catchers must frame a live trace object. */
#define EYEC_INVALID(_p_KT) ((_p_KT->b_eye != B_EYE) || (_p_KT->e_eye != E_EYE))

typedef struct
{
    uint32_t        b_eye;            /* begin eye-catcher (B_EYE) */
    pthread_mutex_t lock;             /* serializes open/close/log */
    uint32_t        init_done;        /* open refcount */
    uint32_t        verbosity;        /* 0=off .. 4=debug */
    FILE           *logfp;            /* trace output file */
    uint32_t        log_number;       /* monotonically increasing record index */
    uint32_t        start_time_valid; /* nonzero once start_time is captured */
    struct timeb    start_time;       /* time of first record */
    struct timeb    last_time;        /* offset of previous record (for deltas) */
    uint32_t        e_eye;            /* end eye-catcher (E_EYE) */
} KV_Trace_t;

#ifndef KV_TRC_DISABLE

/**
 *******************************************************************************
 * \brief
 *   setup to run kv log trace macro; refcounted — repeated opens just bump
 *   init_done.  Appends to /tmp/$USER.[filename.]kv.log.
 ******************************************************************************/
#define KV_TRC_OPEN( _pKT, _filename)                                         \
do                                                                            \
{                                                                             \
    char *env_verbosity = getenv("KV_TRC_VERBOSITY");                         \
    char *env_user      = getenv("USER");                                     \
    char  fn[STRLEN+1]  = {0};                                                \
                                                                              \
    if (NULL == _pKT) break;                                                  \
                                                                              \
    /* BUG FIX: getenv("USER") can return NULL (daemons, stripped envs);      \
     * passing NULL to snprintf "%s" is undefined behavior */                 \
    if (NULL == env_user) env_user = "unknown";                               \
                                                                              \
    /* if this is the first call or we were corrupted                         \
     * begin the trace as an append to any previous traces for the filename   \
     */                                                                       \
    if (EYEC_INVALID(_pKT))                                                   \
    {                                                                         \
        memset(_pKT, 0, sizeof(KV_Trace_t));                                  \
        _pKT->b_eye     = B_EYE;                                              \
        _pKT->e_eye     = E_EYE;                                              \
        _pKT->verbosity = 1;                                                  \
        pthread_mutex_init(&_pKT->lock, NULL);                                \
    }                                                                         \
                                                                              \
    pthread_mutex_lock(&_pKT->lock);                                          \
                                                                              \
    if (_pKT->init_done >= 1) {++_pKT->init_done; goto open_unlock;}          \
                                                                              \
    if (_filename) snprintf(fn, STRLEN,"/tmp/%s.%s.kv.log",env_user,_filename);\
    else           snprintf(fn, STRLEN,"/tmp/%s.kv.log",   env_user);         \
                                                                              \
    if ((_pKT->logfp = fopen(fn, "a")) == NULL)                               \
    {                                                                         \
        fprintf(stderr, "\nFailed to open log trace file %s\n", fn);          \
    }                                                                         \
    else                                                                      \
    {                                                                         \
        ++_pKT->init_done;                                                    \
        if (env_verbosity) {_pKT->verbosity = atoi(env_verbosity);}           \
    }                                                                         \
                                                                              \
open_unlock:                                                                  \
    pthread_mutex_unlock(&_pKT->lock);                                        \
} while (0)

/**
 *******************************************************************************
 * \brief
 *   cleanup and close the kv trace (refcounted: only the last close writes
 *   the footer and closes the file).
 *   NOTE(review): init_done and the eye-catchers are tested before taking
 *   the lock — racy if open/close can run concurrently; confirm callers.
 ******************************************************************************/
#define KV_TRC_CLOSE(_pKT)                                                    \
do                                                                            \
{                                                                             \
    if (NULL == _pKT)        break;                                           \
    if (EYEC_INVALID(_pKT))  break;                                           \
    if (_pKT->init_done == 0) break;                                          \
                                                                              \
    pthread_mutex_lock(&_pKT->lock);                                          \
                                                                              \
    if (_pKT->init_done > 1) {--_pKT->init_done; goto close_unlock;}          \
                                                                              \
    if (_pKT->logfp)                                                          \
    {                                                                         \
        if (_pKT->start_time_valid)                                           \
        {                                                                     \
            fprintf(_pKT->logfp,"----------------------------------------------\
-----------------------------\n");                                            \
            fprintf(_pKT->logfp,"DONE: Date is %s at %s\n",__DATE__,__TIME__);\
            fprintf(_pKT->logfp,"----------------------------------------------\
-----------------------------\n");                                            \
        }                                                                     \
        fflush(_pKT->logfp);                                                  \
        fclose(_pKT->logfp);                                                  \
    }                                                                         \
    --_pKT->init_done;                                                        \
                                                                              \
close_unlock:                                                                 \
    pthread_mutex_unlock(&_pKT->lock);                                        \
} while (0)

/**
 *******************************************************************************
 * \brief
 *   save the msg to the log file, prefixed with elapsed/delta timestamps.
 *   NOTE(review): millitm subtraction can underflow (unsigned short) when the
 *   millisecond field wraps across a second boundary, producing a garbled
 *   (not incorrect-order) timestamp — cosmetic, left as-is; confirm.
 ******************************************************************************/
#define KV_TRACE_LOG_DATA(_pKT, _fmt, ...)                                    \
do                                                                            \
{                                                                             \
    char _str[STRLEN] = {0};                                                  \
    struct timeb _cur_time, _log_time, _delta_time;                           \
                                                                              \
    if (EYEC_INVALID(_pKT))  break;                                           \
    if (!_pKT->init_done)    break;                                           \
    if (NULL == _pKT->logfp) break;                                           \
                                                                              \
    pthread_mutex_lock(&_pKT->lock);                                          \
                                                                              \
    ftime(&_cur_time);                                                        \
                                                                              \
    if (!_pKT->start_time_valid)                                              \
    {                                                                         \
        _pKT->start_time       = _cur_time;                                   \
        _pKT->start_time_valid = 1;                                           \
        _log_time.time         = 0;                                           \
        _log_time.millitm      = 0;                                           \
        _delta_time.time       = 0;                                           \
        _delta_time.millitm    = 0;                                           \
        fprintf(_pKT->logfp,"--------------------------------------------------\
---------------------------------\n");                                        \
        fprintf(_pKT->logfp,"BEGIN: Date is %s at %s\n",__DATE__,__TIME__);   \
        fprintf(_pKT->logfp,"%-8s %13s %10s %-30s %-25s\n",                   \
            "Index", "sec.msec", "delta.dmsec", "Line:Filename", "Function"); \
        fprintf(_pKT->logfp,"--------------------------------------------------\
---------------------------------\n");                                        \
    }                                                                         \
    else                                                                      \
    {                                                                         \
        /* Find time offset since starting time */                            \
        _log_time.time      = _cur_time.time    - _pKT->start_time.time;      \
        _log_time.millitm   = _cur_time.millitm - _pKT->start_time.millitm;   \
        _delta_time.time    = _log_time.time    - _pKT->last_time.time;       \
        _delta_time.millitm = _log_time.millitm - _pKT->last_time.millitm;    \
    }                                                                         \
                                                                              \
    fprintf(_pKT->logfp,"%-6d %5d.%05d %5d.%05d ",                            \
            _pKT->log_number,                                                 \
            (int)_log_time.time,                                              \
            _log_time.millitm,                                                \
            (int)_delta_time.time,                                            \
            _delta_time.millitm);                                             \
                                                                              \
    snprintf(_str, STRLEN, _fmt, ##__VA_ARGS__);                              \
    fprintf(_pKT->logfp," %5d:%-25s %-25s %s\n",                              \
            __LINE__,__FILE__,__FUNCTION__,_str);                             \
    fflush(_pKT->logfp);                                                      \
                                                                              \
    _pKT->log_number++;                                                       \
    _pKT->last_time = _log_time;                                              \
                                                                              \
    pthread_mutex_unlock(&_pKT->lock);                                        \
} while (0)

/* Level-gated wrappers: log only when the configured verbosity reaches the
 * level (1=FFDC first-failure data capture, 2=general, 3=I/O, 4=debug). */
#define KV_TRC_FFDC(_pKT, msg, ...)                                           \
    do                                                                        \
    {                                                                         \
        if (_pKT && _pKT->verbosity >= 1)                                     \
        {                                                                     \
            KV_TRACE_LOG_DATA(_pKT, msg, ## __VA_ARGS__);                     \
        }                                                                     \
    } while (0)

#define KV_TRC(_pKT, msg, ...)                                                \
    do                                                                        \
    {                                                                         \
        if (_pKT && _pKT->verbosity >= 2)                                     \
        {                                                                     \
            KV_TRACE_LOG_DATA(_pKT, msg, ## __VA_ARGS__);                     \
        }                                                                     \
    } while (0)

#define KV_TRC_IO(_pKT, msg, ...)                                             \
    do                                                                        \
    {                                                                         \
        if (_pKT && _pKT->verbosity >= 3)                                     \
        {                                                                     \
            KV_TRACE_LOG_DATA(_pKT, msg, ## __VA_ARGS__);                     \
        }                                                                     \
    } while (0)

#define KV_TRC_DBG(_pKT, msg, ...)                                            \
    do                                                                        \
    {                                                                         \
        if (_pKT && _pKT->verbosity >= 4)                                     \
        {                                                                     \
            KV_TRACE_LOG_DATA(_pKT, msg, ## __VA_ARGS__);                     \
        }                                                                     \
    } while (0)
#else
/* Tracing compiled out: every macro becomes a no-op. */
#define KV_TRC_OPEN(_pKT, _filename)
#define KV_TRC_CLOSE(_pKT)
#define KV_TRC_FFDC(_pKT, _fmt, ...)
#define KV_TRC(_pKT, _fmt, ...)
#define KV_TRC_IO(_pKT, _fmt, ...)
#define KV_TRC_DBG(_pKT, _fmt, ...)
#endif

#endif

/* ---- src/kv/ll.c begins (Apache-2.0; IBM prolog retained in repo) ---- */
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include "am.h" +#include "ll.h" + +LL *ll_cons(void *dat, LL *ll) { + LL *ret = am_malloc(sizeof(LL)); + if (ret == NULL) + { + errno = ENOMEM; + } + ret->car = dat; + ret->cdr = ll; + return ret; +} + +void *ll_car(LL *ll) {return ll->car; } +LL *ll_cdr(LL *ll) {return ll->cdr; } + +int ll_len(LL *ll) { + int ret = 0; + while (ll) { + ret++; + ll = ll->cdr; + } + return ret; +} diff --git a/src/kv/ll.h b/src/kv/ll.h new file mode 100644 index 00000000..a6516354 --- /dev/null +++ b/src/kv/ll.h @@ -0,0 +1,42 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ll.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
/* src/kv/ll.h — interface for a Lisp-style singly linked list of void*
 * payloads.  Apache-2.0 (full IBM prolog retained in repo). */
#ifndef __LL_H__
#define __LL_H__

/* One list node: car holds the payload, cdr links to the next node (or NULL). */
typedef struct _ll {
    void *car;
    struct _ll *cdr;
} LL;

LL   *ll_cons(void *dat, LL *ll);   /* push dat onto the front of ll */
void *ll_car(LL *ll);               /* payload of the first node */
LL   *ll_cdr(LL *ll);               /* list after the first node */

int   ll_len(LL *ll);               /* node count; 0 for an empty (NULL) list */

/* NOTE(review): declared here but no definitions visible in ll.c — presumably
 * list-freeing helpers defined elsewhere in the tree; confirm. */
void ll_collect(LL *ll);
void ll_collectall(LL *ll);

#endif

/* ---- src/kv/makefile begins (Apache-2.0; IBM prolog retained in repo);
 * first assignment: ROOTPATH = ../.. ---- */
+ +#NO_O3 = yes + +OBJS = ut.o cl.o ea.o ll.o bv.o iv.o si.o vi.o bl.o bt.o arkdb.o am.o ari.o \ +arp.o arp_set.o arp_get.o arp_del.o arp_exist.o tag.o ea_mod.o queue.o \ +hash.o tg.o ht.o + +OBJS64 = $(subst .o,.64o,$(OBJS)) + +MODULE = arkdb + +UNAME=$(shell uname) +EXPFLAGS = -bexpall +ifeq ($(UNAME),AIX) +MODLIBS = -lcflsh_block -lcflshcom -lpthreads +MODULE_LINKLIBS = -lcflsh_block -lcflshcom -lpthreads -larkalloc +else +MODLIBS = -lcflsh_block -lcflshcom -lpthread +MODULE_LINKLIBS = -lcflsh_block -lcflshcom -lpthread -larkalloc +endif + +LIBPATHS = -L${ROOTPATH}/img + +CFLAGS += $(KV_CFLAGS) + +SUBDIRS = test.d + +include ${ROOTPATH}/config.mk diff --git a/src/kv/pq.h b/src/kv/pq.h new file mode 100644 index 00000000..e1bce941 --- /dev/null +++ b/src/kv/pq.h @@ -0,0 +1,42 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/pq.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
/* src/kv/pq.h — fixed-capacity byte-copy queue interface (parallel to sq.h).
 * Apache-2.0 (full IBM prolog retained in repo). */
#ifndef __PQ_H__
#define __PQ_H__   /* BUG FIX: header had no include guard */

/* NOTE(review): original #include targets lost in extraction — confirm */
#include <stdio.h>
#include <stdint.h>

/* Ring of n slots of m bytes each; storage follows the header inline. */
typedef struct _pq {
    int n;        /* capacity in items */
    int m;        /* item size in bytes */
    int head;     /* next slot to write */
    int tail;     /* next slot to read */
    int cnt;      /* items currently queued */
    char queue[]; /* n*m bytes of storage (C99 flexible array member) */
} PQ;             /* BUG FIX: was "} SQ;", which left the type PQ undefined
                   * and made the PQP typedef below fail to compile */

typedef PQ * PQP;

PQP pq_init(PQP pq, int n,int m);
int pq_enq(PQP pq, void *v);
int pq_deq(PQP pq, void *v);

#endif

/* ---- src/kv/queue.c begins (Apache-2.0; IBM prolog retained in repo) ---- */
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include + +#include "am.h" +#include "queue.h" + +queue_t *queue_new(uint32_t n) { + queue_t *q = am_malloc(sizeof(queue_t) + n * sizeof(int32_t)); + q->n = n; + q->c = 0; + q->h = 0; + q->t = 0; + q->waiters = 0; + pthread_mutex_init(&q->m,NULL); + pthread_cond_init(&q->cond,NULL); + return q; +} + +void queue_free(queue_t *q) +{ + am_free(q); + return; +} + +int queue_enq(queue_t *q, int32_t v) { + int rc = ENOSPC; + pthread_mutex_lock(&q->m); + if (q->c < q->n) { + int h = q->h; + q->q[h++] = v; + h %= q->n; + q->c++; + q->h = h; + rc = 0; + if (q->waiters > 0) + { + queue_wakeup(q); + } + } + pthread_mutex_unlock(&q->m); + return rc; +} +int queue_enq_unsafe(queue_t *q, int32_t v) { + int rc = ENOSPC; + if (q->c < q->n) { + int h = q->h; + q->q[h++] = v; + h %= q->n; + q->c++; + q->h = h; + rc = 0; + if (q->waiters > 0) + { + queue_wakeup(q); + } + } + return rc; +} +int queue_deq(queue_t *q, int32_t *v) { + int rc = EAGAIN; + pthread_mutex_lock(&q->m); + if (q->c > 0) { + int t = q->t; + *v = q->q[t++]; + t %= q->n; + q->c--; + q->t = t; + rc = 0; + } + pthread_mutex_unlock(&q->m); + return rc; +} + +int queue_deq_unsafe(queue_t *q, int32_t *v) { + int rc = EAGAIN; + if (q->c > 0) { + int t = q->t; + *v = q->q[t++]; + t %= q->n; + q->c--; + q->t = t; + rc = 0; + } + return rc; +} + +void queue_lock(queue_t *q) +{ + pthread_mutex_lock(&(q->m)); + return; +} + +void queue_unlock(queue_t *q) +{ + pthread_mutex_unlock(&(q->m)); + return; +} + +void queue_wakeup(queue_t *q) +{ + pthread_cond_broadcast(&(q->cond)); + return; +} + +void queue_wait(queue_t *q) +{ + q->waiters++; + + pthread_cond_wait(&(q->cond), &(q->m)); + + q->waiters--; + + return; +} + diff --git a/src/kv/queue.h b/src/kv/queue.h new file mode 100644 index 00000000..c9bfbeab --- /dev/null +++ b/src/kv/queue.h @@ -0,0 +1,62 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
/* src/kv/queue.h — mutex/condvar-protected int32 ring queue interface.
 * Apache-2.0 (full IBM prolog retained in repo). */
#ifndef __QUEUE_H__
#define __QUEUE_H__

/* NOTE(review): original #include targets lost in extraction — confirm */
#include <stdint.h>
#include <pthread.h>

typedef struct _queue {
    uint32_t n;          /* capacity */
    uint32_t c;          /* current count */
    uint32_t h;          /* head: next write slot */
    uint32_t t;          /* tail: next read slot */
    uint32_t waiters;    /* threads blocked in queue_wait */
    pthread_mutex_t m;   /* protects all fields */
    pthread_cond_t cond; /* signaled by queue_wakeup / successful enqueue */
    int32_t q[];         /* ring storage (C99 flexible array member) */
} queue_t;


queue_t *queue_new(uint32_t n);   /* NULL + errno=ENOMEM on failure */
void queue_free(queue_t *q);

int queue_enq(queue_t *q, int32_t v);    /* 0 ok, ENOSPC when full */
int queue_deq(queue_t *q, int32_t *v);   /* 0 ok, EAGAIN when empty */

/* _unsafe variants require the caller to hold queue_lock. */
int queue_enq_unsafe(queue_t *q, int32_t v);
int queue_deq_unsafe(queue_t *q, int32_t *v);

void queue_lock(queue_t *q);
void queue_unlock(queue_t *q);

void queue_wait(queue_t *q);     /* caller must hold the lock */
void queue_wakeup(queue_t *q);

/* NOTE: unsynchronized peeks — callers needing accuracy must hold the lock. */
#define queue_empty(q) ((q)->c==0)
#define queue_count(q) ((q)->c)

#endif

/* ---- src/kv/si.c begins (Apache-2.0; IBM prolog retained in repo) ---- */
*/ +/* */ +/* $Source: src/kv/si.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include + +#include "am.h" +#include "ut.h" +#include "si.h" + +#include + +// there is a better variable size implementation but for now +typedef struct _sie { + uint64_t nxt; // next sie + uint64_t gid; // global id + uint64_t len; // string length + uint64_t dat; // position in main buffer +} SIE; + +typedef struct _si { + uint64_t nh; // # of hash table entries + uint64_t ne; // # of si entries + uint64_t nb; // # bytes for the table + uint64_t ent_next; // next free entry + uint64_t dat_next; // next free bytes + uint64_t gid_next; // next free global id + + int64_t *tbl; // hash table + SIE *ent; // SI entries + char *dat; // the data +} SI; + +uint64_t si_hash(char *buf, int n) { + unsigned char *b = (unsigned char *)buf; + uint64_t sum = 0; + int i; + for (i=0; inh = nh; + si->ne = ne; + si->nb = nb; + + si->ent_next = 0; + si->gid_next = 0; + si->dat_next = 0; + + si->tbl = am_malloc(nh * sizeof(uint64_t)); + if ( si->tbl == NULL ) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC2: nh %ld ne %ld nb %ld, rc = %d", + nh, ne, nb, errno); + } + else + { + 
memset(si->tbl, 0xFF, nh * sizeof(uint64_t)); + si->dat = am_malloc(nb); + if (si->dat == NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC3: nh %ld ne %ld nb %ld, rc = %d", + nh, ne, nb, errno); + am_free(si->tbl); + am_free(si); + si = NULL; + } + else + { + si->ent = am_malloc(ne * sizeof(SIE)); + if (si->ent == NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC4: nh %ld ne %ld nb %ld, rc = %d", + nh, ne, nb, errno); + am_free(si->tbl); + am_free(si->dat); + am_free(si); + si = NULL; + } + } + } + + memset(si->tbl, 0xFF, nh * sizeof(uint64_t)); + return si; +} + +uint64_t si_intern(void *siv, char *buf, int n) { + SI *si = (SI*)siv; + uint64_t id = 0; + uint64_t hsh = si_hash(buf,n); + uint64_t pos = hsh % si->nh; + int64_t ent = si->tbl[pos]; + int found = 0; + while (!found && ent>=0 ) { + if (memcmp(si->dat + si->ent[ent].dat, buf, n)==0) { + found = 1; + } else { + ent = si->ent[ent].nxt; + } + } + if (found) { + id = si->ent[ent].gid; + } else if ((si->ent_next < si->ne-1) && (si->dat_next+n < si->nb)) { + id = si->gid_next++; + ent = si->ent_next++; + + si->ent[ent].nxt = si->tbl[pos]; + si->ent[ent].gid = id; + si->ent[ent].len = n; + si->ent[ent].dat = si->dat_next; + + memcpy(si->dat + si->dat_next, buf, n); + si->dat_next += n; + + si->tbl[pos] = ent; + } else { + id = -1; + } + return id; +} + +void si_summary(void *siv) { + SI *si = (SI*)siv; + uint64_t ecnt = 0; + uint64_t i; + for(i=0; inh; i++) if (si->tbl[i]>=0) ecnt++; + + printf("table %"PRIu64"/%"PRIu64" entries %"PRIu64"/%"PRIu64" data %"PRIu64"/%"PRIu64"\n", + ecnt, si->nh, + si->ent_next, si->ne, + si->dat_next, si->nb); +} diff --git a/src/kv/si.h b/src/kv/si.h new file mode 100644 index 00000000..8d207095 --- /dev/null +++ b/src/kv/si.h @@ -0,0 +1,33 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
/* src/kv/si.h — string-interning table interface.
 * Apache-2.0 (full IBM prolog retained in repo). */
#ifndef __SI_H__
#define __SI_H__
#include <stdint.h>  /* NOTE(review): original #include target lost in extraction — confirm */

/* Opaque handle (struct _si is private to si.c).
 * nh = hash buckets, ne = max entries, nb = data-buffer bytes.
 * Returns NULL with errno=ENOMEM on failure. */
void *si_new(uint64_t nh, uint64_t ne, uint64_t nb);

/* Intern buf[0..n); returns its global id, or (uint64_t)-1 when full. */
uint64_t si_intern(void *si, char *buf, int n);

/* Print occupancy statistics (defined non-static in si.c; declared here so
 * callers need not re-declare it). */
void si_summary(void *si);

#endif

/* ---- src/kv/sq.c begins (Apache-2.0; IBM prolog retained in repo) ---- */
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include + +#include "am.h" + +#include "sq.h" + +#include + +int sq_sizeof(int n, int m) {return sizeof(SQ) + n*m;} +int sq_count(SQP sq) {return sq->cnt;} +int sq_emptyp(SQP sq) {return sq->cnt==0;} +int sq_fullp(SQP sq) {return sq->cnt==sq->n;} + +SQP sq_init(SQP sq, int n, int m) { + if (sq==NULL) sq = am_malloc(sq_sizeof(n,m)); + + if (sq==NULL) + { + errno = ENOMEM; + KV_TRC_FFDC(pAT, "FFDC: sq %p n %d, m %d, ENOMEM", sq, n, m); + goto exception; + } + + sq->n = n; + sq->m = m; + sq->head = 0; + sq->tail = 0; + sq->cnt = 0; + KV_TRC(pAT, "sq %p n %d m %d", sq, n, m); + +exception: + return sq; +} + +// return count of items enqueued (0 or 1) +int sq_enq(SQP sq, void *v) { + if (sq_fullp(sq)) + { + KV_TRC_DBG(pAT, "QFULL: sq %p sq->n %d v %p", sq, sq->n, v); + return 0; + } + int h = sq->head; + memcpy(sq->queue+(h*sq->m), v, sq->m); + sq->head = (h+1)%sq->n; + sq->cnt++; + return 1; +} + +// return count of items dequeued (0 or 1) +int sq_deq(SQP sq, void *v) { + if (sq_emptyp(sq)) + { + KV_TRC_FFDC(pAT, "FFDC: sq %p sq->n %d v %p", sq, sq->n, v); + return 0; + } + int t = sq->tail; + memcpy(v,sq->queue+(t*sq->m), sq->m); + sq->tail = (t+1)%sq->n; + sq->cnt--; + return 1; +} + diff --git a/src/kv/sq.h b/src/kv/sq.h new file mode 100755 index 00000000..ba517171 --- /dev/null +++ b/src/kv/sq.h @@ -0,0 +1,47 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/sq.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef _SQ_H +#define _SQ_H + +#include +#include + +typedef struct _sq { + int n; + int m; + int head; + int tail; + int cnt; + char queue[]; +} SQ; + +typedef SQ * SQP; + +SQP sq_init(SQP sq, int n,int m); +int sq_enq(SQP sq, void *v); +int sq_deq(SQP sq, void *v); + +#endif diff --git a/src/kv/tag.c b/src/kv/tag.c new file mode 100644 index 00000000..46e455ff --- /dev/null +++ b/src/kv/tag.c @@ -0,0 +1,71 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/tag.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include "tag.h" +#include "am.h" + +tags_t *tag_new(uint32_t n) { + int i; + tags_t *tags = am_malloc(sizeof(tags_t) + n * sizeof(int32_t)); + tags->n = n; + tags->c = n; + pthread_mutex_init(&(tags->l), NULL); + for(i=0; is[i] = i; + return tags; +} + +void tag_free(tags_t *tags) +{ + pthread_mutex_destroy(&(tags->l)); + am_free(tags); + return; +} + +int tag_unbury(tags_t *tags, int32_t *tag) { + int ret = 0; + pthread_mutex_lock(&(tags->l)); + if (tags->c==0){ + ret = EAGAIN; + } + else{ + *tag = tags->s[--(tags->c)]; + } + pthread_mutex_unlock(&(tags->l)); + return ret; +} + +int tag_bury(tags_t *tags, int32_t tag) { + int ret = 0; + pthread_mutex_lock(&(tags->l)); + if (tags->c == tags->n){ + ret = ENOSPC; + } + else + tags->s[tags->c++] = tag; + pthread_mutex_unlock(&(tags->l)); + return ret; +} diff --git a/src/kv/tag.h b/src/kv/tag.h new file mode 100644 index 00000000..8537206e --- /dev/null +++ b/src/kv/tag.h @@ -0,0 +1,48 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/tag.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef __TAG_H__ +#define __TAG_H__ + +#include +#include +#include + +typedef struct _tags { + pthread_mutex_t l; + uint32_t n; + uint32_t c; + int32_t s[]; +} tags_t; + +tags_t *tag_new(uint32_t n); +void tag_free(tags_t *tags); + +int tag_unbury(tags_t *tags, int32_t *tag); +int tag_bury(tags_t *tags, int32_t tag); + +#define tag_empty(tag) ((tag)->c==0) + +#endif diff --git a/src/kv/test/_tst_ark.c b/src/kv/test/_tst_ark.c new file mode 100644 index 00000000..07cd60b5 --- /dev/null +++ b/src/kv/test/_tst_ark.c @@ -0,0 +1,31 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_ark.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +int tst_ark_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_ark_entry(argc, argv); +} diff --git a/src/kv/test/_tst_bl.c b/src/kv/test/_tst_bl.c new file mode 100644 index 00000000..c93d7f10 --- /dev/null +++ b/src/kv/test/_tst_bl.c @@ -0,0 +1,31 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/_tst_bl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +int tst_bl_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_bl_entry(argc, argv); +} diff --git a/src/kv/test/_tst_bt.c b/src/kv/test/_tst_bt.c new file mode 100644 index 00000000..4c4722f6 --- /dev/null +++ b/src/kv/test/_tst_bt.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_bt.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_bt_entry(int argc, char **argv); + +int main(int argc, char **argv) { + + return tst_bt_entry(argc, argv); +} diff --git a/src/kv/test/_tst_bv.c b/src/kv/test/_tst_bv.c new file mode 100644 index 00000000..c0e4add6 --- /dev/null +++ b/src/kv/test/_tst_bv.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_bv.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_bv_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_bv_entry(argc, argv); +} diff --git a/src/kv/test/_tst_cb.c b/src/kv/test/_tst_cb.c new file mode 100644 index 00000000..4e9ca34c --- /dev/null +++ b/src/kv/test/_tst_cb.c @@ -0,0 +1,365 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_cb.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ct.h" + +#include "cl.h" +#include "bv.h" +#include "tg.h" + +#include "arkdb.h" + +int iter = 10; +int npool = 4; +int nqueue = 1024; + +int nclient = 3; +int nasync = 128; + + +int64_t bs = 4096; +int64_t size = 4096 * 1024; +int64_t hc = 1024 * 1024; +int bref = 34; +int grow = 1024; +int basyncs = 256; + +int nkey = 64; +int seed = 1234; + +int kmn = 0; +int kmx = 8; +int klo = 97; +int khi = 122; +int kmod = -1; +int krng = -1; + +int vmn = 0; +int vmx = 16; +int vlo = 97; +int vhi = 122; +int vmod = -1; +int vrng = -1; + +int cstr_ = 0; + +void gen_keyval(int id, int ki, unsigned short *isd, + uint64_t *kl, uint8_t *key, + uint64_t *vl, uint8_t *val, + int discard) { + unsigned short sd[3]; + //printf("in (%d,%d)] with [%d %d %d]\n",id, ki,isd[0], isd[1], isd[2]); + sd[0] = isd[0]; + sd[1] = isd[1]; + sd[2] = isd[2]; + int i; + int klen = 0; + if (id) { + klen = sprintf((char *)key,"_%d_", id); + } + if (ki) { + klen += sprintf((char *)key+klen,".%d.", ki); + } + /* if (ki) { */ + /* sprintf(key+7, "%6d_", ki */ + /* } */ + int len = kmn + nrand48(sd) % kmod; + for (i=0; iid, ct->seed[0],ct->seed[1],ct->seed[2]); + + BV *kvmap = bv_new(nkey); + unsigned short *kvseed = malloc (nkey * 3 * sizeof(unsigned 
short)); + int i; + for (i=0; i<3*nkey; i++) kvseed[i] = nrand48(ct->seed); + + + // enough buffer space to support async number of operations + uint8_t **keys = malloc(nasync * sizeof(uint8_t *)); + uint8_t *keyspace = malloc(nasync * (kmx + 8)); + for(i=0; iseed) % nkey; + + ai = tg_get(tg); + op = nrand48(ct->seed) % 2; + switch (op) { + case 0: //set + { + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],1); + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],0); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow,bv_get(kvmap,ki) ? '!' : '+', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + // int ark_set_tag(arc, ai, + expres[ai] = vlen[ai]; + tag = ark_set_async_cb(arc, klen[ai], keys[ai], vlen[ai], vals[ai], set_del_callback, expres[ai]); + if (tag < 0) { + exit(2); + } else { + tgmap[tag] = ai; + } + bv_set(kvmap,ki); + break; + } + case 1: //del + { + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],1); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow, bv_get(kvmap,ki) ? '-' : '~', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + expres[ai] = bv_get(kvmap,ki) ? vlen[ai] : -1; + tag = ark_del_async_cb(arc, klen[ai], keys[ai], set_del_callback, expres[ai]); + tgmap[tag] = ai; + if (tag<0) { + exit(3); + } + bv_clr(kvmap,ki); + break; + } + } + } + // do get operations + for (i=0; iseed) % nkey; + while (1) + { + ai = tg_get(tg); + if (ai >= 0) + { + break; + } + usleep(5); + } + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],0); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow, bv_get(kvmap,ki) ? '$' : '?', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + expres[ai] = bv_get(kvmap,ki) ? 
vlen[ai] : -1; + tag = ark_get_async_cb(arc, klen[ai], keys[ai], vlen[ai], vals[ai],0, get_callback, expres[ai]); + tgmap[tag] = ai; + if (tag<0) { + exit(4); + } + } + + // print present keys + for(i=0; iid,i,kvseed + i*3, klen + i, keys[i], vlen + i, vals[i],0); + printf(":(%d,%d)'%s'->'%s'\n", ct->id,i, keys[i], vals[i]); + } + } + + printf("Client %d complete\n", ct->id); + return NULL; +} + +int main(int argc, char **argv) { + + char *anon[] = {NULL,NULL,NULL,NULL}; + CL args[] = {{"-n", &iter, AR_INT, "iterations per client"}, + {"-c", &nclient, AR_INT, "# of clients"}, + {"-p", &npool, AR_INT, "# of threads in processing pool"}, + {"-bs", &bs, AR_INT64, "block size"}, + {"-size", &size, AR_INT64, "inital storage size"}, + {"-hc", &hc, AR_INT64, "hash count"}, + {"-vlim", &vlim, AR_INT64, "value limit for bucket store"}, + {"-bref", &bref, AR_INT, "block ref bits"}, + {"-grow", &grow, AR_INT, "# of blocks to grow by"}, + {"-basyncs", &basyncs, AR_INT, "# of async block buffers"}, + {"-k", &nkey, AR_INT, "# of keys"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-q", &nqueue, AR_INT, "# of queue entries per thread in the pool"}, + {"-a", &nasync, AR_INT, "# of allowed async ops per client"}, + {"-kmn", &kmn, AR_INT, "kmn"}, + {"-kmx", &kmx, AR_INT, "kmx"}, + {"-klo", &klo, AR_INT, "klo"}, + {"-khi", &khi, AR_INT, "khi"}, + {"-vmn", &vmn, AR_INT, "vmn"}, + {"-vmx", &vmx, AR_INT, "vmx"}, + {"-vlo", &vlo, AR_INT, "vlo"}, + {"-vhi", &vhi, AR_INT, "vhi"}, + {"-cstr_", &cstr_, AR_FLG,"c string keys and values"}, + /* {"-simple", &simple, AR_FLG, "simple test"}, */ + /* {"-table", &table, AR_FLG, "print table"}, */ + /* {"-progress", &progress, AR_FLG, "print progress"}, */ + {NULL, NULL, 0, NULL}}; + + int echo = 1; + cl_parse(argc,argv,args,anon,echo); + //int acnt = cl_parse(argc,argv,args,anon,echo);//TODO: we are not using tthe arg count here. 
+ + kmod = kmx - kmn + 1; + krng = khi - klo + 1; + + vmod = vmx - vmn + 1; + vrng = vhi - vlo + 1; + + int rc = ark_create_verbose(NULL, &ark, size, bs, hc, + npool, nqueue, basyncs, ARK_KV_VIRTUAL_LUN); + + if (rc) { + printf("bad create return %d\n", rc); + exit(1); + } + + CT *ct = malloc(nclient * sizeof(CT)); + pthread_t *clients = malloc(nclient * sizeof(pthread_t)); + + srand48(seed); + + struct timeval tv; + struct timeval post_tv; + uint64_t ops = 0; + uint64_t post_ops = 0; + uint64_t io_cnt = 0; + uint64_t post_io_cnt = 0; + + (void)gettimeofday(&tv, NULL); + (void)ark_stats(ark, &ops, &io_cnt); + + int i; + for(i=0; i +#include +#include +#include +#include +#include + +#include "cl.h" +#include "ut.h" +#include "ll.h" + + + +int main(int argc, char **argv) { + + char *anon[] = {"foo","bar","doo","dah",NULL,NULL,NULL,NULL}; + int tcnt = 1; + int bcnt = 128; + int bsize = 4096; + int excl = 0; + int echo = 1; + int seed = 1234; + int iter = 1000; + CL args[] = {{"-t", &tcnt, AR_INT, "thread count"}, + {"-bc", &bcnt, AR_INT, "block count per thread"}, + {"-bs", &bsize, AR_INT, "block size"}, + {"-e", &excl, AR_FLG, "exclusive regions"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-n", &iter, AR_INT, "iterations per thread"}, + {NULL, NULL, 0, NULL}}; + int acnt = cl_parse(argc,argv,args,anon,echo); + + + exit(6 == acnt); +} + diff --git a/src/kv/test/_tst_ht.c b/src/kv/test/_tst_ht.c new file mode 100644 index 00000000..7f878834 --- /dev/null +++ b/src/kv/test/_tst_ht.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_ht.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_ht_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_ht_entry(argc, argv); +} diff --git a/src/kv/test/_tst_iv.c b/src/kv/test/_tst_iv.c new file mode 100644 index 00000000..5acb020c --- /dev/null +++ b/src/kv/test/_tst_iv.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_iv.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_iv_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_iv_entry(argc, argv); +} diff --git a/src/kv/test/_tst_kv_utils.c b/src/kv/test/_tst_kv_utils.c new file mode 100644 index 00000000..71ffdb18 --- /dev/null +++ b/src/kv/test/_tst_kv_utils.c @@ -0,0 +1,137 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_kv_utils.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * simple unit test driver for tst_kv_utils.c + * \ingroup + ******************************************************************************/ +#include +#include +#include +#include +#include + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +int32_t test_db(uint32_t type, uint32_t klen, uint32_t vlen, uint32_t num) +{ + uint32_t i = 0; + uint32_t rc = 0; + kv_t *db = NULL; + + if (type) + { + db = (kv_t*)kv_db_create_fixed(num, klen, vlen); + } + else + { + db = (kv_t*)kv_db_create_mixed(num, klen, vlen); + } + + assert(NULL != db); + + for (i=0; i>>>>FAILED\n"); + rc = -1; + goto exception; + } + } + +exception: + KV_TRC_CLOSE(pFT); + return rc; +} diff --git a/src/kv/test/_tst_persist.c b/src/kv/test/_tst_persist.c new file mode 100644 index 00000000..d54b1c2a --- /dev/null +++ b/src/kv/test/_tst_persist.c @@ -0,0 +1,31 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_persist.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +int tst_persist_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_persist_entry(argc, argv); +} diff --git a/src/kv/test/_tst_tg.c b/src/kv/test/_tst_tg.c new file mode 100644 index 00000000..89337686 --- /dev/null +++ b/src/kv/test/_tst_tg.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_tg.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_tg_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_tg_entry(argc, argv); +} diff --git a/src/kv/test/_tst_vi.c b/src/kv/test/_tst_vi.c new file mode 100644 index 00000000..c051a9be --- /dev/null +++ b/src/kv/test/_tst_vi.c @@ -0,0 +1,30 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/_tst_vi.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_vi_entry(int argc, char **argv); + +int main(int argc, char **argv) +{ + return tst_vi_entry(argc, argv); +} diff --git a/src/kv/test/fvt_ark_io.C b/src/kv/test/fvt_ark_io.C new file mode 100644 index 00000000..035ecd57 --- /dev/null +++ b/src/kv/test/fvt_ark_io.C @@ -0,0 +1,70 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_io.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + * add fixed length key/value to ark, then query them + ******************************************************************************/ +TEST(FVT_KV_ARK_IO, ASYNC_CB_ARK_IO) +{ + uint32_t ctxts = 4; + uint32_t jobs = 20; + uint32_t pths = 20; + uint32_t vlen = KV_64K; + uint32_t xmin = 1; + char *env_min = getenv("FVT_ARK_IO_MIN"); + + if (env_min) xmin = atoi(env_min); + printf("running for mins:%d\n", xmin); fflush(stdout); + + kv_async_init_ark_io(ctxts, jobs, vlen, xmin*60); + kv_async_start_jobs(); + + printf("\n"); fflush(stdout); + + Sync_ark_io ark_io_job; + ark_io_job.run_multi_arks(ctxts, pths, vlen, xmin*60); + + printf("ASYNC: "); + + kv_async_wait_jobs(); +} diff --git a/src/kv/test/fvt_ark_io_inject.C b/src/kv/test/fvt_ark_io_inject.C new file mode 100644 index 00000000..63534366 --- /dev/null +++ b/src/kv/test/fvt_ark_io_inject.C @@ -0,0 +1,73 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_io_inject.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * inject random r/w/alloc errors while running multi-context + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +#include +} + +using namespace std; + +/** + ******************************************************************************* + * \brief + * add fixed length key/value to ark, then query them + ******************************************************************************/ +TEST(FVT_KV_ARK_IO, ASYNC_CB_ARK_IO_INJECT) +{ + uint32_t ctxts = 100; + uint32_t ops = 20; + uint32_t vlen = KV_4K; + uint32_t xmin = 1; + char *env_FVT_DEV = getenv("FVT_DEV"); + char *env_min = getenv("FVT_ARK_IO_MIN"); + + if (env_min) xmin = atoi(env_min); + + cout << "\nRunning for " << xmin << " minutes.\n"; + + if (!env_FVT_DEV) ctxts=10; + + kv_async_init_ark_io_inject(ctxts, ops, vlen, xmin*60); + kv_async_start_jobs(); + + printf("\n"); fflush(stdout); + + Sync_ark_io ark_io_job; + ark_io_job.run_multi_arks(ctxts, ops, vlen, xmin*60); + + kv_async_wait_jobs(); +} diff --git a/src/kv/test/fvt_ark_mc_aio.C b/src/kv/test/fvt_ark_mc_aio.C new file mode 100644 index 00000000..9a830d24 --- /dev/null +++ b/src/kv/test/fvt_ark_mc_aio.C @@ -0,0 +1,72 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/fvt_ark_mc_aio.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * Run multi-context async I/O + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +} + +using namespace std; + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ARK_IO, ASYNC_CB_MULTI_CONTEXT) +{ + uint32_t ctxts = 0; + uint32_t jobs = 0; + uint32_t xmin = 1; + char *env_min = getenv("FVT_ARK_IO_MIN"); + + if (env_min) xmin = atoi(env_min); + + cout << "\nRunning for " << xmin << " minutes. How many contexts? 
"; + cin >> ctxts; + + if (ctxts <= 0 || ctxts > 508) + { + printf("bad context num %d\n", ctxts); + return; + } + + if (ctxts < 5) jobs = 40; + else if (ctxts < 100) jobs = 20; + else jobs = 4; + + kv_async_init_ctxt_io(ctxts, jobs, 16, KV_500K, 1, xmin*60); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); +} diff --git a/src/kv/test/fvt_ark_mcio.C b/src/kv/test/fvt_ark_mcio.C new file mode 100644 index 00000000..b58eb1fc --- /dev/null +++ b/src/kv/test/fvt_ark_mcio.C @@ -0,0 +1,76 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_mcio.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +#include +#include +} + +using namespace std; + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ARK_IO, SYNC_PTH_MULTI_CONTEXT) +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 32; + uint32_t vlen = KV_500K; + uint32_t LEN = 1; + uint32_t xmin = 1; + char *env_min = getenv("FVT_ARK_IO_MIN"); + + if (env_min) xmin = atoi(env_min); + + cout << "\nRunning for " << xmin; + cout << " minutes. How many contexts? "; + cin >> num_ctxt; + + if (num_ctxt <= 0 || num_ctxt > 508) + { + printf("bad context num %d\n", num_ctxt); + return; + } + if (num_ctxt < 10) num_pth = 32; + else if (num_ctxt < 32) num_pth = 8; + else num_pth = 2; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, xmin*60); +} diff --git a/src/kv/test/fvt_ark_perf.C b/src/kv/test/fvt_ark_perf.C new file mode 100644 index 00000000..df8dcd3d --- /dev/null +++ b/src/kv/test/fvt_ark_perf.C @@ -0,0 +1,299 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_perf.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, REDIS_PERF) +{ + ARK *ark = NULL; + + ARK_CREATE; + fvt_kv_utils_perf(ark, 16, 1, 1000); + ARK_DELETE; + + kv_async_job_perf(100, 16, 16, 1000); + + kv_async_init_perf_io(100, 128, 4, 16, 16, 100, 10); + printf("ctxts:100 jobs:128 npool=4 "); fflush(stdout); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_PERF) +{ + uint32_t ctxts = 1; + uint32_t npool = 0; + uint32_t secs = 10; + + Sync_pth sync_pth; + + npool=4; + sync_pth.run_multi_ctxt(ctxts, 32, npool, 16, 50, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, 16, 50, secs); + printf("\n"); + + npool=16; + sync_pth.run_multi_ctxt(ctxts, 32, npool, 16, 50, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, 16, 50, secs); + printf("\n"); + + npool=24; + sync_pth.run_multi_ctxt(ctxts, 32, npool, 16, 50, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, 
16, 50, secs); + printf("\n"); + + npool=32; + sync_pth.run_multi_ctxt(ctxts, 32, npool, 16, 50, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, 16, 50, secs); + printf("\n"); + + npool=64; + sync_pth.run_multi_ctxt(ctxts, 32, npool, 16, 50, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, 16, 50, secs); + printf("\n"); + + npool=4; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_4K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_4K, 20, secs); + printf("\n"); + + npool=16; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_4K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_4K, 20, secs); + printf("\n"); + + npool=32; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_4K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_4K, 20, secs); + printf("\n"); + + npool=64; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_4K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_4K, 20, secs); + printf("\n"); + + npool=4; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_64K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_64K, 20, secs); + printf("\n"); + + npool=16; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_64K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_64K, 20, secs); + printf("\n"); + + npool=32; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_64K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_64K, 20, secs); + printf("\n"); + + npool=64; + sync_pth.run_multi_ctxt(ctxts, 32, npool, KV_64K, 20, secs); + sync_pth.run_multi_ctxt(ctxts, 64, npool, KV_64K, 20, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_PERF) +{ + uint32_t ctxts = 1; + uint32_t npool = 0; + uint32_t jobs = 100; + uint32_t secs = 10; + + npool=4; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); 
fflush(stdout); + kv_async_run_jobs(); + + npool=16; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=24; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=32; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=48; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=64; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + printf("\n"); + + npool=4; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_4K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=16; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_4K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=32; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_4K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=48; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_4K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=64; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_4K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + printf("\n"); + + npool=4; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_64K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); 
fflush(stdout); + kv_async_run_jobs(); + + npool=16; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_64K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=32; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_64K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=48; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_64K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + + npool=64; + kv_async_init_perf_io(ctxts, jobs, npool, 16, KV_64K, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, MULTI_CTXT_PERF) +{ + uint32_t ctxts = 480; + uint32_t npool = 0; + uint32_t jobs = 16; + uint32_t pths = 2; + uint32_t secs = 10; + + Sync_pth sync_pth; + + npool=4; pths=1; + sync_pth.run_multi_ctxt(100, pths, npool, 16, 100, secs); + pths=2; + sync_pth.run_multi_ctxt(100, pths, npool, 16, 100, secs); + sync_pth.run_multi_ctxt(300, pths, npool, 16, 100, secs); + sync_pth.run_multi_ctxt(ctxts, pths, npool, 16, 100, secs); + npool=20; + sync_pth.run_multi_ctxt(ctxts, pths, npool, 16, 100, secs); + + npool=4; ctxts = 20; + kv_async_init_perf_io(ctxts, 20, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:20 npool=%d ", ctxts, npool); fflush(stdout); + kv_async_run_jobs(); + npool=4; ctxts = 100; jobs=4; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + npool=4; ctxts = 480; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); 
fflush(stdout); + kv_async_run_jobs(); + npool=20; + kv_async_init_perf_io(ctxts, jobs, npool, 16, 16, 100, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, OLD_SYNC_PERF) +{ + ARK *ark = NULL; + + ARK_CREATE; + fvt_kv_utils_perf(ark, 280, 4, 1000); + fvt_kv_utils_perf(ark, KV_4K, 60, 1000); + fvt_kv_utils_perf(ark, KV_64K, 200, 1000); + fvt_kv_utils_perf(ark, KV_250K,600, 100); + ARK_DELETE; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, OLD_ASYNC_PERF) +{ + kv_async_job_perf(100, 16, 256, 1000); + kv_async_job_perf(100, 16, KV_4K, 1000); + kv_async_job_perf(100, KV_4K, KV_4K, 1000); + kv_async_job_perf(100, 16, KV_64K, 1000); +} diff --git a/src/kv/test/fvt_ark_perf2.C b/src/kv/test/fvt_ark_perf2.C new file mode 100644 index 00000000..9169962e --- /dev/null +++ b/src/kv/test/fvt_ark_perf2.C @@ -0,0 +1,157 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_perf2.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, PERF_16x16) +{ + uint32_t ctxts = 1; + uint32_t mb = 1; + uint32_t klen = 16; + uint32_t vlen = 16; + uint32_t LEN = 500; + uint32_t secs = 10; + uint32_t jobs = 1; + uint32_t ops = 0; + uint32_t ios = 0; + ARK *ark = NULL; + + /* single context single job 16x16 */ + Sync_pth sync_pth; + sync_pth.run_multi_ctxt(ctxts, jobs, vlen, LEN, secs, &ops, &ios); + + ARK_CREATE; + fvt_kv_utils_perf(ark, vlen, mb, LEN); + + kv_async_job_perf(jobs, klen, vlen, LEN); + + kv_async_init_ctxt_io(ctxts, jobs, klen, vlen, LEN, 10); + printf("ctxt:%d jobs:%d ", ctxts, jobs); + kv_async_run_jobs(); + + printf("---------\n"); + + /* single context 20 job 16x16 */ + jobs = 20; + + sync_pth.run_multi_ctxt(ctxts, jobs, vlen, LEN, secs, &ops, &ios); + + kv_async_job_perf(jobs, klen, vlen, LEN); + + kv_async_init_ctxt_io(ctxts, jobs, klen, vlen, LEN, 10); + printf("ctxt:%d jobs:%d ", ctxts, jobs); + kv_async_run_jobs(); + + ARK_DELETE; +} + +/** + 
******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_PERF) +{ + uint32_t LEN = 500; + uint32_t ops = 0; + uint32_t ios = 0; + ARK *ark = NULL; + + ARK_CREATE; + fvt_kv_utils_perf(ark, 280, 4, LEN); + fvt_kv_utils_perf(ark, KV_64K, 200, 50); + ARK_DELETE; + + printf("---------\n"); + + /* single context single job */ + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(1, 1, 280, LEN, 10, &ops, &ios); + sync_pth.run_multi_ctxt(1, 1, KV_64K, 10, 10, &ops, &ios); + + /* single context 20 job */ + + sync_pth.run_multi_ctxt(1, 20, 280, LEN, 10, &ops, &ios); + sync_pth.run_multi_ctxt(1, 20, KV_64K, 10, 10, &ops, &ios); + + printf("---------\n"); + + /* 50 context 20 job */ + + sync_pth.run_multi_ctxt(50, 20, 280, LEN, 10, &ops, &ios); + sync_pth.run_multi_ctxt(50, 20, KV_64K, 10, 10, &ops, &ios); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_PERF) +{ + uint32_t ctxts = 1; + uint32_t jobs = 20; + uint32_t klen = 16; + uint32_t vlen = 16; + uint32_t LEN = 500; + + kv_async_job_perf(jobs, klen, 280, LEN); + kv_async_job_perf(jobs, KV_4K, KV_4K, 100); + kv_async_job_perf(jobs, klen, KV_64K, 50); + + kv_async_init_ctxt_io(ctxts, jobs, klen, vlen, LEN, 10); + printf("ctxt:%d jobs:%d ", ctxts, jobs); + kv_async_run_jobs(); + + kv_async_init_ctxt_io(ctxts, jobs, klen, KV_64K, 50, 10); + printf("ctxt:%d jobs:%d ", ctxts, jobs); + kv_async_run_jobs(); + + printf("---------\n"); + + /* 50 context 20 job */ + ctxts = 50; + kv_async_init_ctxt_io(ctxts, jobs, klen, vlen, LEN, 10); + printf("ctxt:%d jobs:%d ", ctxts, jobs); + kv_async_run_jobs(); +} diff --git a/src/kv/test/fvt_ark_perf_check.C b/src/kv/test/fvt_ark_perf_check.C new file mode 100644 index 00000000..648601b4 
--- /dev/null +++ b/src/kv/test/fvt_ark_perf_check.C @@ -0,0 +1,103 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_perf_check.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, COMMIT_PERF_CHECK) +{ + uint32_t ctxts = 1; + uint32_t jobs = 128; + uint32_t pths = 100; + uint32_t secs = 15; + + Sync_pth sync_pth; + + printf("\nctxts=1, 16x16\n"); + sync_pth.run_multi_ctxt(ctxts, pths, 16, 500, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, 16, 500, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); + + printf("\nctxts=1, 16x4k\n"); + sync_pth.run_multi_ctxt(ctxts, pths, KV_4K, 500, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, KV_4K, 500, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); + + printf("\nctxts=1, 16x64k\n"); + + sync_pth.run_multi_ctxt(ctxts, pths, KV_64K, 1, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, KV_64K, 1, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); + + ctxts=8; secs=25; + + printf("\nctxts=%d, 16x16\n", ctxts); + + sync_pth.run_multi_ctxt(ctxts, pths, 16, 500, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, 16, 500, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); + + printf("\nctxts=%d, 16x64k\n", ctxts); + + sync_pth.run_multi_ctxt(ctxts, pths, KV_64K, 1, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, KV_64K, 1, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); + + ctxts=40; jobs=20; pths=20; + + printf("\nctxts=%d, 16x16\n", ctxts); + + sync_pth.run_multi_ctxt(ctxts, 
pths, 16, 500, secs); + + kv_async_init_ctxt_io(ctxts, jobs, 16, 16, 500, secs); + printf("ctxts:%d jobs:%d ", ctxts, jobs); fflush(stdout); + kv_async_run_jobs(); +} diff --git a/src/kv/test/fvt_ark_perf_tool.C b/src/kv/test/fvt_ark_perf_tool.C new file mode 100644 index 00000000..91770c9d --- /dev/null +++ b/src/kv/test/fvt_ark_perf_tool.C @@ -0,0 +1,270 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_ark_perf_tool.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * kv ark perf tool with commandline args + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +} + +void usage(void) +{ + printf("Usage:\n"); + printf(" [-k klen][-v vlen][-l LEN][-s secs][-c ctxts][-t threads]" + "[-j jobs][-n npool][-S(sync)][-A(async)][-b(r/w)][-r(rdonly)]" + "[-w(wronly)][-V(verbose)][-h]\n"); + exit(0); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +int main(int argc, char **argv) +{ + char c; + char FF = 0xFF; + char *kparm = NULL; + char *vparm = NULL; + char *lparm = NULL; + char *sparm = NULL; + char *cparm = NULL; + char *tparm = NULL; + char *nparm = NULL; + char *jparm = NULL; + uint32_t Sparm = FALSE; + uint32_t Aparm = FALSE; + uint32_t mem = FALSE; + uint32_t klen = 16; + uint32_t vlen = 16; + uint32_t secs = 5; + uint32_t pths = 20; + uint32_t jobs = 128; + uint32_t ctxts = 1; + uint32_t npool = 20; + uint32_t LEN = 200; + uint32_t ro = FALSE; + uint32_t wo = FALSE; + uint32_t rw = FALSE; + uint32_t async = FALSE; + uint32_t sync = FALSE; + uint32_t verbo = FALSE; + char *penv = getenv("FVT_DEV"); + + opterr = 1; + + while (FF != (c=getopt(argc, argv, "k:v:l:s:c:t:n:j:rwmbVSAh"))) + { + //printf("c=%c %x\n", c, c); + switch (c) + { + case 'k': kparm = optarg; break; + case 'v': vparm = optarg; break; + case 'l': lparm = optarg; break; + case 's': sparm = optarg; break; + case 'c': cparm = optarg; break; + case 't': tparm = optarg; break; + case 'n': nparm = optarg; break; + case 'j': jparm = optarg; break; + case 'r': ro = TRUE; break; + case 'w': wo = TRUE; break; + case 'b': rw = TRUE; break; + case 'm': mem = TRUE; break; + case 'V': verbo = TRUE; 
break; + case 'S': Sparm = TRUE; break; + case 'A': Aparm = TRUE; break; + case 'h': + case '?': usage(); break; + } + } + + if (kparm) klen = atoi(kparm); + if (vparm) vlen = atoi(vparm); + if (lparm) LEN = atoi(lparm); + if (sparm) secs = atoi(sparm); + if (cparm) ctxts = atoi(cparm); + if (tparm) pths = atoi(tparm); + if (jparm) jobs = atoi(jparm); + if (nparm) npool = atoi(nparm); + if (Sparm) sync = TRUE; + if (Aparm) async = TRUE; + + if (!sync && !async) {sync=TRUE; async=TRUE;} + + if (mem) penv=NULL; + + if (vlen < KV_4K) LEN=200; + else if (vlen < KV_64K) LEN=50; + else if (vlen < KV_250K) LEN=10; + else LEN=1; + + KV_TRC_OPEN(pFT, "fvt"); + FVT_TRC_SIGINT_HANDLER; + + /*-------------------------------------*/ + /* read only perf stats */ + /*-------------------------------------*/ + if (ro) + { + if (!verbo) printf(" "); + + if (ctxts > 200 && vlen < KV_64K) LEN=50; + else if (ctxts > 200 && vlen > KV_64K) LEN=2; + + if (ctxts > 50 && pths > 20) pths=20; + + if (verbo) + printf("\n RO: ctxts:%d pths:%d jobs:%d %dx%dx%d secs:%d npool:%d " + "rw:%d sync:%d async:%d path:%s\n ", + ctxts, pths, jobs, klen, vlen, LEN, secs, npool, rw, sync, async, penv); + + Sync_pth sync_pth; + sync_pth.run_multi_ctxt_rd(ctxts, pths, npool, vlen, LEN, secs); + + printf("\n"); + exit(0); + } + + /*-------------------------------------*/ + /* write only perf stats */ + /*-------------------------------------*/ + if (wo) + { + if (!verbo) printf(" "); + + if (ctxts > 200 && vlen < KV_64K) LEN=50; + else if (ctxts > 200 && vlen > KV_64K) LEN=2; + + if (ctxts > 50 && pths > 20) pths=20; + + if (verbo) + printf("\n WO: ctxts:%d pths:%d jobs:%d %dx%dx%d secs:%d npool:%d " + "rw:%d sync:%d async:%d path:%s\n ", + ctxts, pths, jobs, klen, vlen, LEN, secs, npool, rw, sync, async, penv); + + Sync_pth sync_pth; + sync_pth.run_multi_ctxt_wr(ctxts, pths, npool, vlen, LEN, secs); + + printf("\n"); + exit(0); + } + + /*-------------------------------------*/ + /* use read/write separated 
perf stats */ + /*-------------------------------------*/ + if (rw) + { + if (!verbo) printf("\n"); + + if (sync) + { + + ARK *ark = NULL; + int mb; + + assert(0 == ark_create_verbose(penv, &ark, + 1048576, + 4096, + 1048576, + npool, + 256, + 8*1024, + ARK_KV_VIRTUAL_LUN)); + assert(NULL != ark); + + if (vlen < 256) mb=1; + else if (vlen < 1024) mb=4; + else if (vlen < KV_4K) mb=60; + else if (vlen < KV_64K) mb=200; + else if (vlen < KV_250K) mb=600; + else mb=1000; + + if (verbo) + printf("\n ctxts:1 pths:1 %dx%dx%d npool:%d rw:%d sync:%d\ + async:%d path:%s\n", klen, vlen, LEN, npool, rw, sync, async, penv); + + fvt_kv_utils_perf(ark, vlen, mb, 50); + ark_delete(ark); + } + + if (async) + { + if (verbo) + printf("\n ctxts:1 %dx%dx%d npool:20 rw:%d sync:%d async:%d\ + path:%s\n", klen, vlen, LEN, rw, sync, async, penv); + + kv_async_job_perf(128, klen, vlen, LEN); + } + + printf("\n"); + exit(0); + } + + /*-------------------------------------*/ + /* use general ark stats, r/w combined */ + /*-------------------------------------*/ + if (!verbo) printf(" "); + + if (ctxts > 200 && vlen < KV_64K) LEN=50; + else if (ctxts > 200 && vlen > KV_64K) LEN=2; + + if (sync) + { + if (ctxts > 50 && pths > 20) pths=20; + + if (verbo) + printf("\n ctxts:%d pths:%d jobs:%d %dx%dx%d secs:%d npool:%d " + "rw:%d sync:%d async:%d path:%s\n ", + ctxts, pths, jobs, klen, vlen, LEN, secs, npool, rw, sync, async, penv); + + Sync_pth sync_pth; + sync_pth.run_multi_ctxt(ctxts, pths, npool, vlen, LEN, secs); + } + + if (async) + { + if (verbo) + printf("\n ctxts:%d pths:%d jobs:%d %dx%dx%d secs:%d npool:%d " + "rw:%d sync:%d async:%d path:%s\n ", + ctxts, pths, jobs, klen, vlen, LEN, secs, npool, rw, sync, async, penv); + else + printf(" "); + + kv_async_init_perf_io(ctxts, jobs, npool, klen, vlen, LEN, secs); + printf("ctxts:%d jobs:%d npool=%d ", ctxts, jobs, npool); fflush(stdout); + kv_async_run_jobs(); + } + printf("\n"); + KV_TRC_CLOSE(pFT); +} diff --git 
a/src/kv/test/fvt_io_buster b/src/kv/test/fvt_io_buster new file mode 100755 index 00000000..22ddc506 --- /dev/null +++ b/src/kv/test/fvt_io_buster @@ -0,0 +1,20 @@ +#this works for systems with 64mb of RAM +#for systems with 128mb or greater, echo "400" for afu1.0 + +export FVT_ARK_IO_MIN=30 + +cd /opt/ibm/capikv/test + +FVT_DEV=/dev/cxl/afu0.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu0.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu0.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu0.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu0.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu0.0s; echo "400" | ./fvt_ark_mc_aio& + +FVT_DEV=/dev/cxl/afu1.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu1.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu1.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu1.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu1.0s; ./fvt_ark_io& +FVT_DEV=/dev/cxl/afu1.0s; echo "100" | ./fvt_ark_mc_aio& diff --git a/src/kv/test/fvt_kv.h b/src/kv/test/fvt_kv.h new file mode 100644 index 00000000..9ee4d54f --- /dev/null +++ b/src/kv/test/fvt_kv.h @@ -0,0 +1,88 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * declarations to aid in KV testing + * \details + * if env var "FVT_DEV" is unset, then tests run using memory file + * use export FVT_DEV=/dev/ to run to hardware + * \ingroup + ******************************************************************************/ +#ifndef FVT_KV_H +#define FVT_KV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif + +#define ARK_CREATE \ + ASSERT_EQ(0, ark_create(getenv("FVT_DEV"), &ark, ARK_KV_VIRTUAL_LUN)); \ + ASSERT_TRUE(ark != NULL); + +#define ARK_CREATE_PERSIST_READONLY \ + ASSERT_EQ(0, ark_create(getenv("FVT_DEV_PERSIST"), &ark, \ + ARK_KV_PERSIST_LOAD)); \ + ASSERT_TRUE(ark != NULL); + +#define ARK_CREATE_PERSIST \ + ASSERT_EQ(0, ark_create(getenv("FVT_DEV_PERSIST"), &ark, \ + ARK_KV_PERSIST_STORE | ARK_KV_PERSIST_LOAD)); \ + ASSERT_TRUE(ark != NULL); + +#define ARK_CREATE_NEW_PERSIST \ + ASSERT_EQ(0, ark_create(getenv("FVT_DEV_PERSIST"), &ark, \ + ARK_KV_PERSIST_STORE)); \ + ASSERT_TRUE(ark != NULL); + +#define ARK_DELETE \ + ASSERT_EQ(0, ark_delete(ark)) + +#define KV_4K 4 *1024 +#define KV_8K 8 *1024 +#define KV_64K 64 *1024 +#define KV_250K 250 *1024 +#define KV_500K 500 *1024 +#define KV_1M 1024*1024 +#define KV_2M 1024*1024*2 + +#define UNUSED(x) (void)(x) + +extern char *env_FVT_DEV; + +#endif diff --git a/src/kv/test/fvt_kv_fixme1.C b/src/kv/test/fvt_kv_fixme1.C new file mode 100644 index 00000000..4bb9385d --- /dev/null +++ b/src/kv/test/fvt_kv_fixme1.C @@ -0,0 +1,55 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/fvt_kv_fixme1.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +#include +#include +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ERROR_PATH, RANDOM_ERRORS) +{ + uint32_t ctxts = 1; + uint32_t vlen = KV_1M; + uint32_t secs = 300; + + kv_async_init_ark_io_inject(ctxts, 128, vlen, secs); + kv_async_start_jobs(); + + printf("\n"); fflush(stdout); + + Sync_ark_io ark_io_job; + ark_io_job.run_multi_arks(ctxts, 20, vlen, secs); + + kv_async_wait_jobs(); +} diff --git a/src/kv/test/fvt_kv_inject.h b/src/kv/test/fvt_kv_inject.h new file mode 100644 index 00000000..de258eae --- /dev/null +++ b/src/kv/test/fvt_kv_inject.h @@ -0,0 +1,80 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_inject.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
/**
 *******************************************************************************
 * \file
 * \brief
 *   declarations to aid in KV testing: the error-injection flag word and the
 *   macros the FVT uses to arm, test, and clear injected failures
 * \ingroup
 ******************************************************************************/
#ifndef FVT_KV_INJECT_H
#define FVT_KV_INJECT_H

#include <stdint.h>   /* uint32_t (include target lost in extraction; restored) */

/* Bit assignments within fvt_kv_inject.  ACTIVE gates the others: an error
 * bit only fires while ACTIVE is also set. */
#define FVT_INJECT_FLAGS_ACTIVE       0x8000
#define FVT_INJECT_FLAGS_READ_ERROR   0x4000
#define FVT_INJECT_FLAGS_WRITE_ERROR  0x2000
#define FVT_INJECT_FLAGS_ALLOC_ERROR  0x1000

/* Single global flag word; defined once in the test library. */
extern uint32_t fvt_kv_inject;

/* Arm/disarm injection.  Fully parenthesized so the macros are safe inside
 * larger expressions (the originals were bare assignment statements). */
#define FVT_KV_SET_INJECT_ACTIVE \
    (fvt_kv_inject |= FVT_INJECT_FLAGS_ACTIVE)

#define FVT_KV_SET_INJECT_INACTIVE \
    (fvt_kv_inject = 0)

#define FVT_KV_INJECT_READ_ERROR \
    (fvt_kv_inject |= FVT_INJECT_FLAGS_READ_ERROR)

#define FVT_KV_INJECT_WRITE_ERROR \
    (fvt_kv_inject |= FVT_INJECT_FLAGS_WRITE_ERROR)

#define FVT_KV_INJECT_ALLOC_ERROR \
    (fvt_kv_inject |= FVT_INJECT_FLAGS_ALLOC_ERROR)

/* Predicates: true only while injection is ACTIVE and the specific error bit
 * is set.  ('&' binds tighter than '&&', so the original forms were already
 * correct; the parentheses just make that explicit.) */
#define FVT_KV_READ_ERROR_INJECT \
    ((fvt_kv_inject & FVT_INJECT_FLAGS_ACTIVE) && \
     (fvt_kv_inject & FVT_INJECT_FLAGS_READ_ERROR))

#define FVT_KV_WRITE_ERROR_INJECT \
    ((fvt_kv_inject & FVT_INJECT_FLAGS_ACTIVE) && \
     (fvt_kv_inject & FVT_INJECT_FLAGS_WRITE_ERROR))

#define FVT_KV_ALLOC_ERROR_INJECT \
    ((fvt_kv_inject & FVT_INJECT_FLAGS_ACTIVE) && \
     (fvt_kv_inject & FVT_INJECT_FLAGS_ALLOC_ERROR))

/* One-shot clears, used by the injection sites after an error fires. */
#define FVT_KV_CLEAR_READ_ERROR \
    (fvt_kv_inject &= ~FVT_INJECT_FLAGS_READ_ERROR)

#define FVT_KV_CLEAR_WRITE_ERROR \
    (fvt_kv_inject &= ~FVT_INJECT_FLAGS_WRITE_ERROR)

#define FVT_KV_CLEAR_ALLOC_ERROR \
    (fvt_kv_inject &= ~FVT_INJECT_FLAGS_ALLOC_ERROR)

#endif /* FVT_KV_INJECT_H */
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include + +extern "C" +{ +int tst_ark_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_ark_default) +{ + char dev_parm[] = "-dev"; + char *env_FVT_DEV = getenv("FVT_DEV"); + char *argv_parms[3] = + { + (char*)"tst_ark_entry", + }; + + EXPECT_EQ(0, tst_ark_entry(1,argv_parms)); + + if (NULL != env_FVT_DEV) + { + argv_parms[1] = dev_parm; + argv_parms[2] = env_FVT_DEV; + EXPECT_EQ(0, tst_ark_entry(3,argv_parms)); + } +} + +TEST(FVT_KV_GOOD_PATH, _tst_ark_1000_keys_100_iterations) +{ + char dev_parm[] = "-dev"; + char *env_FVT_DEV = getenv("FVT_DEV"); + char *argv_parms[7] = + { + (char*)"tst_ark_entry", + (char*)"-n", + (char*)"100", + (char*)"-k", + (char*)"1000", + }; + EXPECT_EQ(0, tst_ark_entry(5,argv_parms)); + + if (NULL != env_FVT_DEV) + { + argv_parms[5] = dev_parm; + argv_parms[6] = env_FVT_DEV; + EXPECT_EQ(0, tst_ark_entry(7,argv_parms)); + } +} diff --git a/src/kv/test/fvt_kv_tst_async_cb.C b/src/kv/test/fvt_kv_tst_async_cb.C new file mode 100644 index 00000000..d191e982 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_async_cb.C @@ -0,0 +1,158 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_async_cb.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * utility functions to support async KV ops for the FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_SINGLE_SGD_JOB) +{ + uint32_t klen = 32; + uint32_t vlen = 128; + uint32_t LEN = 300; + uint32_t secs = 3; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_SGD(ASYNC_SINGLE_CONTEXT, + ASYNC_SINGLE_JOB, klen, vlen, LEN); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_SINGLE_REP_JOB) +{ + uint32_t klen = 22; + uint32_t vlen = 243; + uint32_t LEN = 200; + uint32_t secs = 3; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_REP(ASYNC_SINGLE_CONTEXT, + ASYNC_SINGLE_JOB, klen, vlen, LEN); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_EASY) +{ + uint32_t secs = 5; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_easy(ASYNC_SINGLE_CONTEXT); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ 
+TEST(FVT_KV_GOOD_PATH, ASYNC_CB_BIG_BLOCKS) +{ + uint32_t secs = 5; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_BIG_BLOCKS(ASYNC_SINGLE_CONTEXT); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_STRESS_LOW) +{ + uint32_t secs = 5; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_low_stress(ASYNC_SINGLE_CONTEXT); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_STRESS_HIGH) +{ + uint32_t secs = 5; + + kv_async_init_ctxt (ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_high_stress(ASYNC_SINGLE_CONTEXT); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_2_CONTEXT) +{ + uint32_t ctxt = 0; + uint32_t secs = 5; + + kv_async_init_ctxt (ctxt, secs); + kv_async_init_job_low_stress(ctxt++); + kv_async_init_ctxt (ctxt, secs); + kv_async_init_job_low_stress(ctxt); + kv_async_run_jobs(); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, ASYNC_CB_10_CONTEXT) +{ + uint32_t ctxt = 0; + uint32_t secs = 5; + + for (ctxt=0; ctxt<10; ctxt++) + { + kv_async_init_ctxt (ctxt, secs); + kv_async_init_job_easy(ctxt); + } + kv_async_run_jobs(); +} diff --git a/src/kv/test/fvt_kv_tst_bins.C b/src/kv/test/fvt_kv_tst_bins.C new file mode 100644 index 00000000..8e8fc471 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_bins.C @@ 
-0,0 +1,43 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_bins.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +#include +char *env_root_path = getenv("SURELOCKROOT"); +char cmd[1024] = {0}; +} + +// disabling this test because it is broken and we may not fix it +TEST(FVT_KV_GOOD_PATH, BIN_tst_ark) +{ + if (env_root_path) + sprintf(cmd, "%s/obj/tests/fvt_kv_tst_ark 2>&1 >/dev/null", env_root_path); + else + sprintf(cmd, "fvt_kv_tst_ark 2>&1 >/dev/null"); + EXPECT_EQ(0, system(cmd)); +} + diff --git a/src/kv/test/fvt_kv_tst_bl.C b/src/kv/test/fvt_kv_tst_bl.C new file mode 100644 index 00000000..d987c9ee --- /dev/null +++ b/src/kv/test/fvt_kv_tst_bl.C @@ -0,0 +1,73 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_bl.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_bl_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bl_parms_default) +{ + char *argv_parms[] = + { + (char*)"tst_bl_entry", + }; + EXPECT_EQ(0, tst_bl_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bl_parms_10000_iterations) +{ + char *argv_parms[] = + { + (char*)"tst_bl_entry", + (char*)"-n", + (char*)"10000" + }; + EXPECT_EQ(0, tst_bl_entry(3,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bl_parms_big) +{ + char *argv_parms[] = + { + (char*)"tst_bl_entry", + (char*)"-g", + (char*)"64", + (char*)"-b", + (char*)"100", + (char*)"-c", + (char*)"100", + (char*)"-l", + (char*)"100", + (char*)"-r", + (char*)"200", + (char*)"-w", + (char*)"64", + (char*)"-n", + (char*)"10000", + }; + EXPECT_EQ(0, tst_bl_entry(15,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_tst_bt.C b/src/kv/test/fvt_kv_tst_bt.C new file mode 100644 index 00000000..2424d3a9 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_bt.C @@ -0,0 +1,53 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_bt.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_bt_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bt_default) +{ + char *argv_parms[] = + { + (char*)"tst_bt_entry", + }; + + EXPECT_EQ(0, tst_bt_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bt_big) +{ + char *argv_parms[] = + { + (char*)"tst_bt_entry", + (char*)"-n", + (char*)"10000", + (char*)"-k", + (char*)"50000", + }; + EXPECT_EQ(0, tst_bt_entry(5,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_tst_bv.C b/src/kv/test/fvt_kv_tst_bv.C new file mode 100644 index 00000000..e30eb58a --- /dev/null +++ b/src/kv/test/fvt_kv_tst_bv.C @@ -0,0 +1,58 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_bv.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_bv_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bv_default) +{ + char *argv_parms[] = + { + (char*)"tst_bv_entry", + }; + + EXPECT_EQ(0, tst_bv_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_bv_big) +{ + char *argv_parms[] = + { + (char*)"tst_bv_entry", + (char*)"-b", + (char*)"512", + (char*)"-n", + (char*)"50000", + (char*)"-s", + (char*)"2837529", + }; + + EXPECT_EQ(0, tst_bv_entry(7,argv_parms)); +} + + diff --git a/src/kv/test/fvt_kv_tst_errors.C b/src/kv/test/fvt_kv_tst_errors.C new file mode 100644 index 00000000..316eaaa6 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_errors.C @@ -0,0 +1,475 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_errors.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * Simple test cases for kv FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +#include +#include + +ARK *async_ark = NULL; +uint32_t async_io = 0; +int32_t async_err = 0; + +void kv_tst_io_errors_cb(int errcode, uint64_t dt, int64_t res) +{ + --async_io; + if (async_err != errcode) printf("tag=%"PRIx64"\n", dt); + ASSERT_EQ(async_err, errcode); +} + +} //extern C + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_create) +{ + EXPECT_EQ(EINVAL, ark_create(NULL, NULL, ARK_KV_VIRTUAL_LUN)); + //ENOSPC + //ENOTREADY +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_delete) +{ + EXPECT_EQ(EINVAL, ark_delete(NULL)); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_set) +{ + ARK *ark = NULL; + char s[] = {"12345"}; + int64_t res = 0; + + ARK_CREATE; + EXPECT_EQ(EINVAL, ark_set(NULL, 5, s, 5, s, &res)); + EXPECT_EQ(EINVAL, ark_set(ark, 5, NULL, 5, s, &res)); + EXPECT_EQ(ENOMEM, ark_set(ark, 5, s, -1, s, &res)); + EXPECT_EQ(EINVAL, ark_set(ark, 5, s, 5, NULL, &res)); + EXPECT_EQ(EINVAL, ark_set(ark, 5, s, 5, s, NULL)); + ARK_DELETE; +} + +/** + 
 *******************************************************************************
 * \brief invalid parameters and not-found lookups for ark_get
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_get)
{
    ARK *ark = NULL;
    kv_t *db = NULL;
    char s[] = {"12345"};
    int64_t res = 0;
    uint32_t LEN = 100;

    ARK_CREATE;

    /* lookup in a brand-new ark: key cannot exist yet */
    EXPECT_EQ(ENOENT, ark_get(ark, 5, s, 5, s, 0, &res));

    EXPECT_EQ(0, ark_set(ark, 5, s, 5, s, &res));

    EXPECT_EQ(EINVAL, ark_get(NULL, 5, s, 5, s, 0, &res));
    EXPECT_EQ(EINVAL, ark_get(ark, 5, NULL, 5, s, 0, &res));
    /* NOTE(review): buffer length -1 is expected to SUCCEED here —
     * presumably treated as unbounded; confirm against the ark_get docs */
    EXPECT_EQ(0, ark_get(ark, 5, s, -1, s, 0, &res));
    EXPECT_EQ(EINVAL, ark_get(ark, 5, s, 5, NULL, 0, &res));
    EXPECT_EQ(EINVAL, ark_get(ark, 5, s, 5, s, 0, NULL));

    db = (kv_t*)kv_db_create_fixed(LEN, 2, 2);
    EXPECT_TRUE(db != NULL);
    fvt_kv_utils_load (ark, db, LEN);
    fvt_kv_utils_query(ark, db, KV_4K, LEN);
    /* a 3-byte prefix of a stored 5-byte key must not match */
    EXPECT_EQ(ENOENT, ark_get(ark, 3, s, 5, s, 0, &res));

    fvt_kv_utils_del(ark, db, LEN);
    kv_db_destroy(db, LEN);
    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters and not-found lookups for ark_exists
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_exists)
{
    ARK *ark = NULL;
    kv_t *db = NULL;
    char s[] = {"12345"};
    int64_t res = 0;
    uint32_t LEN = 100;

    ARK_CREATE;

    EXPECT_EQ(EINVAL, ark_exists(NULL, 5, s, &res));
    EXPECT_EQ(EINVAL, ark_exists(ark, 5, NULL, &res));
    EXPECT_EQ(EINVAL, ark_exists(ark, 5, s, NULL));

    /* key "12345" was never stored */
    EXPECT_EQ(ENOENT, ark_exists(ark, 5, s, &res));

    db = (kv_t*)kv_db_create_fixed(LEN, 2, 2);
    EXPECT_TRUE(db != NULL);
    fvt_kv_utils_load (ark, db, LEN);
    fvt_kv_utils_query(ark, db, KV_4K, LEN);
    /* still absent even with other keys loaded */
    EXPECT_EQ(ENOENT, ark_exists(ark, 5, s, &res));

    fvt_kv_utils_del(ark, db, LEN);
    kv_db_destroy(db, LEN);
    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters and not-found deletes for ark_del
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_del)
{
    ARK *ark = NULL;
    kv_t *db = NULL;
    char s[] = {"12345"};
    int64_t res = 0;
    uint32_t LEN = 100;

    ARK_CREATE;

    EXPECT_EQ(EINVAL, ark_del(NULL, 5, s, &res));
    EXPECT_EQ(EINVAL, ark_del(ark, 5, NULL, &res));
    EXPECT_EQ(EINVAL, ark_del(ark, 5, s, NULL));

    EXPECT_EQ(ENOENT, ark_del(ark, 5, s, &res));

    db = (kv_t*)kv_db_create_fixed(LEN, 2, 2);
    EXPECT_TRUE(db != NULL);
    fvt_kv_utils_load (ark, db, LEN);
    fvt_kv_utils_query(ark, db, KV_4K, LEN);
    EXPECT_EQ(ENOENT, ark_del(ark, 5, s, &res));

    fvt_kv_utils_del(ark, db, LEN);
    kv_db_destroy(db, LEN);
    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters for ark_first (NULL-returning API: errno checked)
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_first)
{
    ARK *ark = NULL;
    uint8_t gvalue[KV_4K] = {0};
    int64_t klen = 0;
    void *_NULL = NULL;

    ARK_CREATE;

    EXPECT_EQ(_NULL, ark_first(NULL, 5, &klen, gvalue));
    EXPECT_EQ(errno, EINVAL);
    EXPECT_EQ(_NULL, ark_first(ark, 0, &klen, gvalue));
    EXPECT_EQ(errno, EINVAL);
    EXPECT_EQ(_NULL, ark_first(ark, 5, NULL, gvalue));
    EXPECT_EQ(errno, EINVAL);
    EXPECT_EQ(_NULL, ark_first(ark, 5, &klen, NULL));
    EXPECT_EQ(errno, EINVAL);

    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters for ark_next, using a valid iterator from
 *        ark_first
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_next)
{
    ARK *ark = NULL;
    ARI *ari = NULL;
    char s[] = {"12345"};
    int64_t res = 0;
    int64_t klen = 0;
    uint8_t gvalue[KV_4K] = {0};

    ARK_CREATE;

    EXPECT_EQ(0, ark_set(ark, 5, s, 5, s, &res));

    /* obtain a legitimate iterator so only the bad parameter can fail */
    ari = ark_first(ark, KV_4K, &klen, gvalue);
    EXPECT_TRUE(ari != NULL);

    EXPECT_EQ(EINVAL, ark_next(NULL, KV_4K, &klen, gvalue));
    EXPECT_EQ(EINVAL, ark_next(ari, 0, &klen, gvalue));
    EXPECT_EQ(EINVAL, ark_next(ari, KV_4K, NULL, gvalue));
    EXPECT_EQ(EINVAL, ark_next(ari, KV_4K, &klen, NULL));

    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters for ark_count (NULL ark, NULL out-pointer)
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_count)
{
    ARK *ark = NULL;
    int64_t cnt = 0;

    /* ark is still NULL here: pre-create failure path */
    EXPECT_EQ(EINVAL, ark_count(ark, &cnt));

    ARK_CREATE;

    EXPECT_EQ(EINVAL, ark_count(ark, NULL));

    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief invalid parameters for ark_random
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_random)
{
    ARK *ark = NULL;
    uint8_t gvalue[KV_4K] = {0};
    uint64_t klen = 0;

    /* ark is still NULL here: pre-create failure path */
    EXPECT_EQ(EINVAL, ark_random(ark, klen, &klen, gvalue));

    ARK_CREATE;

    /* klen is 0 in all of these, so each call is invalid for two reasons */
    EXPECT_EQ(EINVAL, ark_random(ark, klen, &klen, gvalue));
    EXPECT_EQ(EINVAL, ark_random(ark, 0, &klen, gvalue));
    EXPECT_EQ(EINVAL, ark_random(ark, klen, NULL, gvalue));
    EXPECT_EQ(EINVAL, ark_random(ark, klen, &klen, NULL));

    ARK_DELETE;
}

/**
 *******************************************************************************
 * \brief forking a NULL ark must fail
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, BAD_PARMS_ark_fork)
{
    ARK *ark = NULL;

    EXPECT_EQ(-1, ark_fork(ark));
}

/**
 *******************************************************************************
 * \brief injected allocation failures, sync and async paths.
 *        Each FVT_KV_INJECT_ALLOC_ERROR arms a one-shot failure that the
 *        next allocating operation consumes.
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, ALLOC_ERRORS)
{
    char s[] = {"12345678"};
    uint32_t klen = 8;
    int64_t res = 0;

    FVT_KV_SET_INJECT_ACTIVE;

    errno=0; FVT_KV_INJECT_ALLOC_ERROR;
    ASSERT_EQ(ENOMEM, ark_create(getenv("FVT_DEV"),
                                 &async_ark, ARK_KV_VIRTUAL_LUN));

    errno=0;
    ASSERT_EQ(0, ark_create(getenv("FVT_DEV"), &async_ark,
                            ARK_KV_VIRTUAL_LUN));
    EXPECT_TRUE(async_ark != NULL);

    /* sync */
    errno=0; FVT_KV_INJECT_ALLOC_ERROR;
    EXPECT_EQ(ENOMEM, ark_set(async_ark, klen, s, klen, s, &res));
    /* the failed set must not have stored the key */
    EXPECT_EQ(ENOENT, ark_get(async_ark, klen, s, klen, s, 0, &res));

    EXPECT_EQ(0, ark_set(async_ark, klen, s, klen, s, &res));
    EXPECT_EQ(0, ark_get(async_ark, klen, s, klen, s, 0, &res));

    errno=0; FVT_KV_INJECT_ALLOC_ERROR;
    EXPECT_EQ(ENOMEM, ark_del(async_ark, klen, s, &res));
    EXPECT_EQ(0, ark_del(async_ark, klen, s, &res));

    /* async: each op bumps async_io; the callback checks async_err and
     * decrements it, releasing the poll loop below.
     * NOTE(review): if a callback is lost these loops spin forever —
     * consider a bounded wait. */
    errno=0; ++async_io; async_err = ENOMEM;
    FVT_KV_INJECT_ALLOC_ERROR;
    EXPECT_EQ(0, ark_set_async_cb(async_ark, klen, s, klen, s,
                                  kv_tst_io_errors_cb, 0xfee1));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = ENOENT;
    EXPECT_EQ(0, ark_get_async_cb(async_ark, klen, s, klen, s, 0,
                                  kv_tst_io_errors_cb, 0xfee2));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_set_async_cb(async_ark, klen, s, klen, s,
                                  kv_tst_io_errors_cb, 0xfee3));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_get_async_cb(async_ark, klen, s, klen, s, 0,
                                  kv_tst_io_errors_cb, 0xfee4));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = ENOMEM;
    FVT_KV_INJECT_ALLOC_ERROR;
    EXPECT_EQ(0, ark_del_async_cb(async_ark, klen, s,
                                  kv_tst_io_errors_cb, 0xfee5));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_del_async_cb(async_ark, klen, s,
                                  kv_tst_io_errors_cb, 0xfee6));
    while (async_io) usleep(50000);

    errno=0; FVT_KV_SET_INJECT_INACTIVE;
    ASSERT_EQ(0, ark_delete(async_ark));
}

/**
 *******************************************************************************
 * \brief injected read/write I/O failures, sync and async paths
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, IO_ERRORS)
{
    char s[] = {"12345678"};
    uint32_t klen = 8;
    int64_t res = 0;

    FVT_KV_SET_INJECT_ACTIVE;

    ASSERT_EQ(0, ark_create(getenv("FVT_DEV"), &async_ark, ARK_KV_VIRTUAL_LUN));
    EXPECT_TRUE(async_ark != NULL);

    /* sync */
    errno=0; FVT_KV_INJECT_WRITE_ERROR;
    EXPECT_EQ(EIO, ark_set(async_ark, klen, s, klen, s, &res));
    /* the failed set must not have stored the key */
    EXPECT_EQ(ENOENT, ark_get(async_ark, klen, s, klen, s, 0, &res));

    EXPECT_EQ(0, ark_set(async_ark, klen, s, klen, s, &res));

    errno=0; FVT_KV_INJECT_READ_ERROR;
    EXPECT_EQ(EIO, ark_get(async_ark, klen, s, klen, s, 0, &res));

    errno=0; FVT_KV_INJECT_READ_ERROR;
    EXPECT_EQ(EIO, ark_exists(async_ark, klen, s, &res));
#if 0
    /* NOTE(review): disabled upstream — ark_del under write-error injection */
    errno=0; FVT_KV_INJECT_WRITE_ERROR;
    EXPECT_EQ(EIO, ark_del(async_ark, klen, s, &res));
#endif

    EXPECT_EQ(0, ark_del(async_ark, klen, s, &res));

    /* async */

    errno=0; ++async_io; async_err = EIO;
    FVT_KV_INJECT_WRITE_ERROR;
    EXPECT_EQ(0, ark_set_async_cb(async_ark, klen, s, klen, s,
                                  kv_tst_io_errors_cb, 0xfee1));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = ENOENT;
    EXPECT_EQ(0, ark_get_async_cb(async_ark, klen, s, klen, s, 0,
                                  kv_tst_io_errors_cb, 0xfee2));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_set_async_cb(async_ark, klen, s, klen, s,
                                  kv_tst_io_errors_cb, 0xfee3));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_get_async_cb(async_ark, klen, s, klen, s, 0,
                                  kv_tst_io_errors_cb, 0xfee4));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = EIO;
    FVT_KV_INJECT_READ_ERROR;
    EXPECT_EQ(0, ark_get_async_cb(async_ark, klen, s, klen, s, 0,
                                  kv_tst_io_errors_cb, 0xfee5));
    while (async_io) usleep(50000);

    errno=0; ++async_io; async_err = EIO;
    FVT_KV_INJECT_READ_ERROR;
    EXPECT_EQ(0, ark_exists_async_cb(async_ark, klen, s,
                                     kv_tst_io_errors_cb, 0xfee6));
    while (async_io) usleep(50000);
#if 0
    /* NOTE(review): disabled upstream — async del under write-error injection */
    errno=0; ++async_io; async_err = EIO;
    FVT_KV_INJECT_WRITE_ERROR;
    EXPECT_EQ(0, ark_del_async_cb(async_ark, klen, s,
                                  kv_tst_io_errors_cb, 0xfee7));
    while (async_io) usleep(50000);
#endif

    errno=0; ++async_io; async_err = 0;
    EXPECT_EQ(0, ark_del_async_cb(async_ark, klen, s,
                                  kv_tst_io_errors_cb, 0xfee8));
    while (async_io) usleep(50000);

    errno=0; FVT_KV_SET_INJECT_INACTIVE;
    ASSERT_EQ(0, ark_delete(async_ark));
}

/**
 *******************************************************************************
 * \brief randomized I/O-error injection across many contexts; only runs
 *        against real hardware (FVT_DEV), skipped for in-memory mode
 ******************************************************************************/
TEST(FVT_KV_ERROR_PATH, RANDOM_ERRORS)
{
    uint32_t ctxts = 10;
    uint32_t ops = 10;
    uint32_t vlen = KV_4K;
    uint32_t secs = 20;

    if (getenv("FVT_DEV") == NULL)
    {
        printf("SKIPPING, when running to memory\n");
    }
    else
    {
        /* async injection jobs run in the background ... */
        kv_async_init_ark_io_inject(ctxts, ops, vlen, secs);
        kv_async_start_jobs();

        printf("\n"); fflush(stdout);

        /* ... while synchronous multi-ark traffic runs in the foreground */
        Sync_ark_io ark_io_job;
        ark_io_job.run_multi_arks(ctxts, ops, vlen, secs);

        kv_async_wait_jobs();
    }
}
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_ht_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_ht_default) +{ + char *argv_parms[] = + { + (char*)"tst_ht_entry", + }; + EXPECT_EQ(0, tst_ht_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_ht_big) +{ + char *argv_parms[] = + { + (char*)"tst_ht_entry", + (char*)"-n", + (char*)"100000", + (char*)"-h", + (char*)"256", + (char*)"-m", + (char*)"256", + (char*)"-s", + (char*)"657432", + }; + EXPECT_EQ(0, tst_ht_entry(9,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_tst_iv.C b/src/kv/test/fvt_kv_tst_iv.C new file mode 100644 index 00000000..7993833f --- /dev/null +++ b/src/kv/test/fvt_kv_tst_iv.C @@ -0,0 +1,67 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_iv.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_iv_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, tst_iv_default1) +{ + char *argv_parms[] = + { + (char*)"tst_iv_entry", + }; + EXPECT_EQ(0, tst_iv_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, tst_iv_default0) +{ + char *argv_parms[] = + { + (char*)"tst_iv_entry", + (char*)"-t", + (char*)"0", + }; + EXPECT_EQ(0, tst_iv_entry(3,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_iv_big) +{ + char *argv_parms[] = + { + (char*)"tst_iv_entry", + (char*)"-n", + (char*)"128", + (char*)"-m", + (char*)"512", + (char*)"-i", + (char*)"50000", + (char*)"-s", + (char*)"675432", + }; + EXPECT_EQ(0, tst_iv_entry(9,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_tst_persist.C b/src/kv/test/fvt_kv_tst_persist.C new file mode 100644 index 00000000..4dc823ae --- /dev/null +++ b/src/kv/test/fvt_kv_tst_persist.C @@ -0,0 +1,237 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_persist.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, PERSIST_FIXED_1kx1kx10000) +{ + ARK *ark = NULL; + kv_t *fdb = NULL; + uint32_t klen = 1024; + uint32_t vlen = 1024; + uint32_t LEN = 10000; + uint32_t i = 0; + int64_t res = 0; + uint8_t gvalue[vlen]; + + if (NULL != getenv("FVT_DEV_PERSIST")) + { + printf("create ark\n"); + ARK_CREATE_NEW_PERSIST; + + fdb = (kv_t*)kv_db_create_fixed(LEN, klen, vlen); + ASSERT_TRUE(fdb != NULL); + + printf("load ark\n"); + fvt_kv_utils_load (ark, fdb, LEN); + printf("query db\n"); + fvt_kv_utils_query(ark, fdb, vlen, LEN); + + ARK_DELETE; + + ARK_CREATE_PERSIST; + + printf("re-opened ark, query db\n"); + fvt_kv_utils_query(ark, fdb, vlen, LEN); + printf("delete db from ark\n"); + fvt_kv_utils_del(ark, fdb, LEN); + + printf("load ark with the same db\n"); + fvt_kv_utils_load (ark, fdb, LEN); + printf("query db\n"); + fvt_kv_utils_query(ark, fdb, vlen, LEN); + + ARK_DELETE; + + ARK_CREATE_NEW_PERSIST; + + printf("re-open ark as new, verify empty\n"); + + for (i=0; i +extern "C" +{ +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SCENARIO_CREATE_DELETE_LOOP) +{ + ARK *ark = NULL; + int32_t loops = 100; + int32_t i = 0; + + for (i=0; i + +extern "C" +{ +#include +} + +/** + ******************************************************************************* + * \brief + * create/delete ark + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SIMPLE_validate_kv_utils) +{ + int32_t i = 0; + int32_t FLEN = 1000; + int32_t MLEN = 1000; + void *null = NULL; + + kv_t *db_f = 
(kv_t*)kv_db_create_fixed(FLEN, 64, 4); + ASSERT_TRUE(db_f != NULL); + + kv_t *db_m = (kv_t*)kv_db_create_mixed(MLEN, 128, 64); + ASSERT_TRUE(db_m != NULL); + + for (i=0; i + +extern "C" +{ +#include +#include +#include +#include +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_CTXT_10_PTH_10_SMALL_BLOCKS) +{ + uint32_t num_ctxt = 10; + uint32_t num_pth = 10; + uint32_t vlen = 16; + uint32_t LEN = 100; + uint32_t secs = 5; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_CTXT_10_PTH_32_BIG_BLOCKS) +{ + uint32_t num_ctxt = 10; + uint32_t num_pth = 32; + uint32_t vlen = KV_64K; + uint32_t LEN = 20; + uint32_t secs = 5; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_ASYNC_EASY) +{ + uint32_t klen = 256; + uint32_t vlen = KV_64K; + uint32_t LEN = 20; + uint32_t secs = 5; + + kv_async_init_ctxt(ASYNC_SINGLE_CONTEXT, secs); + kv_async_init_job_easy(ASYNC_SINGLE_CONTEXT); + + printf("start async jobs\n"); + kv_async_start_jobs(); + + printf("start sync job\n"); + fvt_kv_utils_SGD_LOOP(kv_async_get_ark(ASYNC_SINGLE_CONTEXT), + kv_db_create_fixed, klen, vlen, LEN, secs); + + printf("wait for async jobs\n"); + kv_async_wait_jobs(); +} + +/** + ******************************************************************************* + * \brief + 
******************************************************************************/ +#ifdef _AIX +TEST(FVT_KV_GOOD_PATH, DISABLED_SYNC_ASYNC_PERF) +#else +TEST(FVT_KV_GOOD_PATH, SYNC_ASYNC_PERF) +#endif +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 128; + uint32_t npool = 20; + uint32_t vlen = 16; + uint32_t LEN = 500; + uint32_t secs = 15; + uint32_t ios = 0; + uint32_t ops = 0; + + if (getenv("FVT_DEV") == NULL) + { + printf("NOT_EXECUTED for memory\n"); + return; + } + + Sync_pth sync_pth; + + num_ctxt = 1; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 72000); + EXPECT_GT(ios, 72000); + usleep(1000000); + + num_ctxt = 1; vlen = KV_64K; secs = 10; num_pth = 40; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 11500); + EXPECT_GT(ios, 112000); + usleep(1000000); + + num_ctxt = 1; vlen = KV_500K; num_pth = 20; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 2200); + EXPECT_GT(ios, 150000); + usleep(1000000); + + num_ctxt = 4; vlen = 16; num_pth = 128; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 182000); + EXPECT_GT(ios, 182000); + usleep(1000000); + + num_ctxt = 4; num_pth = 20; vlen = KV_64K; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 19000); + EXPECT_GT(ios, 180000); + usleep(1000000); + + num_ctxt = 20; vlen = 16; num_pth = 128; npool = 4; + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN,secs,&ops,&ios); + EXPECT_GT(ops, 110000); + EXPECT_GT(ios, 110000); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_ASYNC_MAGNUS_DIFFICULTUS) +{ + uint32_t num_ctxt = 20; + uint32_t ops = 50; + uint32_t vlen = KV_64K; + uint32_t LEN = 5; + uint32_t secs = 
10; + +#ifdef _AIX + char *env_FVT_DEV = getenv("FVT_DEV"); + if (env_FVT_DEV == NULL) + { + printf("NOT_EXECUTED for memory on AIX\n"); + return; + } +#endif + + printf("init async %dctxt/%djobs\n", num_ctxt, ops); + kv_async_init_ctxt_io(num_ctxt, ops, 16, vlen, LEN, secs); + + printf("start async jobs\n"); + kv_async_start_jobs(); + + printf("start %dctxt/%dpth sync jobs\n", num_ctxt, ops); + Sync_ark_io ark_io_job; + ark_io_job.run_multi_arks(num_ctxt, ops, vlen, secs); + + printf("wait for async jobs\n"); + kv_async_wait_jobs(); +} diff --git a/src/kv/test/fvt_kv_tst_sync_pth.C b/src/kv/test/fvt_kv_tst_sync_pth.C new file mode 100644 index 00000000..0540f0b5 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_sync_pth.C @@ -0,0 +1,167 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_sync_pth.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_PTH_FIXED_8x2) +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 1; + uint32_t vlen = 2; + uint32_t LEN = 200; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_PTH_FIXED_8x4k) +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 1; + uint32_t vlen = KV_4K; + uint32_t LEN = 200; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +#ifdef _AIX +TEST(FVT_KV_GOOD_PATH, DISABLED_SYNC_PTH_FIXED_BIG_BLOCKS) +#else +TEST(FVT_KV_GOOD_PATH, SYNC_PTH_FIXED_BIG_BLOCKS) +#endif +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 1; + uint32_t vlen = KV_500K; + uint32_t LEN = 50; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_2_PTH_1_CONTEXT) +{ + uint32_t num_ctxt = 1; + uint32_t num_pth = 2; + uint32_t 
vlen = KV_4K; + uint32_t LEN = 200; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_1_PTH_2_CONTEXT) +{ + uint32_t num_ctxt = 2; + uint32_t num_pth = 1; + uint32_t vlen = KV_4K; + uint32_t LEN = 200; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +TEST(FVT_KV_GOOD_PATH, SYNC_4_PTH_40_CONTEXT) +{ + uint32_t num_ctxt = 40; + uint32_t num_pth = 4; + uint32_t vlen = 128; + uint32_t LEN = 200; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +#ifdef _AIX +TEST(FVT_KV_GOOD_PATH, DISABLED_SYNC_4_PTH_200_CONTEXT) +#else +TEST(FVT_KV_GOOD_PATH, SYNC_4_PTH_200_CONTEXT) +#endif +{ + uint32_t num_ctxt = 200; + uint32_t num_pth = 4; + uint32_t npool = 4; + uint32_t vlen = 128; + uint32_t LEN = 100; + uint32_t secs = 3; + + Sync_pth sync_pth; + + sync_pth.run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN, secs); +} diff --git a/src/kv/test/fvt_kv_tst_tg.C b/src/kv/test/fvt_kv_tst_tg.C new file mode 100644 index 00000000..c37442bc --- /dev/null +++ b/src/kv/test/fvt_kv_tst_tg.C @@ -0,0 +1,54 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_tg.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_tg_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, kv_tst_tg_default) +{ + char *argv_parms[] = + { + (char*)"tst_tg_entry", + }; + EXPECT_EQ(0, tst_tg_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, kv_tst_tg_big) +{ + char *argv_parms[] = + { + (char*)"tst_tg_entry", + (char*)"-n", + (char*)"100", + (char*)"-c", + (char*)"10", + (char*)"-p", + (char*)"15", + }; + EXPECT_EQ(0, tst_tg_entry(7,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_tst_vi.C b/src/kv/test/fvt_kv_tst_vi.C new file mode 100644 index 00000000..c1908310 --- /dev/null +++ b/src/kv/test/fvt_kv_tst_vi.C @@ -0,0 +1,52 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_tst_vi.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +int tst_vi_entry(int argc, char **argv); +} + +TEST(FVT_KV_GOOD_PATH, _tst_vi_default) +{ + char *argv_parms[] = + { + (char*)"tst_vi_entry", + }; + EXPECT_EQ(0, tst_vi_entry(1,argv_parms)); +} + +TEST(FVT_KV_GOOD_PATH, _tst_vi_big) +{ + char *argv_parms[] = + { + (char*)"tst_vi_entry", + (char*)"-n", + (char*)"10000", + (char*)"-s", + (char*)"477288", + }; + EXPECT_EQ(0, tst_vi_entry(5,argv_parms)); +} diff --git a/src/kv/test/fvt_kv_utils.C b/src/kv/test/fvt_kv_utils.C new file mode 100644 index 00000000..72667cb4 --- /dev/null +++ b/src/kv/test/fvt_kv_utils.C @@ -0,0 +1,363 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_utils.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * utility functions for KV FVT + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +} + +/******************************************************************************* + ******************************************************************************/ +void fvt_kv_utils_load(ARK *ark, kv_t *db, uint32_t LEN) +{ + uint32_t i = 0; + int64_t res = 0; + + ASSERT_TRUE(NULL != ark); + ASSERT_TRUE(NULL != db); + + for (i=0; i 0) + { + /* regenerate values to each key */ + if (i%2) regen_vlen = vlen+1; + else regen_vlen = vlen; + ASSERT_EQ(0, db_regen(db, LEN, regen_vlen)); + } + + /* load/replace all key/value pairs from the db into the ark */ + fvt_kv_utils_load(ark, db, LEN); + + /* query all key/value pairs from the db */ + fvt_kv_utils_query(ark, db, regen_vlen, LEN); + + cur = time(0); + ++i; + } + while (cur-start < secs); + + /* delete all key/value pairs from the db */ + fvt_kv_utils_del(ark, db, LEN); + + kv_db_destroy(db, LEN); +} + +/******************************************************************************* + ******************************************************************************/ +void fvt_kv_utils_read(ARK *ark, kv_t *db, uint32_t vbuflen, uint32_t LEN) +{ + uint32_t i = 0; + int64_t res = 0; + uint8_t gvalue[vbuflen]; + + ASSERT_TRUE(NULL != ark); + 
ASSERT_TRUE(NULL != db); + + for (i=0; i +#include + +/** + ******************************************************************************* + * \brief + * load all the db key/values into the ark using ark_set + ******************************************************************************/ +void fvt_kv_utils_load(ARK *ark, kv_t *db, uint32_t LEN); + +/** + ******************************************************************************* + * \brief + * query all the db key/values in the ark using ark_get/ark_exists + ******************************************************************************/ +void fvt_kv_utils_query(ARK *ark, kv_t *db, uint32_t vbuflen, uint32_t LEN); + +/** + ******************************************************************************* + * \brief + * query all the db key/values in the ark using ark_get/ark_exists and offset + ******************************************************************************/ +void fvt_kv_utils_query_off(ARK *ark, kv_t *db, uint32_t vbuflen, uint32_t LEN); + +/** + ******************************************************************************* + * \brief + * delete all the db key/values from the ark using ark_del + ******************************************************************************/ +void fvt_kv_utils_del(ARK *ark, kv_t *db, uint32_t LEN); + +/** + ******************************************************************************* + * \brief + * create db, set, get, exists, del, destroy db + ******************************************************************************/ +void fvt_kv_utils_SGD_LOOP(ARK *ark, + void* (*db_create)(uint32_t, uint32_t, uint32_t), + uint32_t klen, + uint32_t vlen, + uint32_t len, + uint32_t secs); + +/** + ******************************************************************************* + * \brief + * create db, set, get, exists, regen db, set, get, exists, del, destroy db + ******************************************************************************/ +void 
fvt_kv_utils_REP_LOOP(ARK *ark, + void* (*db_create)(uint32_t, uint32_t, uint32_t), + uint32_t (*db_regen) (kv_t*, uint32_t, uint32_t), + uint32_t klen, + uint32_t vlen, + uint32_t len, + uint32_t secs); + +/** + ******************************************************************************* + * \brief + * calc sync data rates for read and write + ******************************************************************************/ +void fvt_kv_utils_perf(ARK *ark, uint32_t vlen, uint32_t mb, uint32_t LEN); + +#endif diff --git a/src/kv/test/fvt_kv_utils_ark_io.C b/src/kv/test/fvt_kv_utils_ark_io.C new file mode 100644 index 00000000..7c4b7c94 --- /dev/null +++ b/src/kv/test/fvt_kv_utils_ark_io.C @@ -0,0 +1,159 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_utils_ark_io.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * functions to aid in testing the sync kv ark functions + * \ingroup + ******************************************************************************/ +extern "C" +{ +#include +#include +#include + +} +void kv_async_get_arks(ARK *arks[], uint32_t num_ctxt); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_ark_io::run_SGD(ark_io_args_t *args) +{ + uint32_t i = 0; + uint32_t rc = 0; + void* (*fp)(void*) = (void*(*)(void*))SGD; + + args->pth = 0; + for (i=0; i<5000; i++) + { + if (0 == (rc=pthread_create(&args->pth, + NULL, + fp, + (void*)args))) break; + usleep(10000); + } + if (rc) + { + args->pth = 0; + printf("."); fflush(stdout); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_ark_io::wait(ark_io_args_t *args) +{ + if (args->pth) (void)pthread_join(args->pth, NULL); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_ark_io::run_multi_arks(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t secs) +{ + uint32_t i = 0; + uint32_t ctxt_i = 0; + uint32_t pth_i = 0; + uint32_t klen = KV_4K; //must be large, to not dup async db klens + uint32_t LEN = 50; + uint32_t tot_pth = num_ctxt * num_pth; + ark_io_args_t pth_args[tot_pth]; + ARK *ark[num_ctxt]; + kv_t *db[100]; + struct timeval stop, start; + uint64_t ops = 0; + uint64_t ios = 0; + uint32_t t_ops = 0; + uint32_t t_ios = 0; + + memset(pth_args, 0, sizeof(ark_io_args_t) * tot_pth); + + /* alloc one set of db's, to be used for 
each context */ + for (i=0; i +#include +} + +/** + ******************************************************************************* + * \brief + * struct to contain args passed to child + ******************************************************************************/ +typedef struct +{ + ARK *ark; + kv_t *db; + int32_t vlen; + int32_t LEN; + int32_t secs; + uint32_t mb; + uint32_t ops; + pthread_t pth; +} ark_io_args_t; + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +class Sync_ark_io +{ + protected: + static void set(void *args) + { + ark_io_args_t *sg_args = (ark_io_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + + /* load all key/value pairs from the fixed db into the ark */ + for (i=0; iLEN; i++) + { + if (0 != ark_set(ark, db[i].klen, db[i].key, db[i].vlen, + db[i].value, &res)) + printf("ark_set failed\n"); + + if (db[i].vlen != res) + printf("ark_set bad length\n"); + } + } + + static void get(void *args) + { + ark_io_args_t *sg_args = (ark_io_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + uint8_t gvalue[sg_args->vlen]; + + /* query all key/value pairs from the fixed db */ + for (i=sg_args->LEN-1; i>=0; i--) + { + if (0 != ark_get(ark, db[i].klen, db[i].key, db[i].vlen, + gvalue, 0, &res)) + printf("ark_get failed\n"); + + if (db[i].vlen != res) + printf("ark_get bad length\n"); + + if (0 != memcmp(db[i].value,gvalue,db[i].vlen)) + printf("ark_get miscompare\n"); + + if (0 != ark_exists(ark, db[i].klen, db[i].key, &res)) + printf("ark_exists failed\n"); + + if (db[i].vlen != res) + printf("ark_exists bad length\n"); + } + } + + static void del(void *args) + { + ark_io_args_t *sg_args = (ark_io_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + + /* delete all 
key/value pairs from the fixed db */ + for (i=0; iLEN; i++) + { + if (0 != ark_del(ark, db[i].klen, db[i].key, &res)) + printf("ark_del failed\n"); + + if (db[i].vlen != res) + printf("ark_del bad length\n"); + } + } + + static void SGD(void *args) + { + ark_io_args_t *sg_args = (ark_io_args_t*)args; + int32_t start = time(0); + int32_t next = start + 600; + int32_t cur = 0; + + KV_TRC(pFT, "RUN_PTH 0 minutes"); + + do + { + if (cur > next) + { + KV_TRC(pFT, "RUN_PTH %d minutes", (next-start)/60); + next += 600; + } + + set(args); + get(args); + del(args); + + cur = time(0); + } + while (cur-start < sg_args->secs); + + KV_TRC(pFT, "RUN_PTH DONE"); + } + + public: + void run_SGD(ark_io_args_t *args); + + void wait(ark_io_args_t *args); + + void run_multi_arks(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t secs); +}; + +#endif diff --git a/src/kv/test/fvt_kv_utils_async_cb.C b/src/kv/test/fvt_kv_utils_async_cb.C new file mode 100644 index 00000000..e26d9e3f --- /dev/null +++ b/src/kv/test/fvt_kv_utils_async_cb.C @@ -0,0 +1,1243 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_utils_async_cb.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * utility functions for async ark functions + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ +#include +#include +#include +#include +} + +#define KV_ASYNC_EASY 16 +#define KV_ASYNC_BIG_BLOCKS 64 +#define KV_ASYNC_LOW_STRESS 128 +#define KV_ASYNC_HIGH_STRESS 512 +#define KV_ASYNC_JOB_Q KV_ASYNC_HIGH_STRESS +#define KV_ASYNC_MAX_CONTEXTS 508 + +#define KV_ASYNC_CT_RUNNING 0x80000000 +#define KV_ASYNC_CT_PERF 0x40000000 +#define KV_ASYNC_CT_DONE 0x20000000 +#define KV_ASYNC_CT_ERROR_INJECT 0x10000000 + +#define KV_ASYNC_CB_REPLACE 0x80000000 +#define KV_ASYNC_CB_SGD 0x40000000 +#define KV_ASYNC_CB_READ_PERF 0x20000000 +#define KV_ASYNC_CB_WRITE_PERF 0x10000000 +#define KV_ASYNC_CB_RUNNING 0x08000000 +#define KV_ASYNC_CB_QUEUED 0x04000000 +#define KV_ASYNC_CB_SHUTDOWN 0x02000000 +#define KV_ASYNC_CB_MULTI_CTXT_IO 0x01000000 +#define KV_ASYNC_CB_SET 0x00800000 +#define KV_ASYNC_CB_GET 0x00400000 +#define KV_ASYNC_CB_EXISTS 0x00200000 +#define KV_ASYNC_CB_DEL 0x00100000 +#define KV_ASYNC_CB_GTEST 0x00080000 + +#define B_MARK 0xBEEFED0B +#define E_MARK 0xBEEFED0E + +#define IS_GTEST (pCB->flags & KV_ASYNC_CB_GTEST) + +/** + ******************************************************************************* + * \brief + * structs to represent a job in a context + ******************************************************************************/ +typedef struct async_CB_s +{ + uint64_t b_mark; + ARC *ark; + kv_t *db; + uint32_t len; + uint32_t len_i; + uint32_t replace; + uint32_t flags; + uint64_t tag; + uint32_t perf_loops; + void (*cb) (int, uint64_t, int64_t); + uint32_t (*regen)(kv_t*, uint32_t, uint32_t); + uint32_t regen_len; + char *gvalue; + uint64_t 
e_mark; +} async_CB_t; + +/** + ******************************************************************************* + * \brief + * structs to represent a context with X jobs + ******************************************************************************/ +typedef struct async_context_s +{ + ARK *ark; + uint32_t flags; + uint32_t secs; + async_CB_t pCBs[KV_ASYNC_JOB_Q]; +} async_context_t; + +async_context_t pCTs[KV_ASYNC_MAX_CONTEXTS]; +uint32_t start = 0; +uint32_t stop = 0; + +static void kv_async_SET_KEY (async_CB_t *pCB); +static void kv_async_GET_KEY (async_CB_t *pCB); +static void kv_async_EXISTS_KEY(async_CB_t *pCB); +static void kv_async_DEL_KEY (async_CB_t *pCB); +static void kv_async_q_retry (async_CB_t *pCB); +static void kv_async_dispatch (async_CB_t *pCB); +static void kv_async_perf_done (async_CB_t *pCB); + +/** + ******************************************************************************* + * \brief + * callback function for set/get/exists/del + ******************************************************************************/ +static void kv_async_cb(int errcode, uint64_t dt, int64_t res) +{ + async_CB_t *pCB = (async_CB_t*)dt; + kv_t *p_kv = NULL; + uint64_t tag = (uint64_t)pCB; + + if (pCB == NULL) + { + KV_TRC_FFDC(pFT, "FFDC: pCB NULL"); + return; + } + if (pCB->b_mark != B_MARK) + { + KV_TRC_FFDC(pFT, "FFDC: B_MARK FAILURE %p: %"PRIx64"", pCB, pCB->b_mark); + return; + } + if (pCB->e_mark != E_MARK) + { + KV_TRC_FFDC(pFT, "FFDC: E_MARK FAILURE %p: %"PRIx64"", pCB, pCB->e_mark); + return; + } + if (EBUSY == errcode) {kv_async_q_retry(pCB); goto done;} + + if (IS_GTEST) + { + EXPECT_EQ(0, errcode); + EXPECT_EQ(tag, pCB->tag); + } + p_kv = pCB->db + pCB->len_i; + ++pCB->len_i; + + if (pCB->flags & KV_ASYNC_CB_SET) + { + KV_TRC_IO(pFT, "KV_ASYNC_CB_SET, %p %d %d", pCB, pCB->len_i, pCB->len); + if (0 != errcode) printf("ark_set failed, errcode=%d\n", errcode); + if (tag != pCB->tag) printf("ark_set bad tag\n"); + if (res != p_kv->vlen) printf("ark_set 
bad vlen\n"); + if (IS_GTEST) { EXPECT_EQ(res, p_kv->vlen);} + + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + if (pCB->flags & KV_ASYNC_CB_WRITE_PERF) + { + pCB->len_i = 0; + kv_async_perf_done(pCB); + goto done; + } + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_CB_SET; + pCB->flags |= KV_ASYNC_CB_GET; + kv_async_GET_KEY(pCB); + goto done; + } + kv_async_SET_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_CB_GET) + { + uint32_t miscompare = memcmp(p_kv->value, pCB->gvalue, p_kv->vlen); + + KV_TRC_IO(pFT, "KV_ASYNC_CB_GET, %p %d %d", pCB, pCB->len_i, pCB->len); + if (0 != errcode) printf("ark_get failed, errcode=%d\n", errcode); + if (tag != pCB->tag) printf("ark_get bad tag\n"); + if (res != p_kv->vlen) printf("ark_get bad vlen\n"); + if (IS_GTEST) { EXPECT_EQ(0, miscompare);} + + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + if (pCB->flags & KV_ASYNC_CB_READ_PERF) + { + pCB->len_i = 0; + kv_async_perf_done(pCB); + goto done; + } + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_CB_GET; + pCB->flags |= KV_ASYNC_CB_EXISTS; + kv_async_EXISTS_KEY(pCB); + goto done; + } + kv_async_GET_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_CB_EXISTS) + { + KV_TRC_IO(pFT, "KV_ASYNC_CB_EXISTS, %p %d %d", pCB, pCB->len_i, pCB->len); + if (0 != errcode) printf("ark_exists failed,errcode=%d\n",errcode); + if (tag != pCB->tag) printf("ark_exists bad tag\n"); + if (res != p_kv->vlen) printf("ark_exists bad vlen\n"); + if (IS_GTEST) { EXPECT_EQ(res, p_kv->vlen);} + + /* if end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_CB_EXISTS; + + if (pCB->flags & KV_ASYNC_CB_SGD) + { + pCB->flags |= KV_ASYNC_CB_DEL; + kv_async_DEL_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_CB_REPLACE) + { + /* make sure we don't shutdown before we have replaced once */ + if (pCB->replace && + pCB->flags & KV_ASYNC_CB_SHUTDOWN) + { 
+ pCB->flags |= KV_ASYNC_CB_DEL; + kv_async_DEL_KEY(pCB); + goto done; + } + pCB->replace = TRUE; + if (0 != pCB->regen(pCB->db, pCB->len, pCB->regen_len)) + { + printf("regen failure, fatal\n"); + KV_TRC_FFDC(pFT, "FFDC: regen failure"); + memset(pCB, 0, sizeof(async_CB_t)); + goto done; + } + pCB->flags |= KV_ASYNC_CB_SET; + kv_async_SET_KEY(pCB); + goto done; + } + else + { + /* should not be here */ + EXPECT_TRUE(0); + } + } + kv_async_EXISTS_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_CB_DEL) + { + KV_TRC_IO(pFT, "KV_ASYNC_CB_DEL, %p i:%d len:%d", pCB, pCB->len_i,pCB->len); + if (0 != errcode) printf("ark_del failed, errcode=%d\n",errcode); + if (tag != pCB->tag) printf("ark_del bad tag\n"); + if (res != p_kv->vlen) printf("ark_del bad vlen\n"); + if (IS_GTEST) { EXPECT_EQ(res, p_kv->vlen);} + + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + if (pCB->flags & KV_ASYNC_CB_SHUTDOWN) + { + if (!(pCB->flags & KV_ASYNC_CB_MULTI_CTXT_IO)) + { + kv_db_destroy(pCB->db, pCB->len); + } + if (pCB->gvalue) free(pCB->gvalue); + memset(pCB, 0, sizeof(async_CB_t)); + KV_TRC_IO(pFT, "LOOP_DONE: %p", pCB); + goto done; + } + KV_TRC_IO(pFT, "NEXT_LOOP, %p", pCB); + pCB->flags &= ~KV_ASYNC_CB_DEL; + pCB->flags |= KV_ASYNC_CB_SET; + pCB->len_i = 0; + kv_async_SET_KEY(pCB); + goto done; + } + kv_async_DEL_KEY(pCB); + goto done; + } + else + { + /* should not be here */ + EXPECT_TRUE(0); + } + +done: + return; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_SET_KEY(async_CB_t *pCB) +{ + uint64_t tag = (uint64_t)pCB; + int32_t rc = 0; + + KV_TRC_IO(pFT, "SET_KEY: %p, %p %"PRIx64" %d", pCB, pCB->db, tag, pCB->len_i); + + pCB->tag = tag; + + rc = ark_set_async_cb(pCB->ark, + pCB->db[pCB->len_i].klen, + pCB->db[pCB->len_i].key, + pCB->db[pCB->len_i].vlen, + 
pCB->db[pCB->len_i].value, + pCB->cb, + tag); + if (EAGAIN == rc) + { + kv_async_q_retry(pCB); + } + else + { + EXPECT_EQ(0, rc); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_GET_KEY(async_CB_t *pCB) +{ + uint64_t tag = (uint64_t)pCB; + int32_t rc = 0; + + KV_TRC_IO(pFT, "GET_KEY: %p, %" PRIx64 " %d", pCB, tag, pCB->len_i); + + pCB->tag = tag; + + rc = ark_get_async_cb(pCB->ark, + pCB->db[pCB->len_i].klen, + pCB->db[pCB->len_i].key, + pCB->db[pCB->len_i].vlen, + pCB->gvalue, + 0, + pCB->cb, + tag); + if (EAGAIN == rc) + { + kv_async_q_retry(pCB); + } + else + { + EXPECT_EQ(0, rc); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_EXISTS_KEY(async_CB_t *pCB) +{ + uint64_t tag = (uint64_t)pCB; + int32_t rc = 0; + + KV_TRC_DBG(pFT, "EXI_KEY: %p, %" PRIx64 "", pCB, tag); + + pCB->tag = tag; + + rc = ark_exists_async_cb(pCB->ark, + pCB->db[pCB->len_i].klen, + pCB->db[pCB->len_i].key, + pCB->cb, + tag); + if (EAGAIN == rc) + { + kv_async_q_retry(pCB); + } + else + { + EXPECT_EQ(0, rc); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_DEL_KEY(async_CB_t *pCB) +{ + uint64_t tag = (uint64_t)pCB; + int32_t rc = 0; + + KV_TRC_DBG(pFT, "DEL_KEY: %p, %" PRIx64 "", pCB, tag); + + pCB->tag = tag; + + rc = ark_del_async_cb(pCB->ark, + pCB->db[pCB->len_i].klen, + pCB->db[pCB->len_i].key, + pCB->cb, + tag); + if (EAGAIN == rc) + { + kv_async_q_retry(pCB); + } + else + { + EXPECT_EQ(0, rc); + } +} + +/** + ******************************************************************************* + * \brief + 
******************************************************************************/ +static void kv_async_q_retry(async_CB_t *pCB) +{ + uint32_t new_flags = pCB->flags; + + KV_TRC_DBG(pFT, "Q_RETRY %p", pCB); + new_flags &= ~KV_ASYNC_CB_RUNNING; + new_flags |= KV_ASYNC_CB_QUEUED; + pCB->flags = new_flags; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_perf_done(async_CB_t *pCB) +{ + ++pCB->perf_loops; + + if (pCB->flags & KV_ASYNC_CB_SHUTDOWN) + { + KV_TRC(pFT, "shutdown %p %d", pCB, pCB->perf_loops); + + pCB->flags &= ~(KV_ASYNC_CB_SET | + KV_ASYNC_CB_GET | + KV_ASYNC_CB_WRITE_PERF | + KV_ASYNC_CB_READ_PERF | + KV_ASYNC_CB_RUNNING); + return; + } + + kv_async_dispatch(pCB); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +uint32_t kv_async_init_ctxt_perf(uint32_t ctxt, uint32_t npool, uint32_t secs) +{ + char *env_FVT = getenv("FVT_DEV"); + async_context_t *pCT = pCTs+ctxt; + + if (ctxt < 0 || ctxt > KV_ASYNC_MAX_CONTEXTS) + { + printf("FFDC: kv_async_init_ctxt %d %X\n", ctxt, ctxt); + return EINVAL; + } + memset(pCT, 0, sizeof(async_context_t)); + memset(pCT->pCBs, 0, sizeof(async_CB_t)*KV_ASYNC_JOB_Q); + + if (ark_create_verbose(env_FVT, &pCT->ark, + 1048576, + 4096, + 1048576, + npool, + 256, + 8*1024, + ARK_KV_VIRTUAL_LUN)) + { + printf("ark_create failed for ctxt:%d\n", ctxt); + return ENOMEM; + } + if (NULL == pCT->ark) return ENOMEM; + + pCT->flags |= KV_ASYNC_CT_RUNNING; + pCT->secs = secs; + KV_TRC(pFT, "init_ctxt ctxt:%d ark:%p", ctxt, pCT->ark); + return 0; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void 
kv_async_init_ctxt(uint32_t ctxt, uint32_t secs)
+{
+    char            *env_FVT = getenv("FVT_DEV");
+    async_context_t *pCT     = NULL;
+
+    /* ctxt is unsigned, so 'ctxt < 0' could never fire; valid indices are  */
+    /* 0..KV_ASYNC_MAX_CONTEXTS-1, so reject with >= (was off-by-one '>').  */
+    /* Check before forming pCTs+ctxt so we never compute an OOB pointer.   */
+    if (ctxt >= KV_ASYNC_MAX_CONTEXTS)
+    {
+        printf("FFDC: kv_async_init_ctxt %d %X\n", ctxt, ctxt);
+        return;
+    }
+    pCT = pCTs+ctxt;
+    memset(pCT, 0, sizeof(async_context_t));
+    memset(pCT->pCBs, 0, sizeof(async_CB_t)*KV_ASYNC_JOB_Q);
+
+    ASSERT_EQ(0, ark_create_verbose(env_FVT, &pCT->ark,
+                                    1048576,
+                                    4096,
+                                    1048576,
+                                    20,
+                                    256,
+                                    8*1024,
+                                    ARK_KV_VIRTUAL_LUN));
+    ASSERT_TRUE(NULL != pCT->ark);
+
+    pCT->flags |= KV_ASYNC_CT_RUNNING;
+    pCT->secs   = secs;
+    KV_TRC(pFT, "init_ctxt ctxt:%d ark:%p secs:%d", ctxt, pCT->ark, pCT->secs);
+}
+
+/**
+ *******************************************************************************
+ * \brief
+ *   Initialize job 'job' on context 'ctxt': bind the CB to the context's
+ *   ark, mark it SET+QUEUED so the dispatcher starts it, and allocate the
+ *   get-verify buffer (regen_len == vlen).
+ ******************************************************************************/
+void kv_async_set_job(uint32_t flags,
+                      uint32_t ctxt,
+                      uint32_t job,
+                      kv_t    *db,
+                      uint32_t vlen,
+                      uint32_t len)
+{
+    async_context_t *pCT     = pCTs+ctxt;
+    async_CB_t      *pCB     = NULL;
+    char             type[4] = {0};
+
+    ASSERT_TRUE(NULL != pCT);
+    /* ctxt is unsigned: the 'ctxt >= 0' assert was tautological, dropped;  */
+    /* only the upper bound needs checking.                                 */
+    ASSERT_TRUE(ctxt < KV_ASYNC_MAX_CONTEXTS);
+    ASSERT_TRUE(0 != len);
+
+    if (flags & KV_ASYNC_CB_SGD) sprintf(type, "SGD");
+    else                         sprintf(type, "REP");
+
+    pCB = pCT->pCBs+job; memset(pCB, 0, sizeof(async_CB_t));
+    pCB->ark       = pCT->ark;
+    pCB->flags     = flags | KV_ASYNC_CB_SET | KV_ASYNC_CB_QUEUED;
+    pCB->db        = db;
+    pCB->regen     = kv_db_fixed_regen_values;
+    pCB->len       = len;
+    pCB->cb        = kv_async_cb;
+    pCB->regen_len = vlen;
+    pCB->gvalue    = (char*)malloc(pCB->regen_len);
+    pCB->b_mark    = B_MARK;
+    pCB->e_mark    = E_MARK;
+
+    KV_TRC(pFT, "CREATE_JOB: ctxt:%d ark:%p %s: pCB:%p flags:%X",
+           ctxt, pCT->ark, type, pCB, pCB->flags);
+}
+
+/**
+ *******************************************************************************
+ * \brief
+ *   Create a fixed kv db of 'len' keys (klen x vlen) and start a gtest job
+ *   on it via kv_async_set_job.
+ ******************************************************************************/
+void kv_async_init_job(uint32_t flags,
+                       uint32_t ctxt,
+                       uint32_t job,
+                       uint32_t 
klen, + uint32_t vlen, + uint32_t len) +{ + kv_t *db = (kv_t*)kv_db_create_fixed(len, klen, vlen); + ASSERT_TRUE(NULL != db); + + KV_TRC(pFT, "CREATE_JOB FIXED %dx%dx%d", klen, vlen, len); + + kv_async_set_job(flags|KV_ASYNC_CB_GTEST, ctxt, job, db, vlen, len); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_job_perf(uint32_t jobs, uint32_t klen, uint32_t vlen,uint32_t len) +{ + long int wr_us = 0; + long int rd_us = 0; + long int mil = 1000000; + float wr_s = 0; + float rd_s = 0; + float wr_mb = 0; + float rd_mb = 0; + uint64_t mb64_1 = (uint64_t)KV_1M; + uint64_t wr_bytes = 0; + uint64_t rd_bytes = 0; + uint64_t ops = 0; + uint64_t post_ops = 0; + uint64_t ios = 0; + uint64_t post_ios = 0; + float wr_ops = 0; + float wr_ios = 0; + float rd_ops = 0; + float rd_ios = 0; + uint32_t secs = 5; + uint32_t job = 0; + struct timeval stop, start; + + kv_async_init_ctxt(0, secs); + + for (job=0; jobflags |= KV_ASYNC_CT_PERF; + + /* do writes */ + (void)ark_stats(kv_async_get_ark(ASYNC_SINGLE_CONTEXT), &ops, &ios); + KV_TRC(pFT, "PERF wr: ops:%"PRIu64" ios:%"PRIu64"", ops, ios); + gettimeofday(&start, NULL); + kv_async_run_jobs(); /* run write jobs */ + KV_TRC(pFT, "writes done"); + gettimeofday(&stop, NULL); + wr_us += (stop.tv_sec*mil + stop.tv_usec) - + (start.tv_sec*mil + start.tv_usec); + (void)ark_stats(kv_async_get_ark(ASYNC_SINGLE_CONTEXT),&post_ops,&post_ios); + KV_TRC(pFT, "PERF wr: ops:%"PRIu64" ios:%"PRIu64"", post_ops, post_ios); + wr_ops += post_ops - ops; + wr_ios += post_ios - ios; + + /* calc bytes written */ + for (job=0; jobpCBs+job)->perf_loops; + } + + /* do reads */ + for (job=0; jobpCBs+job)->perf_loops) + { + (pCTs->pCBs+job)->perf_loops = 0; + (pCTs->pCBs+job)->flags = KV_ASYNC_CB_GET | + KV_ASYNC_CB_QUEUED | + KV_ASYNC_CB_READ_PERF; + } + } + pCTs->flags |= KV_ASYNC_CT_RUNNING; + + 
(void)ark_stats(kv_async_get_ark(0), &ops, &ios); + KV_TRC(pFT, "PERF rd: ops:%"PRIu64" ios:%"PRIu64"", ops, ios); + gettimeofday(&start, NULL); + kv_async_run_jobs(); /* run read jobs */ + gettimeofday(&stop, NULL); + KV_TRC(pFT, "reads done"); + rd_us += (stop.tv_sec*mil + stop.tv_usec) - + (start.tv_sec*mil + start.tv_usec); + (void)ark_stats(kv_async_get_ark(0), &post_ops, &post_ios); + KV_TRC(pFT, "PERF rd: ops:%"PRIu64" ios:%"PRIu64"", post_ops, post_ios); + rd_ops += post_ops - ops; + rd_ios += post_ios - ios; + + ASSERT_EQ(0, ark_delete(pCTs->ark)); + + /* calc bytes read */ + for (job=0; jobpCBs+job)->perf_loops; + kv_db_destroy((pCTs->pCBs+job)->db, (pCTs->pCBs+job)->len); + if ((pCTs->pCBs+job)->gvalue) + free((pCTs->pCBs+job)->gvalue); + } + + /* calc and print results */ + wr_s = (float)((float)wr_us/(float)mil); + wr_mb = (float)((double)wr_bytes / (double)mb64_1); + rd_s = (float)((float)rd_us/(float)mil); + rd_mb = (float)((double)rd_bytes / (double)mb64_1); + + printf("ASYNC %dx%dx%d writes: %.3d jobs %2.3f mb in %.1f secs at ", + klen, vlen, len, jobs, wr_mb, wr_s); + printf("%2.3f mbps, %6.0f op/s, %.0f io/s\n", + wr_mb/wr_s, + wr_ops/wr_s, + wr_ios/wr_s); + printf("ASYNC %dx%dx%d reads: %.3d jobs %2.3f mb in %.1f secs at ", + klen, vlen, len, jobs, rd_mb, rd_s); + printf("%2.3f mbps, %6.0f op/s, %.0f io/s\n", + rd_mb/rd_s, + rd_ops/rd_s, + rd_ios/rd_s); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_SGD(uint32_t ctxt, + uint32_t job, + uint32_t klen, + uint32_t vlen, + uint32_t len) +{ + kv_async_init_job(KV_ASYNC_CB_SGD, ctxt, job, klen, vlen, len); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_REP(uint32_t ctxt, + uint32_t 
job, + uint32_t klen, + uint32_t vlen, + uint32_t len) +{ + kv_async_init_job(KV_ASYNC_CB_REPLACE, ctxt, job, klen, vlen, len); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_easy(uint32_t ctxt) +{ + uint32_t i=0; + + for (i=0; i "); fflush(stdout); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_ark_io_inject(uint32_t num_ctxt, + uint32_t jobs, + uint32_t vlen, + uint32_t secs) +{ + uint32_t i=0; + + FVT_KV_SET_INJECT_ACTIVE; + + kv_async_init_ark_io(num_ctxt, jobs, vlen, secs); + + for (i=0; i "); fflush(stdout); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +uint32_t kv_async_init_perf_io(uint32_t num_ctxt, + uint32_t jobs, + uint32_t npool, + uint32_t klen, + uint32_t vlen, + uint32_t LEN, + uint32_t secs) +{ + uint32_t job = 0; + uint32_t ctxt = 0; + kv_t *db[jobs]; + + printf("ASYNC %dx%dx%d", klen, vlen, LEN); fflush(stdout); + for (job=0; job "); fflush(stdout); + return 0; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +static void kv_async_dispatch(async_CB_t *pCB) +{ + uint32_t new_flags = pCB->flags; + + new_flags &= ~KV_ASYNC_CB_QUEUED; + new_flags |= KV_ASYNC_CB_RUNNING; + pCB->flags = new_flags; + + if (pCB->flags & KV_ASYNC_CB_SET) + { + KV_TRC_IO(pFT, "DISPATCH: SET: %p", pCB); + kv_async_SET_KEY(pCB); + } + else if (pCB->flags & KV_ASYNC_CB_GET) + { + KV_TRC_IO(pFT, "DISPATCH: GET: %p", pCB); + kv_async_GET_KEY(pCB); + } + else if (pCB->flags & 
KV_ASYNC_CB_EXISTS) + { + KV_TRC_IO(pFT, "DISPATCH: EXI: %p", pCB); + kv_async_EXISTS_KEY(pCB); + } + else if (pCB->flags & KV_ASYNC_CB_DEL) + { + KV_TRC_IO(pFT, "DISPATCH: DEL: %p", pCB); + kv_async_DEL_KEY(pCB); + } + else + { + EXPECT_TRUE(0); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +uint32_t kv_async_dispatch_jobs(uint32_t ctxt) +{ + async_context_t *pCT = pCTs+ctxt; + async_CB_t *pCB = NULL; + uint32_t jobs_running = 0; + + if (ctxt < 0 || ctxt > KV_ASYNC_MAX_CONTEXTS) + { + KV_TRC_FFDC(pFT, "FFDC %x", ctxt); + return FALSE; + } + + for (pCB=pCT->pCBs; pCBpCBs+KV_ASYNC_JOB_Q; pCB++) + { + if (pCB->flags & KV_ASYNC_CB_QUEUED) + { + kv_async_dispatch(pCB); + jobs_running = 1; + usleep(1000); + } + else if (pCB->flags & KV_ASYNC_CB_RUNNING) + { + jobs_running = 1; + } + } + return jobs_running; +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_run_jobs(void) +{ + async_CB_t *pCB = NULL; + uint32_t ctxt_running = 0; + uint32_t jobs_running = 0; + uint32_t i = 0; + uint32_t next = 0; + uint32_t elapse = 0; + uint32_t inject = 0; + uint32_t secs = 0; + uint32_t log_interval = 600; + uint64_t ops = 0; + uint64_t ios = 0; + uint32_t tops = 0; + uint32_t tios = 0; + uint32_t perf = 0; + + KV_TRC(pFT, "ASYNC START: 0 minutes"); + + if (!(pCTs->pCBs->flags & KV_ASYNC_CB_RUNNING)) start = time(0); + next = log_interval; + + do + { + ctxt_running = FALSE; + + if (elapse > next) + { + KV_TRC(pFT, "ASYNC RUNNING: %d elapsed minutes", elapse/60); + next += log_interval; + } + + for (i=0; i= inject && + pCTs[i].flags & KV_ASYNC_CT_ERROR_INJECT) + { + KV_TRC_FFDC(pFT, "FFDC: INJECT ERRORS"); + FVT_KV_INJECT_READ_ERROR; + FVT_KV_INJECT_WRITE_ERROR; + FVT_KV_INJECT_ALLOC_ERROR; + 
++inject; + } + + if (elapse >= pCTs[i].secs) + { + for (pCB=pCTs[i].pCBs;pCBflags & KV_ASYNC_CB_RUNNING || + pCB->flags & KV_ASYNC_CB_QUEUED) + && + (!(pCB->flags & KV_ASYNC_CB_SHUTDOWN)) ) + { + pCB->flags |= KV_ASYNC_CB_SHUTDOWN; + KV_TRC_IO(pFT, "SHUTDOWN pCB %p (%d >= %d)", pCB, elapse, pCTs[i].secs); + } + } + } + usleep(100); + } + } + while (ctxt_running); + + stop = time(0); + secs = stop - start; + + KV_TRC(pFT, "ASYNC RUNNING DONE: %d minutes", elapse/60); + + /* log cleanup, since the first ark_delete closes the log file */ + for (i=0; ipCBs;pCBpCBs+KV_ASYNC_JOB_Q;pCB++) + { + if (pCB->flags & KV_ASYNC_CB_MULTI_CTXT_IO) + { + kv_db_destroy(pCB->db, pCB->len); + } + } + + for (i=0; i KV_ASYNC_MAX_CONTEXTS) + { + KV_TRC_FFDC(pFT, "FFDC %x", ctxt); + return FALSE; + } + + return pCT->ark; +} diff --git a/src/kv/test/fvt_kv_utils_async_cb.h b/src/kv/test/fvt_kv_utils_async_cb.h new file mode 100644 index 00000000..382d19ad --- /dev/null +++ b/src/kv/test/fvt_kv_utils_async_cb.h @@ -0,0 +1,157 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_utils_async_cb.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * functions to aid in testing the async kv ark functions + * \details + * \ingroup + ******************************************************************************/ +#ifndef KV_ASYNC_CB_UTILS_H +#define KV_ASYNC_CB_UTILS_H + +#define ASYNC_SINGLE_CONTEXT 0 +#define ASYNC_SINGLE_JOB 0 + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_ctxt(uint32_t ctxt, uint32_t secs); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_SGD(uint32_t ctxt, + uint32_t job, + uint32_t klen, + uint32_t vlen, + uint32_t len); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_REP(uint32_t ctxt, + uint32_t job, + uint32_t klen, + uint32_t vlen, + uint32_t len); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_easy(uint32_t ctxt); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_low_stress(uint32_t ctxt); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_high_stress(uint32_t ctxt); + +/** + 
******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_job_BIG_BLOCKS(uint32_t ctxt); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_ctxt_io(uint32_t num_ctxt, + uint32_t jobs, + uint32_t klen, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_ark_io(uint32_t num_ctxt, + uint32_t jobs, + uint32_t vlen, + uint32_t secs); +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_init_ark_io_inject(uint32_t num_ctxt, + uint32_t jobs, + uint32_t vlen, + uint32_t secs); +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +uint32_t kv_async_init_perf_io(uint32_t num_ctxt, + uint32_t jobs, + uint32_t npool, + uint32_t klen, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_run_jobs(void); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_start_jobs(void); + +/** + ******************************************************************************* + * \brief + 
******************************************************************************/ +void kv_async_wait_jobs(void); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void kv_async_job_perf(uint32_t jobs, uint32_t klen,uint32_t vlen,uint32_t len); + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +ARK* kv_async_get_ark(uint32_t ctxt); + +#endif diff --git a/src/kv/test/fvt_kv_utils_sync_pth.C b/src/kv/test/fvt_kv_utils_sync_pth.C new file mode 100644 index 00000000..8e1a6e6f --- /dev/null +++ b/src/kv/test/fvt_kv_utils_sync_pth.C @@ -0,0 +1,529 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_kv_utils_sync_pth.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include +#include + +extern "C" +{ +#include +#include +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_set(set_get_args_t *args) +{ + void* (*fp)(void*) = (void*(*)(void*))set; + EXPECT_EQ(0, pthread_create(&args->pth, + NULL, + fp, + (void*)args)); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_get(set_get_args_t *args) +{ + void* (*fp)(void*) = (void*(*)(void*))get; + EXPECT_EQ(0, pthread_create(&args->pth, + NULL, + fp, + (void*)args)); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_del(set_get_args_t *args) +{ + void* (*fp)(void*) = (void*(*)(void*))del; + EXPECT_EQ(0, pthread_create(&args->pth, + NULL, + fp, + (void*)args)); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_SGD(set_get_args_t *args) +{ + uint32_t i = 0; + uint32_t rc = 0; + void* (*fp)(void*) = (void*(*)(void*))SGD; + + args->pth = 0; + for (i=0; i<5000; i++) + { + if (0 == (rc=pthread_create(&args->pth, + NULL, + fp, + (void*)args))) break; + usleep(10000); + } + if (rc) + { + args->pth = 0; + printf("."); fflush(stdout); + } +} + +/** + ******************************************************************************* + * \brief + 
******************************************************************************/ +void Sync_pth::run_read_loop(set_get_args_t *args) +{ + uint32_t i = 0; + uint32_t rc = 0; + void* (*fp)(void*) = (void*(*)(void*))read_loop; + + args->pth = 0; + for (i=0; i<5000; i++) + { + if (0 == (rc=pthread_create(&args->pth, + NULL, + fp, + (void*)args))) break; + usleep(10000); + } + if (rc) + { + args->pth = 0; + printf("."); fflush(stdout); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_write_loop(set_get_args_t *args) +{ + uint32_t i = 0; + uint32_t rc = 0; + void* (*fp)(void*) = (void*(*)(void*))write_loop; + + args->pth = 0; + for (i=0; i<5000; i++) + { + if (0 == (rc=pthread_create(&args->pth, + NULL, + fp, + (void*)args))) break; + usleep(10000); + } + if (rc) + { + args->pth = 0; + printf("."); fflush(stdout); + } +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::wait(set_get_args_t *args) +{ + if (args->pth) + EXPECT_EQ(0, pthread_join(args->pth, NULL)); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t LEN, + uint32_t secs) +{ + uint32_t ops = 0; + uint32_t ios = 0; + + run_multi_ctxt(num_ctxt, num_pth, vlen, LEN, secs, &ops, &ios); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, 
+ uint32_t secs) +{ + uint32_t ops = 0; + uint32_t ios = 0; + + run_multi_ctxt(num_ctxt, num_pth, npool, vlen, LEN, secs, &ops, &ios); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t LEN, + uint32_t secs, + uint32_t *p_ops, + uint32_t *p_ios) +{ + run_multi_ctxt(num_ctxt, num_pth, 20, vlen, LEN, secs, p_ops, p_ios); +} + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +void Sync_pth::run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, + uint32_t secs, + uint32_t *p_ops, + uint32_t *p_ios) +{ + uint32_t i = 0; + uint32_t ctxt_i = 0; + uint32_t pth_i = 0; + uint32_t klen = 140; + uint32_t tot_pth = num_ctxt * num_pth; + set_get_args_t pth_args[tot_pth]; + ARK *ark[num_ctxt]; + kv_t *db[num_pth]; + struct timeval stop, start; + uint64_t ops = 0; + uint64_t ios = 0; + + if (num_pth > MAX_PTH_PER_CONTEXT) + { + printf("cannot exceed %d pthreads for sync ops\n", MAX_PTH_PER_CONTEXT); + return; + } + + memset(pth_args, 0, sizeof(set_get_args_t) * tot_pth); + + /* alloc one set of db's, to be used for each context */ + for (i=0; i MAX_PTH_PER_CONTEXT) + { + printf("cannot exceed %d pthreads for sync ops\n", MAX_PTH_PER_CONTEXT); + return; + } + + memset(pth_args, 0, sizeof(set_get_args_t) * tot_pth); + + /* alloc one set of db's, to be used for each context */ + for (i=0; i MAX_PTH_PER_CONTEXT) + { + printf("cannot exceed %d pthreads for sync ops\n", MAX_PTH_PER_CONTEXT); + return; + } + + memset(pth_args, 0, sizeof(set_get_args_t) * tot_pth); + + /* alloc one set of db's, to be used for each context */ + for (i=0; i +#include +} + +#define MAX_PTH_PER_CONTEXT 
128 + +/** + ******************************************************************************* + * \brief + * struct to contain args passed to child + ******************************************************************************/ +typedef struct +{ + ARK *ark; + kv_t *db; + int32_t vlen; + int32_t LEN; + int32_t secs; + uint32_t mb; + uint32_t ops; + pthread_t pth; +} set_get_args_t; + +/** + ******************************************************************************* + * \brief + ******************************************************************************/ +class Sync_pth +{ + protected: + static void set(void *args) + { + set_get_args_t *sg_args = (set_get_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + + /* load all key/value pairs from the fixed db into the ark */ + for (i=0; iLEN; i++) + { + EXPECT_EQ(0, ark_set(ark, db[i].klen, db[i].key, db[i].vlen, + db[i].value, &res)); + EXPECT_EQ(db[i].vlen, res); + } + } + + static void get(void *args) + { + set_get_args_t *sg_args = (set_get_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + uint8_t gvalue[sg_args->vlen]; + + /* query all key/value pairs from the fixed db */ + for (i=sg_args->LEN-1; i>=0; i--) + { + EXPECT_EQ(0, ark_get(ark, db[i].klen, db[i].key, db[i].vlen, + gvalue, 0, &res)); + EXPECT_EQ(db[i].vlen, res); + EXPECT_EQ(0, memcmp(db[i].value,gvalue,db[i].vlen)); + EXPECT_EQ(0, ark_exists(ark, db[i].klen, db[i].key, &res)); + EXPECT_EQ(db[i].vlen, res); + } + } + + static void del(void *args) + { + set_get_args_t *sg_args = (set_get_args_t*)args; + ARK *ark = sg_args->ark; + kv_t *db = sg_args->db; + int64_t res = 0; + int32_t i = 0; + + /* delete all key/value pairs from the fixed db */ + for (i=0; iLEN; i++) + { + EXPECT_EQ(0, ark_del(ark, db[i].klen, db[i].key, &res)); + EXPECT_EQ(db[i].vlen, res); + } + } + + static void SGD(void *args) + { + set_get_args_t *sg_args = 
(set_get_args_t*)args; + int32_t start = time(0); + int32_t next = start + 600; + int32_t cur = 0; + + KV_TRC(pFT, "RUN_PTH 0 minutes"); + + do + { + if (cur > next) + { + KV_TRC(pFT, "RUN_PTH %d minutes", (next-start)/60); + next += 600; + } + + set(args); + get(args); + del(args); + + cur = time(0); + } + while (cur-start < sg_args->secs); + + KV_TRC(pFT, "RUN_PTH DONE"); + } + + static void read_loop(void *args) + { + set_get_args_t *sg_args = (set_get_args_t*)args; + int32_t start = time(0); + int32_t next = start + 600; + int32_t cur = 0; + + KV_TRC(pFT, "RUN_PTH 0 minutes"); + + set(args); + + do + { + if (cur > next) + { + KV_TRC(pFT, "RUN_PTH %d minutes", (next-start)/60); + next += 600; + } + + get(args); + + cur = time(0); + } + while (cur-start < sg_args->secs); + + del(args); + + KV_TRC(pFT, "RUN_PTH DONE"); + } + + static void write_loop(void *args) + { + set_get_args_t *sg_args = (set_get_args_t*)args; + int32_t start = time(0); + int32_t next = start + 600; + int32_t cur = 0; + + KV_TRC(pFT, "RUN_PTH 0 minutes"); + + do + { + if (cur > next) + { + KV_TRC(pFT, "RUN_PTH %d minutes", (next-start)/60); + next += 600; + } + + set(args); + del(args); + + cur = time(0); + } + while (cur-start < sg_args->secs); + + KV_TRC(pFT, "RUN_PTH DONE"); + } + + public: + void run_set (set_get_args_t *args); + void run_get (set_get_args_t *args); + void run_del (set_get_args_t *args); + void run_SGD (set_get_args_t *args); + void run_read_loop (set_get_args_t *args); + void run_write_loop(set_get_args_t *args); + + void wait(set_get_args_t *args); + + void run_multi_ctxt_rd(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); + + void run_multi_ctxt_wr(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); + + void run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); + + void run_multi_ctxt(uint32_t num_ctxt, + 
uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, + uint32_t secs); + + void run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t vlen, + uint32_t LEN, + uint32_t secs, + uint32_t *p_ops, + uint32_t *p_ios); + + void run_multi_ctxt(uint32_t num_ctxt, + uint32_t num_pth, + uint32_t npool, + uint32_t vlen, + uint32_t LEN, + uint32_t secs, + uint32_t *p_ops, + uint32_t *p_ios); + +}; + +#endif diff --git a/src/kv/test/fvt_trace.c b/src/kv/test/fvt_trace.c new file mode 100644 index 00000000..7c7e32bb --- /dev/null +++ b/src/kv/test/fvt_trace.c @@ -0,0 +1,38 @@ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +KV_Trace_t fvt_kv_trace; +KV_Trace_t *pFT = &fvt_kv_trace; + +void fvt_trace_intHandler(int s) +{ + if (NULL == pFT) _exit(-2); + if (EYEC_INVALID(pFT)) _exit(-3); + + if (pFT->logfp) + { + fprintf(pFT->logfp,"---------------------------------------------------------------------------\n"); + fprintf(pFT->logfp,"DONE, signal handler: Date is %s at %s\n",__DATE__,__TIME__); + fprintf(pFT->logfp,"---------------------------------------------------------------------------\n"); + fflush (pFT->logfp); + fclose (pFT->logfp); + } + _exit(-1); +} diff --git a/src/kv/test/fvt_trace.h b/src/kv/test/fvt_trace.h new file mode 100644 index 00000000..b3e0177b --- /dev/null +++ b/src/kv/test/fvt_trace.h @@ -0,0 +1,31 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/fvt_trace.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern KV_Trace_t *pFT; + +void fvt_trace_intHandler(int s); + +#define FVT_TRC_SIGINT_HANDLER signal(SIGINT, fvt_trace_intHandler) diff --git a/src/kv/test/kv_utils_db.c b/src/kv/test/kv_utils_db.c new file mode 100644 index 00000000..84b3cc43 --- /dev/null +++ b/src/kv/test/kv_utils_db.c @@ -0,0 +1,457 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/kv_utils_db.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * utility functions to build key/value tables with random patterns for k/v + * \ingroup + ******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#define TRUE 1 +#define FALSE 0 + +#define MAX_1BYTE_KEYS 255 +#define MAX_2BYTE_KEYS 30000 + +/** + ******************************************************************************* + * \brief + * return a random number >0 + ******************************************************************************/ +static uint32_t +get_rand8(void) +{ + uint32_t r = 0; + + while (0 == r) + { + r = lrand48(); + } + return r; +} + +/** + ******************************************************************************* + * \brief + * return a random number >0 + ******************************************************************************/ +static uint8_t +get_rand1(void) +{ + uint32_t r = 0; + + while (0 == r) + { + r = rand() & 0xff; + } + return r; +} + +/** + ******************************************************************************* + * \brief + * return a random number between 0 and max + ******************************************************************************/ +uint32_t +get_rand_max(uint32_t max) +{ + uint32_t r = 0; + + while (0 == r) + r = get_rand8() % (max+1); + + return r; +} + +/** + ******************************************************************************* + * \brief + * fill the array p of len bytes with random numbers + ******************************************************************************/ +static void +fill_rand(void *p, uint32_t len) +{ + uint32_t i = 0; + uint32_t *p_u32 = (uint32_t*)p; + uint8_t *p_u8 = (uint8_t*)p; + uint8_t rbyt = 4; + uint32_t div = (len/rbyt); + uint8_t l32 = (div < rbyt) ? 
div : rbyt; + + assert(NULL != p); + assert(0 < len); + + /* set the entire buf to a random number for general fill */ + memset(p_u8, get_rand1(), len); + + /* set up to 3 8-bit random numbers in any odd space */ + for (i=0; ikey); + + if (db[i].klen == e->klen && + memcmp(e->key, db[i].key, db[i].klen) == 0) + { + ++found; + } + if (2 == found) + { + assert(0 < db[i].klen); + KV_TRC(pFT, "DUP(klen:%d key:%p):",db[i].klen, e->key); + return TRUE; + } + } + return FALSE; +} + +/** + ******************************************************************************* + * \brief + * fill the key for element i in db with random bit patterns + ******************************************************************************/ +static void +create_key(kv_t *db, uint32_t i, uint32_t klen) +{ + kv_t *e = db + i; + + assert(NULL != db); + assert(0 < klen); + assert(0 <= i); + + fill_rand(e->key, klen); + + /* if klen > 512, its not likely that two keys match */ + if (klen >= 512) + { + fill_rand(e->key, klen); + } + else + { + while (kv_db_exists_twice(db, i+1, e)) + { + fill_rand(e->key, klen); + } + } + //disp(e->key, klen, TRUE); +} + +/******************************************************************************* + * create a fixed db of size num with keys the len of key_size + * and values the len of value_size + ******************************************************************************/ +void* +kv_db_create_fixed(uint32_t num, uint32_t key_size, uint32_t value_size) +{ + void *rc = NULL; + uint32_t i = 0; + kv_t *db = (kv_t*)malloc(num*sizeof(kv_t)); + + assert(NULL != db && 0 < key_size && 0 <= value_size); + assert(!(key_size == 1 && num > MAX_1BYTE_KEYS)); + assert(!(key_size == 2 && num > MAX_2BYTE_KEYS)); + + srand(time(0)); + srand48(time(0)); + bzero(db, num*sizeof(kv_t)); + + for (i=0; i MAX_1BYTE_KEYS)); + assert(!(key_max == 2 && num > MAX_2BYTE_KEYS)); + + srand(time(0)); + srand48(time(0)); + bzero(db, num*sizeof(kv_t)); + + for (i=0; i 1) + { + size = 
get_rand_max(key_max); + /* if we already have 256 1byte keys, can't have anymore */ + if (1 == size && MAX_1BYTE_KEYS == byte1) + { + size = 2; + } + /* if we already have 64k 2byte keys, can't have anymore */ + if (2 == size && MAX_2BYTE_KEYS == byte2) + { + size = 3; + } + if (1 == size) ++byte1; + else if (2 == size) ++byte2; + } + else + { + size = 1; + } + assert(0 < size); + db[i].key = malloc(size); + assert(NULL != db[i].key); + db[i].klen = size; + db[i].tag = size; + create_key(db, i, size); + + if (0 == value_max) + { + db[i].value = NULL; + db[i].vlen = 0; + continue; + } + + if (value_max > 1) + { + size = get_rand_max(value_max); + } + else + { + size = 1; + } + assert(0 < size); + db[i].value = malloc(size); + assert(NULL != db[i].value); + fill_rand(db[i].value, size); + db[i].vlen = size; + } + rc = db; + + KV_TRC(pFT, "CREATE_MIXED: %p %dx%dx%d, i:%d rc=%p", + db,key_max,value_max,num,i,rc); + return rc; +} + +/******************************************************************************* + * replace the value for each key + ******************************************************************************/ +uint32_t kv_db_fixed_regen_values(kv_t *db, uint32_t num, uint32_t vlen) +{ + uint32_t rc = NULL; + uint32_t i = 0; + + for (i=0; i 1) + { + vlen = get_rand_max(value_max); + } + else + { + vlen = value_max; + } + db[i].value = malloc(vlen); + db[i].vlen = vlen; + assert(NULL != db[i].value); + fill_rand(db[i].value, vlen); + } + + KV_TRC_IO(pFT, "REGEN_MIXED: %p %dx%d, rc=%d", db, value_max, num, rc); + return rc; +} + +/******************************************************************************* + * free all the storage malloc'ed for the db + ******************************************************************************/ +void +kv_db_destroy(kv_t *db, uint32_t num_keys) +{ + uint32_t i = 0; + + assert(NULL != db); + assert(0 < num_keys); + + for (i=0; i + +typedef struct +{ + void *key; + void *value; + uint32_t klen; + uint32_t vlen; + 
uint32_t tag; +} kv_t; + +/** + ******************************************************************************* + * \brief + * create an array database with fixed key/value sizes, & init random patterns + ******************************************************************************/ +void* kv_db_create_fixed(uint32_t num, uint32_t key_size, uint32_t value_size); + +/** + ******************************************************************************* + * \brief + * create an array database with max key/value sizes, and init random patterns + * \details + * each key and value are random lengths from 1 byte to the max bytes + ******************************************************************************/ +void* kv_db_create_mixed(uint32_t num, uint32_t key_max, uint32_t value_max); + +/** + ******************************************************************************* + * \brief + * replace the value for each key + ******************************************************************************/ +uint32_t kv_db_fixed_regen_values(kv_t *db, uint32_t num, uint32_t value_max); + +/** + ******************************************************************************* + * \brief + * replace the value for each key + ******************************************************************************/ +uint32_t kv_db_mixed_regen_values(kv_t *db, uint32_t num, uint32_t value_max); + +/** + ******************************************************************************* + * \brief + * destroy an array database + ******************************************************************************/ +void kv_db_destroy(kv_t *db, uint32_t num_keys); + +/** + ******************************************************************************* + * \brief + * lookup key and return value if found, else return NULL + ******************************************************************************/ +void* kv_db_find(kv_t *db, uint32_t num_keys, void *key, uint32_t klen); + +/** + 
******************************************************************************* + * \brief + * print an array of bytes in hex, with newline=1, or without=0 + ******************************************************************************/ +void disp(void *p, uint32_t len, uint32_t newline); + +/** + ******************************************************************************* + * \brief + * print each key/value in the db + ******************************************************************************/ +void disp_db(kv_t *db, uint32_t num_keys); + +/** + ******************************************************************************* + * \brief + * gen a random number X, where 0 < X < max + ******************************************************************************/ +uint32_t +get_rand_max(uint32_t max); + +#endif diff --git a/src/kv/test/makefile b/src/kv/test/makefile new file mode 100644 index 00000000..1e487649 --- /dev/null +++ b/src/kv/test/makefile @@ -0,0 +1,148 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/kv/test/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +UNAME=$(shell uname) + +#NO_O3 = yes + +ROOTPATH = ../../.. +USER_DIR = . 
+SUBDIRS = +TESTDIR = ${ROOTPATH}/obj/tests +BITS = + +#test code != production code, so allow warnings here. +ALLOW_WARNINGS = yes + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img +LIBPATHS = -L${ROOTPATH}/img +LINKLIBS = -lcflshcom -lcflsh_block -larkdb -larkalloc +VPATH += .. + +BTESTS=_tst_ark _tst_kv_utils _tst_persist test_perf \ + res_pers run_kv_async run_kv_sync run_kv_async_multi \ + run_kv_benchmark + +BIN_TESTS=$(addprefix ${TESTDIR}/, ${BTESTS}) + +_tst_ark_OFILES = tst_ark.o +_tst_kv_utils_OFILES = kv_utils_db.o fvt_trace.o +_tst_persist_OFILES = tst_persist.o + +GTESTS = \ + fvt_kv_tst_ark fvt_kv_fixme1 \ + fvt_ark_io fvt_ark_mcio fvt_ark_mc_aio fvt_ark_perf \ + fvt_ark_perf2 fvt_ark_perf_check fvt_ark_io_inject\ + run_sync_async run_fvt_kv run_simple run_scenarios run_sync_pth run_async_cb\ + run_err fvt_kv_tst_persist + +GTESTS_NO_MAIN = fvt_ark_perf_tool +GTESTS_DIR = $(addprefix $(TESTDIR)/, $(GTESTS)) +GTESTS_NM_DIR = $(addprefix $(TESTDIR)/, $(GTESTS_NO_MAIN)) + +# AIX only +ifeq ($(UNAME),AIX) + +BTESTS64 = $(addsuffix 64, ${BTESTS}) +BIN_TESTS64 = $(addprefix ${TESTDIR}/, ${BTESTS64}) +GTESTS64 = $(addsuffix 64, ${GTESTS}) +GTESTS64_DIR = $(addprefix $(TESTDIR)/, $(GTESTS64)) +GTESTS64_NO_MAIN = $(addsuffix 64, ${GTESTS_NO_MAIN}) +GTESTS64_NM_DIR = $(addprefix $(TESTDIR)/, $(GTESTS64_NO_MAIN)) +BITS = 64 + +LINKLIBS +=-lpthreads +#Linux only +else +LINKLIBS+=-lpthread +endif + +fvt_ark_perf_tool_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_async_cb.o\ + fvt_kv_utils_sync_pth.o fvt_trace.o +fvt_kv_tst_ark_OFILES = tst_ark.o +fvt_kv_tst_persist_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_trace.o +fvt_ark_perf_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_async_cb.o \ + fvt_kv_utils_sync_pth.o fvt_trace.o +fvt_ark_perf_check_OFILES = kv_utils_db.o fvt_kv_utils.o \ + fvt_kv_utils_async_cb.o fvt_kv_utils_sync_pth.o fvt_trace.o +fvt_ark_perf2_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_async_cb.o \ + 
fvt_kv_utils_sync_pth.o fvt_trace.o +fvt_ark_io_inject_OFILES = kv_utils_db.o fvt_kv_utils_async_cb.o \ + fvt_kv_utils_ark_io.o fvt_trace.o +fvt_ark_io_OFILES = kv_utils_db.o fvt_kv_utils_async_cb.o \ + fvt_kv_utils_ark_io.o fvt_trace.o +fvt_ark_mcio_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_sync_pth.o \ + fvt_trace.o +fvt_ark_mc_aio_OFILES = kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_async_cb.o \ + fvt_trace.o +fvt_kv_fixme1_OFILES = kv_utils_db.o fvt_kv_utils_async_cb.o fvt_kv_utils.o \ + fvt_kv_utils_sync_pth.o fvt_kv_utils_ark_io.o fvt_trace.o +run_fvt_kv_OFILES = \ + fvt_kv_tst_simple.o fvt_kv_tst_scenario.o \ + fvt_kv_tst_sync_pth.o fvt_kv_tst_async_cb.o fvt_kv_tst_sync_async.o \ + fvt_kv_tst_errors.o fvt_kv_tst_persist.o fvt_kv_utils_ark_io.o \ + fvt_kv_utils_async_cb.o kv_utils_db.o fvt_kv_utils.o fvt_kv_utils_sync_pth.o \ + fvt_trace.o +run_simple_OFILES = fvt_kv_tst_simple.o kv_utils_db.o fvt_kv_utils.o \ + fvt_trace.o +run_scenarios_OFILES = fvt_kv_tst_scenario.o kv_utils_db.o fvt_kv_utils.o \ + fvt_trace.o +run_sync_pth_OFILES = fvt_kv_tst_sync_pth.o fvt_kv_utils_sync_pth.o \ + fvt_kv_utils.o kv_utils_db.o fvt_trace.o +run_async_cb_OFILES = fvt_kv_tst_async_cb.o kv_utils_db.o \ + fvt_kv_utils_async_cb.o fvt_kv_utils.o fvt_trace.o +run_sync_async_OFILES = fvt_kv_tst_sync_async.o kv_utils_db.o \ + fvt_kv_utils_async_cb.o fvt_kv_utils.o fvt_kv_utils_sync_pth.o \ + fvt_kv_utils_ark_io.o fvt_trace.o +run_err_OFILES = fvt_kv_tst_errors.o fvt_kv_utils.o kv_utils_db.o \ + fvt_kv_utils_async_cb.o fvt_kv_utils_ark_io.o fvt_trace.o + +CFLAGS += \ + -D__FVT__ -D__STDC_FORMAT_MACROS\ + -I$(ROOTPATH)/src/kv \ + -I$(ROOTPATH)/src/test/framework/googletest/googletest/include \ + $(KV_CFLAGS) + +CXXFLAGS+=$(CFLAGS) + +include ${ROOTPATH}/config.mk + +include $(ROOTPATH)/src/test/framework/gtest.objtests.mk +ifeq ($(UNAME),AIX) +$(GTESTS_DIR) $(GTESTS_NM_DIR) $(BIN_TESTS): $(IMGDIR)/libarkdb.a +else +$(GTESTS_DIR) $(GTESTS_NM_DIR) $(BIN_TESTS): 
$(IMGDIR)/libarkdb.so +endif + +cleanruns: + rm $(TESTDIR)/run_fvt_kv \ + $(TESTDIR)/run_simple \ + $(TESTDIR)/run_scenarios \ + $(TESTDIR)/run_async_cb \ + $(TESTDIR)/run_sync_pth \ + $(TESTDIR)/run_err + +unit: + @$(TESTDIR)/run_fvt_kv$(BITS) --gtest_output=xml:$(TESTDIR)/kv_fvt_results.xml; diff --git a/src/kv/test/res_pers.c b/src/kv/test/res_pers.c new file mode 100644 index 00000000..0c72f081 --- /dev/null +++ b/src/kv/test/res_pers.c @@ -0,0 +1,84 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/res_pers.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include "arkdb.h" + +int main(int argc, char **argv) { + int rc = 0; + ARK *ark; + char *key; + char buf[1024]; + int64_t resultSize; + + printf("Starting\n"); + rc = ark_create_verbose("/tmp/peter.kv", &ark, 1048576, 4096, 4096, 20, 0, 256, ARK_KV_VIRTUAL_LUN | ARK_KV_PERSIST_STORE | ARK_KV_PERSIST_LOAD); +// rc = ark_create("/tmp/peter/capi", &ark, ARK_KV_VIRTUAL_LUN); + + if (rc != 0) { + printf("ark_create failed %d\n", rc); + exit(1); + } + // printf("ark_create %x\n", ark); + key = "mykey"; + rc = ark_get(ark, (uint64_t)strlen(key), key, (uint64_t)sizeof(buf), &buf, 0, &resultSize); + if (rc != 0 && rc != 2) { + printf("ark_get 1 failed %d\n", rc); + exit(1); + } + if (0 == rc) { + printf("ark_get 1 resultSize=%ld\n", resultSize); + if (resultSize != 0) { +// printf("result='%*s'\n", resultSize, buf); + printf("result='%s'\n", buf); + } + exit(0); + } + strcpy(buf, "myvalue"); + rc = ark_set(ark, (uint64_t)strlen(key), key, (uint64_t)strlen(buf), buf, &resultSize); + if (rc != 0) { + printf("ark_set 1 failed %d\n", rc); + exit(1); + } + printf("ark_set written %ld\n", resultSize); + rc = ark_get(ark, (uint64_t)strlen(key), key, (uint64_t)sizeof(buf), &buf, 0, &resultSize); + printf("arg_get 2 returned %d\n", rc); + if (rc != 0 && rc != 2) { + printf("ark_get 2 failed %d\n", rc); + exit(1); + } + if (0 == rc) { + printf("ark_get 2 resultSize=%ld\n", resultSize); + if (resultSize != 0) { +// printf("result='%*s'\n", resultSize, buf); + printf("result='%s'\n", buf); + } + } + ark_delete(ark); + + return 0; +} diff --git a/src/kv/test/run_async_cb.C b/src/kv/test/run_async_cb.C new file mode 100644 index 00000000..cd0f34a4 --- /dev/null +++ b/src/kv/test/run_async_cb.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/run_async_cb.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/kv/test/run_err.C b/src/kv/test/run_err.C new file mode 100644 index 00000000..bb0c4b31 --- /dev/null +++ b/src/kv/test/run_err.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_err.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/kv/test/run_fvt_kv.C b/src/kv/test/run_fvt_kv.C new file mode 100644 index 00000000..c9fa4191 --- /dev/null +++ b/src/kv/test/run_fvt_kv.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_fvt_kv.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/kv/test/run_kv_async.c b/src/kv/test/run_kv_async.c new file mode 100644 index 00000000..b3cbc719 --- /dev/null +++ b/src/kv/test/run_kv_async.c @@ -0,0 +1,613 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_kv_async.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * Key/Value ARK Database Asynchronous I/O Driver + * \details + * This runs I/O to the Key/Value ARK Database using ASYNC IO. The \n + * expected iops are around 220k. Four Arks are used to achieve the max \n + * iops. Each ark does set/get/exists/delete in a loop for a list of \n + * keys/values. This code essentially runs write/read/compare/delete. \n + * One or more completion threads are created to handle the \n + * async callback from the arkdb. 
\n + * \n + * Example: \n + * \n + * run_kv_async /dev/sg10 \n + * ctxt:4 async_ops:100 k/v:16x65536: op/s:24778 io/s:223010 secs:19 \n + ******************************************************************************* + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KV_ASYNC_CONTEXTS 4 +#define KV_ASYNC_JOBS_PER_CTXT 100 +#define KV_ASYNC_COMP_PTH 2 + +#define KV_ASYNC_MIN_SECS 15 + +#define KV_ASYNC_KLEN 16 +#define KV_ASYNC_VLEN 64*1024 +#define KV_ASYNC_NUM_KV 100 + +#define KV_ASYNC_RUNNING 0x08000000 +#define KV_ASYNC_SHUTDOWN 0x04000000 + +#define TRUE 1 +#define FALSE 0 + +/** +******************************************************************************** +** struct to hold a generated key/value pair +*******************************************************************************/ +typedef struct +{ + uint8_t key [KV_ASYNC_KLEN]; + uint8_t value[KV_ASYNC_VLEN]; +} kv_t; + +/** +******************************************************************************** +** struct to hold a list ("database") of key/value pairs +*******************************************************************************/ +typedef struct +{ + kv_t db[KV_ASYNC_NUM_KV]; +} db_t; + +/** +******************************************************************************** +** struct for each ASYNC op (job) +*******************************************************************************/ +typedef struct +{ + ARK *ark; + kv_t *db; + uint64_t itag; + uint64_t tag; + int64_t res; + void (*cb)(void*); + uint32_t len; + uint32_t len_i; + uint32_t flags; + uint32_t cb_rdy; + int errcode; + char gvalue[KV_ASYNC_VLEN]; +} async_CB_t; + +/** +******************************************************************************** +** struct to hold the async ops for a single context (ARK) +*******************************************************************************/ +typedef struct +{ + ARK *ark; + async_CB_t pCBs[KV_ASYNC_JOBS_PER_CTXT]; +} 
async_context_t; + +/** +******************************************************************************** +** structs for all contexts +*******************************************************************************/ +async_context_t pCTs[KV_ASYNC_CONTEXTS]; + +/** +******************************************************************************** +** struct for all k/v databases, one for each job. These are reused in +** each context +*******************************************************************************/ +db_t dbs[KV_ASYNC_JOBS_PER_CTXT]; + +/** +******************************************************************************** +** structs for each thread +*******************************************************************************/ +typedef struct +{ + uint32_t ctxt; + uint32_t num; + pthread_t pth; +} pth_t; + +uint32_t start = 0; +uint32_t stop = 0; + +void kv_async_SET_KEY (async_CB_t *pCB); +void kv_async_GET_KEY (async_CB_t *pCB); +void kv_async_EXISTS_KEY (async_CB_t *pCB); +void kv_async_DEL_KEY (async_CB_t *pCB); +void kv_async_wait_jobs (void); +void kv_async_completion_pth(pth_t *p); + +/******************************************************************************* + * hw specific sync macros + ******************************************************************************/ +#ifdef _AIX +#define SYNC() asm volatile ("lwsync") +#elif _MACOSX +#define SYNC() asm volatile ("mfence") +#else +#define SYNC() __asm__ __volatile__ ("sync") +#endif + +/******************************************************************************* + * setup unique values to help in debug + ******************************************************************************/ +#define SET_ITAG(ctxt_i, cb_i) (UINT64_C(0xBEEF000000000000) | \ + ((uint64_t)ctxt_i)<<32 | \ + (uint64_t)cb_i <<16) +#define GET_CTXT(_tag) (uint32_t)((UINT64_C(0x0000ffff00000000) & _tag) >> 32) +#define GET_CB(_tag) (uint32_t)((UINT64_C(0x00000000ffff0000) & _tag) >> 16) + +/** 
+******************************************************************************** +** stop on error +*******************************************************************************/ +#define KV_ERR_STOP(_pCB, _msg, _rc) \ +do \ +{ \ + printf("\n(%s:%p:%d)\n", _msg, _pCB, _rc); \ + if (NULL == _pCB) return; \ + _pCB->flags &= ~KV_ASYNC_RUNNING; \ + return; \ +} while (0) + +/** +******************************************************************************** +** setup all the databases with unique key/values, lengths a multiple of 8bytes +*******************************************************************************/ +void init_kv_db(void) +{ + uint32_t i,j; + uint64_t *p,tag; + + assert(8 <= KV_ASYNC_KLEN && 0 == KV_ASYNC_KLEN%8); + assert(8 <= KV_ASYNC_VLEN && 0 == KV_ASYNC_VLEN%8); + + /* ensure unique keys for each k/v in all dbs */ + for (i=0; ipCBs+GET_CB(dt); + + if (NULL == pCB) KV_ERR_STOP(pCB, "bad dt: cb", 0); + if (dt != pCB->tag) KV_ERR_STOP(pCB, "bad dt: tag", 0); + + pCB->errcode = errcode; + pCB->res = res; + + SYNC(); + pCB->cb_rdy = TRUE; +} + +/** +******************************************************************************** +** process the call back for a ARK set +*******************************************************************************/ +void kv_set_cb(void *p) +{ + async_CB_t *pCB = (async_CB_t*)p; + + ++pCB->len_i; + + if (0 != pCB->errcode) KV_ERR_STOP(pCB, "setcb1", pCB->errcode); + if (pCB->res != KV_ASYNC_VLEN) KV_ERR_STOP(pCB, "setcb2", 0); + + /* if end of db len sequence, move to get */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + kv_async_GET_KEY(pCB); + goto done; + } + /* else, do another set */ + kv_async_SET_KEY(pCB); + +done: + return; +} + +/** +******************************************************************************** +** process the call back for a ARK get +*******************************************************************************/ +void kv_get_cb(void *p) +{ + async_CB_t *pCB = (async_CB_t*)p; + 
kv_t *p_kv = pCB->db + pCB->len_i; + + ++pCB->len_i; + + if (0 != pCB->errcode) KV_ERR_STOP(pCB, "getcb1", pCB->errcode); + if (pCB->res != KV_ASYNC_VLEN) KV_ERR_STOP(pCB, "getcb2", 0); + + if (memcmp(p_kv->value, pCB->gvalue, KV_ASYNC_VLEN)) + { + KV_ERR_STOP(pCB,"get miscompare", 0); + } + /* end of db len sequence, move to exists */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + kv_async_EXISTS_KEY(pCB); + goto done; + } + /* else, do another get */ + kv_async_GET_KEY(pCB); + +done: + return; +} + +/** +******************************************************************************** +** process the call back for a ARK exists +*******************************************************************************/ +void kv_exists_cb(void *p) +{ + async_CB_t *pCB = (async_CB_t*)p; + + ++pCB->len_i; + + if (0 != pCB->errcode) KV_ERR_STOP(pCB,"existcb1",pCB->errcode); + if (pCB->res != KV_ASYNC_VLEN) KV_ERR_STOP(pCB,"existcb2",0); + + /* if end of db len sequence, move to del */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + kv_async_DEL_KEY(pCB); + goto done; + } + /* else, do another exists */ + kv_async_EXISTS_KEY(pCB); + +done: + return; +} + +/** +******************************************************************************** +** process the call back for a ARK delete +*******************************************************************************/ +void kv_del_cb(void *p) +{ + async_CB_t *pCB = (async_CB_t*)p; + + ++pCB->len_i; + + if (0 != pCB->errcode) KV_ERR_STOP(pCB, "delcb1", pCB->errcode); + if (pCB->res != KV_ASYNC_VLEN) KV_ERR_STOP(pCB, "delcb2", 0); + + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + if (pCB->flags & KV_ASYNC_SHUTDOWN) + { + pCB->flags &= ~KV_ASYNC_RUNNING; + goto done; + } + pCB->len_i = 0; + kv_async_SET_KEY(pCB); + goto done; + } + /* else, do another del */ + kv_async_DEL_KEY(pCB); + +done: + return; +} + +/** 
+******************************************************************************** +** issue a set to the ARK +*******************************************************************************/ +void kv_async_SET_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + + pCB->tag = pCB->itag + pCB->len_i; + pCB->cb = (void (*)(void*))kv_set_cb; + + while (EAGAIN == (rc=ark_set_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + KV_ASYNC_VLEN, + pCB->db[pCB->len_i].value, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"SET_KEY",rc); +} + +/** +******************************************************************************** +** issue a get to the ARK +*******************************************************************************/ +void kv_async_GET_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + + pCB->tag = pCB->itag + pCB->len_i; + pCB->cb = (void (*)(void*))kv_get_cb; + + while (EAGAIN == (rc=ark_get_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + KV_ASYNC_VLEN, + pCB->gvalue, + 0, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"GET_KEY",rc); +} + +/** +******************************************************************************** +** issue an exists to the ARK +*******************************************************************************/ +void kv_async_EXISTS_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + + pCB->tag = pCB->itag + pCB->len_i; + pCB->cb = (void (*)(void*))kv_exists_cb; + + while (EAGAIN == (rc=ark_exists_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"EXIST_KEY",rc); +} + +/** +******************************************************************************** +** issue a delete to the ARK +*******************************************************************************/ +void kv_async_DEL_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + + pCB->tag = pCB->itag + pCB->len_i; + pCB->cb = (void (*)(void*))kv_del_cb; + + 
while (EAGAIN == (rc=ark_del_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"DEL_KEY",rc); +} + +/** +******************************************************************************** +** create completion thread(s), wait for jobs to complete, cleanup +*******************************************************************************/ +void kv_async_wait_jobs(void) +{ + uint32_t i = 0; + uint32_t e_secs = 0; + uint64_t ops = 0; + uint64_t ios = 0; + uint32_t tops = 0; + uint32_t tios = 0; + pth_t p[KV_ASYNC_COMP_PTH]; + void* (*fp)(void*) = (void*(*)(void*))kv_async_completion_pth; + + /* create completion threads */ + for (i=0; iark, ARK_KV_VIRTUAL_LUN)); + assert(NULL != pCT->ark); + + for (job=0; jobpCBs+job; + pCB->itag = SET_ITAG(ctxt,job); + pCB->ark = pCT->ark; + pCB->db = dbs[job].db; + pCB->len = KV_ASYNC_NUM_KV; + } + } + + start = time(0); + + /* start all jobs for all contexts(arks) */ + for (ctxt=0; ctxtflags |= KV_ASYNC_RUNNING; + kv_async_SET_KEY(pCB); + } + } + kv_async_wait_jobs(); +} + +/** +******************************************************************************** +** \brief +** function called at completion thread create +** \details +** loop while jobs are running \n +** if the ARK has called back for an async op, run its callback function \n +** if time is elapsed, shutdown jobs \n +*******************************************************************************/ +void kv_async_completion_pth(pth_t *p) +{ + async_CB_t *pCB = NULL; + uint32_t ctxt_running = 0; + uint32_t i = 0; + + /* loop until all jobs are done or until time elapses */ + do + { + ctxt_running = FALSE; + + /* loop through contexts(arks) and check if all jobs are done or + * time has elapsed + */ + for (i=p->ctxt; ictxt+p->num; i++) + { + /* check if all jobs are done */ + for (pCB=pCTs[i].pCBs;pCBcb_rdy) + { + pCB->cb_rdy = FALSE; + pCB->cb(pCB); + } + if (pCB->flags & KV_ASYNC_RUNNING) + { 
+ ctxt_running = TRUE; + } + } + if (!ctxt_running) continue; + + /* check if time has elapsed */ + if (time(0) - start < KV_ASYNC_MIN_SECS) continue; + + for (pCB=pCTs[i].pCBs;pCBflags & KV_ASYNC_RUNNING && + (!(pCB->flags & KV_ASYNC_SHUTDOWN)) ) + { + pCB->flags |= KV_ASYNC_SHUTDOWN; + } + } + } + } + while (ctxt_running); +} + +/** +******************************************************************************** +** check input parm, start all jobs +*******************************************************************************/ +int main(int argc, char **argv) +{ + if (argv[1] == NULL) + { + printf("dev name required as parameter\n"); + exit(-1); + } + + printf("ctxt:%d async_ops:%d k/v:%dx%d: ", + KV_ASYNC_CONTEXTS, + KV_ASYNC_JOBS_PER_CTXT, + KV_ASYNC_KLEN, + KV_ASYNC_VLEN); + fflush(stdout); + + kv_async_run_io(argv[1]); + + return 0; +} diff --git a/src/kv/test/run_kv_async_multi.c b/src/kv/test/run_kv_async_multi.c new file mode 100644 index 00000000..ccf86009 --- /dev/null +++ b/src/kv/test/run_kv_async_multi.c @@ -0,0 +1,483 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_kv_async_multi.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KV_ASYNC_MAX_CONTEXTS 40 +#define KV_ASYNC_MAX_CTXT_PER_DEV 4 +#define KV_ASYNC_MAX_JOBS_PER_CTXT 60 + +#define KV_ASYNC_SECS 20 + +#define KV_ASYNC_KLEN 16 +#define KV_ASYNC_VLEN 64*1024 +#define KV_ASYNC_NUM_KV 100 + +#define KV_ASYNC_RUNNING 0x08000000 +#define KV_ASYNC_SHUTDOWN 0x04000000 +#define KV_ASYNC_SET 0x01000000 +#define KV_ASYNC_GET 0x00800000 +#define KV_ASYNC_EXISTS 0x00400000 +#define KV_ASYNC_DEL 0x00200000 + +uint32_t KV_ASYNC_CONTEXTS = 0; +uint32_t KV_ASYNC_CTXT_PER_DEV = KV_ASYNC_MAX_CTXT_PER_DEV; +uint32_t KV_ASYNC_JOBS_PER_CTXT = KV_ASYNC_MAX_JOBS_PER_CTXT; + +#define TRUE 1 +#define FALSE 0 + +typedef struct +{ + uint8_t key [KV_ASYNC_KLEN]; + uint8_t value[KV_ASYNC_VLEN]; +} kv_t; + +typedef struct +{ + kv_t db[KV_ASYNC_NUM_KV]; +} db_t; + +typedef struct +{ + ARC *ark; + kv_t *db; + uint32_t len; + uint32_t len_i; + uint32_t flags; + uint64_t itag; + uint64_t tag; + char gvalue[KV_ASYNC_VLEN]; +} async_CB_t; + +typedef struct +{ + ARK *ark; + uint32_t flags; + async_CB_t pCBs[KV_ASYNC_MAX_JOBS_PER_CTXT]; +} async_context_t; + +async_context_t pCTs[KV_ASYNC_MAX_CONTEXTS]; + +db_t dbs[KV_ASYNC_MAX_JOBS_PER_CTXT]; + +uint32_t start = 0; +uint32_t stop = 0; +uint64_t ark_create_flag = ARK_KV_VIRTUAL_LUN; + +void kv_async_SET_KEY (async_CB_t *pCB); +void kv_async_GET_KEY (async_CB_t *pCB); +void kv_async_EXISTS_KEY(async_CB_t *pCB); +void kv_async_DEL_KEY (async_CB_t *pCB); +void kv_async_wait_jobs (void); + +/******************************************************************************* + ******************************************************************************/ +#define SET_ITAG(ctxt_i, cb_i) (UINT64_C(0xBEEF000000000000) | \ + ((uint64_t)ctxt_i)<<32 | \ + (uint64_t)cb_i <<16) +#define GET_CTXT(_tag) (uint32_t)((UINT64_C(0x0000ffff00000000) & _tag) >> 32) +#define GET_CB(_tag) 
(uint32_t)((UINT64_C(0x00000000ffff0000) & _tag) >> 16) + +/******************************************************************************* + ******************************************************************************/ +#define KV_ERR_STOP(_pCB, _msg, _rc) \ +do \ +{ \ + printf("(%s:%d)", _msg,_rc); \ + if (NULL == _pCB) return; \ + _pCB->flags &= ~KV_ASYNC_RUNNING; \ + return; \ +} while (0) + +/******************************************************************************* + ******************************************************************************/ +void init_kv_db(void) +{ + uint32_t i,j; + uint64_t *p,tag; + + assert(8 <= KV_ASYNC_KLEN && 0 == KV_ASYNC_KLEN%8); + assert(8 <= KV_ASYNC_VLEN && 0 == KV_ASYNC_VLEN%8); + + /* ensure unique keys for each k/v in all dbs */ + for (i=0; ipCBs+GET_CB(dt); + + if (NULL == pCB) KV_ERR_STOP(pCB, "bad dt: cb", 0); + if (0 != errcode) KV_ERR_STOP(pCB, "bad errcode", errcode); + if (dt != pCB->tag) KV_ERR_STOP(pCB, "bad tag", 0); + if (res != KV_ASYNC_VLEN) KV_ERR_STOP(pCB, "bad res", 0); + + p_kv = pCB->db + pCB->len_i; + ++pCB->len_i; + + if (pCB->flags & KV_ASYNC_SET) + { + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_SET; + pCB->flags |= KV_ASYNC_GET; + kv_async_GET_KEY(pCB); + goto done; + } + kv_async_SET_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_GET) + { + if (0 != memcmp(p_kv->value, pCB->gvalue, KV_ASYNC_VLEN)) + { + KV_ERR_STOP(pCB,"get miscompare",0); + } + + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_GET; + pCB->flags |= KV_ASYNC_EXISTS; + kv_async_EXISTS_KEY(pCB); + goto done; + } + kv_async_GET_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_EXISTS) + { + /* if end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + pCB->len_i = 0; + pCB->flags &= ~KV_ASYNC_EXISTS; + pCB->flags |= 
KV_ASYNC_DEL; + kv_async_DEL_KEY(pCB); + goto done; + } + kv_async_EXISTS_KEY(pCB); + goto done; + } + else if (pCB->flags & KV_ASYNC_DEL) + { + /* end of db len sequence, move to next step */ + if (pCB->len_i == pCB->len) + { + if (pCB->flags & KV_ASYNC_SHUTDOWN) + { + pCB->flags &= ~KV_ASYNC_RUNNING; + goto done; + } + pCB->flags &= ~KV_ASYNC_DEL; + pCB->flags |= KV_ASYNC_SET; + pCB->len_i = 0; + kv_async_SET_KEY(pCB); + goto done; + } + kv_async_DEL_KEY(pCB); + goto done; + } + else + { + /* should not be here */ + assert(0); + } + +done: + return; +} + +/******************************************************************************* + ******************************************************************************/ +void kv_async_SET_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + pCB->tag = pCB->itag + pCB->len_i; + + while (EAGAIN == (rc=ark_set_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + KV_ASYNC_VLEN, + pCB->db[pCB->len_i].value, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"SET_KEY",rc); +} + +/******************************************************************************* + ******************************************************************************/ +void kv_async_GET_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + pCB->tag = pCB->itag + pCB->len_i; + + while (EAGAIN == (rc=ark_get_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + KV_ASYNC_VLEN, + pCB->gvalue, + 0, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"GET_KEY",rc); +} + +/******************************************************************************* + ******************************************************************************/ +void kv_async_EXISTS_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + pCB->tag = pCB->itag + pCB->len_i; + + while (EAGAIN == (rc=ark_exists_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"EXIST_KEY",rc); +} + 
+/******************************************************************************* + ******************************************************************************/ +void kv_async_DEL_KEY(async_CB_t *pCB) +{ + uint32_t rc=0; + pCB->tag = pCB->itag + pCB->len_i; + + while (EAGAIN == (rc=ark_del_async_cb(pCB->ark, + KV_ASYNC_KLEN, + pCB->db[pCB->len_i].key, + kv_async_cb, + pCB->tag))) usleep(10000); + if (rc) KV_ERR_STOP(pCB,"DEL_KEY",rc); +} + +/******************************************************************************* + ******************************************************************************/ +void kv_async_init_io(char *dev, + uint32_t jobs, + uint32_t klen, + uint32_t vlen, + uint32_t LEN) +{ + async_context_t *pCT = NULL; + async_CB_t *pCB = NULL; + uint32_t job = 0; + uint32_t ctxt = 0; + + printf("dev:%s ctxt:%d async_ops:%d k/v:%dx%d: \n", + dev, KV_ASYNC_CTXT_PER_DEV, jobs, klen, vlen); + fflush(stdout); + + init_kv_db(); + + for (ctxt=KV_ASYNC_CONTEXTS; + ctxtark, ark_create_flag)); + assert(NULL != pCT->ark); + + for (job=0; jobpCBs+job; + pCB->itag = SET_ITAG(ctxt,job); + pCB->ark = pCT->ark; + pCB->flags = KV_ASYNC_SET; + pCB->db = dbs[job].db; + pCB->len = LEN; + } + } +} + +/******************************************************************************* + ******************************************************************************/ +void kv_async_wait_jobs(void) +{ + async_CB_t *pCB = NULL; + uint32_t ctxt_running = 0; + uint32_t i = 0; + uint32_t e_secs = 0; + uint64_t ops = 0; + uint64_t ios = 0; + uint32_t tops = 0; + uint32_t tios = 0; + + printf("ASYNC: "); fflush(stdout); + + /* loop until all jobs are done or until time elapses */ + do + { + ctxt_running = FALSE; + + /* loop through contexts(arks) and check if all jobs are done or + * time has elapsed + */ + for (i=0; iflags & KV_ASYNC_RUNNING) + { + ctxt_running = TRUE; + } + } + if (!ctxt_running) continue; + + /* check if time has elapsed */ + if (time(0) - start < KV_ASYNC_SECS) 
continue; + + for (pCB=pCTs[i].pCBs;pCBflags & KV_ASYNC_RUNNING && + (!(pCB->flags & KV_ASYNC_SHUTDOWN)) ) + { + pCB->flags |= KV_ASYNC_SHUTDOWN; + } + } + } + } + while (ctxt_running); + + stop = time(0); + e_secs = stop - start; + + /* sum perf ops for all contexts/jobs and delete arks */ + for (i=0; i ...\n"); + printf(" ex: run_kv_async_multi /dev/cxl/afu0.0s /dev/cxl/afu1.0s\n"); + printf(" ex: run_kv_async_multi /dev/sdb /dev/sdh /dev/sdq\n"); + printf(" ex: run_kv_async_multi -p /dev/sg4 /dev/sg8\n"); + exit(0); + } + bzero(pCTs, sizeof(pCTs)); + + if (0 == strncmp(argv[1], "-p", 7)) + { + printf("Attempting to run with physical lun\n"); + ark_create_flag = ARK_KV_PERSIST_STORE; + KV_ASYNC_CTXT_PER_DEV = 1; + i = 2; + } + else + { + //KV_ASYNC_CTXT_PER_DEV = 1; + KV_ASYNC_CTXT_PER_DEV = 4; + KV_ASYNC_JOBS_PER_CTXT = 60; + } + + while (argv[i] != NULL && KV_ASYNC_CONTEXTS <= KV_ASYNC_MAX_CONTEXTS) + { + kv_async_init_io(argv[i++], + KV_ASYNC_JOBS_PER_CTXT, + KV_ASYNC_KLEN, + KV_ASYNC_VLEN, + KV_ASYNC_NUM_KV); + KV_ASYNC_CONTEXTS += KV_ASYNC_CTXT_PER_DEV; + } + start = time(0); + + /* start all jobs for all contexts */ + for (ctxt=0; ctxtflags |= KV_ASYNC_RUNNING; + kv_async_SET_KEY(pCB); + } + } + kv_async_wait_jobs(); + + return 0; +} diff --git a/src/kv/test/run_kv_benchmark.c b/src/kv/test/run_kv_benchmark.c new file mode 100644 index 00000000..35d5263e --- /dev/null +++ b/src/kv/test/run_kv_benchmark.c @@ -0,0 +1,499 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_kv_benchmark.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KV_PTHS 128 +#define KV_PTHS_BIG 80 +#define KV_NASYNC 128 +#define KV_BASYNC 8*1024 +#define KV_NPOOL 20 +#define KV_WORKER_MIN_SECS 15 + +#define KLEN 16 +#define VLEN_4k 4000 +#define VLEN_64k 64000 +#ifdef _AIX +#define VLEN_500k 180*1024 +#else +#define VLEN_500k 500*1024 +#endif +#define LEN_4k 100 // make this 50 to view even % r/w +#define LEN_BIG 5 + +#define MAX_IO_ARKS 4 +#define MAX_IO_PTHS 60 + +typedef struct +{ + uint8_t *key; + uint8_t *value; +} kv_t; + +typedef struct +{ + kv_t *kv; + uint32_t klen; + uint32_t vlen; + uint32_t LEN; +} db_t; + +typedef struct +{ + ARK *ark; + db_t *db; + pthread_t pth; +} worker_t; + +db_t *dbs; +void* (*fp)(void*); +uint32_t testcase_start = 0; +uint32_t testcase_stop = 0; +uint64_t ark_create_flag = ARK_KV_VIRTUAL_LUN; + +/******************************************************************************* + ******************************************************************************/ +#define KV_ERR(_msg, _rc) \ +do \ +{ \ + printf("\n(%s:%d)\n", _msg,_rc); \ + return _rc; \ +} while (0) + +/******************************************************************************* + ******************************************************************************/ +#define NEW_ARK(_dev, _ark) \ + assert(0 == ark_create_verbose(_dev, _ark, \ + 1048576, \ + 4096, \ + 1048576, \ + KV_NPOOL, \ + KV_NASYNC, \ + KV_BASYNC, \ + ark_create_flag)); \ + 
assert(NULL != ark); \ + +/******************************************************************************* + ******************************************************************************/ +void init_kv_db(uint32_t n_pth, uint32_t klen, uint32_t vlen, uint32_t LEN) +{ + uint32_t i,j; + uint64_t *p,tag; + + assert(8 <= klen && 0 == klen%8); + assert(8 <= vlen && 0 == vlen%8); + + dbs = (db_t*)malloc(n_pth*sizeof(db_t)); + assert(dbs); + + /* ensure unique keys for each k/v in all dbs */ + for (i=0; iLEN; i++) + { + while (EAGAIN == (rc=ark_set(ark, + db->klen, + db->kv[i].key, + db->vlen, + db->kv[i].value, + &res))) usleep(10000); + if (rc) KV_ERR("set1",rc); + if (db->vlen != res) KV_ERR("set2",0); + } + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +uint32_t kv_query(ARK *ark, db_t *db) +{ + uint32_t i = 0; + uint32_t rc = 0; + int64_t res = 0; + uint8_t gvalue[db->vlen]; + + for (i=0; iLEN; i++) + { + while (EAGAIN == (rc=ark_get(ark, + db->klen, + db->kv[i].key, + db->vlen, + gvalue, + 0, + &res))) usleep(10000); + if (rc) KV_ERR("get1", rc); + if (db->vlen != res) KV_ERR("get2",0); + + while (EAGAIN == (rc=ark_exists(ark, + db->klen, + db->kv[i].key, + &res))) usleep(10000); + if (rc) KV_ERR("exists1",rc); + if (db->vlen != res) KV_ERR("exists2",0); + } + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +uint32_t kv_exists(ARK *ark, db_t *db) +{ + uint32_t i = 0; + uint32_t rc = 0; + int64_t res = 0; + + for (i=0; iLEN; i++) + { + while (EAGAIN == (rc=ark_exists(ark, + db->klen, + db->kv[i].key, + &res))) usleep(10000); + if (rc) KV_ERR("exists1",rc); + if (db->vlen != res) KV_ERR("exists2",0); + } + return 0; +} + +/******************************************************************************* + 
******************************************************************************/ +uint32_t kv_del(ARK *ark, db_t *db) +{ + uint32_t i = 0; + uint32_t rc = 0; + int64_t res = 0; + + for (i=0; iLEN; i++) + { + while (EAGAIN == (rc=ark_del(ark, + db->klen, + db->kv[i].key, + &res))) usleep(10000); + if (rc) KV_ERR("del1", rc); + if (db->vlen != res) KV_ERR("del2",0); + } + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +void do_100_percent_read(worker_t *w) +{ + int start=0,cur=0,rc=0; + + kv_load(w->ark, w->db); + + start = time(0); + do + { + if ((rc=kv_query(w->ark, w->db))) break; + cur = time(0); + } + while (cur-start < KV_WORKER_MIN_SECS); + + kv_del(w->ark, w->db); +} + +/******************************************************************************* + ******************************************************************************/ +void do_75_percent_read(worker_t *w) +{ + int start=0,cur=0,rc=0; + + start = time(0); + do + { + if ((rc=kv_load (w->ark, w->db))) break; + if ((rc=kv_query(w->ark, w->db))) break; + if ((rc=kv_del (w->ark, w->db))) break; + cur = time(0); + } + while (cur-start < KV_WORKER_MIN_SECS); +} + +/******************************************************************************* + ******************************************************************************/ +void do_50_percent_read(worker_t *w) +{ + int start=0,cur=0,rc=0; + + start = time(0); + do + { + if ((rc=kv_load (w->ark, w->db))) break; + if ((rc=kv_exists(w->ark, w->db))) break; + if ((rc=kv_del (w->ark, w->db))) break; + cur = time(0); + } + while (cur-start < KV_WORKER_MIN_SECS); +} + +/******************************************************************************* + ******************************************************************************/ +void get_stats(ARK *ark, uint32_t *ops, uint32_t *ios) +{ + uint64_t ops64 = 0; + uint64_t ios64 = 0; + 
+ (void)ark_stats(ark, &ops64, &ios64); + *ops = (uint32_t)ops64; + *ios = (uint32_t)ios64; +} + +/******************************************************************************* + ******************************************************************************/ +void run_io(ARK *ark, uint32_t pths) +{ + worker_t w[pths]; + uint32_t i = 0; + uint32_t ops = 0; + uint32_t ios = 0; + uint32_t e_secs = 0; + + testcase_start = time(0); + + /* start all threads */ + for (i=0; i +#include +#include +#include +#include +#include +#include +#include +#include + +#define KV_CONTEXTS 4 +#define KV_WORKERS_PER_CTXT 100 +#define KV_WORKERS_PTH (KV_CONTEXTS*KV_WORKERS_PER_CTXT) +#define KV_WORKER_MIN_SECS 15 + +#define KLEN 16 +#define VLEN 64*1024 +#define LEN 100 + +/** +******************************************************************************** +** struct to hold a generated key/value pair +*******************************************************************************/ +typedef struct +{ + uint8_t key [KLEN]; + uint8_t value[VLEN]; +} kv_t; + +/** +******************************************************************************** +** struct to hold a list ("database") of key/value pairs +*******************************************************************************/ +typedef struct +{ + kv_t db[LEN]; +} db_t; + +/** +******************************************************************************** +** struct for all k/v databases, one for each thread. 
These are reused in +** each context +*******************************************************************************/ +db_t dbs[KV_WORKERS_PER_CTXT]; + +/** +******************************************************************************** +** ark for each context +*******************************************************************************/ +ARK *arks[KV_CONTEXTS]; + +/** +******************************************************************************** +** struct for each worker thread +*******************************************************************************/ +typedef struct +{ + ARK *ark; + kv_t *db; + pthread_t pth; +} worker_t; + +/** +******************************************************************************** +** stop on error +*******************************************************************************/ +#define KV_ERR(_msg, _rc) \ +do \ +{ \ + printf("\n(%s:%d)\n", _msg,_rc); \ + return _rc; \ +} while (0) + +/** +******************************************************************************** +** setup all the databases with unique key/values, lengths a multiple of 8bytes +*******************************************************************************/ +void init_kv_db(void) +{ + uint32_t i,j; + uint64_t *p,tag; + + assert(8 <= KLEN && 0 == KLEN%8); + assert(8 <= VLEN && 0 == VLEN%8); + + /* ensure unique keys for each k/v in all dbs */ + for (i=0; iark, w->db))) break; + if ((rc=kv_query(w->ark, w->db))) break; + if ((rc=kv_del (w->ark, w->db))) break; + + cur = time(0); + } + while (cur-start < KV_WORKER_MIN_SECS); +} + + +/** +******************************************************************************** +** \brief +** run the synchronous IO to the ARK +** \detail +** create and init a key/value database for each ark \n +** create arks and threads \n +** wait for all threads to complete \n +** print iops and cleanup \n +*******************************************************************************/ +int main(int argc, char **argv) +{ 
+ void* (*fp)(void*) = (void*(*)(void*))SGD; + ARK *ark; + worker_t w[KV_WORKERS_PTH]; + uint64_t ops = 0; + uint64_t ios = 0; + uint32_t tops = 0; + uint32_t tios = 0; + uint32_t start = 0; + uint32_t stop = 0; + uint32_t e_secs = 0; + uint32_t i = 0; + + if (argv[1] == NULL) + { + printf("dev name required as parameter\n"); + exit(-1); + } + + init_kv_db(); + + printf("SYNC: ctxt:%d threads:%d k/v:%dx%dx%d: ", + KV_CONTEXTS, KV_WORKERS_PER_CTXT, KLEN, VLEN, LEN); fflush(stdout); + + start = time(0); + + /* create an ark and threads for each context */ + for (i=0; i diff --git a/src/kv/test/run_simple.C b/src/kv/test/run_simple.C new file mode 100644 index 00000000..42ecb9c4 --- /dev/null +++ b/src/kv/test/run_simple.C @@ -0,0 +1,25 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_simple.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include diff --git a/src/kv/test/run_sync_async.C b/src/kv/test/run_sync_async.C new file mode 100644 index 00000000..0b613f4c --- /dev/null +++ b/src/kv/test/run_sync_async.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/run_sync_async.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/kv/test/run_sync_pth.C b/src/kv/test/run_sync_pth.C new file mode 100644 index 00000000..534692bd --- /dev/null +++ b/src/kv/test/run_sync_pth.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/run_sync_pth.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/kv/test/test_perf.c b/src/kv/test/test_perf.c new file mode 100644 index 00000000..e509caf0 --- /dev/null +++ b/src/kv/test/test_perf.c @@ -0,0 +1,226 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test_perf.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_KEYS 100000 +#define MAX_STRING_LEN 16 + +typedef struct t_data +{ + int tid; + int start; + int end; +} t_data_t; + +int run = 0; +int numthrs = 4; +char **keys = NULL; +ARK *arkp = NULL; + +void set_cb(int errcode, uint64_t dt, int64_t res) +{ + if (errcode != 0) + { + printf("Set failed for key %"PRIi64"\n", dt); + } + + return; +} + +void *thread_run(void *arg) +{ + int rc = 0; + int i = 0; + int tid = 0; + uint64_t retry_cnt = 0; + //int nasyncs = MAX_KEYS / numthrs; + t_data_t *data = (t_data_t *)arg; + // ARC *arcp = NULL; + + tid = data->tid; + +#if 0 + rc = ark_connect_verbose(&arcp, arkp, nasyncs); + if (rc != 0) + { + printf("Thread %d: failed ark_connect (%d)\n", tid, rc); + return NULL; + } +#endif + + while ( run == 0 ); + + for (i = data->start; i <= data->end; i++) + { + do + { + rc = ark_set_async_cb(arkp, strlen(keys[i]), (void*)keys[i], + strlen(keys[i]), (void*)keys[i], + &set_cb, i); + if ( rc == EAGAIN ) + { + retry_cnt++; + } + } while (rc == EAGAIN); + + if (rc != 0) + { + printf("Thread %d: Failed to add key %d (%d)\n", tid, i, rc); + break; + } + } + + printf("Thread %d is done: %ld\n", tid, retry_cnt); + + return NULL; +} + +int main(int argc, char **argv) { + + int i = 0; + int rc = 0; + int thrs = 0; + int64_t key_cnt = 0; + uint64_t ops = 0; + uint64_t ios = 0; + uint64_t act = 0; + int64_t total_us = 0; + int64_t total_sec = 0; + pthread_t *threads = NULL; + t_data_t *datap = NULL; + struct timeval stop, start; + + keys = (char **)malloc(MAX_KEYS * sizeof(char *)); + if (keys == NULL) + { + printf("malloc failed for keys\n"); + return ENOMEM; + } + + for (i = 0; i < MAX_KEYS; i++) + { + keys[i] = (char *)malloc(MAX_STRING_LEN); + snprintf(keys[i], MAX_STRING_LEN, "%d-hello", i); + } + + rc = ark_create("/dev/cxl/afu0.0s", &arkp, ARK_KV_VIRTUAL_LUN); + if (rc != 0) + { + printf("Error opening 
ark (%d)\n", rc); + return rc; + } + + threads = (pthread_t *)malloc(numthrs * sizeof(pthread_t)); + datap = (t_data_t *)malloc(numthrs * sizeof(t_data_t)); + + for (thrs = 0; thrs < numthrs; thrs++) + { + datap[thrs].tid = thrs; + datap[thrs].start = thrs * (MAX_KEYS / numthrs); + datap[thrs].end = ((thrs + 1) * (MAX_KEYS / numthrs)) - 1; + if (datap[thrs].end >= MAX_KEYS) + { + datap[thrs].end = MAX_KEYS - 1; + } + + printf("App Thread %d: start %d, end %d\n", thrs, datap[thrs].start, datap[thrs].end); + + rc = pthread_create(&(threads[thrs]), NULL, thread_run, &datap[thrs]); + if (rc != 0) + { + printf("Failed to create pthread %d (%d)\n", i, rc); + return rc; + } + } + + gettimeofday(&start, NULL); + + run = 1; + + do + { + key_cnt = 0; + rc = ark_count(arkp, &key_cnt); + if (rc != 0) + { + printf("ark_count failed %d\n", rc); + } + } while (key_cnt < MAX_KEYS); + + gettimeofday(&stop, NULL); + + rc = ark_stats(arkp, &ops, &ios); + if (rc != 0) + { + printf("ark_stats failed %d\n", rc); + return rc; + } + + rc = ark_actual(arkp, &act); + if (rc != 0) + { + printf("ark_actual failed %d\n", rc); + return rc; + } + + for (thrs = 0; thrs < numthrs; thrs++) + { + (void)pthread_join(threads[thrs], NULL); + } + + printf("Added all keys\n"); + + free(threads); + free(datap); + + rc = ark_delete(arkp); + if (rc != 0) + { + printf("Error deleting ark (%d)\n", rc); + return rc; + } + + total_us = ((stop.tv_sec * 1000000) + stop.tv_usec) - + ((start.tv_sec * 1000000) + start.tv_usec); + total_sec = total_us / 1000000; + + printf("K/V ops: %"PRIu64", ios: %"PRIu64"\n", ops, ios); + printf("usecs %"PRIi64", secs: %"PRIi64"\n", total_us, total_sec); + printf("K/V ops/usec: %6.0f, ios/usec: %.0f\n", (float)ops/(float)total_us, + (float)ios/(float)total_us); + printf("K/V ops/sec: %6.0f, ios/sec: %.0f\n", (float)ops/(float)total_sec, + (float)ios/(float)total_sec); + + return 0; +} diff --git a/src/kv/test/tst_ark.c b/src/kv/test/tst_ark.c new file mode 100644 index 
00000000..c9e33787 --- /dev/null +++ b/src/kv/test/tst_ark.c @@ -0,0 +1,445 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/tst_ark.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cl.h" +#include "bv.h" +#include "arkdb.h" +#include + +int iter = 10; +int keys = 10; +int seed = 1234; + +int simple = 0; +int cstr = 0; +int table = 0; +int progress = 0; +int chk = 0; + +int kmn = 0; +int kmx = 8; +int klo = 97; +int khi = 122; +int kmod = -1; +int krng = -1; + +int vmn = 0; +int vmx = 16; +int vlo = 97; +int vhi = 122; +int vmod = -1; +int vrng = -1; + +uint64_t vlim = 256; + +char *dev = NULL; + +int vdf = 8; + +int64_t total_keys = 0; + +uint64_t klen = 0; +uint8_t *kval = NULL; + +uint64_t vlen = 0; +uint8_t *vval = NULL; + +uint64_t glen = 0; +uint8_t *gval = NULL; + +uint64_t size = 1024 * 1024; + +uint64_t bs = 4096; +//uint64_t bc = 1024; + +unsigned short s0[3]; +unsigned short s1[3]; + +BV *bvk = NULL; +unsigned short *bvs = NULL; + +/* BT *bti = NULL; */ +/* BT *bto = NULL; */ +/* BT *btx = NULL; */ + +int64_t bt_set_ret = 0; +#define BT_SET(k,v) \ + ark_set(ark,(uint64_t)(strlen(k)+1), (uint8_t *)(k), (uint64_t)(strlen(v)+1), (uint8_t *)(v), &bt_set_ret); \ + printf("%s %"PRIi64" = Set '%s' -> '%s'\n",bt_set_ret==strlen(v)+1 ? "OK " : "ERR", bt_set_ret, k, v); \ + if (strlen(v)+1!=bt_set_ret) return(99) + + +int64_t bt_exi_ret = 0; +#define BT_EXI(k,e) \ + ark_exists(ark,(uint64_t)(strlen(k)+1), (uint8_t *)(k), &bt_exi_ret); \ + printf("%s %"PRIi64" = Exists '%s'\n",e==bt_exi_ret ? "OK " : "ERR", bt_exi_ret, k); \ + if (e!=bt_exi_ret) return(99) + + +int64_t bt_del_ret = 0; +#define BT_DEL(k,rc) \ + ark_del(ark,(uint64_t)(strlen(k)+1), (uint8_t *)(k), &bt_del_ret ); \ + printf("%s %"PRIi64" = Del '%s'\n",bt_del_ret==rc ? 
"OK " : "ERR", bt_del_ret, k); \ + + +int64_t bt_get_ret = 0; +#define BT_GET(k,v) \ + ark_get(ark,(uint64_t)(strlen(k)+1), (uint8_t*)(k), (uint64_t)vmx, vval, 0, &bt_get_ret); \ + printf("%s %"PRIi64" = Get '%s'\n", \ + ((v==NULL && bt_get_ret==-1) || \ + ((strlen(v)+1)==bt_get_ret && memcmp(v,vval,bt_get_ret)==0)) ? "OK" : "ERR", \ + bt_get_ret, k); \ + +void gen_key(int ki) { + int i; + s1[0] = bvs[ki*3+0]; + s1[1] = bvs[ki*3+1]; + s1[2] = bvs[ki*3+2]; + klen = kmn + nrand48(s1)%kmod; + for(i=0; i<(int)klen; i++) { + kval[i] = klo + nrand48(s1)%krng; + } + if (ki>0) { + kval[klen++] = '~'; + klen += sprintf(((char*)kval)+klen, "%d", ki); + } + + kval[klen] = 0; +} +void gen_key_val(int ki) { + int i; + gen_key(ki); + vlen = vmn + nrand48(s1)%vmod; + for(i=0; i<(int)vlen; i++) { + vval[i] = vlo + nrand48(s1)%vrng; + } + vval[vlen] = 0; +} +void adv_key_val(int ki) { + gen_key_val(ki); + bvs[ki*3+0] = s1[0]; + bvs[ki*3+1] = + +s1[1]; + bvs[ki*3+2] = s1[2]; + gen_key_val(ki); +} + +#define PRINT_GET(ki) if (progress) printf("Ki %d Get kl = %"PRIu64", vl = %"PRIu64" bvi %d '%s'\n", \ + ki, klen, vlen,bvi, \ + cstr ? (char*)kval : "") \ + +#define PRINT_SET(ki) if (progress) printf("Ki %d Set kl = %"PRIu64", vl = %"PRIu64" bvi %d '%s'->'%s'\n", \ + ki, klen, vlen,bvi, \ + cstr ? (char*)kval : "", \ + cstr ? (char*)vval : "") + +#define PRINT_DEL(ki) if (progress) printf("Ki %d Del kl = %"PRIu64", vl = %"PRIu64" bvi %d '%s'\n", \ + ki, klen, vlen, bvi, \ + cstr ? 
(char*)kval : ""); + +/* #define SWAPBKT btx = bti; bti = bto; bto = btx */ + +int bref = 34; +int grow = 1024; +uint64_t hc = 16; + +int tst_ark_entry(int argc, char **argv) +{ + int i; + int cnt_rc; + int close_rc; + int64_t cnt; + //uint64_t flags = ARK_KV_VIRTUAL_LUN; //unused for now + + char *anon[] = {NULL,NULL,NULL,NULL}; + + CL args[] = { + { (char*)"-n", &iter, AR_INT, (char*)"iterations per thread"}, + { (char*)"-k", &keys, AR_INT, (char*)"# of keys"}, + { (char*)"-s", &seed, AR_INT, (char*)"random seed"}, + { (char*)"-bs", &bs, AR_INT64, (char*)"block size"}, + // {"-bc", &bc, AR_INT64, (char*)"block count (for growing)"}, + { (char*)"-hc", &hc, AR_INT64, (char*)"hash count"}, + { (char*)"-size", &size, AR_INT64, (char*)"inital storage size"}, + { (char*)"-simple", &simple, AR_FLG, (char*)"simple test"}, + { (char*)"-cstr", &cstr, AR_FLG, (char*)"c string keys and values"}, + { (char*)"-table", &table, AR_FLG, (char*)"print table"}, + { (char*)"-progress", &progress, AR_FLG, (char*)"print progress"}, + { (char*)"-kmn", &kmn, AR_INT, (char*)"kmn"}, + { (char*)"-kmx", &kmx, AR_INT, (char*)"kmx"}, + { (char*)"-klo", &klo, AR_INT, (char*)"klo"}, + { (char*)"-khi", &khi, AR_INT, (char*)"khi"}, + { (char*)"-vmn", &vmn, AR_INT, (char*)"vmn"}, + { (char*)"-vmx", &vmx, AR_INT, (char*)"vmx"}, + { (char*)"-vlo", &vlo, AR_INT, (char*)"vlo"}, + { (char*)"-vhi", &vhi, AR_INT, (char*)"vhi"}, + { (char*)"-vdf", &vdf, AR_INT, (char*)"vdf"}, + { (char*)"-bref", &bref, AR_INT, (char*)"block ref bits"}, + { (char*)"-grow", &grow, AR_INT, (char*)"# of blocks to grow by"}, + { (char*)"-vlim", &vlim, AR_INT64, (char*)"value limit for bucket store"}, + { (char*)"-dev", &dev, AR_STR, (char*)"Device to be used for store"}, + { (char*)"-chk", &chk, AR_FLG, (char*)"Run key count check"}, + { NULL, NULL, 0, NULL} + }; + + int echo = 0; + (void)cl_parse(argc,argv,args,anon,echo); + + kmod = kmx - kmn + 1; + krng = khi - klo + 1; + + vmod = vmx - vmn + 1; + vrng = vhi - vlo + 1; + + 
srand48(seed); + s0[0] = lrand48(); + s0[1] = lrand48(); + s0[2] = lrand48(); + + bvk = bv_new(keys); + bvs = (unsigned short*)malloc(3 * keys * sizeof(unsigned short)); + + kval = (uint8_t*)malloc(kmx); + vval = (uint8_t*)malloc(vmx); + gval = (uint8_t*)malloc(vmx); + + for (i=0; i<3*keys; i++) bvs[i] = lrand48(); + + ARK *ark = NULL; + int rc_ark = ark_create_verbose(dev, &ark, size, bs, hc, + 4, 256, 256, + ARK_KV_VIRTUAL_LUN); + + if (rc_ark != 0) + { + fprintf(stderr, "ark_create failed\n"); + return 1; + } + + int64_t rc = -1; + + int64_t get_cnt = 0; + int64_t get_succ = 0; + int64_t get_fail = 0; + int64_t get_err = 0; + + int64_t set_cnt = 0; + int64_t set_succ = 0; + int64_t set_fail = 0; + int64_t set_err = 0; + + int64_t del_cnt = 0; + int64_t del_succ = 0; + int64_t del_fail = 0; + int64_t del_err = 0; + + int64_t cnt_cnt = 0; + int64_t cnt_succ = 0; + int64_t cnt_fail = 0; + + if (simple) { + //addinitial + BT_SET("abc", "defg"); + BT_SET("hijkl", "mnopqrst"); + + BT_EXI("abc", 5); + BT_EXI("cba", -1); + + BT_SET("uv", "wxyz12"); + BT_SET("3456", "7"); + BT_SET("8", "90"); + // replace a few + BT_SET("uv", "!@#$"); + BT_SET("8", "%^&*("); + BT_SET("8", ")_+"); + // add some + BT_SET("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"); + // delete some + BT_DEL("8", 4); + BT_DEL("3456", 2); + // delete nonexist + BT_DEL("Ui", -1); + // delete the rest + BT_DEL("ABCDEFGHIJKLM", 14); + BT_DEL("hijkl", 9); + BT_DEL("uv", 5); + BT_DEL("abc", 5); + //addinitial + BT_SET("abc", "defg"); + BT_SET("hijkl", "mnopqrst"); + BT_SET("uv", "wxyz12"); + BT_SET("3456", "7"); + BT_SET("8", "90"); + // replace a few + BT_SET("uv", "!@#$"); + BT_SET("8", "%^&*("); + BT_SET("8", ")_+"); + // add some + BT_SET("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"); + // delete some + BT_DEL("8", 4); + BT_DEL("3456", 2); + // delete nonexist + BT_DEL("Ui",-1); + // do some gets + BT_GET("hijkl", "mnopqrst"); + //BT_GET("Nope", NULL); + BT_GET("uv", "!@#$"); + BT_GET("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"); + //get out 
+ return(0); + } + int bvi; + for(i=0; iblkused, ark->bcount, (double)ark->blkused / (double)ark->bcount); + } + printf("Summary:\n"); + printf("Set %"PRIu64" stored = %9"PRIu64", not stored = %9"PRIu64", error = %9"PRIu64"\n", set_cnt, set_succ, set_fail, set_err); + printf("Get %"PRIu64" retrieved = %9"PRIu64", not found = %9"PRIu64", error = %9"PRIu64"\n", get_cnt, get_succ, get_fail, get_err); + printf("Del %"PRIu64" removed = %9"PRIu64", not found = %9"PRIu64", error = %9"PRIu64"\n", del_cnt, del_succ, del_fail, del_err); + if (chk) + { + printf("Cnt %"PRIu64" success = %9"PRIu64", error = %9"PRIu64"\n", cnt_cnt, cnt_succ, cnt_fail); + } + + close_rc = ark_delete(ark); + if (close_rc != 0) + { + printf("ARK_DELETE failed: %d\n", close_rc); + } + + return(get_err + set_err + del_err); +} diff --git a/src/kv/test/tst_bl.c b/src/kv/test/tst_bl.c new file mode 100644 index 00000000..c7c57e57 --- /dev/null +++ b/src/kv/test/tst_bl.c @@ -0,0 +1,318 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/tst_bl.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int tst_bl(int iter, int bcnt, int ccnt, int cmax,int grow,int hold ,int resz,int seed,int prnt,int dprt,int w) +{ + int i; + + int *blks = (int*)malloc(bcnt * sizeof(int)); + for (i=0; icount, bl_len(bl,bl->head)); + if (0<=b) { + ntakes++; + int64_t j; + for (j = b; j>0; j = bl_next(bl,j)) { + blks[j]++; + } + chains[ch] = b; + } else { + nfails++; + fails++; + if (resz && fails >= resz) { + printf("Resizing: %"PRIi64" + %d\n", bl->n, grow); + BL *rbl = bl_resize(bl, bl->n+grow, bl->w); + if (!rbl) { + printf("Resize failed, exiting\n"); + return 1; + } else { + bl = rbl; + fails = 0; + blks = (int*)realloc(blks, (bcnt + grow)*sizeof(int)); + if (!blks) { + printf("Realloc for blks failed\n"); + return 1; + } + int j; + for(j=0; jcount, bl_len(bl,bl->head)); + } + } + } else { + //if (prnt && i%prnt==0) printf("%d:Drop\n",i); + ndrops++; + int64_t b = chains[ch]; + int64_t n = bl_len(bl,b); + int64_t j; + for (j = chains[ch]; j>0; j = bl_next(bl,j)) { + blks[j]--; + } + bl_drop(bl,b); + if (prnt && i%prnt==0) + printf("%d: Drop %"PRIi64" = @%"PRIi64" (%"PRIi64",%"PRIi64")\n", + i, n, b, bl->count, bl_len(bl,bl->head)); + chains[ch] = -1; + if (hold && holds == hold) { + bl_release(bl); + bl_hold(bl); + holds = 0; + } else { + holds++; + } + } + } + if (hold) bl_release(bl); + + if (dprt) bl_dot(bl,iter,blks,ccnt, chains); + + printf("Done: take %d, drops %d, fails %d\n", ntakes, ndrops, nfails); + + + int *infree = (int*)malloc(bcnt * sizeof(int)); + int *inuse = (int*)malloc(bcnt * sizeof(int)); + int *inhold = (int*)malloc(bcnt * sizeof(int)); + for(i=0; ihead; + while (0hold; + while (0 1) err2++; + if (blks[i]==0 && infree[i]==0 && inhold[i]==0) err3++; + if (blks[i]==1 && inuse[i]==0) err4++; + } + if (err0) + printf("Error 0 = %d\n", err0); + else + printf("Pass: all present\n"); + + if (err1) printf("Error 1 = %d\n", err1); + if 
(err2) printf("Error 2 = %d\n", err2); + + if (err3) + printf("Error 3 = %d\n", err3); + else + printf("Pass: free blocks in the correct place\n"); + + if (err4) + printf("Error 4 = %d\n", err4); + else + printf("Pass: in use blocks in the correct place\n"); + + + /* int errs; */ + /* for(i=0; ihead); */ + /* int ncnt = 0; */ + /* for(i=0; i= 0) */ + /* ncnt += bl_len(bl,chains[i]); */ + /* if (fcnt != zeros || ncnt != ones) */ + /* printf("Test 3 failed, %d != %d | %d != %d\n", fcnt, zeros, ncnt, ones); */ + /* else */ + /* printf("Test 3 passed\n"); */ + + /* errs = 0; */ + /* for (i=0; ihead, i)!=1) */ + /* errs++; */ + /* if (errs) */ + /* printf("Test 4 failed, errors = %d\n", errs); */ + /* else */ + /* printf("Test 4 passed\n"); */ + + + + /* for(i=0; ihead; */ + /* while (0<=b) { */ + /* blks[b]++; */ + /* b=bl_next(bl,b); */ + /* } */ + /* for(i=0; i=0) { */ + /* //printf("bl_cnt(bl,chains[%d],%d) -> %"PRIi64"\n",j,i, bl_cnt(bl,chains[j],i)); */ + /* cnt += bl_cnt(bl,chains[j],i); */ + /* } */ + /* if (cnt!=1) errs++; */ + /* } */ + /* if (errs) */ + /* printf("Test 5 failed, errs = %d\n", errs); */ + /* else */ + /* printf("Test 5 passed\n"); */ + + + bl_delete(bl); + free(blks); + free(chains); + + return (err0+err1+err2+err3+err4); +} + +int tst_bl_entry(int argc, char **argv) +{ + char *anon[] = {NULL,NULL,NULL,NULL}; + int iter = 10; + int bcnt = 8; + int ccnt = 16; + int cmax = 6; + int grow = 8; + int hold = 0; + int resz = 0; + int seed = 1234; + int prnt = 0; + int dprt = 0; + int w = 34; + CL args[] = {{"-d", &dprt, AR_INT, "dot print"}, + {"-g", &grow, AR_INT, "amount to grow by"}, + {"-p", &prnt, AR_INT, "print (alot)"}, + {"-b", &bcnt, AR_INT, "block count per thread"}, + {"-c", &ccnt, AR_INT, "chain count"}, + {"-l", &cmax, AR_INT, "max chain length"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-r", &resz, AR_INT, "resize after n take fails"}, + {"-n", &iter, AR_INT, "iterations per thread"}, + {"-w", &w, AR_INT, "lba width"}, + {"-j", 
&hold, AR_INT, "only release after hold # of drops"}, + {NULL, NULL, 0, NULL}}; + + int echo = 1; + + (void)cl_parse(argc,argv,args,anon,echo); + + //function call to the tst_bl function and return code of 0 = success and else for fail + return tst_bl(iter,bcnt,ccnt,cmax,grow,hold,resz,seed,prnt,dprt,w); +} diff --git a/src/kv/test/tst_bl.h b/src/kv/test/tst_bl.h new file mode 100644 index 00000000..4c530b07 --- /dev/null +++ b/src/kv/test/tst_bl.h @@ -0,0 +1,25 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/tst_bl.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +int tst_bl(int iter, int bcnt, int ccnt, int cmax,int grow,int hold ,int resz,int seed,int prnt,int dprt,int w); diff --git a/src/kv/test/tst_bt.c b/src/kv/test/tst_bt.c new file mode 100644 index 00000000..f4c7885d --- /dev/null +++ b/src/kv/test/tst_bt.c @@ -0,0 +1,369 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/kv/test/tst_bt.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cl.h" +#include "bv.h" +#include "bt.h" + +int iter = 10; +int keys = 10; +int seed = 1234; + +int simple = 0; +int cstr = 0; +int table = 0; +int progress = 0; + +int kmn = 0; +int kmx = 8; +int klo = 97; +int khi = 122; +int kmod = -1; +int krng = -1; + +int vmn = 0; +int vmx = 16; +int vlo = 97; +int vhi = 122; +int vmod = -1; +int vrng = -1; + +int vdf = 8; + + +uint64_t klen = 0; +uint8_t *kval = NULL; + +uint64_t vlen = 0; +uint8_t *vval = NULL; + +uint64_t glen = 0; +uint8_t *gval = NULL; + +int bs = (1024 * 1024); + +unsigned short s0[3]; +unsigned short s1[3]; + +BV *bvk = NULL; +unsigned short *bvs = NULL; + +BT *bti = NULL; +BT *bti_orig = NULL; +BT *bto = NULL; +BT *bto_orig = NULL; +BT *btx = NULL; + +int64_t bt_set_ret = 0; +#define BT_SET(k,v) \ + { \ + uint64_t ovlen = 0; \ + bt_set_ret = bt_set(bto,bti,(uint64_t)(strlen(k)+1), (uint8_t *)(k), (uint64_t)(strlen(v)+1), (uint8_t *)(v), &ovlen); \ + printf("%s %"PRIi64" = Set '%s' -> '%s'\n",bt_set_ret==strlen(v)+1 ? 
"OK " : "ERR", bt_set_ret, k, v); \ + bt_cstr(bto); \ + btx= bti; bti=bto; bto=btx; \ + if (strlen(v)+1!=bt_set_ret) return(99); \ + } + + +int64_t bt_exi_ret = 0; +#define BT_EXI(k,e) \ + bt_exi_ret = bt_exists(bti,(uint64_t)(strlen(k)+1), (uint8_t *)(k)); \ + printf("%s %"PRIi64" = Exists '%s'\n",e==bt_exi_ret ? "OK " : "ERR", bt_exi_ret, k); \ + bt_cstr(bti); \ + if (e!=bt_exi_ret) return(99) + + +int64_t bt_del_ret = 0; +#define BT_DEL(k,rc) \ + bt_del_ret = bt_del(bto,bti,(uint64_t)(strlen(k)+1), (uint8_t *)(k)); \ + printf("%s %"PRIi64" = Del '%s'\n",bt_del_ret==rc ? "OK " : "ERR", bt_del_ret, k); \ + bt_cstr(bto); \ + btx= bti; bti=bto; bto=btx + +int64_t bt_get_ret = 0; +#define BT_GET(k,v) \ + bt_get_ret = bt_get(bti,(uint64_t)(strlen(k)+1), (uint8_t*)(k), vval); \ + printf("%s %"PRIi64" = Get '%s'\n", \ + ((v==NULL && bt_get_ret==-1) || \ + ((strlen(v)+1)==bt_get_ret && memcmp(v,vval,bt_get_ret)==0)) ? "OK" : "ERR", \ + bt_get_ret, k); + +void gen_key(int ki) { + int i; + s1[0] = bvs[ki*3+0]; + s1[1] = bvs[ki*3+1]; + s1[2] = bvs[ki*3+2]; + klen = kmn + nrand48(s1)%kmod; + for(i=0; i0) { + kval[klen++] = '~'; + klen += sprintf(((char*)kval)+klen, "%d", ki); + } + + kval[klen] = 0; +} +void gen_key_val(int ki) { + int i; + gen_key(ki); + vlen = vmn + nrand48(s1)%vmod; + for(i=0; i'%s'\n", \ + ki, klen, vlen,bvi, \ + cstr ? (char*)kval : "", \ + cstr ? (char*)vval : "") + +#define PRINT_DEL(ki) if (progress) printf("Ki %d Del kl = %"PRIu64", vl = %"PRIu64" bvi %d '%s'\n", \ + ki, klen, vlen, bvi, \ + cstr ? 
(char*)kval : ""); + +#define SWAPBKT btx = bti; bti = bto; bto = btx + +int tst_bt_entry(int argc, char **argv) +{ + int i; + + char *anon[] = {NULL,NULL,NULL,NULL}; + CL args[] = {{"-n", &iter, AR_INT, "iterations per thread"}, + {"-k", &keys, AR_INT, "# of keys"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-b", &bs, AR_INT, "bucket size"}, + {"-simple", &simple, AR_FLG, "simple test"}, + {"-cstr", &cstr, AR_FLG,"c string keys and values"}, + {"-table", &table, AR_FLG, "print table"}, + {"-progress", &progress, AR_FLG, "print progress"}, + { "-kmn", &kmn, AR_INT, "kmn"}, + { "-kmx", &kmx, AR_INT, "kmx"}, + { "-klo", &klo, AR_INT, "klo"}, + { "-khi", &khi, AR_INT, "khi"}, + { "-vmn", &vmn, AR_INT, "vmn"}, + { "-vmx", &vmx, AR_INT, "vmx"}, + { "-vlo", &vlo, AR_INT, "vlo"}, + { "-vhi", &vhi, AR_INT, "vhi"}, + { "-vdf", &vdf, AR_INT, "vdf"}, + {NULL, NULL, 0, NULL}}; + + int echo = 1; + (void)cl_parse(argc,argv,args,anon,echo); + + kmod = kmx - kmn + 1; + krng = khi - klo + 1; + + vmod = vmx - vmn + 1; + vrng = vhi - vlo + 1; + + srand48(seed); + s0[0] = lrand48(); + s0[1] = lrand48(); + s0[2] = lrand48(); + + bvk = bv_new(keys); + bvs = malloc(3 * keys * sizeof(unsigned short)); + + kval = malloc(kmx); + vval = malloc(vmx); + gval = malloc(vmx); + + for (i=0; i<3*keys; i++) bvs[i] = lrand48(); + + bti = bt_new(bs,vmx,vdf, NULL, &(bti_orig)); + bto = bt_new(bs,vmx,vdf, NULL, &(bto_orig)); + + int64_t rc = -1; + + int64_t get_cnt = 0; + int64_t get_succ = 0; + int64_t get_fail = 0; + int64_t get_err = 0; + + int64_t set_cnt = 0; + int64_t set_succ = 0; + int64_t set_fail = 0; + int64_t set_err = 0; + + int64_t del_cnt = 0; + int64_t del_succ = 0; + int64_t del_fail = 0; + int64_t del_err = 0; + + + if (simple) { + //addinitial + BT_SET("abc", "defg"); + BT_SET("hijkl", "mnopqrst"); + + BT_EXI("abc", 5); + BT_EXI("cba", -1); + + BT_SET("uv", "wxyz12"); + BT_SET("3456", "7"); + BT_SET("8", "90"); + // replace a few + BT_SET("uv", "!@#$"); + BT_SET("8", "%^&*("); + 
BT_SET("8", ")_+"); + // add some + BT_SET("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"); + // delete some + BT_DEL("8", 4); + BT_DEL("3456", 2); + // delete nonexist + BT_DEL("Ui", -1); + // delete the rest + BT_DEL("ABCDEFGHIJKLM", 14); + BT_DEL("hijkl", 9); + BT_DEL("uv", 5); + BT_DEL("abc", 5); + //addinitial + BT_SET("abc", "defg"); + BT_SET("hijkl", "mnopqrst"); + BT_SET("uv", "wxyz12"); + BT_SET("3456", "7"); + BT_SET("8", "90"); + // replace a few + BT_SET("uv", "!@#$"); + BT_SET("8", "%^&*("); + BT_SET("8", ")_+"); + // add some + BT_SET("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"); + // delete some + BT_DEL("8", 4); + BT_DEL("3456", 2); + // delete nonexist + BT_DEL("Ui",-1); + // do some gets + BT_GET("hijkl", "mnopqrst"); + // BT_GET("Nope", NULL); + BT_GET("uv", "!@#$"); + //get out + return(0); + } + int bvi; + for(i=0; i +#include +#include +#include +#include +#include +#include + +#include +#include + +int tst_bv_entry(int argc, char **argv) +{ + //int i,j,k,l; + + char *anon[] = {NULL,NULL,NULL,NULL}; + int iter = 10; + int bcnt = 8; + int seed = 1234; + CL args[] = {{"-b", &bcnt, AR_INT, "block count per thread"}, + {"-n", &iter, AR_INT, "iterations per thread"}, + {"-s", &seed, AR_INT, "random seed"}, + {NULL, NULL, 0, NULL}}; + + int echo = 1; + (void)cl_parse(argc,argv,args,anon,echo); + + srand48(seed); + + BV *bv = bv_new(bcnt); + + char *bchk = malloc(bcnt); + + int i; + for(i=0; i +#include +#include +#include +#include +#include +#include +#include + +#include "cl.h" +#include "ht.h" + +int *vld = NULL; +uint64_t *blk = NULL; +HT *ht = NULL; + +int iter = 10; +int hcnt = 10; +int mcnt = 34; +int seed = 1234; +int prt = 0; + +int check_ht() { + int i; + int evld = 0; + int eblk = 0; + int v; + uint64_t b; + for(i=0; in != hcnt) printf("Fail : initial size bad\n"); + uint64_t i; + for(i=0; i +#include +#include +#include +#include +#include +#include +#include + + +#include "cl.h" +#include "iv.h" + +int tst_iv_entry(int argc, char **argv) +{ + char *anon[] = 
{NULL,NULL,NULL,NULL}; + int iter = 15; + uint64_t n = 10; + uint64_t m = 56; + int test = 1; + int seed = 1234; + int def = 1234; + CL args[] = {{"-n", &n, AR_INT64, "array size"}, + {"-m", &m, AR_INT64, "integer size"}, + {"-i", &iter, AR_INT, "iterations"}, + {"-t", &test, AR_INT, "test to run"}, + {"-s", &seed, AR_INT, "random seed"}, + {NULL, NULL, 0, NULL}}; + + int echo = 1; + (void)cl_parse(argc,argv,args,anon,echo); + + srand48(seed); + + IV *iv = iv_new(n, m); + uint64_t mask = 1; + mask <<= m; + mask -= 1; + + uint64_t errs = 0; + + printf("Running test %d\n", test); + switch (test) { + case 0: + { + uint64_t *arr = malloc(n * sizeof(uint64_t)); + memset(arr, 0x00, n * sizeof(uint64_t)); + int i; + for(i=0; i%llx %llx\n", ind, v0, v1); + if (v0!=v1) errs++; + break; + } + case 1: + { + uint64_t val = lrand48(); + val <<= 32; + val |= lrand48(); + val &= mask; + arr[ind] = val; + // printf("Setting %"PRIu64"->%"PRIu64"\n", ind, val); + iv_set(iv,ind,val); + break; + } + } + } + break; + } + case 1: + { + uint64_t i; + for (i=0; i +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cl.h" +#include "arkdb.h" + +char *device = NULL; + +//int main(int argc, char **argv) +int tst_persist_entry(int argc, char **argv) +{ + int i; + int rc = 0; + int64_t res; + //uint64_t flags = ARK_KV_VIRTUAL_LUN; + uint64_t flags = 0; + int64_t keycnt = 0; +#define VALUE_BUFFER_SIZE 256 + char val_buffer[VALUE_BUFFER_SIZE]; +#define NUM_KEYS 3 + char *keys[NUM_KEYS] = {"abc", "defg", "hijkl" }; + char *vals[NUM_KEYS] = {"123", "4567", "89012" }; + + char *anon[] = {NULL,NULL,NULL,NULL}; + + CL args[] = { + { (char*)"-dev", &device, AR_STR, (char*)"Device to be used for store"}, + { NULL, NULL, 0, NULL} + }; + + int echo = 0; + (void)cl_parse(argc,argv,args,anon,echo); + + ARK *ark = NULL; + rc = ark_create_verbose(device, &ark, 1024 * 1024, 4096, 4096, 20, + 256, 256, flags|ARK_KV_PERSIST_STORE); + //rc = ark_create(device, &ark, 
flags|ARK_KV_PERSIST_STORE); + + if (rc != 0) + { + fprintf(stderr, "ark_create failed\n"); + return 1; + } + + + for (i = 0; i < NUM_KEYS; i++) + { + rc = ark_set(ark, (uint64_t)strlen(keys[i]), + (void *)keys[i], (uint64_t)strlen(vals[i]), + (void *)vals[i], &res); + if ( rc != 0 ) + { + printf("ark_set failed for key: %d (%d)\n", i, rc); + break; + } + } + + if ( rc == 0 ) + { + ark_delete(ark); + + sleep(2); + + rc = ark_create(device, &ark, flags|ARK_KV_PERSIST_LOAD); + if (rc != 0) + { + printf("ark_create persist open failed: %d\n", rc); + } + } + + if (rc == 0) + { + for (i = 0; i < NUM_KEYS; i++) + { + res = -1; + memset(&(val_buffer[0]), 0, VALUE_BUFFER_SIZE); + rc = ark_get(ark, (uint64_t)strlen(keys[i]), + (void *)keys[i], VALUE_BUFFER_SIZE, + (void *)&(val_buffer[0]), 0, &res); + if (rc != 0) + { + printf("ark_get failed: %d (%d)\n", i, rc); + break; + } + + if (res != (uint64_t)strlen(vals[i])) + { + printf("ark_get didn't return correct len: exp: %"PRIu64" act: %"PRIu64"\n", (uint64_t)strlen(vals[i]), res); + break; + } + + printf("Key: %s, value: %s\n", keys[i], vals[i]); + } + + rc = ark_count(ark, &keycnt); + if (rc != 0) + { + printf("ark_count failed: %d\n", rc); + } + else + { + if (keycnt != NUM_KEYS) + { + printf("Wrong number of keys: %"PRIu64"\n", keycnt); + rc = -1; + } + else + { + printf("Correct number of keys: %"PRIu64"\n", keycnt); + } + } + } + + ark_delete(ark); + + return rc; +} diff --git a/src/kv/test/tst_tg.c b/src/kv/test/tst_tg.c new file mode 100644 index 00000000..768403e0 --- /dev/null +++ b/src/kv/test/tst_tg.c @@ -0,0 +1,410 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/test/tst_tg.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ct.h" + +#include "cl.h" +#include "bv.h" +#include "tg.h" + +#include "ark.h" +#include "arkdb.h" + +int iter = 10; +int npool = 4; +int nqueue = 1024; + +int nclient = 3; +int nasync = 128; + + +int64_t bs = 4096; +int64_t size = 4096 * 1024; +int64_t hc = 1024 * 1024; +int bref = 34; +int grow = 1024; + +int nkey = 64; +int seed = 1234; + +int kmn = 0; +int kmx = 8; +int klo = 97; +int khi = 122; +int kmod = -1; +int krng = -1; + +int vmn = 0; +int vmx = 16; +int vlo = 97; +int vhi = 122; +int vmod = -1; +int vrng = -1; + +int cstr_ = 0; + +char *dev = NULL; + +void gen_keyval(int id, int ki, unsigned short *isd, + uint64_t *kl, uint8_t *key, + uint64_t *vl, uint8_t *val, + int discard) { + unsigned short sd[3]; + //printf("in (%d,%d)] with [%d %d %d]\n",id, ki,isd[0], isd[1], isd[2]); + sd[0] = isd[0]; + sd[1] = isd[1]; + sd[2] = isd[2]; + int i; + int klen = 0; + if (id) { + klen = sprintf((char *)key,"_%d_", id); + } + if (ki) { + klen += sprintf((char *)key+klen,".%d.", ki); + } + /* if (ki) { */ + /* sprintf(key+7, "%6d_", ki */ + /* } */ + int len = kmn + nrand48(sd) % kmod; + for (i=0; iid, ct->seed[0],ct->seed[1],ct->seed[2]); + + BV *kvmap = bv_new(nkey); + unsigned short *kvseed = malloc (nkey * 3 * 
sizeof(unsigned short)); + int i; + for (i=0; i<3*nkey; i++) kvseed[i] = nrand48(ct->seed); + + + // enough buffer space to support async number of operations + uint8_t **keys = malloc(nasync * sizeof(uint8_t *)); + uint8_t *keyspace = malloc(nasync * (kmx + 8)); + for(i=0; iseed) % nkey; + + if (tg_left(tg)==0) { + do { + erc = ark_anyreturn((_ARC *)arc, &reclaimed, &returned); + } while (erc); +#define GOTIT \ + printf("%sGot completed operations tag _%d_.%d. %s returned %"PRIi64" %c= expected %"PRIi64"%s\n", \ + C_Green, ct->id, reclaimed, \ + returned==expres[tgmap[reclaimed]] ? C_Green : C_Red , returned, \ + returned==expres[tgmap[reclaimed]] ? '=' : '!' ,expres[tgmap[reclaimed]], C_Reset) \ + + GOTIT; + if (erc) { + printf("Huh?\n"); + } else { + //printf("C%p tg_return(%d)\n", arc, reclaimed); + tg_return(tg, tgmap[reclaimed]); + } + } + + ai = tg_get(tg); + op = nrand48(ct->seed) % 2; + switch (op) { + case 0: //set + { + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],1); + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],0); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow,bv_get(kvmap,ki) ? '!' : '+', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + // int ark_set_tag(arc, ai, + tag = ark_set_async_tag((_ARC *)arc, klen[ai], keys[ai], vlen[ai], vals[ai]); + expres[ai] = vlen[ai]; + if (tag < 0) { + exit(2); + } else { + tgmap[tag] = ai; + } + bv_set(kvmap,ki); + break; + } + case 1: //del + { + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],1); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow, bv_get(kvmap,ki) ? '-' : '~', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + tag = ark_del_async_tag((_ARC *)arc, klen[ai], keys[ai]); + if (tag<0) { + printf("exit(3)\n"); + exit(3); + } + tgmap[tag] = ai; + expres[ai] = bv_get(kvmap,ki) ? 
vlen[ai] : -1; + bv_clr(kvmap,ki); + break; + } + } + } + // wait for all to complete + while (tg_left(tg) < nasync) { + erc = -1; + do { + erc = ark_anyreturn((_ARC *)arc, &reclaimed, &returned); + } while (erc); + GOTIT; + if (erc) { + printf("Huh #2? %d\n", erc); + } else { + //printf("C%p finalizing set/del tg_return(%d)\n", arc, reclaimed); + tg_return(tg, tgmap[reclaimed]); + } + //ai = tg_get(tg); + } + // do get operations + for (i=0; iseed) % nkey; + if (tg_left(tg)==0) { + do { + erc = ark_anyreturn((_ARC *)arc, &reclaimed, &returned); + } while (erc); + GOTIT;//printf("Got completed operations tag %d\n", reclaimed); + if (erc) { + printf("Huh?\n"); + } else { + //printf("C%p tg_return(%d)\n", arc, reclaimed); + tg_return(tg, tgmap[reclaimed]); + } + } + + ai = tg_get(tg); + gen_keyval(ct->id,ki,kvseed + ki*3, klen + ai, keys[ai], vlen + ai, vals[ai],0); + printf("%s%c(%d,%d)'%s'->'%s' tag [%d,%d]%s\n", C_Yellow, bv_get(kvmap,ki) ? '$' : '?', + ct->id, ki, keys[ai], vals[ai], ct->id,ai, C_Reset); + tag = ark_get_async_tag((_ARC *)arc, klen[ai], keys[ai], vlen[ai], vals[ai],0); + if (tag<0) { + printf("exit(4)\n"); + exit(4); + } + tgmap[tag] = ai; + expres[ai] = bv_get(kvmap,ki) ? vlen[ai] : -1; + } + + // wait for all to complete + while (tg_left(tg) < nasync) { + erc = -1; + do { + erc = ark_anyreturn((_ARC *)arc, &reclaimed, &returned); + } while (erc); + GOTIT; + if (erc) { + printf("Huh #2? 
%d\n", erc); + } else { + //printf("C%p finalizing set/del tg_return(%d)\n", arc, reclaimed); + tg_return(tg, tgmap[reclaimed]); + } + //ai = tg_get(tg); + } + + + // print present keys + for(i=0; iid,i,kvseed + i*3, klen + i, keys[i], vlen + i, vals[i],0); + printf(":(%d,%d)'%s'->'%s'\n", ct->id,i, keys[i], vals[i]); + } + } + + printf("Client %d complete\n", ct->id); + return NULL; +} + +int tst_tg_entry(int argc, char **argv) { + + char *anon[] = {NULL,NULL,NULL,NULL}; + CL args[] = {{"-n", &iter, AR_INT, "iterations per client"}, + {"-c", &nclient, AR_INT, "# of clients"}, + {"-p", &npool, AR_INT, "# of threads in processing pool"}, + {"-bs", &bs, AR_INT64, "block size"}, + {"-size", &size, AR_INT64, "inital storage size"}, + {"-hc", &hc, AR_INT64, "hash count"}, + {"-vlim", &vlim, AR_INT64, "value limit for bucket store"}, + {"-bref", &bref, AR_INT, "block ref bits"}, + {"-grow", &grow, AR_INT, "# of blocks to grow by"}, + {"-k", &nkey, AR_INT, "# of keys"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-q", &nqueue, AR_INT, "# of queue entries per thread in the pool"}, + {"-a", &nasync, AR_INT, "# of allowed async ops per client"}, + {"-kmn", &kmn, AR_INT, "kmn"}, + {"-kmx", &kmx, AR_INT, "kmx"}, + {"-klo", &klo, AR_INT, "klo"}, + {"-khi", &khi, AR_INT, "khi"}, + {"-vmn", &vmn, AR_INT, "vmn"}, + {"-vmx", &vmx, AR_INT, "vmx"}, + {"-vlo", &vlo, AR_INT, "vlo"}, + {"-vhi", &vhi, AR_INT, "vhi"}, + {"-cstr_", &cstr_, AR_FLG,"c string keys and values"}, + {"-dev", &dev, AR_STR, "Device to be used for store"}, + /* {"-simple", &simple, AR_FLG, "simple test"}, */ + /* {"-table", &table, AR_FLG, "print table"}, */ + /* {"-progress", &progress, AR_FLG, "print progress"}, */ + {NULL, NULL, 0, NULL}}; + + // TODO: Until tst_tg.c is updated to handle the changes + // of removing ark_connect / ark_disconnect, we + // exit with 0 to report "success". + // The problem is that no longer does each thread + // have it's own tag space. 
instead, the tag space + // is global to the ark instance. + exit(0); + int echo = 1; + (void)cl_parse(argc,argv,args,anon,echo); + + kmod = kmx - kmn + 1; + krng = khi - klo + 1; + + vmod = vmx - vmn + 1; + vrng = vhi - vlo + 1; + + int rc = ark_create_verbose(dev, &ark, size, bs, hc, + npool, nqueue, 256, ARK_KV_VIRTUAL_LUN); + + if (rc) { + printf("bad create return %d\n", rc); + exit(1); + } + + CT *ct = malloc(nclient * sizeof(CT)); + pthread_t *clients = malloc(nclient * sizeof(pthread_t)); + + srand48(seed); + + struct timeval tv; + struct timeval post_tv; + uint64_t ops = 0; + uint64_t post_ops = 0; + uint64_t io_cnt = 0; + uint64_t post_io_cnt = 0; + + (void)gettimeofday(&tv, NULL); + (void)ark_stats(ark, &ops, &io_cnt); + + int i; + for(i=0; i +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cl.h" +#include "ut.h" +#include "ll.h" + +#include "vi.h" + + +int tst_vi_entry(int argc, char **argv) { + + char *anon[] = {"foo","bar","doo","dah",NULL,NULL,NULL,NULL}; + int echo = 1; + int seed = 1234; + int iter = 1000; + int prt = 0; + CL args[] = {{"-p", &prt, AR_FLG, "print flag"}, + {"-s", &seed, AR_INT, "random seed"}, + {"-n", &iter, AR_INT, "iterations"}, + {NULL, NULL, 0, NULL}}; + (void)cl_parse(argc,argv,args,anon,echo); + + + uint64_t *arr = malloc(iter * sizeof(uint64_t)); + uint64_t *res = malloc(iter * sizeof(uint64_t)); + + srand48(seed); + + uint64_t h,l; + int i; + for (i=0; i +#include +#include "am.h" + +#include "tg.h" + +int tg_sizeof(int n) { + return sizeof(TG) + n*sizeof(int) + n*sizeof(char); +} + +TGP tg_init(TGP tg, int n) { + if (tg==NULL) { + tg = am_malloc(tg_sizeof(n)); + } + tg->size = n; + tg->left = n; + (void)pthread_mutex_init(&(tg->tg_lock), NULL); + tg->avail = ((char *)&(tg->stack[0])) + n * sizeof(int); + int i; + for(i=0; iavail[i] = 1; + tg->stack[i] = (n-1) - i; + } + return tg; +} + +int tg_left(TGP tg) { + if(tg!=NULL) + return tg->left; + else + return -1; +} + +int 
tg_get(TGP tg) { + int tag; + pthread_mutex_lock(&(tg->tg_lock)); + if (tg->left > 0) { + tag = tg->stack[tg->left-1]; + tg->avail[tag] = 0; + tg->left--; + } else { + tag = -1; + } + pthread_mutex_unlock(&(tg->tg_lock)); + return tag; +} + +void tg_return(TGP tg, int tag) { + pthread_mutex_lock(&(tg->tg_lock)); + if (tag<0 || tag>=tg->size) { + fprintf(stderr,"Bad tag return %d\n", tag); + pthread_mutex_unlock(&(tg->tg_lock)); + exit(1); + } + if (tg->avail[tag]==0) { + tg->stack[tg->left] = tag; + tg->avail[tag]= 1; + tg->left++; + } + pthread_mutex_unlock(&(tg->tg_lock)); +} diff --git a/src/kv/tg.h b/src/kv/tg.h new file mode 100644 index 00000000..cca979c4 --- /dev/null +++ b/src/kv/tg.h @@ -0,0 +1,53 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/tg.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef __TG_H__ +#define __TG_H__ + +//#include +//#include +#include + +typedef struct _tg { + int size; + int left; + pthread_mutex_t tg_lock; + char *avail; + int stack[]; +} TG; + +typedef TG * TGP; + +int tg_sizeof(int n); + +TGP tg_init(TGP tg, int n); + +int tg_left(TGP tg); + +int tg_get(TGP tg); + +void tg_return(TGP tg, int tag); + +#endif diff --git a/src/kv/ut.c b/src/kv/ut.c new file mode 100644 index 00000000..4193374b --- /dev/null +++ b/src/kv/ut.c @@ -0,0 +1,133 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/ut.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include + +#include + +#include +#include + +#include +#ifndef _AIX +#include +#endif +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "am.h" +#include "ut.h" + +uint64_t divup(uint64_t n, uint64_t m) { + return n/m + (n%m==0 ? 0 : 1); +} + +uint64_t divceil(uint64_t n, uint64_t m) { + return n/m + (n%m==0 ? 
0 : 1); +} + +char *rndalpha(int n, int m) { + int i; + char *ret = am_malloc(n + 1); + if (ret) { + for (i=0; i + +uint64_t divup(uint64_t n, uint64_t m); + +uint64_t divceil(uint64_t n, uint64_t m); + +char *rndalpha(int n, int m); + +void expandif(void **buf, uint64_t *len, uint64_t req); + +double time_per_tick(int n, int del); + + +#if defined(__powerpc__) || defined(__ppc__) +typedef uint64_t ticks; +static __inline__ ticks getticks(void) +{ + unsigned int x, x0, x1; + do { + __asm__ __volatile__ ("mftbu %0" : "=r"(x0)); + __asm__ __volatile__ ("mftb %0" : "=r"(x)); + __asm__ __volatile__ ("mftbu %0" : "=r"(x1)); + } while (x0 != x1); + + return (((uint64_t)x0) << 32) | x; +} +#endif + +#if defined(__x86_64__) +typedef uint64_t ticks; +static __inline__ ticks getticks(void) +{ + uint32_t x, y; + asm volatile("rdtsc" : "=a" (x), "=d" (y)); + return ((ticks)x) | (((ticks)y) << 32); +} +#endif + +static __inline__ double elapsed(ticks t1, ticks t0) +{ + return (double)t1 - (double)t0; +} + +#endif //__UT_H__ diff --git a/src/kv/vi.c b/src/kv/vi.c new file mode 100644 index 00000000..74995b54 --- /dev/null +++ b/src/kv/vi.c @@ -0,0 +1,62 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/vi.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include "vi.h" + +uint64_t vi_enc64(uint64_t n, uint8_t *buf) { + uint64_t len = 0; + int done = 0; + while (!done) { + if (n<128) { + buf[len++] = n & 0x000000000000007F; + done = 1; + } else { + buf[len++] = (n & 0x000000000000007F) | 0x0000000000000080; + n >>= 7; + } + } + return len; +} + + +uint64_t vi_dec64(uint8_t *buf, uint64_t *n) { + uint64_t m = 0; + int len = 0; + int done = 0; + uint64_t tmp = 0; + while (!done) { + if (buf[len] & 0x80) { + tmp = buf[len] & 0x7F; + } else { + tmp = buf[len]; + done = 1; + } + m |= tmp << (len * 7); + len++; + } + *n = m; + return len; +} + diff --git a/src/kv/vi.h b/src/kv/vi.h new file mode 100644 index 00000000..cf293a83 --- /dev/null +++ b/src/kv/vi.h @@ -0,0 +1,34 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/kv/vi.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __VI_H__ +#define __VI_H__ + +#include + +uint64_t vi_enc64(uint64_t n, uint8_t *buf); + +uint64_t vi_dec64(uint8_t *buf, uint64_t *n); + +#endif diff --git a/src/lib/arkalloc/makefile b/src/lib/arkalloc/makefile new file mode 100644 index 00000000..50cb87fe --- /dev/null +++ b/src/lib/arkalloc/makefile @@ -0,0 +1,45 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/lib/arkalloc/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../../.. + + + +OBJS = zmalloc.o +OBJS64 = zmalloc.64o +MODULE = arkalloc + +MODLIBS = +MODULE_LINKLIBS = + +LINKLIBS = -l${MODULE} +LIBPATHS = -L${ROOTPATH}/img +EXPFLAGS = -bexpall + +CFLAGS += $(KV_CFLAGS) + +SUBDIRS = + + +include ${ROOTPATH}/config.mk diff --git a/src/lib/arkalloc/zmalloc.c b/src/lib/arkalloc/zmalloc.c new file mode 100644 index 00000000..3423c062 --- /dev/null +++ b/src/lib/arkalloc/zmalloc.c @@ -0,0 +1,34 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/lib/arkalloc/zmalloc.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "zmalloc.h" + + +void *zmalloc(size_t size) { return malloc(size); } + +void *zrealloc(void *ptr, size_t size) {return realloc(ptr, size); } + +void zfree(void *ptr) { free(ptr); } + + diff --git a/src/lib/libcxl-be/libcxl.so b/src/lib/libcxl-be/libcxl.so new file mode 100644 index 00000000..f7aad1c3 Binary files /dev/null and b/src/lib/libcxl-be/libcxl.so differ diff --git a/src/lib/libcxl-le/libcxl.so b/src/lib/libcxl-le/libcxl.so new file mode 100644 index 00000000..cafaaf4e Binary files /dev/null and b/src/lib/libcxl-le/libcxl.so differ diff --git a/src/lib/makefile b/src/lib/makefile new file mode 100644 index 00000000..7202413c --- /dev/null +++ b/src/lib/makefile @@ -0,0 +1,49 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/lib/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. 
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../.. + +all: + +OBJS = + +SUBDIRS = arkalloc.d + +code_pass: ${IMGDIR}/libcxl.so + +${IMGDIR}/libcxl.so: + @echo "WARNING: TODO: Need to pick up libcxl.so from external toolchain dirs. This is temporary." + mkdir -p ${IMGDIR} + @if [ "${TARGET_PLATFORM}" = "PPC64EL" ]; then \ + echo "Copying little-endian libcxl"; \ + cp libcxl-le/libcxl.so ${IMGDIR}; \ + else \ + echo "Copying big-endian libcxl"; \ + cp libcxl-be/libcxl.so ${IMGDIR}; \ + fi + @echo "Done patching" + + +include ${ROOTPATH}/config.mk + diff --git a/src/makefile b/src/makefile new file mode 100644 index 00000000..abb918c1 --- /dev/null +++ b/src/makefile @@ -0,0 +1,56 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +ROOTPATH = .. + +.NOTPARALLEL: + +#common is built first to satisfy dependencies elsewhere. We likely need to support +#different makefile passes for exported libs vs regular programs, libs, and shared libs. +SUBDIRS = lib.d common.d block.d build.d kv.d test.d cflash.d provisioning.d + +BASE_OBJECTS = + +BASE_MODULES = trace errl devicefw scom xscom initservice taskargs \ + pnor i2c fsi vfs + +EXTENDED_MODULES = targeting ecmddatabuffer hwpf fapi hwp plat \ + extinitsvc istepdisp hwas fsiscom + +DIRECT_BOOT_MODULES = example +RUNTIME_MODULES = +TESTCASE_MODULES = cxxtest testerrl testdevicefw testsyslib \ + testscom testxscom testtargeting testinitservice testkernel \ + testhwpf testecmddatabuffer initsvctasktest2 testcxxtest \ + testpnor testi2c testfsi testvfs testhwas + +RELOCATABLE_IMAGE_LDFLAGS = -pie --export-dynamic + +test: MKTESTSDIR + +MKTESTSDIR: + @-mkdir -p $(ROOTPATH)/obj/tests + +include ${ROOTPATH}/config.mk diff --git a/src/master/block_alloc.c b/src/master/block_alloc.c new file mode 100755 index 00000000..ae294156 --- /dev/null +++ b/src/master/block_alloc.c @@ -0,0 +1,384 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/block_alloc.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include + +#include "block_alloc.h" +#include "block_alloc_internal.h" + +/************************************************************** + * * + * LUN BIT map table * + * * + * 0 1 2 3 4 5 6 ... 63 * + * 64 65 66 67 68 69 70 ... 127 * + * ...... * + * * + **************************************************************/ + + +/************************************************************** + * * + * Defines * + * * + **************************************************************/ + +/* Bit operations */ +#define SET_BIT(num, bit_pos) num |= (uint64_t)0x01 << (63-bit_pos); +#define CLR_BIT(num, bit_pos) num &= ~((uint64_t)0x01 << (63-bit_pos)); +#define TEST_BIT(num, bit_pos) (num & ((uint64_t)0x01 << (63-bit_pos))) + + +/************************************************************** + * * + * Function Prototypes * + * * + **************************************************************/ +static int find_free_bit(uint64_t lun_map_entry); + + +/************************************************************** + * * + * Extern variables * + * * + **************************************************************/ + +extern unsigned int trc_lvl; + +int +ba_init(ba_lun_t *ba_lun) +{ + lun_info_t *lun_info = NULL; + int lun_size_au = 0, i = 0; + int last_word_underflow = 0; + + /* Allocate lun_fino */ + lun_info = (lun_info_t *) malloc(sizeof(lun_info_t)); + if (lun_info == NULL) { + BA_TRACE_1(LOG_ERR, "block_alloc: Failed to allocate lun_info for lun_id %lx\n", ba_lun->lun_id); + return -1; 
+ } + + /* Lock and bzero the allocated lun_info */ + mlock(lun_info, sizeof(lun_info_t)); + bzero(lun_info, sizeof(lun_info_t)); + + BA_TRACE_3(LOG_INFO, "block_alloc: Initializing LUN: lun_id = %lx, ba_lun->lsize = %lx, ba_lun->au_size = %lx\n", ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); + + /* Calculate bit map size */ + lun_size_au = ba_lun->lsize / ba_lun->au_size; +#ifdef _FILEMODE_ + if (lun_size_au == 0) { + lun_size_au = 1; + } +#endif /* _FILEMODE_ */ + lun_info->total_aus = lun_size_au; + lun_info->lun_bmap_size = lun_size_au / 64; + if (lun_size_au % 64 > 0) + lun_info->lun_bmap_size++; + + /* Allocate bitmap space */ + lun_info->lun_alloc_map = (uint64_t *)malloc(lun_info->lun_bmap_size * sizeof(uint64_t)); + if (lun_info->lun_alloc_map == NULL) { + BA_TRACE_1(LOG_ERR, "block_alloc: Failed to allocate lun allocation map: lun_id = %lx\n", ba_lun->lun_id); + free (lun_info); + return -1; + } + + /* Lock and initialize the bit map */ + mlock(lun_info->lun_alloc_map, + lun_info->lun_bmap_size * sizeof(uint64_t)); + lun_info->free_aun_cnt = lun_size_au; + + /* set all bits in bitmap to '1' */ + for (i = 0; i < lun_info->lun_bmap_size; i++) { + lun_info->lun_alloc_map[i] = (uint64_t)~0; + } + + /* If the last word is not fully utilized, mark the extra bits as allocated */ + last_word_underflow = (lun_info->lun_bmap_size * 64) - lun_info->free_aun_cnt; + if (last_word_underflow > 0 ) { + for ( i = 63-last_word_underflow+1; i < 64 ; i++) { + CLR_BIT(lun_info->lun_alloc_map[lun_info->lun_bmap_size-1], i); + } + } + + /* Initialize elevator indices */ + lun_info->free_low_idx = 0; + lun_info->free_curr_idx = 0; + lun_info->free_high_idx = lun_info->lun_bmap_size; + + /* Allocate clone map */ + lun_info->aun_clone_map = (unsigned char *)malloc(lun_info->total_aus * sizeof(unsigned char)); + if (lun_info->aun_clone_map == NULL) { + BA_TRACE_1(LOG_ERR, "block_alloc: Failed to allocate clone map: lun_id = %lx\n", ba_lun->lun_id); + free 
(lun_info->lun_alloc_map); + free (lun_info); + return -1; + } + + mlock(lun_info->aun_clone_map, lun_info->total_aus * sizeof(unsigned char)); + bzero(lun_info->aun_clone_map, lun_info->total_aus * sizeof(unsigned char)); + + ba_lun->ba_lun_handle = (void *)lun_info; + + BA_TRACE_3(LOG_INFO, "block_alloc: Successfully initialized the LUN: lun_id = %lx, bitmap size = %x, free_aun_cnt = %lx\n", ba_lun->lun_id, lun_info->lun_bmap_size, lun_info->free_aun_cnt); + + return 0; + +} /* End of ba_init */ + + +static int +find_free_bit(uint64_t lun_map_entry) +{ + int pos = -1; + +#ifndef _AIX + asm volatile ( "cntlzd %0, %1": "=r"(pos) : "r"(lun_map_entry) ); +#else + int i = 0; + for (i = 1; i <= 64; i++) { + if (lun_map_entry & ((uint64_t)0x1 << (64 - i))) { + pos = i-1; + break; + } + } /* End of for */ +#endif + + return pos; + +} /* End of find_free_bit */ + + +aun_t +ba_alloc(ba_lun_t *ba_lun) +{ + aun_t bit_pos = -1; + int i = 0; + lun_info_t *lun_info = NULL; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + BA_TRACE_2(LOG_INFO, "block_alloc: Received block allocation request: lun_id = %lx, free_aun_cnt = %lx\n", ba_lun->lun_id, lun_info->free_aun_cnt); + + if (lun_info->free_aun_cnt == 0) { + BA_TRACE_1(LOG_ERR, "block_alloc: No space left on LUN: lun_id = %lx\n", ba_lun->lun_id); + return (aun_t)-1; + } + + /* Search for free entry between free_curr_idx and free_high_idx */ + for (i = lun_info->free_curr_idx; i < lun_info->free_high_idx; i++) + { + if (lun_info->lun_alloc_map[i] != 0) { + + /* There are some free AUs .. 
find free entry */ + bit_pos = find_free_bit(lun_info->lun_alloc_map[i]); + + BA_TRACE_3(LOG_INFO, "block_alloc: Found free bit %lx in lun map entry %lx at bitmap index = %x\n", bit_pos, lun_info->lun_alloc_map[i], i); + + lun_info->free_aun_cnt--; + CLR_BIT(lun_info->lun_alloc_map[i], bit_pos); + + break; + } + } + + if (bit_pos == -1) { + /* Search for free entry between free_low_idx and free_curr_idx */ + for (i = lun_info->free_low_idx; i < lun_info->free_curr_idx; i++) + { + if (lun_info->lun_alloc_map[i] != 0) { + + /* There are some free AUs .. find free entry */ + bit_pos = find_free_bit(lun_info->lun_alloc_map[i]); + + BA_TRACE_3(LOG_INFO, "block_alloc: Found free bit %lx in lun map entry %lx at bitmap index = %x\n", bit_pos, lun_info->lun_alloc_map[i], i); + + lun_info->free_aun_cnt--; + CLR_BIT(lun_info->lun_alloc_map[i], bit_pos); + + break; + } + } + } + + + if (bit_pos == -1) { + BA_TRACE_1(LOG_ERR, "block_alloc: Could not find an allocation unit on LUN: lun_id = %lx\n", ba_lun->lun_id); + return (aun_t)-1; + } + + /* Update the free_curr_idx */ + if ( bit_pos == 63 ) + lun_info->free_curr_idx = i + 1; + else + lun_info->free_curr_idx = i; + + BA_TRACE_3(LOG_INFO, "block_alloc: Allocating AU number %lx, on lun_id %lx, free_aun_cnt = %lx\n", (i * 64 + bit_pos), ba_lun->lun_id, lun_info->free_aun_cnt); + return (aun_t)(i * 64 + bit_pos); + +} /* End of ba_alloc */ + + +static int +validate_alloc(lun_info_t *lun_info, aun_t aun) +{ + int idx = 0, bit_pos = 0; + + idx = aun / 64; + bit_pos = aun % 64; + + if (TEST_BIT(lun_info->lun_alloc_map[idx], bit_pos)) { + return -1; + } + + return 0; +} /* End of validate_alloc */ + + +int +ba_free(ba_lun_t *ba_lun, aun_t to_free) +{ + int idx = 0, bit_pos = 0; + lun_info_t *lun_info = NULL; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + if (validate_alloc(lun_info, to_free) != 0) { + BA_TRACE_2(LOG_ERR, "block_alloc: The AUN %lx is not allocated on lun_id %lx\n", to_free, ba_lun->lun_id); + return -1; + } 
+ + BA_TRACE_3(LOG_INFO, "block_alloc: Received a request to free AU %lx on lun_id %lx, free_aun_cnt = %lx\n", to_free, ba_lun->lun_id, lun_info->free_aun_cnt); + + + if (lun_info->aun_clone_map[to_free] > 0) { + BA_TRACE_3(LOG_INFO, "block_alloc: AU %lx on lun_id %lx has been cloned. Clone count = %x\n", to_free, ba_lun->lun_id, lun_info->aun_clone_map[to_free]); + lun_info->aun_clone_map[to_free]--; + return 0; + } + + idx = to_free / 64; + bit_pos = to_free % 64; + + SET_BIT(lun_info->lun_alloc_map[idx], bit_pos); + lun_info->free_aun_cnt++; + + if (idx < lun_info->free_low_idx) + lun_info->free_low_idx = idx; + else if (idx > lun_info->free_high_idx) + lun_info->free_high_idx = idx; + + BA_TRACE_4(LOG_INFO, "block_alloc: Successfully freed AU at bit_pos %x, bit map index %x on lun_id %lx, free_aun_cnt = %lx\n", bit_pos, idx, ba_lun->lun_id, lun_info->free_aun_cnt); + return 0; + +} /* End of ba_free */ + + +int +ba_clone(ba_lun_t *ba_lun, aun_t to_clone) +{ + lun_info_t *lun_info = NULL; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + if (validate_alloc(lun_info, to_clone) != 0) { + BA_TRACE_2(LOG_ERR, "block_alloc: AUN %lx is not allocated on lun_id %lx\n", to_clone, ba_lun->lun_id); + return -1; + } + + BA_TRACE_2(LOG_INFO, "block_alloc: Received a request to clone AU %lx on lun_id %lx\n", to_clone, ba_lun->lun_id); + + if (lun_info->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { + BA_TRACE_2(LOG_ERR, "block_alloc: AUN %lx on lun_id %lx has hit max clones already\n", to_clone, ba_lun->lun_id); + return -1; + } + + lun_info->aun_clone_map[to_clone]++; + + return 0; +} /* End of ba_clone */ + + +uint64_t +ba_space(ba_lun_t *ba_lun) +{ + lun_info_t *lun_info = NULL; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + return (lun_info->free_aun_cnt); +} /* End of ba_space */ + +#ifdef BA_DEBUG +void +dump_ba_map(ba_lun_t *ba_lun) +{ + lun_info_t *lun_info = NULL; + int i = 0, j = 0; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + 
printf("Dumping block allocation map: map size = %d\n", lun_info->lun_bmap_size); + + for (i = 0; i < lun_info->lun_bmap_size; i++) { + printf("%4d ", i * 64); + for (j = 0; j < 64; j++) { + if ( j % 4 == 0) + printf(" "); + printf("%1d", TEST_BIT(lun_info->lun_alloc_map[i], j) ? 1:0); + } + printf("\n"); + } + + printf("\n"); + return; +} + +void +dump_ba_clone_map(ba_lun_t *ba_lun) +{ + lun_info_t *lun_info = NULL; + int i = 0; + + lun_info = (lun_info_t *)ba_lun->ba_lun_handle; + + printf("Dumping clone map: map size = %d\n", lun_info->total_aus); + + for (i = 0; i < lun_info->total_aus; i++) { + if (i % 64 == 0) { + printf("\n%3d", i); + } + + if (i %4 == 0) + printf(" "); + printf("%2x", lun_info->aun_clone_map[i]); + } + + printf("\n"); + return; +} +#endif diff --git a/src/master/block_alloc.h b/src/master/block_alloc.h new file mode 100755 index 00000000..efa85b9e --- /dev/null +++ b/src/master/block_alloc.h @@ -0,0 +1,52 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/block_alloc.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
#ifndef _BLOCK_ALLOC_H
#define _BLOCK_ALLOC_H

/*
 * Public interface of the block allocator: manages Allocation Units
 * (AUs) on a lun for the master server.
 *
 * NOTE(review): the #include lines in this patch were garbled (header
 * names stripped during extraction).  Restored to the headers these
 * declarations actually require -- confirm against the pristine source.
 */
#include <stddef.h>     /* size_t */
#include <stdint.h>     /* uint64_t */

/* Allocation Unit number: index of an AU within a lun. */
typedef size_t aun_t;

/* Caller-supplied description of a lun managed by the block allocator. */
typedef struct ba_lun {
    uint64_t lun_id;
    uint64_t wwpn;
    size_t lsize;           /* Lun size in number of LBAs */
    size_t lba_size;        /* LBA size in number of bytes */
    size_t au_size;         /* Allocation Unit size in number of LBAs */
    void *ba_lun_handle;    /* opaque allocator state, owned by ba_init() */
} ba_lun_t;

int ba_init(ba_lun_t *ba_lun);                    /* build allocator state */
aun_t ba_alloc(ba_lun_t *ba_lun);                 /* allocate one AU */
int ba_free(ba_lun_t *ba_lun, aun_t to_free);     /* drop a clone ref / free an AU */
int ba_clone(ba_lun_t *ba_lun, aun_t to_clone);   /* add a clone reference */
uint64_t ba_space(ba_lun_t *ba_lun);              /* number of free AUs */

#ifdef BA_DEBUG
void dump_ba_map(ba_lun_t *ba_lun);
void dump_ba_clone_map(ba_lun_t *ba_lun);
#endif

#endif /* _BLOCK_ALLOC_H */
#ifndef _BLOCK_ALLOC_INTERNAL_H
#define _BLOCK_ALLOC_INTERNAL_H

/*
 * Internal state and trace macros for the block allocator.
 *
 * NOTE(review): the #include names were stripped when this patch was
 * captured; the list below is reconstructed from what this header and
 * block_alloc.c visibly use (syslog, stdio, uint64_t, ...) -- verify
 * against the pristine source, including which header the _AIX guard
 * originally excluded.
 */
#include <stdio.h>
#ifndef _AIX
#include <malloc.h>     /* presumably the AIX-guarded include -- TODO confirm */
#endif /* !_AIX */
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <syslog.h>

/* An AU may be cloned at most this many times: the per-AU clone count
   is stored in an unsigned char. */
#define MAX_AUN_CLONE_CNT 0xFF

/* Internal allocator state, hung off ba_lun_t.ba_lun_handle. */
typedef struct lun_info {
    uint64_t *lun_alloc_map;    /* bitmap: a set bit marks a free AU
                                   (ba_free sets, allocation clears) */
    uint32_t lun_bmap_size;     /* number of 64-bit words in the bitmap */
    uint32_t total_aus;
    uint64_t free_aun_cnt;

    /* indices to be used for elevator lookup of free map */
    uint32_t free_low_idx;
    uint32_t free_curr_idx;
    uint32_t free_high_idx;

    unsigned char *aun_clone_map;   /* per-AU clone reference counts */
} lun_info_t;

/*
 * Conditional syslog trace macros; trc_lvl must be in scope in the
 * including program.  Use only LOG_ERR, LOG_WARNING, LOG_NOTICE,
 * LOG_INFO & LOG_DEBUG.
 *
 * FIX: wrapped in do { } while (0) so a macro call is a single
 * statement and is safe inside an un-braced if/else (the bare
 * if-statement form has a dangling-else hazard).
 */
#define BA_TRACE_0(lvl, fmt) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt); } } while (0)

#define BA_TRACE_1(lvl, fmt, A) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt, A); } } while (0)

#define BA_TRACE_2(lvl, fmt, A, B) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt, A, B); } } while (0)

#define BA_TRACE_3(lvl, fmt, A, B, C) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt, A, B, C); } } while (0)

#define BA_TRACE_4(lvl, fmt, A, B, C, D) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt, A, B, C, D); } } while (0)

#define BA_TRACE_5(lvl, fmt, A, B, C, D, E) \
    do { if (trc_lvl > (lvl)) { syslog((lvl), fmt, A, B, C, D, E); } } while (0)

#endif /* ifndef _BLOCK_ALLOC_INTERNAL_H */
# Reviewed: fixed the LD_LIBRARY_PATH export below.  The original line
#   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img
# is parsed by make as the (empty) one-letter variable "$L" followed by
# the literal text "D_LIBRARY_PATH".  Using ":=" expands the inherited
# environment value exactly once and avoids a recursive self-reference.

UNAME=$(shell uname)

ROOTPATH = ../..
SUBDIRS = test.d

#if BLOCK_FILEMODE is enabled, then tell the block code as much
#pass down as a #define to the underlying code
ifdef BLOCK_FILEMODE_ENABLED
ifeq ($(BLOCK_FILEMODE_ENABLED),1)
  CUSTOMFLAGS += -D_FILEMODE_
endif
endif

EXPFLAGS = -bexpall

export LD_LIBRARY_PATH := $(LD_LIBRARY_PATH):${ROOTPATH}/img
LIBPATHS = -L${ROOTPATH}/img

PGMDIR  = ${ROOTPATH}/obj/programs
TESTDIR = ${ROOTPATH}/obj/tests

OBJS   = mclient.o
OBJS64 = mclient.64o
MODULE = mclient

PGMS = mserv
PROGRAMS = $(addprefix ${PGMDIR}/, ${PGMS})
#PROGRAMS64 = $(addprefix ${PGMDIR}/, ${PGMS})

BTESTS = mclient_test
BIN_TESTS = $(addprefix ${TESTDIR}/, ${BTESTS})

# AIX names its pthread library differently and has no librt
ifeq ($(UNAME),AIX)
LINKLIBS +=-lpthreads -lmclient
else
LINKLIBS +=-lpthread -lmclient -lrt
endif

mserv_OFILES = block_alloc.o
DEPS=$(addprefix $(PGMDIR)/, $(mserv_OFILES:.o=.dep))

EXTRAINCDIR =

include ${ROOTPATH}/config.mk
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Two mc_hndl_priv_t instances must not share any data, including + the fixed caller supplied data that is identical for all duplicated + handles. Sharing implies locking which the design prefers to avoid. + Different mc handle can be run in parallel in different client threads + w/o locks or serialization. + + One mc_hndl_priv_t corresponds to one client connection to the server. + Client code does not care/enforce how many connections can be made. + It is for server to enforce. + + A connection is bound to a AFU context during registration. Multiple + connections (mc handles) can be bound to the same context. + + This implementation does not prevent a child using a mc_hndl inherited + from its parent. Doing so is considered a programming error and the + results are undefined. 
+*/ +typedef struct +{ + int conn_fd; + uint8_t tag; + + /* save pid of registrant for limited special handling of child after + a fork */ + pid_t pid; + + /* save off caller supplied parms for mc_hdup etc */ + ctx_hndl_t ctx_hndl; + char master_dev_path[MC_PATHLEN]; + volatile struct sisl_host_map* p_mmio_map; +} mc_hndl_priv_t; + + +static __u64 gen_rand(mc_hndl_priv_t *p_mc_hndl_priv) { + __u64 rand; + __u64 p_low; + + asm volatile ( "mfspr %0, 268" : "=r"(rand) : ); // time base + + // add in low 32 bits of malloc'ed address to top 32 bits of TB + p_low = (((__u64)p_mc_hndl_priv) & 0xFFFFFFFF); + rand ^= (p_low << 32); + + return rand; +} + +/* + * Procedure: xfer_data + * + * Description: Perform a transfer operation for the given + * socket file descriptor. + * + * Parameters: + * fd: Socket File Descriptor + * op: Read or Write Operation + * buf: Buffer to either read from or write to + * exp_size: Size of data transfer + * + * Return: 0, if successful + * non-zero otherwise + */ +static int +xfer_data(int fd, int op, void *buf, ssize_t exp_size) +{ + int rc = 0; + ssize_t offset = 0; + ssize_t bytes_xfer = 0; + ssize_t target_size = exp_size; + struct iovec iov; + struct msghdr msg; + + while ( 1 ) + { + // Set up IO vector for IO operation. 
+ memset(&msg, 0, sizeof(struct msghdr)); + iov.iov_base = buf + offset; + iov.iov_len = target_size; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + // Check to see if we are sending or receiving data + if ( op == XFER_OP_READ ) + { + bytes_xfer = recvmsg(fd, &msg, MSG_WAITALL); + } + else + { + bytes_xfer = sendmsg(fd, &msg, MSG_NOSIGNAL); + } + + if ( -1 == bytes_xfer ) + { + if ( EAGAIN == errno || EWOULDBLOCK == errno || EINTR == errno) + { + // just retry the whole request + continue; + } + else + { + // connection closed by the other end + rc = 1; + break; + } + } + else if ( 0 == bytes_xfer ) + { + // connection closed by the other end + rc = 1; + break; + } + else if ( bytes_xfer == target_size ) + { + // We have transfered all the bytes we wanted, we + // can stop now. + rc = 0; + break; + } + else + { + // less than target size - partial condition + // set up to transfer for the remainder of the request + offset += bytes_xfer; + target_size = (target_size - bytes_xfer); + } + } + + return rc; +} + + +/***************************************************************************** + * Procedure: blk_connect + * + * Description: Connect to the server entity + * + * Parameters: + * + * Return: 0 or greater is the file descriptor for the connection + * -1 is error + *****************************************************************************/ +static int +blk_connect(char *mdev_path) +{ + struct sockaddr_un svr_addr; + int conn_fd; + int rc; + int retry; + + // Create a socket file descriptor + conn_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (conn_fd < 0) + { + fprintf(stderr, "blk_connect: socket failed: %d (%d)\n", + conn_fd, errno); + } + else + { + bzero(&svr_addr, sizeof(struct sockaddr_un)); + svr_addr.sun_family = AF_UNIX; + strcpy(svr_addr.sun_path, MC_SOCKET_DIR); + strcat(svr_addr.sun_path, mdev_path); + + // Connect to the server entity + for (retry = 0; retry < 3; retry++) { + // retry to handle ECONNREFUSED, there may be too many pending + // 
connections on the server (SOMAXCONN). + // + rc = connect(conn_fd, (struct sockaddr *)&svr_addr, sizeof(svr_addr)); + if (rc == 0) { + break; + } + usleep(100); + } + + if (rc) { + fprintf(stderr, "block_connect: Connect failed: %d (%d)\n", + rc, errno); + close(conn_fd); + conn_fd = -1; + } + } + + return conn_fd; +} + +/***************************************************************************** + * Procedure: blk_send_command + * + * Description: Send a command to the server entity + * + * Parameters: + * conn_fd: Socket File Descriptor + * cmd: Command to perform + * cmdblock: Command request block + * cmdresp: Command response block + * + * Return: 0, if successful + * non-zero otherwise + *****************************************************************************/ +static int +blk_send_command(int conn_fd, mc_req_t *p_mc_req, mc_resp_t *p_mc_resp) +{ + int rc = 0; + + // Send the command block + rc = xfer_data(conn_fd, XFER_OP_WRITE, (void *)p_mc_req, sizeof(*p_mc_req)); + if (rc) + { + fprintf(stderr, "blk_send_command: PID %d failed send: %d\n", + getpid(), rc); + } + else + { + // Wait for the response + rc = xfer_data(conn_fd, XFER_OP_READ, (void *)p_mc_resp, sizeof(*p_mc_resp)); + if (rc) + { + fprintf(stderr, "blk_send_command: PID %d failed read: %d\n", + getpid(), rc); + } + } + + return rc; +} + + + +int +mc_init() +{ + return 0; +} + +int +mc_term() +{ + return 0; +} + +// backend function used by mc_register after making a connection +// to server. 
+// +static int +mc_register_back(mc_hndl_t mc_hndl, ctx_hndl_t ctx_hndl, + __u8 mode) + +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + __u64 challenge; + int rc; + + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + challenge = gen_rand(p_mc_hndl_priv); + write_64(&p_mc_hndl_priv->p_mmio_map->mbox_w, challenge); + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCREG; + mc_req.reg.client_pid = getpid(); + mc_req.reg.client_fd = p_mc_hndl_priv->conn_fd; + mc_req.reg.mode = mode; + mc_req.reg.ctx_hndl = ctx_hndl; + mc_req.reg.challenge = challenge; + + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + // this is important: protect our context from someone trying to + // claim as their's by locking out the mbox. + write_64(&p_mc_hndl_priv->p_mmio_map->mbox_w, 0); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; + } + + return rc; +} + +int +mc_register(char *master_dev_path, ctx_hndl_t ctx_hndl, + volatile __u64 *p_mmio_map, mc_hndl_t *p_mc_hndl) +{ + int conn_fd; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv; + + conn_fd = blk_connect(master_dev_path); + if (conn_fd < 0) { + fprintf(stderr, "socket failed: %d (%d)\n", conn_fd, errno); + return -1; // errno set in blk_connect() + } + + p_mc_hndl_priv = (mc_hndl_priv_t *) 
malloc(sizeof(mc_hndl_priv_t)); + if (p_mc_hndl_priv == NULL) { + close(conn_fd); + fprintf(stderr, "cannot allocate client handle\n"); + errno = ENOMEM; + return -1; + } + + // init mc_hndl + memset(p_mc_hndl_priv, 0, sizeof(*p_mc_hndl_priv)); + p_mc_hndl_priv->conn_fd = conn_fd; + p_mc_hndl_priv->tag = 0; + p_mc_hndl_priv->pid = getpid(); + strncpy(p_mc_hndl_priv->master_dev_path, master_dev_path, + MC_PATHLEN - 1); + p_mc_hndl_priv->ctx_hndl = ctx_hndl; + p_mc_hndl_priv->p_mmio_map = (volatile struct sisl_host_map*) p_mmio_map; + + // initial MCREG of ctx_hndl (not a dup) + rc = mc_register_back(p_mc_hndl_priv, ctx_hndl, MCREG_INITIAL_REG); + + if (rc != 0) { + free(p_mc_hndl_priv); + close(conn_fd); + return rc; // errno is already set + } + + *p_mc_hndl = p_mc_hndl_priv; +#if 0 + printf("%s success - registered ctx_hndl %d on client handle %p\n", + __func__, + ctx_hndl, *p_mc_hndl); +#endif + return 0; +} + + +int +mc_hdup(mc_hndl_t mc_hndl, mc_hndl_t *p_mc_hndl) +{ + int conn_fd; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv; + mc_hndl_priv_t *p_mc_hndl_orig = (mc_hndl_priv_t *) mc_hndl; + + conn_fd = blk_connect(p_mc_hndl_orig->master_dev_path); + if (conn_fd < 0) { + fprintf(stderr, "socket failed: %d (%d)\n", conn_fd, errno); + return -1; // errno set in blk_connect() + } + + p_mc_hndl_priv = (mc_hndl_priv_t *) malloc(sizeof(mc_hndl_priv_t)); + if (p_mc_hndl_priv == NULL) { + close(conn_fd); + fprintf(stderr, "cannot allocate client handle\n"); + errno = ENOMEM; + return -1; + } + + // init dup'ed mc_hndl + memcpy(p_mc_hndl_priv, p_mc_hndl_orig, sizeof(*p_mc_hndl_priv)); + p_mc_hndl_priv->conn_fd = conn_fd; + p_mc_hndl_priv->tag = 0; + p_mc_hndl_priv->pid = getpid(); + + // duplicate MCREG + rc = mc_register_back(p_mc_hndl_priv, p_mc_hndl_priv->ctx_hndl, + MCREG_DUP_REG); + + if (rc != 0) { + free(p_mc_hndl_priv); + close(conn_fd); + return rc; // errno is already set + } + + *p_mc_hndl = p_mc_hndl_priv; +#if 0 + printf("%s success - registered ctx_hndl %d on 
client handle %p\n", + __func__, + p_mc_hndl_priv->ctx_hndl, *p_mc_hndl); +#endif + return 0; +} + + + +int mc_unregister(mc_hndl_t mc_hndl) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc = 0; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + /* Unregister with server if this is called by parent i.e. the + original registrant */ + if (p_mc_hndl_priv-> pid == getpid()) { + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCUNREG; + + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; +#if 0 + printf("%s success - unregisterd client handle %p\n", __func__, mc_hndl); +#endif + } + } + + close(p_mc_hndl_priv->conn_fd); + free(p_mc_hndl_priv); + + return rc; +} + +int mc_open(mc_hndl_t mc_hndl, __u64 flags, res_hndl_t *p_res_hndl) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCOPEN; + mc_req.open.flags = flags; + + rc = blk_send_command(p_mc_hndl_priv->conn_fd, 
&mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; + *p_res_hndl = mc_resp.open.res_hndl; +#if 0 + printf("%s success - opened res_hndl %d\n", __func__, *p_res_hndl); +#endif + } + + return rc; +} + + +int +mc_close(mc_hndl_t mc_hndl, res_hndl_t res_hndl) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCCLOSE; + mc_req.close.res_hndl = res_hndl; + + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; +#if 0 + printf("%s success - closed res_hndl %d\n", __func__, res_hndl); +#endif + } + + return rc; +} + +int mc_size(mc_hndl_t 
mc_hndl, res_hndl_t res_hndl, + __u64 new_size, __u64 *p_actual_new_size) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCSIZE; + mc_req.size.res_hndl = res_hndl; + mc_req.size.new_size = new_size; + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; + *p_actual_new_size = mc_resp.size.act_new_size; +#if 0 + printf("%s success - res_hndl %d new size is 0x%lx\n", + __func__, + res_hndl, *p_actual_new_size); +#endif + } + + return rc; +} + +int +mc_xlate_lba(mc_hndl_t mc_hndl, res_hndl_t res_hndl, + __u64 v_lba, __u64 *p_lba) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCXLATE_LBA; + mc_req.xlate_lba.res_hndl = res_hndl; + mc_req.xlate_lba.v_lba = v_lba; + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s 
failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; + *p_lba = mc_resp.xlate_lba.p_lba; +#if 0 + printf("%s success - res_hndl %d v_lba 0x%lx, p_lba 0x%lx\n", + __func__, + res_hndl, v_lba, *p_lba); +#endif + } + + return rc; +} + + +int mc_clone(mc_hndl_t mc_hndl, mc_hndl_t mc_hndl_src, + __u64 flags) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + __u64 challenge; + + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + mc_hndl_priv_t *p_mc_hndl_priv_src = (mc_hndl_priv_t *) mc_hndl_src; + + challenge = gen_rand(p_mc_hndl_priv); + write_64(&p_mc_hndl_priv_src->p_mmio_map->mbox_w, challenge); + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCCLONE; + mc_req.clone.ctx_hndl_src = p_mc_hndl_priv_src->ctx_hndl; + mc_req.clone.flags = flags; + mc_req.clone.challenge = challenge; + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + // lock mbox + write_64(&p_mc_hndl_priv_src->p_mmio_map->mbox_w, 0); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other 
cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; +#if 0 + printf("%s success - cloned context %d\n", __func__, + p_mc_hndl_priv_src->ctx_hndl); +#endif + } + + return rc; +} + +int mc_dup(mc_hndl_t mc_hndl, mc_hndl_t mc_hndl_cand) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + __u64 challenge; + + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + mc_hndl_priv_t *p_mc_hndl_priv_cand = (mc_hndl_priv_t *) mc_hndl_cand; + + challenge = gen_rand(p_mc_hndl_priv); + write_64(&p_mc_hndl_priv_cand->p_mmio_map->mbox_w, challenge); + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCDUP; + mc_req.dup.ctx_hndl_cand = p_mc_hndl_priv_cand->ctx_hndl; + mc_req.dup.challenge = challenge; + + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + // lock mbox + write_64(&p_mc_hndl_priv_cand->p_mmio_map->mbox_w, 0); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; +#if 0 + printf("%s success - duped context %d\n", __func__, + p_mc_hndl_priv_cand->ctx_hndl); +#endif + } + + return rc; +} + +int mc_stat(mc_hndl_t mc_hndl, res_hndl_t res_hndl, + mc_stat_t *p_mc_stat) +{ + 
mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCSTAT; + mc_req.stat.res_hndl = res_hndl; + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", __func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; + *p_mc_stat = mc_resp.stat; +#if 0 + printf("%s success - res_hndl %d size is 0x%lx\n", + __func__, + res_hndl, *p_size); +#endif + } + + return rc; +} + +int mc_notify(mc_hndl_t mc_hndl, mc_notify_t *p_mc_notify) +{ + mc_req_t mc_req; + mc_resp_t mc_resp; + int rc; + mc_hndl_priv_t *p_mc_hndl_priv = (mc_hndl_priv_t *) mc_hndl; + + memset(&mc_req, '\0', sizeof(mc_req)); + memset(&mc_resp, '\0', sizeof(mc_resp)); + mc_req.header.size = sizeof(mc_req); + mc_resp.header.size = sizeof(mc_resp); + mc_req.header.tag = p_mc_hndl_priv->tag++; + + mc_req.header.command = CMD_MCNOTIFY; + mc_req.notify = *p_mc_notify; + rc = blk_send_command(p_mc_hndl_priv->conn_fd, &mc_req, &mc_resp); + + if (rc != 0) { + fprintf(stderr, "%s failed - transport error rc: %d\n", __func__, rc); + errno = EIO; // transport error + rc = -1; + } + else if (mc_resp.header.tag != mc_req.header.tag) { + fprintf(stderr, "%s failed - tag mismatch: exp(%d), actual(%d)\n", 
__func__, + mc_req.header.tag, mc_resp.header.tag); + errno = EIO; // this is a response for some other cmd + rc = -1; + } + else if (mc_resp.header.status != 0) { + fprintf(stderr, "%s failed - server errno: %d\n", __func__, + mc_resp.header.status); + errno = mc_resp.header.status; + rc = -1; + } + else { + rc = 0; +#if 0 + printf("%s success - res_hndl %d size is 0x%lx\n", + __func__, + res_hndl, *p_size); +#endif + } + + return rc; +} + + diff --git a/src/master/mclient_test.c b/src/master/mclient_test.c new file mode 100644 index 00000000..3f964294 --- /dev/null +++ b/src/master/mclient_test.c @@ -0,0 +1,1141 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/mclient_test.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +#define B_DONE 0x01 +#define B_ERROR 0x02 +#define NUM_RRQ_ENTRY 128 +#define NUM_CMDS 63 /* max is NUM_RRQ_ENTRY + * must be <= 64 due to the select_mask + * must be (NUM_CMDS*n == nchunk), where n is 1, 2, ... 
etc + */ + +#define CL_SIZE 128 /* Processor cache line size */ +#define CL_SIZE_MASK 0x7F /* Cache line size mask */ +#define DATA_SEED 0xdead000000000000ull + +#define RETRY_CNT 5 + +// use mmap for rcb+data. munmap/remap in loops +// hopefully that will cause seg/page faults. +// +struct ctx { + /* Stuff requiring alignment go first. */ + + /* Command & data for AFU commands issued by test. */ + char rbuf[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + char wbuf[NUM_CMDS][0x1000]; // 4K write data buffer (page aligned) + __u64 rrq_entry[NUM_RRQ_ENTRY]; // 128B RRQ (page aligned) + + struct afu_cmd { + sisl_ioarcb_t rcb; // IOARCB (cache line aligned) + sisl_ioasa_t sa; // IOASA follows RCB + pthread_mutex_t mutex; + pthread_cond_t cv; + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t) + + sizeof(pthread_mutex_t) + + sizeof(pthread_cond_t)) & CL_SIZE_MASK)]; + } cmd[NUM_CMDS]; + + // AFU interface + int afu_fd; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; /* Linux cxl event buffer (interrupts) */ + volatile struct sisl_host_map *p_host_map; + ctx_hndl_t ctx_hndl; + + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + unsigned int flags; +#define CTX_STOP 0x1 + + // MC client interface + mc_hndl_t mc_hndl; + res_hndl_t res_hndl; +} __attribute__ ((aligned (0x1000))); + +enum test_mode { + TEST_DEFAULT = 0, + TEST_BALLOC, /* 1: stress block allocator */ + TEST_BAD_EA /* 2: send bad EA to AFU */ +}; + +enum test_mode mode = TEST_DEFAULT; /* test mode */ +char *afu_path; /* points to argv[] string */ +pid_t pid; +int nloops = -1; +int rand_fd = -1; +struct ctx myctx[2]; // myctx[0] -> child, myctx[1] -> parent +#define PARENT_INDX 1 +#define CHILD_INDX 0 + + +void manual_mode(); +void *ctx_intr_rx(void *arg); +int ctx_init(struct ctx *p_ctx, char *dev_path); +void send_write(struct ctx *p_ctx, __u64 start_lba, __u64 stride, int just_send); +void send_read(struct ctx 
*p_ctx, __u64 start_lba, __u64 stride, int just_send); +void rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba); +void rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba); +void send_cmd(struct ctx *p_ctx, __u64 sel_mask); +void wait_resp(struct ctx *p_ctx, __u64 sel_mask); +__u64 check_status(struct ctx *p_ctx); +void fill_buf(__u64* p_buf, unsigned int len); +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len); +int cmp_buf_cloned(__u64* p_buf, unsigned int len); +__u64 gen_rand(); +void balloc_test(char *afu_path, char *afu_path_m); +void bad_ea_test(char *afu_path, char *afu_path_m); +void halt_test(); + +void +usage(char *prog) +{ + printf("Usage: %s [-m mode] [-n nloops] afu_dev_path\n", prog); + printf("e. g.: %s -m 2 -n 10 /dev/cxl/afu0.0s\n", prog); +} + +void +get_parameters(int argc, char** argv) +{ + extern int optind; /* for getopt function */ + extern char *optarg; /* for getopt function */ + int ch; + + while ((ch = getopt(argc,argv,"m:n:h")) != EOF) { + switch (ch) { + case 'm': /* decimal */ + mode = atoi(optarg); + break; + case 'n': /* decimal: -1 is forever */ + nloops = atoi(optarg); + break; + case 'h': + usage(argv[0]); + exit(0); + default: + usage(argv[0]); + exit(-1); + } + } + + if ((argc - optind) != 1) { /* number of afus specified in cmd line */ + usage(argv[0]); + exit(-1); + } + + afu_path = argv[optind]; +} + + +void ctx_rrq_intr(struct ctx *p_ctx) { + struct afu_cmd *p_cmd; + + // process however many RRQ entries that are ready + while ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + p_cmd = (struct afu_cmd*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else { /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + 
p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + } +} + +void ctx_sync_intr(struct ctx *p_ctx) { + __u64 reg; + __u64 reg_unmasked; + + reg = read_64(&p_ctx->p_host_map->intr_status); + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + if (reg_unmasked == 0) { + fprintf(stderr, + "%d: spurious interrupt, intr_status 0x%016lx, ctx %d\n", + pid, reg, p_ctx->ctx_hndl); + return; + } + + if (reg_unmasked == SISL_ISTATUS_PERM_ERR_RCB_READ && + (p_ctx->flags & CTX_STOP)) { + // ok - this is a signal to stop this thread + } + else if (mode == TEST_BAD_EA) { + fprintf(stderr, + "%d: intr_status 0x%016lx, ctx %d\n", + pid, reg, p_ctx->ctx_hndl); + } + else { + fprintf(stderr, + "%d: unexpected interrupt, intr_status 0x%016lx, ctx %d, halting test...\n", + pid, reg, p_ctx->ctx_hndl); + halt_test(); + } + + write_64(&p_ctx->p_host_map->intr_clear, reg_unmasked); + + return; +} + +void *ctx_intr_rx(void *arg) { + struct cxl_event *p_event; + int len; + struct ctx *p_ctx = (struct ctx*) arg; + + while (!(p_ctx->flags & CTX_STOP)) { + // + // read afu fd & block on any interrupt + len = read(p_ctx->afu_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + + if (len < 0) { + fprintf(stderr, "afu has been reset, exiting...\n"); + exit(-1); + } + + p_event = (struct cxl_event *)&p_ctx->event_buf[0]; + while (len >= sizeof(p_event->header)) { + if (p_event->header.type == CXL_EVENT_AFU_INTERRUPT) { + switch(p_event->irq.irq) { + case SISL_MSI_RRQ_UPDATED: + ctx_rrq_intr(p_ctx); + break; + + case SISL_MSI_SYNC_ERROR: + ctx_sync_intr(p_ctx); + break; + + default: + fprintf(stderr, "%d: unexpected irq %d, ctx %d, halting test...\n", + pid, p_event->irq.irq, p_ctx->ctx_hndl); + halt_test(); + break; + } + + } + else if (p_event->header.type == CXL_EVENT_DATA_STORAGE && + (p_ctx->flags & CTX_STOP)) { + // this is a signal to terminate this thread + } + else if (p_event->header.type == CXL_EVENT_DATA_STORAGE && + (mode == TEST_BAD_EA)) { + // this is expected, but no need to print since we 
print + // the sync_intr status that accompanies the DSI + } + else { + fprintf(stderr, "%d: unexpected event %d, ctx %d, halting test...\n", + pid, p_event->header.type, p_ctx->ctx_hndl); + halt_test(); + } + + len -= p_event->header.size; + p_event = (struct cxl_event *) + (((char*)p_event) + p_event->header.size); + } + } + + return NULL; +} + + +int ctx_init(struct ctx *p_ctx, char *dev_path) +{ + void *map; + __u32 proc_elem; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int i; + + // general init, no resources allocated + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + // must clear RRQ memory for reinit in child + memset(&p_ctx->rrq_entry[0], 0, sizeof(p_ctx->rrq_entry)); + p_ctx->flags = 0; + + for (i = 0; i < NUM_CMDS; i++) { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } + + // open non-master device + p_ctx->afu_fd = open(dev_path, O_RDWR); + if (p_ctx->afu_fd < 0) { + fprintf(stderr, "open failed: device %s, errno %d\n", dev_path, errno); + return -1; + } + + // enable the AFU. This must be done before mmap. + p_ctx->work.num_interrupts = 4; + p_ctx->work.flags = CXL_START_WORK_NUM_IRQS; + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_START_WORK, &p_ctx->work) != 0) { + close(p_ctx->afu_fd); + fprintf(stderr, "start command failed on AFU, errno %d\n", errno); + return -1; + } + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_GET_PROCESS_ELEMENT, + &proc_elem) != 0) { + fprintf(stderr, "get_process_element failed, errno %d\n", errno); + return -1; + } + + // mmap host transport MMIO space of this context + // the map must be accessible by forked child for clone access + // checks. 
+ // so do not madvise(map, 0x10000, MADV_DONTFORK); + // + map = mmap(NULL, 0x10000, // 64KB + PROT_READ|PROT_WRITE, MAP_SHARED, p_ctx->afu_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "mmap failed, errno %d\n", errno); + close(p_ctx->afu_fd); + return -1; + } + + // copy frequently used fields into p_ctx + p_ctx->ctx_hndl = proc_elem; // ctx_hndl is 16 bits in CAIA + p_ctx->p_host_map = (volatile struct sisl_host_map *) map; + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle = 1; + + printf("p_host_map %p, ctx_hndl %d, rrq_start %p\n", + p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) { + p_ctx->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } + + // set up RRQ in AFU + write_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + + // set LISN#, it is always sent to the context that wrote IOARRIN + write_64(&p_ctx->p_host_map->ctx_ctrl, SISL_MSI_SYNC_ERROR); + write_64(&p_ctx->p_host_map->intr_mask, SISL_ISTATUS_MASK); + + return 0; +} + +void ctx_close(struct ctx *p_ctx, int stop_rrq_thread) +{ + __u64 room = 0; + + if (stop_rrq_thread) { + p_ctx->flags |= CTX_STOP; + asm volatile ( "lwsync" : : ); // make flags visible & + // let any IOARRIN writes complete + do { + room = read_64(&p_ctx->p_host_map->cmd_room); + } while (room == 0); + + // this MMIO will send 2 interrupts (a DSI and a AFU sync error) + // and wake up the rrq thread if it is blocked on a read. + // After it unblocks, the thread will terminate. 
+ write_64(&p_ctx->p_host_map->ioarrin, 0xdeadbeef); + sleep(1); + } + + munmap((void*)p_ctx->p_host_map, 0x10000); + close(p_ctx->afu_fd); // afu_fd is closed and the ctx freed only + // if there is no read pending on the afu_fd. + // so, the rrq thread must not be blocked on + // a read at this point, else close will fail. +} + +// writes using virtual LBAs +void send_write(struct ctx *p_ctx, __u64 start_lba, __u64 stride, int just_send) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + __u64 sel_mask; + + for (i = 0; i < NUM_CMDS; i++) { + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64)); + + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_RES_HNDL | + SISL_REQ_FLAGS_HOST_WRITE); + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[i]); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + vlba = start_lba + i*stride; + + write_64(p_u64, vlba); // virtual LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use_b[1] = 0; // reset retry cnt + } + + sel_mask = ((NUM_CMDS == 64) ? 
+ (-1ull) : (1ull << NUM_CMDS) - 1); // select each command + + if (just_send) { + send_cmd(p_ctx, sel_mask); + return; + } + + do { + send_cmd(p_ctx, sel_mask); + wait_resp(p_ctx, sel_mask); + } while ((sel_mask = check_status(p_ctx)) != 0); + + for (i = 0; i < NUM_CMDS; i++) { + if (p_ctx->cmd[i].sa.host_use_b[0] & B_ERROR) { + fprintf(stderr, "%d: write failed, ctx %d, halting test...\n", pid, + p_ctx->ctx_hndl); + halt_test(); + } + } +} + +// reads using virtual LBA +void send_read(struct ctx *p_ctx, __u64 start_lba, __u64 stride, int just_send) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + __u64 sel_mask; + + for (i = 0; i < NUM_CMDS; i++) { + memset(&p_ctx->rbuf[i][0], 0xB, sizeof(p_ctx->rbuf[i])); + + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_RES_HNDL | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbuf[i]); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbuf[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + vlba = start_lba + i*stride; + + write_64(p_u64, vlba); // virtual LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use_b[1] = 0; // reset retry cnt + } + + sel_mask = ((NUM_CMDS == 64) ? 
+ (-1ull) : (1ull << NUM_CMDS) - 1); // select each command + + if (just_send) { + send_cmd(p_ctx, sel_mask); + return; + } + + do { + send_cmd(p_ctx, sel_mask); + wait_resp(p_ctx, sel_mask); + } while ((sel_mask = check_status(p_ctx)) != 0); + + for (i = 0; i < NUM_CMDS; i++) { + if (p_ctx->cmd[i].sa.host_use_b[0] & B_ERROR) { + fprintf(stderr, "%d:read failed, ctx %d, halting test...\n", pid, + p_ctx->ctx_hndl); + halt_test(); + } + } +} + +void rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba) { + int i; + char buf[32]; + int read_fd, write_fd; + for (i = 0; i < NUM_CMDS; i++) { + if (cmp_buf((__u64*)&p_ctx->rbuf[i][0], (__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) { + sprintf(buf, "read.%d", pid); + read_fd = open(buf, O_RDWR|O_CREAT); + sprintf(buf, "write.%d", pid); + write_fd = open(buf, O_RDWR|O_CREAT); + + write(read_fd, &p_ctx->rbuf[i][0], sizeof(p_ctx->rbuf[i])); + write(write_fd, &p_ctx->wbuf[i][0], sizeof(p_ctx->wbuf[i])); + + close(read_fd); + close(write_fd); + + fprintf(stderr, + "%d: miscompare at start_lba 0x%lx, ctx %d, halting test...\n", + pid, start_lba, p_ctx->ctx_hndl); + halt_test(); + } + } +} + +void rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba) { + int i; + + for (i = 0; i < NUM_CMDS; i++) { + if (cmp_buf_cloned((__u64*)&p_ctx->rbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) { + fprintf(stderr, + "%d: clone miscompare at start_lba 0x%lx, ctx %d, halting test...\n", + pid, start_lba, p_ctx->ctx_hndl); + halt_test(); + } + } +} + + +void send_cmd(struct ctx *p_ctx, __u64 sel_mask) { + int i; + __u64 room = 0; + + for (i = 0; i < NUM_CMDS; i++) { + if (sel_mask & (1ull << i)) { + p_ctx->cmd[i].sa.host_use_b[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; // clear + } + } + + /* make memory updates visible to AFU before MMIO */ + asm volatile ( "lwsync" : : ); + + for (i = 0; i < NUM_CMDS; i++) { + if (sel_mask & (1ull << i)) { + if (room == 0) { + asm volatile ( "eieio" : : ); // let IOARRIN writes 
complete + do { + room = read_64(&p_ctx->p_host_map->cmd_room); + } while (room == 0); + } + + // write IOARRIN + write_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[i].rcb); + room--; + } + } + +} + +void wait_resp(struct ctx *p_ctx, __u64 sel_mask) { + int i; + + for (i = 0; i < NUM_CMDS; i++) { + if (sel_mask & (1ull << i)) { + pthread_mutex_lock(&p_ctx->cmd[i].mutex); + while (p_ctx->cmd[i].sa.host_use_b[0] != B_DONE) { + pthread_cond_wait(&p_ctx->cmd[i].cv, &p_ctx->cmd[i].mutex); + } + pthread_mutex_unlock(&p_ctx->cmd[i].mutex); + + if (p_ctx->cmd[i].sa.ioasc) { + fprintf(stderr, + "%d:CMD 0x%x failed, IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid, + p_ctx->cmd[i].rcb.cdb[0], + p_ctx->cmd[i].sa.rc.flags, + p_ctx->cmd[i].sa.rc.afu_rc, + p_ctx->cmd[i].sa.rc.scsi_rc, + p_ctx->cmd[i].sa.rc.fc_rc); + } + } + } +} + + +// returns a select mask of cmd indicies to retry +// 0 means all indices completed +__u64 check_status(struct ctx *p_ctx) +{ + int i; + __u64 ret = 0; + + // check_status does not take a sel_mask and examines each + // command every time. If the cmd was successful, ioasc + // will remain 0. + for (i = 0; i < NUM_CMDS; i++) { + + if (p_ctx->cmd[i].sa.ioasc == 0) { + continue; + } + + p_ctx->cmd[i].sa.host_use_b[0] |= B_ERROR; + + + if (!(p_ctx->cmd[i].sa.host_use_b[1]++ < RETRY_CNT)) { + continue; + } + + switch (p_ctx->cmd[i].sa.rc.afu_rc) + { + /* 1. let afu choose another FC port or retry tmp buf full */ + case SISL_AFU_RC_NO_CHANNELS: + case SISL_AFU_RC_OUT_OF_DATA_BUFS: + usleep(100); // 100 microsec + ret |= (1ull << i); + break; + + + /* 2. check for master exit, if so user must restart */ + case SISL_AFU_RC_RHT_DMA_ERR: + case SISL_AFU_RC_LXT_DMA_ERR: + // afu_extra=1 when mserv exited and restarted using same ctx + // new mserv (on same ctx) gets CXL_EVENT_DATA_STORAGE events + // the above is when mserv did not clear capabiliy on start + // allowing AFU to read RHT set up by exited mserv. 
+ // + // if did not restart at all or rstarted on another ctx, afu_extra=11 + // + if (p_ctx->cmd[i].sa.afu_extra == SISL_AFU_DMA_ERR_INVALID_EA) { + fprintf(stderr, "%d: master may have exited, halting test...\n", pid); + halt_test(); + } + break; + case SISL_AFU_RC_NOT_XLATE_HOST: + fprintf(stderr, "%d: master may have exited, halting test...\n", pid); + halt_test(); + + + /* 3. check for afu reset and/or master restart */ + case SISL_AFU_RC_CAP_VIOLATION: + fprintf(stderr, "%d: afu reset or master may have restated, halting test...\n", pid); + halt_test(); + + + /* 4. any other afu_rc is a user error, just fail the cmd */ + + /* 5. if no afu_rc, then either scsi_rc and/or fc_rc is set + * retry all scsi_rc and fc_rc after a small delay + */ + case 0: + usleep(100); // 100 microsec + ret |= (1ull << i); + break; + } + } + + + return ret; +} + +void halt_test() { + while (1); +} + +/******************************************************************** + * NAME : main + * + * + * RETURNS : + * 0 = success. + * !0 = failure. 
+ * + *********************************************************************/ +int +main(int argc, char *argv[]) +{ + int rc; + __u64 act_new_size; + __u64 start_lba; + char master_dev_path[MC_PATHLEN]; + pthread_t thread; + pid_t new_pid; + mc_stat_t stat; + + int parent = 1; // start as parent and fork + __u64 npass = 0; + __u64 stride = 0; // stop gcc warnings + __u64 nlba = 0; // stop gcc warnings + unsigned long nchunk = NUM_CMDS*2; + struct ctx *p_ctx = &myctx[PARENT_INDX]; + mc_hndl_t mc_hndl; + + get_parameters(argc, argv); + strcpy(master_dev_path, afu_path); + master_dev_path[strlen(master_dev_path) - 1] = 0; // drop "s" suffix + strcat(master_dev_path, "m"); // add master suffix + + rand_fd = open("/dev/urandom", O_RDONLY); + if (rand_fd < 0) { + fprintf(stderr, "cannot open random device, errno %d\n", errno); + exit(-1); + } + + if (mc_init() != 0) { + fprintf(stderr, "mc_init failed\n"); + exit(-1); + } + + if (mode == TEST_BALLOC) { + balloc_test(afu_path, master_dev_path); + return 0; + } + else if (mode == TEST_BAD_EA) { + bad_ea_test(afu_path, master_dev_path); + return 0; + } + + // TEST_DEFAULT mode + memset(p_ctx, 0, sizeof(*p_ctx)); + +reinit: + printf("instantiating ctx on %s...\n", afu_path); + rc = ctx_init(p_ctx, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + exit(-1); + } + pthread_create(&thread, NULL, ctx_intr_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map, + &mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering ctx_hndl %d, rc %d\n", + p_ctx->ctx_hndl, rc); + exit(-1); + } + + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error in mc_hdup, rc %d\n", rc); + exit(-1); + } + + mc_unregister(mc_hndl); // we will use the p_ctx->mc_hndl + + if (parent) { // mc_open + mc_size + rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", 
rc); + exit(-1); + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &stat); + if (rc != 0) { + fprintf(stderr, "error in stat of res_hndl rc %d\n", rc); + exit(-1); + } + + stride = (1 << stat.nmask); // set to chunk size + // or use 8 to test all LBAs + + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + nchunk, &act_new_size); + if (rc != 0 || nchunk != act_new_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + + nlba = nchunk*(1 << stat.nmask); + + pid = getpid(); // pid is used to create unique data patterns + // now it has parent's pid + + for (start_lba = 0; start_lba < nlba; start_lba += (NUM_CMDS*stride)) { + send_write(p_ctx, start_lba, stride, 0); + } + + // leave data written by parent to be read and compared by + // child after a clone. + + // In a real application, it is very important that parent & child + // coordinate access to cloned data since they are on the same LBAs. + // This is until AFU implements copy-on-write. + // + new_pid = fork(); + if (new_pid) { + sleep(30); // let child clone before parent closes the + // resources + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx, 1); + + printf("ppid = %d, cpid = %d\n", pid, new_pid); fflush(stdout); + new_pid = wait(&rc); + printf("%d terminated, signalled %s, signo %d\n", + new_pid, WIFSIGNALED(rc) ? "yes" : "no", WTERMSIG(rc)); + fflush(stdout); + } + else { + parent = 0; + // copy data bufs and res_hndl to child. 
+ // this is an artifact of this test and not something for + // real user app + // + myctx[CHILD_INDX] = myctx[PARENT_INDX]; + p_ctx = &myctx[CHILD_INDX]; // point to child's + + goto reinit; // but do not clear p_ctx since we need res_hndl+data + } + } + else { // mc_clone which is equivalent to mc_open + mc_size + rc = mc_clone(p_ctx->mc_hndl, myctx[PARENT_INDX].mc_hndl, MC_RDWR); + if (rc != 0) { + fprintf(stderr, "error cloning rc %d\n", rc); + exit(-1); + } + + // Once cloned, close the inherited interfaces from parent: + // 1) the mc handle, 2) the parent's context. + // + // child must unregister any inherited mc handle. + // child must not call any API using the inherited handle other + // than mc_clone & mc_unregister + // + rc = mc_unregister(myctx[PARENT_INDX].mc_hndl); + + // now close parent's AFU context + ctx_close(&myctx[PARENT_INDX], 0); + + // first read what parent wrote + for (start_lba = 0; start_lba < nlba; start_lba += (NUM_CMDS*stride)) { + send_read(p_ctx, start_lba, stride, 0); + rw_cmp_buf_cloned(p_ctx, start_lba); + } + printf("%d:clone compare success\n", pid); fflush(stdout); + + pid = getpid(); // use child pid from now on for new data patterns + // to write, must be after cloned cmp + + // child loops in write, read & compare. + // Note that parent had closed its handle & context, but that does + // not affect child's cloned copy. 
+ // + while (nloops--) { + for (start_lba = 0; start_lba < nlba; start_lba += (NUM_CMDS*stride)) { + send_write(p_ctx, start_lba, stride, 0); + send_read(p_ctx, start_lba, stride, 0); + rw_cmp_buf(p_ctx, start_lba); + } + + if ((npass++ & 0x3F) == 0) { + printf("%d:completed pass %ld\n", pid, npass>>6); fflush(stdout); + } + } + + // queue a bunch of cmds just before we exit but do not wait for responses + // must send both reads and writes + // + if (gen_rand() & 0x1) { + send_write(p_ctx, 0, stride, 1); + } + else { + send_read(p_ctx, 0, stride, 1); + } + + if (gen_rand() & 0x2) { + exit(0); // get out fast w/o closing ctx etc + } + else { + // do not mc_unregister by intent. + ctx_close(p_ctx, 1); + } + } + + pthread_join(thread, NULL); + mc_term(); + + return 0; +} + +// len in __u64 +void fill_buf(__u64* p_buf, unsigned int len) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) { + p_buf[i] = pid; + p_buf[i + 1] = data++; + } +} + +// len in __u64 +int cmp_buf_cloned(__u64* p_buf, unsigned int len) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) { + if (!(p_buf[i] == pid && p_buf[i + 1] == data++)) { + return -1; + } + } + return 0; +} + +// len in __u64 +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len) +{ + return memcmp(p_buf1, p_buf2, len*sizeof(__u64)); +} + + +__u64 gen_rand() { + __u64 rand; + + if (read(rand_fd, &rand, sizeof(rand)) != sizeof(rand)) { + fprintf(stderr, "cannot read random device, errno %d\n", errno); + exit(-1); + } + return rand; +} + + +void balloc_test(char *afu_path, char *afu_path_m) +{ + +#define NUM_RH 16 + + int rc; + __u64 act_new_size; + pthread_t thread_a; + pthread_t thread_b; + struct ctx *p_ctx_a = &myctx[0]; + struct ctx *p_ctx_b = &myctx[1]; + res_hndl_t rh[NUM_RH]; + int i; + unsigned int req_nchunk; + + pid = getpid(); + + while (nloops--) { + // init ctx_a + rc = ctx_init(p_ctx_a, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating 
ctx, rc %d\n", rc); + exit(-1); + } + pthread_create(&thread_a, NULL, ctx_intr_rx, p_ctx_a); + rc = mc_register(afu_path_m, p_ctx_a->ctx_hndl, + (volatile __u64 *) p_ctx_a->p_host_map, + &p_ctx_a->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering ctx_hndl %d, rc %d\n", + p_ctx_a->ctx_hndl, rc); + exit(-1); + } + + // init ctx_b + rc = ctx_init(p_ctx_b, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + exit(-1); + } + pthread_create(&thread_b, NULL, ctx_intr_rx, p_ctx_b); + rc = mc_register(afu_path_m, p_ctx_b->ctx_hndl, + (volatile __u64 *) p_ctx_b->p_host_map, + &p_ctx_b->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering ctx_hndl %d, rc %d\n", + p_ctx_b->ctx_hndl, rc); + exit(-1); + } + + // open & size ctx_a + for (i = 0; i < NUM_RH; i++) { + rc = mc_open(p_ctx_a->mc_hndl, MC_RDWR, &rh[i]); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + //req_nchunk = 0x71c0/NUM_RH; // fixed & based on lun size + req_nchunk = (gen_rand() & 0x7FF); + + rc = mc_size(p_ctx_a->mc_hndl, rh[i], req_nchunk, &act_new_size); + if (rc != 0) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + else if (req_nchunk != act_new_size) { + printf("sized rh %d to 0x%lx chunks (req 0x%x)\n", i, act_new_size, + req_nchunk); + } + } + + // clone ctx_a into ctx_b - ok to fail + rc = mc_clone(p_ctx_b->mc_hndl, p_ctx_a->mc_hndl, MC_RDWR); + + // close ctx_a + for (i = 0; i < NUM_RH; i++) { + mc_close(p_ctx_a->mc_hndl, rh[i]); + } + mc_unregister(p_ctx_a->mc_hndl); + ctx_close(p_ctx_a, 1); + + // close ctx_b if clone was successful + if (rc == 0) { + for (i = 0; i < NUM_RH; i++) { + mc_close(p_ctx_b->mc_hndl, rh[i]); + } + } + mc_unregister(p_ctx_b->mc_hndl); + ctx_close(p_ctx_b, 1); + } +} + +/* + * bad RCB EA, bad RRQ EA, bad Data EA + * queue many bad cmds and exit when nloops exhaust + */ +void bad_ea_test(char *afu_path, char *afu_path_m) +{ + int i; + int rc; + 
__u64 stride; + __u64 act_new_size; + __u64 sel_mask; + __u64 room; + pthread_t thread; + mc_stat_t stat; + unsigned long nchunk = NUM_CMDS*2; + struct ctx *p_ctx = &myctx[0]; + + + pid = getpid(); + + // init ctx + rc = ctx_init(p_ctx, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + exit(-1); + } + pthread_create(&thread, NULL, ctx_intr_rx, p_ctx); + + rc = mc_register(afu_path_m, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map, + &p_ctx->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering ctx_hndl %d, rc %d\n", + p_ctx->ctx_hndl, rc); + exit(-1); + } + + rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + nchunk, &act_new_size); + if (rc != 0 || nchunk != act_new_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &stat); + if (rc != 0) { + fprintf(stderr, "error in stat of res_hndl rc %d\n", rc); + exit(-1); + } + + stride = (1 << stat.nmask); // set to chunk size + + send_write(p_ctx, 0, stride, 0); + send_read(p_ctx, 0, stride, 0); + rw_cmp_buf(p_ctx, 0); + + printf("%s: so far so good...\n", __func__); + + + // set up bad RRQ in AFU, host pointers do not matter since nothing + // will be written to RRQ + write_64(&p_ctx->p_host_map->rrq_start, (__u64) (-1ull & (~7))); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) 0); + + for (i = 0; i < NUM_CMDS; i++) { + // odd cmd indicies will report bad data_ea + // even ones will report bad RRQ + if (i & 0x1) { + p_ctx->cmd[i].rcb.data_ea = (__u64) 0x5555555500000000ull; + } + } + sel_mask = ((NUM_CMDS == 64) ? 
+ (-1ull) : (1ull << NUM_CMDS) - 1); // select each command + + while (nloops--) { + sleep(1); + + asm volatile ( "eieio" : : ); // let any IOARRIN writes complete + do { + room = read_64(&p_ctx->p_host_map->cmd_room); + } while (room == 0); + + // bad RCB EA + write_64(&p_ctx->p_host_map->ioarrin, 0xdeadbeef); + + send_cmd(p_ctx, sel_mask); + } + + // exit w/o ctx_close when send_cmd called last has pending cmds +} + + diff --git a/src/master/mserv.c b/src/master/mserv.c new file mode 100644 index 00000000..f1decad3 --- /dev/null +++ b/src/master/mserv.c @@ -0,0 +1,2983 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/mserv.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef _AIX +#include +#else +#include +#endif /* !_AIX */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifndef _AIX +#include +REVISION_TAGS(mserv); +#endif + +// LOG_ERR(3), LOG_WARNING(4), LOG_NOTICE(5), LOG_INFO(6) & LOG_DEBUG(7) +// +// TRACE_x versions: fmt must be of the form: "%s: xxxx" - the +// leading %s is the afu name +// +// trace_x versions: used when the log is not specific to +// a particular AFU or the AFU name is not available +// +#define TRACE_0(lvl, p_afu, fmt) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name); } + +#define TRACE_1(lvl, p_afu, fmt, A) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name, A); } + +#define TRACE_2(lvl, p_afu, fmt, A, B) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name, A, B); } + +#define TRACE_3(lvl, p_afu, fmt, A, B, C) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name, A, B, C); } + +#define TRACE_4(lvl, p_afu, fmt, A, B, C, D)\ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name, A, B, C, D); } + +#define TRACE_5(lvl, p_afu, fmt, A, B, C, D, E)\ + if (trc_lvl > lvl) { syslog(lvl, fmt, (p_afu)->name, A, B, C, D, E); } + +// these do not have a afu name +#define trace_0(lvl, fmt) \ + if (trc_lvl > lvl) { syslog(lvl, fmt); } + +#define trace_1(lvl, fmt, A) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, A); } + +#define trace_2(lvl, fmt, A, B) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, A, B); } + +#define trace_3(lvl, fmt, A, B, C) \ + if (trc_lvl > lvl) { syslog(lvl, fmt, A, B, C); } + +unsigned int trc_lvl = LOG_INFO; // by default, log NOTICE and lower levels. + // lower level is higher severity + +/*************************************************************************** + * Master Context Server + * + * The functions of the master context are: + * + * 1. 
virtualize physical LUN(s) for user contexts. Users send IO to a + * resource handle that identifies a virtual LUN. Resource handles + * are mapped to physical LBAs by the master. + * 2. control what a user context is allowed or not allowed to do. + * 3. field AFU wide errors that cannot be attributed to a user context + * + * There are several key functions that are not provided by the master: + * + * 1. load balancing across AFUs + * 2. handle user errors including SCSI errors + * 3. persistence of resources & LBA allocation across reboots/restarts + * 4. recovery from an AFU reset. All users and the master must restart + * if the AFU had to be reset. + * + * + * The master does not have the most up to date information on who + * owns a particular AFU context at any time (the kernel driver does). + * It must also work with misbehaving applications that close the AFU + * context while leaving the registration with the master open. The + * AFU provides HW support to deal with such scenarios by automatically + * clearing certain states during context establishment and/or termination. + * + * A single instance of the master serves an AFU set. The AFU set is any + * number of AFUs that are connected to the same storage and share the + * same LUN(s) for use by the master to virtualize on. Each AFU in the + * set must have the same volumes (SCSI LUNs) mapped in. However, there is + * no requirement that they be mapped using the same LUN_ID even though + * that is the recommended configuration. + * + * The master will run when some AFUs in the set are not functional as + * long as each of the remaining functional AFUs see the same LUNs that + * the master is configured to use. 
+ * + ***************************************************************************/ + +typedef struct { + struct capikv_ini *p_ini; + struct afu_alloc *p_afu_a; + timer_t timer_hb; + timer_t timer_fc; +} global_t; + +global_t gb; /* mserv globals */ + + +conn_info_t* +alloc_connection(afu_t *p_afu, int fd) +{ + int i; + + TRACE_0(LOG_INFO, p_afu, + "%s: allocating a new connection\n"); + + for (i = 0; i < MAX_CONNS; i++) { + if (p_afu->conn_tbl[i].fd == -1) { /* free entry */ + p_afu->conn_tbl[i].fd = fd; + p_afu->conn_tbl[i].rx = rx_mcreg; + break; + } + } + + return (i == MAX_CONNS) ? NULL : &p_afu->conn_tbl[i]; +} + +void +free_connection(afu_t *p_afu, conn_info_t *p_conn_info) +{ + TRACE_1(LOG_INFO, p_afu, "%s: freeing connection %p\n", + p_conn_info); + + p_conn_info->fd = -1; +} + + + +int afu_init(afu_t *p_afu, struct capikv_ini_elm *p_elm) +{ + int i, j; + int rc; + __u64 reg; + __u32 proc_elem; + void *map; + char version[16]; + mode_t oldmask; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + struct sigevent sigev; +#ifndef _AIX + struct epoll_event epoll_event; +#endif /* !_AIX */ + enum undo_level level = UNDO_NONE; + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + memset(p_afu, 0, sizeof(*p_afu)); + + strncpy(p_afu->master_dev_path, p_elm->afu_dev_pathm, MC_PATHLEN - 1); + p_afu->name = strrchr(p_afu->master_dev_path, '/') + 1; + pthread_mutex_init(&p_afu->mutex, &mattr); + pthread_mutex_init(&p_afu->err_mutex, &mattr); + pthread_cond_init(&p_afu->err_cv, &cattr); + + for (i = 0; i < MAX_CONNS; i++) { + p_afu->conn_tbl[i].fd = -1; + } + for (i = 0; i < MAX_CONTEXT; i++) { + p_afu->rht_info[i].rht_start = &p_afu->rht[i][0]; + } + + // keep AFU accessed data in RAM as much as possible + mlock(p_afu->rht, sizeof(p_afu->rht)); + level = UNDO_MLOCK; + + sigev.sigev_notify = SIGEV_SIGNAL; + sigev.sigev_signo = SIGRTMIN; /* must use a queued signal */ + + for (i = 0; i < NUM_CMDS; i++) { + 
pthread_mutex_init(&p_afu->cmd[i].mutex, &mattr); + pthread_cond_init(&p_afu->cmd[i].cv, &cattr); + + sigev.sigev_value.sival_ptr = &p_afu->cmd[i]; + rc = timer_create(CLOCK_REALTIME, &sigev, &p_afu->cmd[i].timer); + if (rc != 0) { + TRACE_1(LOG_ERR, p_afu, "%s: timer_create failed, errno %d\n", errno); + for (j = 0; j < i; j++) { + timer_delete(p_afu->cmd[j].timer); + } + undo_afu_init(p_afu, level); + return -1; + } + } + level = UNDO_TIMER; + + pthread_condattr_destroy(&cattr); + pthread_mutexattr_destroy(&mattr); + + // open master device + p_afu->afu_fd = open(p_afu->master_dev_path, O_RDWR); + if (p_afu->afu_fd < 0) { + TRACE_1(LOG_ERR, p_afu, "%s: open failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_AFU_OPEN; + + // enable the AFU. This must be done before mmap. + p_afu->work.num_interrupts = 4; + p_afu->work.flags = CXL_START_WORK_NUM_IRQS; + if (ioctl(p_afu->afu_fd, CXL_IOCTL_START_WORK, &p_afu->work) != 0) { + TRACE_1(LOG_ERR, p_afu, "%s: START_WORK failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + if (ioctl(p_afu->afu_fd, CXL_IOCTL_GET_PROCESS_ELEMENT, + &proc_elem) != 0) { + TRACE_1(LOG_ERR, p_afu, "%s: GET_PROCESS_ELEMENT failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_AFU_START; + + // mmap entire MMIO space of AFU + map = mmap(NULL, sizeof(struct surelock_afu_map), + PROT_READ|PROT_WRITE, MAP_SHARED, p_afu->afu_fd, 0); + if (map == MAP_FAILED) { + TRACE_1(LOG_ERR, p_afu, "%s: mmap failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + p_afu->p_afu_map = (volatile struct surelock_afu_map *) map; + + for (i = 0; i < MAX_CONTEXT; i++) { + p_afu->ctx_info[i].p_ctrl_map = &p_afu->p_afu_map->ctrls[i].ctrl; + + // disrupt any clients that could be running + // e. g. 
clients that survived a master restart + write_64(&p_afu->ctx_info[i].p_ctrl_map->rht_start, 0); + write_64(&p_afu->ctx_info[i].p_ctrl_map->rht_cnt_id, 0); + write_64(&p_afu->ctx_info[i].p_ctrl_map->ctx_cap, 0); + } + level = UNDO_AFU_MMAP; + + // copy frequently used fields into p_afu + p_afu->ctx_hndl = (__u16)proc_elem; // ctx_hndl is 16 bits in CAIA + p_afu->p_host_map = &p_afu->p_afu_map->hosts[p_afu->ctx_hndl].host; + p_afu->p_ctrl_map = &p_afu->p_afu_map->ctrls[p_afu->ctx_hndl].ctrl; + + // initialize RRQ pointers + p_afu->p_hrrq_start = &p_afu->rrq_entry[0]; + p_afu->p_hrrq_end = &p_afu->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_afu->p_hrrq_curr = p_afu->p_hrrq_start; + p_afu->toggle = 1; + + memset(&version[0], 0, sizeof(version)); + // don't byte reverse on reading afu_version, else the string form + // will be backwards + reg = p_afu->p_afu_map->global.regs.afu_version; + memcpy(&version[0], ®, 8); + TRACE_2(LOG_NOTICE, p_afu, "%s: afu version %s, ctx_hndl %d\n", + version, p_afu->ctx_hndl); + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) { + p_afu->cmd[i].rcb.ctx_id = p_afu->ctx_hndl; + p_afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; + p_afu->cmd[i].rcb.rrq = 0x0; + } + + // set up RRQ in AFU for master issued cmds + write_64(&p_afu->p_host_map->rrq_start, (__u64) p_afu->p_hrrq_start); + write_64(&p_afu->p_host_map->rrq_end, (__u64) p_afu->p_hrrq_end); + + // AFU configuration + reg = read_64(&p_afu->p_afu_map->global.regs.afu_config); + reg |= 0x7F00; // enable auto retry + // leave others at default: + // CTX_CAP write protected, mbox_r does not clear on read and + // checker on if dual afu + write_64(&p_afu->p_afu_map->global.regs.afu_config, reg); + + // global port select: select either port + write_64(&p_afu->p_afu_map->global.regs.afu_port_sel, 0x3); + + for (i = 0; i < NUM_FC_PORTS; i++) { + // program FC_PORT LUN Tbl + write_64(&p_afu->p_afu_map->global.fc_port[i][0], p_elm->lun_id); + + // unmask all errors (but they are 
still masked at AFU) + write_64(&p_afu->p_afu_map->global.fc_regs[i][FC_ERRMSK/8], 0); + + // clear CRC error cnt & set a threshold + (void) read_64(&p_afu->p_afu_map->global.fc_regs[i][FC_CNT_CRCERR/8]); + write_64(&p_afu->p_afu_map->global.fc_regs[i][FC_CRC_THRESH/8], + MC_CRC_THRESH); + + // set WWPNs. If already programmed, p_elm->wwpn[i] is 0 + if (p_elm->wwpn[i] != 0 && + afu_set_wwpn(p_afu, i, &p_afu->p_afu_map->global.fc_regs[i][0], + p_elm->wwpn[i])) { + TRACE_1(LOG_ERR, p_afu, "%s: failed to set WWPN on port %d\n", i); + undo_afu_init(p_afu, level); + return -1; + } + + // record the lun_id to be used in discovery later + p_afu->lun_info[i].lun_id = p_elm->lun_id; + } + + // set up master's own CTX_CAP to allow real mode, host translation + // tbls, afu cmds and non-read/write GSCSI cmds. + // First, unlock ctx_cap write by reading mbox + // + (void) read_64(&p_afu->p_ctrl_map->mbox_r); // unlock ctx_cap + asm volatile ( "eieio" : : ); + write_64(&p_afu->p_ctrl_map->ctx_cap, + SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD); + + // init heartbeat + p_afu->hb = read_64(&p_afu->p_afu_map->global.regs.afu_hb); + + // Create a socket to be used for listening to connections + p_afu->listen_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (p_afu->listen_fd < 0) { + TRACE_1(LOG_ERR, p_afu, "%s: socket failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_OPEN_SOCK; + + // Bind the socket to the file + bzero(&p_afu->svr_addr, sizeof(struct sockaddr_un)); + p_afu->svr_addr.sun_family = AF_UNIX; + strcpy(p_afu->svr_addr.sun_path, MC_SOCKET_DIR); + strcat(p_afu->svr_addr.sun_path, p_afu->master_dev_path); + mkdir_p(p_afu->svr_addr.sun_path); // make intermediate directories + unlink(p_afu->svr_addr.sun_path); + // create socket with rwx for group but no perms for others + oldmask = umask(007); + rc = bind(p_afu->listen_fd, (struct sockaddr *)&p_afu->svr_addr, + 
sizeof(p_afu->svr_addr)); + umask(oldmask); // set umask back to default + if (rc) { + TRACE_2(LOG_ERR, p_afu, "%s: bind failed, rc %d, errno %d\n", rc, errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_BIND_SOCK; + + // do not listen on the socket at this point since we do not + // want clients to be able to connect yet. + +#ifndef _AIX + /* + * NOTE: This functionality is currently broken in AIX, which + * does not support epoll. So if this is needed in AIX + * some analog must be added. + */ + + // Create the epoll array to be used for waiting on events. + p_afu->epfd = epoll_create(MAX_CONN_TO_POLL); + if (p_afu->epfd == -1) { + TRACE_1(LOG_ERR, p_afu, "%s: epoll_create failed, errno %d\n", errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_EPOLL_CREATE; + + // Add the listening file descriptor to the epoll array + p_afu->conn_tbl[0].fd = p_afu->listen_fd; + memset(&epoll_event, 0, sizeof(struct epoll_event)); + epoll_event.events = EPOLLIN; + epoll_event.data.ptr = &p_afu->conn_tbl[0]; + rc = epoll_ctl(p_afu->epfd, EPOLL_CTL_ADD, p_afu->listen_fd, &epoll_event); + if (rc) { + TRACE_2(LOG_ERR, p_afu, "%s: epoll_ctl failed for ADD, rc %d, errno %d\n", + rc, errno); + undo_afu_init(p_afu, level); + return -1; + } + level = UNDO_EPOLL_ADD; +#else + + memset(p_afu->events,0,sizeof(struct pollfd) * MAX_CONN_TO_POLL); + + // Add the listening file descriptor to the epoll array + p_afu->conn_tbl[0].fd = p_afu->listen_fd; + p_afu->events[0].fd = p_afu->listen_fd; + p_afu->events[0].events = POLLIN|POLLMSG; + p_afu->num_poll_events++; +#endif /* !_AIX */ + + return 0; +} + +void undo_afu_init(afu_t *p_afu, enum undo_level level) +{ + int i; + + switch(level) + { + case UNDO_AFU_ALL: + case UNDO_EPOLL_ADD: + case UNDO_EPOLL_CREATE: +#ifndef _AIX + close(p_afu->epfd); +#endif + case UNDO_BIND_SOCK: + unlink(p_afu->svr_addr.sun_path); + case UNDO_OPEN_SOCK: + close(p_afu->listen_fd); + case UNDO_AFU_MMAP: + 
munmap((void*)p_afu->p_afu_map, sizeof(struct surelock_afu_map)); + + case UNDO_AFU_START: + case UNDO_AFU_OPEN: + close(p_afu->afu_fd); + case UNDO_TIMER: + for (i = 0; i < NUM_CMDS; i++) { + timer_delete(p_afu->cmd[i].timer); + } + case UNDO_MLOCK: + munlock(p_afu->rht, sizeof(p_afu->rht)); + default: + break; + } +} + + +int afu_term(afu_t *p_afu) +{ + undo_afu_init(p_afu, UNDO_AFU_ALL); + + return 0; +} + +void afu_err_intr_init(afu_t *p_afu) +{ + int i; + + /* global async interrupts: AFU clears afu_ctrl on context exit + if async interrupts were sent to that context. This prevents + the AFU form sending further async interrupts when there is + nobody to receive them. + */ + // mask all + write_64(&p_afu->p_afu_map->global.regs.aintr_mask, -1ull); + // set LISN# to send and point to master context + write_64(&p_afu->p_afu_map->global.regs.afu_ctrl, + ((__u64)((p_afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); + // clear all + write_64(&p_afu->p_afu_map->global.regs.aintr_clear, -1ull); + // unmask bits that are of interest + // note: afu can send an interrupt after this step + write_64(&p_afu->p_afu_map->global.regs.aintr_mask, SISL_ASTATUS_MASK); + // clear again in case a bit came on after previous clear but before unmask + write_64(&p_afu->p_afu_map->global.regs.aintr_clear, -1ull); + + // now clear FC errors + for (i = 0; i < NUM_FC_PORTS; i++) { + write_64(&p_afu->p_afu_map->global.fc_regs[i][FC_ERROR/8], (__u32)-1); + write_64(&p_afu->p_afu_map->global.fc_regs[i][FC_ERRCAP/8], 0); + } + + + // sync interrupts for master's IOARRIN write + // note that unlike asyncs, there can be no pending sync interrupts + // at this time (this is a fresh context and master has not written + // IOARRIN yet), so there is nothing to clear. 
+ // + // set LISN#, it is always sent to the context that wrote IOARRIN + write_64(&p_afu->p_host_map->ctx_ctrl, SISL_MSI_SYNC_ERROR); + write_64(&p_afu->p_host_map->intr_mask, SISL_ISTATUS_MASK); +} + +// online means the FC link layer has sync and has completed the link layer +// handshake. It is ready for login to start. +void set_port_online(volatile __u64 *p_fc_regs) +{ + __u64 cmdcfg; + + cmdcfg = read_64(&p_fc_regs[FC_MTIP_CMDCONFIG/8]); + cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); // clear OFF_LINE + cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); // set ON_LINE + write_64(&p_fc_regs[FC_MTIP_CMDCONFIG/8], cmdcfg); +} + +void set_port_offline(volatile __u64 *p_fc_regs) +{ + __u64 cmdcfg; + + cmdcfg = read_64(&p_fc_regs[FC_MTIP_CMDCONFIG/8]); + cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); // clear ON_LINE + cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); // set OFF_LINE + write_64(&p_fc_regs[FC_MTIP_CMDCONFIG/8], cmdcfg); +} + +// returns 1 - went online +// wait_port_xxx will timeout when cable is not pluggd in +int wait_port_online(volatile __u64 *p_fc_regs, + useconds_t delay_us, + unsigned int nretry) +{ + __u64 status; + + do { + usleep(delay_us); + status = read_64(&p_fc_regs[FC_MTIP_STATUS/8]); + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && + nretry--); + + return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); +} + +// returns 1 - went offline +int wait_port_offline(volatile __u64 *p_fc_regs, + useconds_t delay_us, + unsigned int nretry) +{ + __u64 status; + + do { + usleep(delay_us); + status = read_64(&p_fc_regs[FC_MTIP_STATUS/8]); + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && + nretry--); + + return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); +} + +// this function can block up to a few seconds +int afu_set_wwpn(afu_t *p_afu, int port, volatile __u64 *p_fc_regs, + __u64 wwpn) +{ + int ret = 0; + + set_port_offline(p_fc_regs); + + if (!wait_port_offline(p_fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + 
FC_PORT_STATUS_RETRY_CNT)) { + TRACE_1(LOG_ERR, p_afu, "%s: wait on port %d to go offline timed out\n", + port); + ret = -1; // but continue on to leave the port back online + } + + if (ret == 0) { + write_64(&p_fc_regs[FC_PNAME/8], wwpn); + } + + set_port_online(p_fc_regs); + + if (!wait_port_online(p_fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) { + TRACE_1(LOG_ERR, p_afu, "%s: wait on port %d to go online timed out\n", + port); + ret = -1; + } + + return ret; +} + +// this function can block up to a few seconds +void afu_link_reset(afu_t *p_afu, int port, volatile __u64 *p_fc_regs) +{ + __u64 port_sel; + + // first switch the AFU to the other links, if any + port_sel = read_64(&p_afu->p_afu_map->global.regs.afu_port_sel); + port_sel &= ~(1 << port); + write_64(&p_afu->p_afu_map->global.regs.afu_port_sel, port_sel); + afu_sync(p_afu, 0, 0, AFU_GSYNC); + + set_port_offline(p_fc_regs); + if (!wait_port_offline(p_fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) { + TRACE_1(LOG_ERR, p_afu, "%s: wait on port %d to go offline timed out\n", + port); + } + + set_port_online(p_fc_regs); + if (!wait_port_online(p_fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) { + TRACE_1(LOG_ERR, p_afu, "%s: wait on port %d to go online timed out\n", + port); + } + + // switch back to include this port + port_sel |= (1 << port); + write_64(&p_afu->p_afu_map->global.regs.afu_port_sel, port_sel); + afu_sync(p_afu, 0, 0, AFU_GSYNC); + +} + + +// all do_xxx functions return 0 or an errno value +// + +/* + * NAME: do_mc_register + * + * FUNCTION: Register a user AFU context with master. + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * challenge - Challenge to validate if requester owns + * the context it is trying to register. + * OUTPUTS: + * none + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + * When successful: + * a. 
Sets CTX_CAP + * b. Sets RHT_START & RHT_CNT registers for the + * registered context + * c. Clears all RHT entries effectively making + * all resource handles invalid. + * d. goes to rx_ready state + * + * If registation fails, stay in rx_mcreg state to allow client retries. + */ +int +do_mc_register(afu_t *p_afu, + conn_info_t *p_conn_info, + __u64 challenge) +{ + ctx_info_t *p_ctx_info; + __u64 reg; + int i; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (p_conn_info->ctx_hndl < MAX_CONTEXT) { + p_ctx_info = &p_afu->ctx_info[p_conn_info->ctx_hndl]; + + /* This code reads the mbox w/o knowing if the requester is the + true owner of the context it wants to register. The read has + no side effect and does not affect the true owner if this is + a fraudulent registration attempt. + */ + reg = read_64(&p_ctx_info->p_ctrl_map->mbox_r); + + if (reg == 0 || /* zeroed mbox is a locked mbox */ + challenge != reg) { + return EACCES; /* return Permission denied */ + } + + if (p_conn_info->mode == MCREG_DUP_REG && + p_ctx_info->ref_cnt == 0) { + return EINVAL; /* no prior registration to dup */ + } + + /* a fresh registration will cause all previous registrations, + if any, to be forcefully canceled. This is important since + a client can close the context (AFU) but not unregister the + mc_handle. A new owner of the same context must be able to + mc_register by forcefully unregistering the previous owner. + */ + if (p_conn_info->mode == MCREG_INITIAL_REG) { + for (i = 0; i < MAX_CONNS; i++) { + if (p_afu->conn_tbl[i].p_ctx_info == p_ctx_info) { + do_mc_unregister(p_afu, &p_afu->conn_tbl[i]); + } + } + + if (p_ctx_info->ref_cnt != 0) { + TRACE_0(LOG_ERR, p_afu, + "%s: internal error: p_ctx_info->ref_cnt != 0"); + } + + /* This context is not duped and is in a group by itself. 
*/ + p_ctx_info->p_next = p_ctx_info; + p_ctx_info->p_forw = p_ctx_info; + + /* restrict user to read/write cmds in translated mode. + User has option to choose read and/or write permissions + again in mc_open. + */ + write_64(&p_ctx_info->p_ctrl_map->ctx_cap, + SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD); + asm volatile ( "eieio" : : ); + reg = read_64(&p_ctx_info->p_ctrl_map->ctx_cap); + + /* if the write failed, the ctx must have been closed since + the mbox read and the ctx_cap register locked up. + fail the registration */ + if (reg != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { + return EAGAIN; + } + + /* the context gets a dedicated RHT tbl unless it is dup'ed + later. */ + p_ctx_info->p_rht_info = &p_afu->rht_info[p_conn_info->ctx_hndl]; + p_ctx_info->p_rht_info->ref_cnt = 1; + memset(p_ctx_info->p_rht_info->rht_start, 0, + sizeof(sisl_rht_entry_t)*MAX_RHT_PER_CONTEXT); + /* make clearing of the RHT visible to AFU before MMIO */ + asm volatile ( "lwsync" : : ); + + /* set up MMIO registers pointing to the RHT */ + write_64(&p_ctx_info->p_ctrl_map->rht_start, + (__u64)p_ctx_info->p_rht_info->rht_start); + write_64(&p_ctx_info->p_ctrl_map->rht_cnt_id, + SISL_RHT_CNT_ID((__u64)MAX_RHT_PER_CONTEXT, + (__u64)(p_afu->ctx_hndl))); + } + + p_conn_info->p_ctx_info = p_ctx_info; + p_ctx_info->ref_cnt++; + p_conn_info->rx = rx_ready; /* it is now registered, go to ready state */ + return 0; + } + else { + return EINVAL; + } +} + +/* + * NAME: do_mc_unregister + * + * FUNCTION: Unregister a user AFU context with master. + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * + * OUTPUTS: + * none + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + * When successful: + * a. RHT_START, RHT_CNT & CTX_CAP registers for the + * context are cleared + * b. There is no need to clear RHT entries since + * RHT_CNT=0. + * c. 
goes to rx_mcreg state to allow re-registration + */ +int +do_mc_unregister(afu_t *p_afu, + conn_info_t *p_conn_info) +{ + int i; + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (p_ctx_info->ref_cnt-- == 1) { /* close the context */ + /* for any resource still open, dealloate LBAs and close if + nobody else is using it. */ + if (p_ctx_info->p_rht_info->ref_cnt-- == 1) { + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + do_mc_close(p_afu, p_conn_info, i); // will this p_conn_info work ? + } + } + + /* clear RHT registers for this context */ + write_64(&p_ctx_info->p_ctrl_map->rht_start, 0); + write_64(&p_ctx_info->p_ctrl_map->rht_cnt_id, 0); + + /* drop all capabilities */ + write_64(&p_ctx_info->p_ctrl_map->ctx_cap, 0); + } + + p_conn_info->p_ctx_info = NULL; + p_conn_info->rx = rx_mcreg; /* client can now send another MCREG */ + + return 0; +} + + + + +/* + * NAME: do_mc_open + * + * FUNCTION: Create a virtual LBA space of 0 size + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * flags - Permission flags + * + * OUTPUTS: + * p_res_hndl - Pointer to allocated resource handle + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + * When successful, the RHT entry contains + * a. non-zero NMASK, format & permission bits. + * b. LXT_START & LXT_CNT are still zeroed. For all purposes, + * the resource handle is opened in SW, but invalid in HW + * due to 0 size. + * + * A zero NMASK means the RHT entry is free/closed. 
+ */ +int +do_mc_open(afu_t *p_afu, + conn_info_t *p_conn_info, + __u64 flags, + res_hndl_t *p_res_hndl) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + sisl_rht_entry_t *p_rht_entry = NULL; + int i; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + /* find a free RHT entry */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + if (p_rht_info->rht_start[i].nmask == 0) { + p_rht_entry = &p_rht_info->rht_start[i]; + break; + } + } + + /* if we did not find a free entry, reached max opens allowed per + context */ + if (p_rht_entry == NULL) { + return EMFILE; // too many mc_opens + } + + p_rht_entry->nmask = MC_RHT_NMASK; + p_rht_entry->fp = SISL_RHT_FP(0u, flags & 0x3); /* format 0 & perms */ + *p_res_hndl = (p_rht_entry - p_rht_info->rht_start); + + return 0; +} + + +/* + * NAME: do_mc_close + * + * FUNCTION: Close a virtual LBA space setting it to 0 size and + * marking the res_hndl as free/closed. + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * res_hndl - resource handle to close + * + * OUTPUTS: + * none + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + * When successful, the RHT entry is cleared. 
+ */ +int +do_mc_close(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + sisl_rht_entry_t *p_rht_entry; + __u64 act_new_size; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (res_hndl < MAX_RHT_PER_CONTEXT) { + p_rht_entry = &p_rht_info->rht_start[res_hndl]; + + if (p_rht_entry->nmask == 0) { /* not open */ + return EINVAL; + } + + /* set size to 0, this will clear LXT_START and LXT_CNT fields + in the RHT entry */ + do_mc_size(p_afu, p_conn_info, res_hndl, 0u, &act_new_size); // p_conn good ? + + p_rht_entry->nmask = 0; + p_rht_entry->fp = 0; + + /* now the RHT entry is all cleared */ + } + else { + return EINVAL; + } + + return 0; +} + + +/* + * NAME: do_mc_size + * + * FUNCTION: Resize a resource handle by changing the RHT entry and LXT + * Tbl it points to. Synchronize all contexts that refer to + * the RHT. + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * res_hndl - resource handle to resize + * new_size - new size in chunks + * + * OUTPUTS: + * p_act_new_size - pointer to actual new size in chunks + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + * Setting new_size=0 will clear LXT_START and LXT_CNT fields + * in the RHT entry. 
+ */ +int +do_mc_size(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl, + __u64 new_size, + __u64 *p_act_new_size) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + sisl_rht_entry_t *p_rht_entry; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (res_hndl < MAX_RHT_PER_CONTEXT) { + p_rht_entry = &p_rht_info->rht_start[res_hndl]; + + if (p_rht_entry->nmask == 0) { /* not open */ + return EINVAL; + } + + if (new_size > p_rht_entry->lxt_cnt) { + grow_lxt(p_afu, + p_conn_info->ctx_hndl, + res_hndl, + p_rht_entry, + new_size - p_rht_entry->lxt_cnt, + p_act_new_size); + + } + else if (new_size < p_rht_entry->lxt_cnt) { + shrink_lxt(p_afu, + p_conn_info->ctx_hndl, + res_hndl, + p_rht_entry, + p_rht_entry->lxt_cnt - new_size, + p_act_new_size); + } + else { + *p_act_new_size = new_size; + return 0; + } + } + else { + return EINVAL; + } + + return 0; +} + +int +grow_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + __u64 delta, + __u64 *p_act_new_size) +{ + sisl_lxt_entry_t *p_lxt, *p_lxt_old; + unsigned int av_size; + unsigned int ngrps, ngrps_old; + aun_t aun; /* chunk# allocated by block allocator */ + int i; + + /* check what is available in the block allocator before re-allocating + LXT array. This is done up front under the mutex which must not be + released until after allocation is complete. 
+ */ + pthread_mutex_lock(&p_afu->p_blka->mutex); + av_size = ba_space(&p_afu->p_blka->ba_lun); + if (av_size < delta) { + delta = av_size; + } + + p_lxt_old = p_rht_entry->lxt_start; + ngrps_old = LXT_NUM_GROUPS(p_rht_entry->lxt_cnt); + ngrps = LXT_NUM_GROUPS(p_rht_entry->lxt_cnt + delta); + + if (ngrps != ngrps_old) { + /* realloate to fit new size */ + p_lxt = (sisl_lxt_entry_t *) malloc(sizeof(sisl_lxt_entry_t) * + LXT_GROUP_SIZE * ngrps); + if (p_lxt == NULL) { + pthread_mutex_unlock(&p_afu->p_blka->mutex); + return ENOMEM; + } + /* copy over all old entries */ + memcpy(p_lxt, p_lxt_old, + sizeof(sisl_lxt_entry_t)*p_rht_entry->lxt_cnt); + } + else { + p_lxt = p_lxt_old; + } + + /* nothing can fail from now on */ + *p_act_new_size = p_rht_entry->lxt_cnt + delta; + + /* add new entries to the end */ + for (i = p_rht_entry->lxt_cnt; i < *p_act_new_size; i++) { + /* Due to the earlier check of available space, ba_alloc cannot + fail here. If it did due to internal error, leave a rlba_base + of -1u which will likely be a invalid LUN (too large). 
+ */ + aun = ba_alloc(&p_afu->p_blka->ba_lun); + if (aun == (aun_t) -1 || + aun >= p_afu->p_blka->nchunk) { + TRACE_2(LOG_ERR, p_afu, + "%s: ba_alloc error: allocated chunk# 0x%lx, max 0x%lx", + aun, p_afu->p_blka->nchunk - 1); + } + + /* lun_indx = 0, select both ports, use r/w perms from RHT */ + p_lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) | 0x33); + } + + pthread_mutex_unlock(&p_afu->p_blka->mutex); + + asm volatile ( "lwsync" : : ); /* make lxt updates visible */ + + /* Now sync up AFU - this can take a while */ + p_rht_entry->lxt_start = p_lxt; /* even if p_lxt didn't change */ + asm volatile ( "lwsync" : : ); + + p_rht_entry->lxt_cnt = *p_act_new_size; + asm volatile ( "lwsync" : : ); + + afu_sync(p_afu, ctx_hndl_u, res_hndl_u, AFU_LW_SYNC); + + /* free old lxt if reallocated */ + if (p_lxt != p_lxt_old) { + free(p_lxt_old); + } + // sync up AFU on each context in the doubly linked list + return 0; +} + +int +shrink_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + __u64 delta, + __u64 *p_act_new_size) +{ + sisl_lxt_entry_t *p_lxt, *p_lxt_old; + unsigned int ngrps, ngrps_old; + aun_t aun; /* chunk# allocated by block allocator */ + int i; + + p_lxt_old = p_rht_entry->lxt_start; + ngrps_old = LXT_NUM_GROUPS(p_rht_entry->lxt_cnt); + ngrps = LXT_NUM_GROUPS(p_rht_entry->lxt_cnt - delta); + + if (ngrps != ngrps_old) { + /* realloate to fit new size unless new size is 0 */ + if (ngrps) { + p_lxt = (sisl_lxt_entry_t *) malloc(sizeof(sisl_lxt_entry_t) * + LXT_GROUP_SIZE * ngrps); + if (p_lxt == NULL) { + return ENOMEM; + } + /* copy over old entries that will remain */ + memcpy(p_lxt, p_lxt_old, + sizeof(sisl_lxt_entry_t)*(p_rht_entry->lxt_cnt - delta)); + } + else { + p_lxt = NULL; + } + } + else { + p_lxt = p_lxt_old; + } + + /* nothing can fail from now on */ + *p_act_new_size = p_rht_entry->lxt_cnt - delta; + + /* Now sync up AFU - this can take a while */ + p_rht_entry->lxt_cnt = *p_act_new_size; + asm 
volatile ( "lwsync" : : ); /* also makes lxt updates visible */ + + p_rht_entry->lxt_start = p_lxt; /* even if p_lxt didn't change */ + asm volatile ( "lwsync" : : ); + + afu_sync(p_afu, ctx_hndl_u, res_hndl_u, AFU_HW_SYNC); + + /* free LBAs allocated to freed chunks */ + pthread_mutex_lock(&p_afu->p_blka->mutex); + for (i = delta - 1; i >= 0; i--) { + aun = (p_lxt_old[*p_act_new_size + i].rlba_base >> MC_CHUNK_SHIFT); + ba_free(&p_afu->p_blka->ba_lun, aun); + } + pthread_mutex_unlock(&p_afu->p_blka->mutex); + + /* free old lxt if reallocated */ + if (p_lxt != p_lxt_old) { + free(p_lxt_old); + } + + // sync up AFU on each context in the doubly linked list!!! + + return 0; +} + +/* + * NAME: clone_lxt + * + * FUNCTION: clone a LXT table + * + * INPUTS: + * p_afu - Pointer to afu struct + * ctx_hndl_u - context that owns the destination LXT + * res_hndl_u - res_hndl of the destination LXT + * p_rht_entry - destination RHT to clone into + * p_rht_entry_src - source RHT to clone from + * + * OUTPUTS: + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * NOTES: + */ +int +clone_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + sisl_rht_entry_t *p_rht_entry_src) +{ + sisl_lxt_entry_t *p_lxt; + unsigned int ngrps; + aun_t aun; /* chunk# allocated by block allocator */ + int i, j; + + ngrps = LXT_NUM_GROUPS(p_rht_entry_src->lxt_cnt); + + if (ngrps) { + /* alloate new LXTs for clone */ + p_lxt = (sisl_lxt_entry_t *) malloc(sizeof(sisl_lxt_entry_t) * + LXT_GROUP_SIZE * ngrps); + if (p_lxt == NULL) { + return ENOMEM; + } + /* copy over */ + memcpy(p_lxt, p_rht_entry_src->lxt_start, + sizeof(sisl_lxt_entry_t)*p_rht_entry_src->lxt_cnt); + + /* clone the LBAs in block allocator via ref_cnt */ + pthread_mutex_lock(&p_afu->p_blka->mutex); + for (i = 0; i < p_rht_entry_src->lxt_cnt; i++) { + aun = (p_lxt[i].rlba_base >> MC_CHUNK_SHIFT); + if (ba_clone(&p_afu->p_blka->ba_lun, aun) == -1) { + /* free the clones already made 
*/ + for (j = 0; j < i; j++) { + aun = (p_lxt[j].rlba_base >> MC_CHUNK_SHIFT); + ba_free(&p_afu->p_blka->ba_lun, aun); + } + pthread_mutex_unlock(&p_afu->p_blka->mutex); + free(p_lxt); + return EIO; + } + } + pthread_mutex_unlock(&p_afu->p_blka->mutex); + } + else { + p_lxt = NULL; + } + + asm volatile ( "lwsync" : : ); /* make lxt updates visible */ + + /* Now sync up AFU - this can take a while */ + p_rht_entry->lxt_start = p_lxt; /* even if p_lxt is NULL */ + asm volatile ( "lwsync" : : ); + + p_rht_entry->lxt_cnt = p_rht_entry_src->lxt_cnt; + asm volatile ( "lwsync" : : ); + + afu_sync(p_afu, ctx_hndl_u, res_hndl_u, AFU_LW_SYNC); + + // sync up AFU on each context in the doubly linked list + return 0; +} + + +/* + * NAME: do_mc_xlate_lba + * + * FUNCTION: Query the physical LBA mapped to a virtual LBA + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * res_hndl - resource handle to query on + * v_lba - virtual LBA on res_hndl + * + * OUTPUTS: + * p_p_lba - pointer to output physical LBA + * + * RETURNS: + * 0 - Success + * errno - Failure + * + */ +int +do_mc_xlate_lba(afu_t *p_afu, + conn_info_t* p_conn_info, + res_hndl_t res_hndl, + __u64 v_lba, + __u64 *p_p_lba) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + sisl_rht_entry_t *p_rht_entry; + __u64 chunk_id, chunk_off, rlba_base; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (res_hndl < MAX_RHT_PER_CONTEXT) { + p_rht_entry = &p_rht_info->rht_start[res_hndl]; + + if (p_rht_entry->nmask == 0) { /* not open */ + return EINVAL; + } + + chunk_id = (v_lba >> MC_CHUNK_SHIFT); + chunk_off = (v_lba & MC_CHUNK_OFF_MASK); + + if (chunk_id < p_rht_entry->lxt_cnt) { + rlba_base = + (p_rht_entry->lxt_start[chunk_id].rlba_base & (~MC_CHUNK_OFF_MASK)); + *p_p_lba = (rlba_base 
| chunk_off); + } + else { + return EINVAL; + } + } + else { + return EINVAL; + } + + return 0; +} + +/* + * NAME: do_mc_clone + * + * FUNCTION: clone by making a snapshot copy of another context + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * This is also the target of the clone. + * ctx_hndl_src - AFU context to clone from + * challenge - used to validate access to ctx_hndl_src + * flags - permissions for the cloned copy + * + * OUTPUTS: + * None + * + * RETURNS: + * 0 - Success + * errno - Failure + * + * clone effectively does what open and size do. The destination + * context must be in pristine state with no resource handles open. + * + */ +// todo dest ctx must be unduped +int +do_mc_clone(afu_t *p_afu, + conn_info_t *p_conn_info, + ctx_hndl_t ctx_hndl_src, + __u64 challenge, + __u64 flags) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + + ctx_info_t *p_ctx_info_src; + rht_info_t *p_rht_info_src; + __u64 reg; + int i, j; + int rc; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + /* verify there is no open resource handle in the target context + of the clone. 
+ */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + if (p_rht_info->rht_start[i].nmask != 0) { + return EINVAL; + } + } + + /* do not clone yourself */ + if (p_conn_info->ctx_hndl == ctx_hndl_src) { + return EINVAL; + } + + if (ctx_hndl_src < MAX_CONTEXT) { + p_ctx_info_src = &p_afu->ctx_info[ctx_hndl_src]; + p_rht_info_src = &p_afu->rht_info[ctx_hndl_src]; + } + else { + return EINVAL; + } + + reg = read_64(&p_ctx_info_src->p_ctrl_map->mbox_r); + + if (reg == 0 || /* zeroed mbox is a locked mbox */ + challenge != reg) { + return EACCES; /* return Permission denied */ + } + + /* this loop is equivalent to do_mc_open & do_mc_size + * Not checking if the source context has anything open or whether + * it is even registered. + */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + p_rht_info->rht_start[i].nmask = + p_rht_info_src->rht_start[i].nmask; + p_rht_info->rht_start[i].fp = + SISL_RHT_FP_CLONE(p_rht_info_src->rht_start[i].fp, flags & 0x3); + + rc = clone_lxt(p_afu, p_conn_info->ctx_hndl, i, + &p_rht_info->rht_start[i], + &p_rht_info_src->rht_start[i]); + if (rc != 0) { + for (j = 0; j < i; j++) { + do_mc_close(p_afu, p_conn_info, j); + } + + p_rht_info->rht_start[i].nmask = 0; + p_rht_info->rht_start[i].fp = 0; + + return rc; + } + } + + return 0; +} + +/* + * NAME: do_mc_dup + * + * FUNCTION: dup 2 contexts by linking their RHTs + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * This is the context to dup to (target) + * ctx_hndl_cand - This is the context to dup from source) + * challenge - used to validate access to ctx_hndl_cand + * + * OUTPUTS: + * None + * + * RETURNS: + * 0 - Success + * errno - Failure + * + */ +// dest ctx must be unduped and with no open res_hndls +int +do_mc_dup(afu_t *p_afu, + conn_info_t *p_conn_info, + ctx_hndl_t ctx_hndl_cand, + __u64 challenge) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + + ctx_info_t 
*p_ctx_info_cand; + //rht_info_t *p_rht_info_cand; + __u64 reg; + int i; //, j; + //int rc; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + /* verify there is no open resource handle in the target context + of the clone. + */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + if (p_rht_info->rht_start[i].nmask != 0) { + return EINVAL; + } + } + + /* do not dup yourself */ + if (p_conn_info->ctx_hndl == ctx_hndl_cand) { + return EINVAL; + } + + if (ctx_hndl_cand < MAX_CONTEXT) { + p_ctx_info_cand = &p_afu->ctx_info[ctx_hndl_cand]; + //p_rht_info_cand = &p_afu->rht_info[ctx_hndl_cand]; + } + else { + return EINVAL; + } + + reg = read_64(&p_ctx_info_cand->p_ctrl_map->mbox_r); + + if (reg == 0 || /* zeroed mbox is a locked mbox */ + challenge != reg) { + return EACCES; /* return Permission denied */ + } + + + return EIO; // todo later!!! +} + + +/* + * NAME: do_mc_stat + * + * FUNCTION: Query the current information on a resource handle + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * res_hndl - resource handle to query + * + * OUTPUTS: + * p_mc_stat - pointer to output stat information + * + * RETURNS: + * 0 - Success + * errno - Failure + * + */ +int +do_mc_stat(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl, + mc_stat_t *p_mc_stat) +{ + ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + sisl_rht_entry_t *p_rht_entry; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + if (res_hndl < MAX_RHT_PER_CONTEXT) { + p_rht_entry = &p_rht_info->rht_start[res_hndl]; + + if (p_rht_entry->nmask == 0) { /* not open */ + return EINVAL; + } + + p_mc_stat->blk_len = p_afu->p_blka->ba_lun.lba_size; + 
p_mc_stat->nmask = p_rht_entry->nmask; + p_mc_stat->size = p_rht_entry->lxt_cnt; + p_mc_stat->flags = SISL_RHT_PERM(p_rht_entry->fp); + } + else { + return EINVAL; + } + + return 0; +} + +/* + * NAME: do_mc_notify + * + * FUNCTION: Send an even to master + * + * INPUTS: + * p_afu - Pointer to afu struct + * p_conn_info - Pointer to connection the request came in + * p_mc_stat - pointer to input event data + * + * OUTPUTS: + * + * RETURNS: + * 0 - Success + * errno - Failure + * + */ +int +do_mc_notify(afu_t *p_afu, + conn_info_t *p_conn_info, + mc_notify_t *p_mc_notify) +{ + //ctx_info_t *p_ctx_info = p_conn_info->p_ctx_info; + //rht_info_t *p_rht_info = p_ctx_info->p_rht_info; + + TRACE_4(LOG_INFO, p_afu, "%s: %s, client_pid=%d client_fd=%d ctx_hdl=%d\n", + __func__, + p_conn_info->client_pid, + p_conn_info->client_fd, + p_conn_info->ctx_hndl); + + return 0; +} + + +/***************************************************************************** + * Procedure: xfer_data + * + * Description: Perform a transfer operation for the given + * socket file descriptor. + * + * Parameters: + * fd: Socket File Descriptor + * op: Read or Write Operation + * buf: Buffer to either read from or write to + * exp_size: Size of data transfer + * + * Return: 0, if successful + * non-zero otherwise + *****************************************************************************/ +int +xfer_data(int fd, int op, void *buf, ssize_t exp_size) +{ + int rc = 0; + ssize_t offset = 0; + ssize_t bytes_xfer = 0; + ssize_t target_size = exp_size; + struct iovec iov; + struct msghdr msg; + + while ( 1 ) + { + // Set up IO vector for IO operation. 
+ memset(&msg, 0, sizeof(struct msghdr)); + iov.iov_base = buf + offset; + iov.iov_len = target_size; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + // Check to see if we are sending or receiving data + if ( op == XFER_OP_READ ) + { + bytes_xfer = recvmsg(fd, &msg, MSG_WAITALL); + } + else + { + bytes_xfer = sendmsg(fd, &msg, MSG_NOSIGNAL); + } + + if ( -1 == bytes_xfer ) + { + if ( EAGAIN == errno || EWOULDBLOCK == errno || EINTR == errno) + { + // just retry the whole request + continue; + } + else + { + // connection closed by the other end + rc = 1; + break; + } + } + else if ( 0 == bytes_xfer ) + { + // connection closed by the other end + rc = 1; + break; + } + else if ( bytes_xfer == target_size ) + { + // We have transfered all the bytes we wanted, we + // can stop now. + rc = 0; + break; + } + else + { + // less than target size - partial condition + // set up to transfer for the remainder of the request + offset += bytes_xfer; + target_size = (target_size - bytes_xfer); + } + } + + return rc; +} + +// rx fcn on a fresh connection waiting a MCREG. +// On receipt of a MCREG, it will go to the rx_ready state where +// all cmds except a MCREG is accepted. 
+// +int rx_mcreg(afu_t *p_afu, conn_info_t *p_conn_info, + mc_req_t *p_req, mc_resp_t *p_resp) +{ + int status; + + switch (p_req->header.command) + { + case CMD_MCREG: + // first command on a fresh connection + // copy input fields to connection info + p_conn_info->client_pid = p_req->reg.client_pid; + p_conn_info->client_fd = p_req->reg.client_fd; + p_conn_info->ctx_hndl = p_req->reg.ctx_hndl; + p_conn_info->mode = p_req->reg.mode; + + status = do_mc_register(p_afu, p_conn_info, p_req->reg.challenge); + + break; + + default: + // fail everything else + TRACE_1(LOG_ERR, p_afu, "%s: bad command code %d in wait_mcreg state\n", + p_req->header.command); + status = EINVAL; + break; + } + + return status; +} + +int rx_ready(afu_t *p_afu, conn_info_t *p_conn_info, + mc_req_t *p_req, mc_resp_t *p_resp) +{ + int status; + + switch (p_req->header.command) + { + case CMD_MCUNREG: + (void) do_mc_unregister(p_afu, p_conn_info); + status = 0; + break; + + case CMD_MCOPEN: + status = do_mc_open(p_afu, p_conn_info, + p_req->open.flags, &p_resp->open.res_hndl); + break; + + case CMD_MCCLOSE: + status = do_mc_close(p_afu, p_conn_info, p_req->close.res_hndl); + break; + + case CMD_MCSIZE: + status = do_mc_size(p_afu, p_conn_info, + p_req->size.res_hndl, + p_req->size.new_size, + &p_resp->size.act_new_size); + break; + + case CMD_MCXLATE_LBA: + status = do_mc_xlate_lba(p_afu, p_conn_info, + p_req->xlate_lba.res_hndl, + p_req->xlate_lba.v_lba, + &p_resp->xlate_lba.p_lba); + break; + + case CMD_MCCLONE: + status = do_mc_clone(p_afu, p_conn_info, + p_req->clone.ctx_hndl_src, + p_req->clone.challenge, + p_req->clone.flags); + break; + + case CMD_MCDUP: + status = do_mc_dup(p_afu, p_conn_info, + p_req->dup.ctx_hndl_cand, + p_req->dup.challenge); + break; + + case CMD_MCSTAT: + status = do_mc_stat(p_afu, p_conn_info, + p_req->stat.res_hndl, + &p_resp->stat); + break; + + case CMD_MCNOTIFY: + status = do_mc_notify(p_afu, p_conn_info, + &p_req->notify); + break; + + default : + 
TRACE_1(LOG_ERR, p_afu, "%s: bad command code %d in ready state\n", + p_req->header.command); + status = EINVAL; + break; + } + + return status; +} + +void *afu_ipc_rx(void *arg) { + int conn_fd; + int rc; + int i; + int nready; + socklen_t len; + struct sockaddr_un cli_addr; +#ifndef _AIX + struct epoll_event epoll_event; +#endif /* !_AIX */ + mc_req_t mc_req; + mc_resp_t mc_resp; + conn_info_t *p_conn_info; + conn_info_t *p_conn_info_new; + + afu_t *p_afu = (struct afu*) arg; + +#ifndef _AIX + + // The listen is delayed until everything is ready. This prevent + // clients from connecting to the server until it reaches this + // point. + rc = listen(p_afu->listen_fd, SOMAXCONN); + if ( rc ) { + TRACE_2(LOG_ERR, p_afu, "%s: listen failed, rc %d, errno %d\n", rc, errno); + exit(-1); + } + + while (1) { + // + // Wait for an event on any of the watched file descriptors + // block for ever if no events. + // + // Note we poll all file descriptors in the epoll structure + // but receive only up to MAX_CONN_TO_POLL ready fds at a time. + // + nready = epoll_wait(p_afu->epfd, &p_afu->events[0], + MAX_CONN_TO_POLL, -1); + + if ((nready == -1) && (errno == EINTR)) { + continue; + } + + for (i = 0; i < nready; i++) { + p_conn_info = (conn_info_t*) p_afu->events[i].data.ptr; + + if (p_afu->events[i].events & (EPOLLHUP | EPOLLERR)) { + // Something bad happened with the connection or the client + // just closed its end. 
Remove the file descriptor from the + // array of watched FD's + + TRACE_3(LOG_INFO, p_afu, + "%s: connection is bad, %d (0x%08X), client fd %d\n", + p_conn_info->fd, p_afu->events[i].events, + p_conn_info->client_fd); + + rc = epoll_ctl(p_afu->epfd, EPOLL_CTL_DEL, p_conn_info->fd, + &p_afu->events[i]); + if (rc) { + TRACE_2(LOG_ERR, p_afu, + "%s: epoll_ctl failed for DEL: %d (%d)\n", rc, errno); + } + + close(p_conn_info->fd); + if (p_conn_info->p_ctx_info != NULL) { /* if registered */ + (void) do_mc_unregister(p_afu, p_conn_info); + } + free_connection(p_afu, p_conn_info); + + continue; + } + + // Is this the listening FD...would mean a new connection + if (p_conn_info->fd == p_afu->listen_fd) { + len = sizeof(cli_addr); + conn_fd = accept(p_afu->listen_fd, (struct sockaddr *)&cli_addr, &len); + if (conn_fd == -1) { + TRACE_2(LOG_ERR, p_afu, + "%s: accept failed, fd %d, errno %d\n", conn_fd, errno); + continue; + } + + p_conn_info_new = alloc_connection(p_afu, conn_fd); + if (p_conn_info_new == NULL) { + TRACE_0(LOG_ERR, p_afu, + "%s: too many connections, closing new connection\n"); + close(conn_fd); + continue; + } + + // Add the connection to the watched array + epoll_event.events = EPOLLIN; + epoll_event.data.ptr = p_conn_info_new; + rc = epoll_ctl(p_afu->epfd, EPOLL_CTL_ADD, conn_fd, &epoll_event); + if (rc == -1) { + TRACE_1(LOG_ERR, p_afu, + "%s: epoll_ctl ADD failed, errno %d\n", errno); + close(conn_fd); + continue; + } + } + else { + // We can now assume that we have data to be read from a client + // Read the entire command + rc = xfer_data(p_conn_info->fd, XFER_OP_READ, + (void *)&mc_req, sizeof(mc_req)); + if (rc) { + TRACE_1(LOG_ERR, p_afu, "%s: read of cmd failed, rc %d\n", rc); + continue; + } + + TRACE_4(LOG_INFO, p_afu, + "%s: command code=%d, tag=%d, size=%d, conn_info=%p\n", + mc_req.header.command, + mc_req.header.tag, + mc_req.header.size, + p_conn_info); + + // process command and fill in response while protecting from + // other threads 
accessing the afu + pthread_mutex_lock(&p_afu->mutex); + mc_resp.header.status = + (*p_conn_info->rx)(p_afu, p_conn_info, &mc_req, &mc_resp); + pthread_mutex_unlock(&p_afu->mutex); + + // Send response back to the client. + mc_resp.header.command = mc_req.header.command; + mc_resp.header.tag = mc_req.header.tag; + mc_resp.header.size = sizeof(mc_resp); + rc = xfer_data(p_conn_info->fd, XFER_OP_WRITE, + (void *)&mc_resp, sizeof(mc_resp)); + if (rc) { + TRACE_1(LOG_ERR, p_afu, + "%s: write of cmd response failed, rc %d\n", rc); + continue; + } + } + } + } + +#endif /* !_AIX */ + return NULL; +} + + +asyc_intr_info_t ainfo[] = { + { SISL_ASTATUS_FC0_OTHER, "fc 0: other error", 0, CLR_FC_ERROR | LINK_RESET }, + { SISL_ASTATUS_FC0_LOGO, "fc 0: target initiated LOGO", 0, 0 }, + { SISL_ASTATUS_FC0_CRC_T, "fc 0: CRC threshold exceeded", 0, LINK_RESET }, + { SISL_ASTATUS_FC0_LOGI_R, "fc 0: login timed out, retrying", 0, 0 }, + { SISL_ASTATUS_FC0_LOGI_F, "fc 0: login failed", 0, CLR_FC_ERROR }, + { SISL_ASTATUS_FC0_LOGI_S, "fc 0: login succeeded", 0, 0 }, + { SISL_ASTATUS_FC0_LINK_DN, "fc 0: link down", 0, 0 }, + { SISL_ASTATUS_FC0_LINK_UP, "fc 0: link up", 0, 0 }, + + { SISL_ASTATUS_FC1_OTHER, "fc 1: other error", 1, CLR_FC_ERROR | LINK_RESET }, + { SISL_ASTATUS_FC1_LOGO, "fc 1: target initiated LOGO", 1, 0 }, + { SISL_ASTATUS_FC1_CRC_T, "fc 1: CRC threshold exceeded", 1, LINK_RESET }, + { SISL_ASTATUS_FC1_LOGI_R, "fc 1: login timed out, retrying", 1, 0 }, + { SISL_ASTATUS_FC1_LOGI_F, "fc 1: login failed", 1, CLR_FC_ERROR }, + { SISL_ASTATUS_FC1_LOGI_S, "fc 1: login succeeded", 1, 0 }, + { SISL_ASTATUS_FC1_LINK_DN, "fc 1: link down", 1, 0 }, + { SISL_ASTATUS_FC1_LINK_UP, "fc 1: link up", 1, 0 }, + + { 0x0, "", 0, 0 } /* terminator */ +}; + +asyc_intr_info_t *find_ainfo(__u64 status) +{ + asyc_intr_info_t *p_info; + + for (p_info = &ainfo[0]; p_info->status; p_info++) { + if (p_info->status == status) { + return p_info; + } + } + + return NULL; +} + +void afu_rrq_intr(afu_t 
*p_afu) { + struct afu_cmd *p_cmd; + + // process however many RRQ entries that are ready + while ((*p_afu->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_afu->toggle) { + p_cmd = (struct afu_cmd*) + ((*p_afu->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if (p_afu->p_hrrq_curr < p_afu->p_hrrq_end) { + p_afu->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else { /* wrap HRRQ & flip toggle */ + p_afu->p_hrrq_curr = p_afu->p_hrrq_start; + p_afu->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + } +} + +void afu_sync_intr(afu_t *p_afu) { + __u64 reg; + __u64 reg_unmasked; + + reg = read_64(&p_afu->p_host_map->intr_status); + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + if (reg_unmasked == 0) { + TRACE_1(LOG_ERR, p_afu, + "%s: spurious interrupt, intr_status 0x%016lx\n", reg); + return; + } + + TRACE_1(LOG_ERR, p_afu, + "%s: unexpected interrupt, intr_status 0x%016lx\n", reg); + + write_64(&p_afu->p_host_map->intr_clear, reg_unmasked); + + return; +} + +// this function can block up to a few seconds +void afu_async_intr(afu_t *p_afu) { + int i; + __u64 reg; + __u64 reg_unmasked; + asyc_intr_info_t *p_info; + volatile struct sisl_global_map *p_global = &p_afu->p_afu_map->global; + + reg = read_64(&p_global->regs.aintr_status); + reg_unmasked = (reg & SISL_ASTATUS_UNMASK); + + if (reg_unmasked == 0) { + TRACE_1(LOG_ERR, p_afu, + "%s: spurious interrupt, aintr_status 0x%016lx\n", reg); + return; + } + + /* it is OK to clear AFU status before FC_ERROR */ + write_64(&p_global->regs.aintr_clear, reg_unmasked); + + /* check each bit that is on */ + for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) { + if ((reg_unmasked & 0x1) == 0 || + (p_info = find_ainfo(1ull << i)) == NULL) { + continue; + } + + TRACE_2(LOG_ERR, p_afu, "%s: %s, fc_status 0x%08lx\n", + p_info->desc, + 
read_64(&p_global->fc_regs[p_info->port][FC_STATUS/8])); + + // do link reset first, some OTHER errors will set FC_ERROR again + // if cleared before or w/o a reset + // + if (p_info->action & LINK_RESET) { + TRACE_1(LOG_ERR, p_afu, "%s: fc %d: resetting link\n", + p_info->port); + afu_link_reset(p_afu, p_info->port, + &p_afu->p_afu_map->global.fc_regs[p_info->port][0]); + } + + if (p_info->action & CLR_FC_ERROR) { + reg = read_64(&p_global->fc_regs[p_info->port][FC_ERROR/8]); + + // since all errors are unmasked, FC_ERROR and FC_ERRCAP + // should be the same and tracing one is sufficient. + // + TRACE_2(LOG_ERR, p_afu, "%s: fc %d: clearing fc_error 0x%08lx\n", + p_info->port, reg); + write_64(&p_global->fc_regs[p_info->port][FC_ERROR/8], reg); + write_64(&p_global->fc_regs[p_info->port][FC_ERRCAP/8], 0); + } + + } + +} + +// notify error thread that it has work to do +void notify_err(afu_t *p_afu, int err_flag) +{ + pthread_mutex_lock(&p_afu->err_mutex); + p_afu->err_flag |= err_flag; + pthread_cond_signal(&p_afu->err_cv); + pthread_mutex_unlock(&p_afu->err_mutex); +} + +/* + * This thread receives all interrupts, not just RRQ. + * + * But it processes only the RRQ. Error interrupts are forwarded to their + * own thread for processing. Note that error processing may involve + * sending a command to the afu. Doing so from this thread and then blocking + * for a response will be a deadlock. The rrq thread must remain free ALL + * the time to collect responses. + * + * The rrq thread wakes up the error thread via a cv. Ideally, each thread + * could block on the afu_fd waiting for a particular irq#. If the kernel + * were to support such a demultiplexed irq dispatch via read, the cv can + * be removed. + * + * NOTE: afu_rrq_rx must not block (on events) or sleep, else response + * delivery can delay/deadlock. 
+ */ +void *afu_rrq_rx(void *arg) { + struct cxl_event *p_event; + int len; + afu_t *p_afu = (struct afu*) arg; + + while (1) { + // + // read afu fd & block on any interrupt + len = read(p_afu->afu_fd, &p_afu->event_buf[0], + sizeof(p_afu->event_buf)); + + + if (len < 0) { + TRACE_0(LOG_ERR, p_afu, "%s: afu has been reset, exiting...\n"); + exit(-1); + } + + p_event = (struct cxl_event *)&p_afu->event_buf[0]; + while (len >= sizeof(p_event->header)) { + if (p_event->header.type == CXL_EVENT_AFU_INTERRUPT) { + switch(p_event->irq.irq) { + case SISL_MSI_RRQ_UPDATED: + afu_rrq_intr(p_afu); + break; + + case SISL_MSI_ASYNC_ERROR: + notify_err(p_afu, E_ASYNC_INTR); + break; + + case SISL_MSI_SYNC_ERROR: + notify_err(p_afu, E_SYNC_INTR); + break; + + default: + TRACE_1(LOG_ERR, p_afu, "%s: unexpected irq %d\n", p_event->irq.irq); + break; + } + } + else if (p_event->header.type == CXL_EVENT_DATA_STORAGE) { + TRACE_2(LOG_ERR, p_afu, + "%s: CXL_EVENT_DATA_STORAGE addr = 0x%lx, dsisr = 0x%lx\n", + p_event->fault.addr, p_event->fault.dsisr); + } + else { + TRACE_1(LOG_ERR, p_afu, "%s: unexpected event %d\n", + p_event->header.type); + } + + len -= p_event->header.size; + p_event = (struct cxl_event *) + (((char*)p_event) + p_event->header.size); + } + } + + return NULL; +} + + +void *afu_err_rx(void *arg) { + afu_t *p_afu = (struct afu*) arg; + int err_flag; + + while (1) { + // + // block on error notification + pthread_mutex_lock(&p_afu->err_mutex); + while (p_afu->err_flag == 0) { + pthread_cond_wait(&p_afu->err_cv, &p_afu->err_mutex); + } + err_flag = p_afu->err_flag; + p_afu->err_flag = 0; + pthread_mutex_unlock(&p_afu->err_mutex); + + // about to access afu and/or send cmds, so protect from other + // threads doing the same. 
+ // + pthread_mutex_lock(&p_afu->mutex); + + if (err_flag & E_ASYNC_INTR) { + afu_async_intr(p_afu); + } + + if (err_flag & E_SYNC_INTR) { + afu_sync_intr(p_afu); + } + + pthread_mutex_unlock(&p_afu->mutex); + + // delay so someone else can grab p_afu->mutex + // else it can starve out other threads when error + // interrupts flood the system + // + usleep(100000); // 100ms + + } + + return NULL; +} + + + +int mkdir_p(char *file_path) // path must be absolute and must end on a file +{ + struct stat s; + char *p; + char *last = NULL; + + if (file_path[0] != '/') { + return -1; + } + + if ((last = strrchr(file_path, '/'))) { + *last = '\0'; // get rid of the last component + } + + for (p = (file_path + 1); *p; p++) { + if (*p == '/') { + *p = '\0'; + if (stat(file_path, &s) == -1) { + // create dirs as traversable by group members and no one else + mkdir(file_path, 0710); + } + *p = '/'; + } + } + + if (stat(file_path, &s) == -1) { + // create dirs as traversable by group members and no one else + mkdir(file_path, 0710); + } + + if (last) { // restore original file_path + *last = '/'; + } + + return 0; +} + +void send_cmd(afu_t *p_afu, struct afu_cmd *p_cmd) { + int nretry = 0; + + if (p_afu->room == 0) { + asm volatile ( "eieio" : : ); // let IOARRIN writes complete + do { + p_afu->room = read_64(&p_afu->p_host_map->cmd_room); + usleep(nretry); + } while (p_afu->room == 0 && nretry++ < MC_ROOM_RETRY_CNT); + } + + p_cmd->sa.host_use_b[0] = 0; // 0 means active + p_cmd->sa.ioasc = 0; + + /* make memory updates visible to AFU before MMIO */ + asm volatile ( "lwsync" : : ); + + timer_start(p_cmd->timer, p_cmd->rcb.timeout*2, 0); + + if (p_afu->room) { + // write IOARRIN + write_64(&p_afu->p_host_map->ioarrin, (__u64)&p_cmd->rcb); + } + else { + TRACE_1(LOG_ERR, p_afu, "%s: no cmd_room to send 0x%x\n", + p_cmd->rcb.cdb[0]); + // let timer fire to complete the response + } +} + +void wait_resp(afu_t *p_afu, struct afu_cmd *p_cmd) { + pthread_mutex_lock(&p_cmd->mutex); + 
while (!(p_cmd->sa.host_use_b[0] & B_DONE)) { + pthread_cond_wait(&p_cmd->cv, &p_cmd->mutex); + } + pthread_mutex_unlock(&p_cmd->mutex); + + timer_stop(p_cmd->timer); /* already stopped if timer fired */ + + if (p_cmd->sa.ioasc != 0) { + TRACE_5(LOG_ERR, p_afu, + "%s: CMD 0x%x failed, IOASC: flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + p_cmd->rcb.cdb[0], + p_cmd->sa.rc.flags, + p_cmd->sa.rc.afu_rc, + p_cmd->sa.rc.scsi_rc, + p_cmd->sa.rc.fc_rc); + } +} + +// do we need to retry AFU_CMDs (sync) on afu_rc = 0x30 ? +// can we not avoid that ? +// not retrying afu timeouts (B_TIMEOUT) +// returns 1 if the cmd should be retried, 0 otherwise +// sets B_ERROR flag based on IOASA +int check_status(sisl_ioasa_t *p_ioasa) +{ + if (p_ioasa->ioasc == 0) { + return 0; + } + + p_ioasa->host_use_b[0] |= B_ERROR; + + if (!(p_ioasa->host_use_b[1]++ < MC_RETRY_CNT)) { + return 0; + } + + switch (p_ioasa->rc.afu_rc) + { + case SISL_AFU_RC_NO_CHANNELS: + case SISL_AFU_RC_OUT_OF_DATA_BUFS: + usleep(100); // 100 microsec + return 1; + + case 0: + // no afu_rc, but either scsi_rc and/or fc_rc is set + // retry all scsi_rc and fc_rc after a small delay + usleep(100); // 100 microsec + return 1; + } + + return 0; +} + +// lun_id must be set in p_lun_info +int find_lun(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel) { + __u32 *p_u32; + __u32 len; + __u64 *p_u64; + __u64 lun_id; + struct afu_cmd *p_cmd = &p_afu->cmd[AFU_INIT_INDEX]; + + memset(&p_afu->buf[0], 0, sizeof(p_afu->buf)); + memset(&p_cmd->rcb.cdb[0], 0, sizeof(p_cmd->rcb.cdb)); + + p_cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_SUP_UNDERRUN | + SISL_REQ_FLAGS_HOST_READ); + p_cmd->rcb.port_sel = port_sel; + p_cmd->rcb.lun_id = 0x0; /* use lun_id=0 w/report luns */ + p_cmd->rcb.data_len = sizeof(p_afu->buf); + p_cmd->rcb.data_ea = (__u64) &p_afu->buf[0]; + p_cmd->rcb.timeout = MC_DISCOVERY_TIMEOUT; + + p_cmd->rcb.cdb[0] = 0xA0; /* report luns */ + p_u32 = (__u32*)&p_cmd->rcb.cdb[6]; + 
write_32(p_u32, sizeof(p_afu->buf)); /* allocaiton length */ + p_cmd->sa.host_use_b[1] = 0; /* reset retry cnt */ + + TRACE_3(LOG_INFO, p_afu, + "%s: sending cmd(0x%x) with RCB EA=%p data EA=0x%lx\n", + p_cmd->rcb.cdb[0], + &p_cmd->rcb, + p_cmd->rcb.data_ea); + + do { + send_cmd(p_afu, p_cmd); + wait_resp(p_afu, p_cmd); + } while (check_status(&p_cmd->sa)); + + if (p_cmd->sa.host_use_b[0] & B_ERROR) { + return -1; + } + + // report luns success + len = read_32((__u32*)&p_afu->buf[0]); + p_u64 = (__u64*)&p_afu->buf[8]; /* start of lun list */ + + while (len) { + lun_id = read_64(p_u64++); + if (lun_id == p_lun_info->lun_id) { + return 0; + } + len -= 8; + } + + return -1; +} + +int read_cap16(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel) { + __u32 *p_u32; + __u64 *p_u64; + struct afu_cmd *p_cmd = &p_afu->cmd[AFU_INIT_INDEX]; + + memset(&p_afu->buf[0], 0, sizeof(p_afu->buf)); + memset(&p_cmd->rcb.cdb[0], 0, sizeof(p_cmd->rcb.cdb)); + + p_cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_SUP_UNDERRUN | + SISL_REQ_FLAGS_HOST_READ); + p_cmd->rcb.port_sel = port_sel; + p_cmd->rcb.lun_id = p_lun_info->lun_id; + p_cmd->rcb.data_len = sizeof(p_afu->buf); + p_cmd->rcb.data_ea = (__u64) &p_afu->buf[0]; + p_cmd->rcb.timeout = MC_DISCOVERY_TIMEOUT; + + p_cmd->rcb.cdb[0] = 0x9E; /* read cap(16) */ + p_cmd->rcb.cdb[1] = 0x10; /* service action */ + p_u32 = (__u32*)&p_cmd->rcb.cdb[10]; + write_32(p_u32, sizeof(p_afu->buf)); /* allocation length */ + p_cmd->sa.host_use_b[1] = 0; /* reset retry cnt */ + + TRACE_3(LOG_INFO, p_afu, + "%s: sending cmd(0x%x) with RCB EA=%p data EA=0x%lx\n", + p_cmd->rcb.cdb[0], + &p_cmd->rcb, + p_cmd->rcb.data_ea); + + do { + send_cmd(p_afu, p_cmd); + wait_resp(p_afu, p_cmd); + } while (check_status(&p_cmd->sa)); + + if (p_cmd->sa.host_use_b[0] & B_ERROR) { + return -1; + } + + // read cap success + p_u64 = (__u64*)&p_afu->buf[0]; + p_lun_info->li.max_lba = read_64(p_u64); + + p_u32 = (__u32*)&p_afu->buf[8]; + 
p_lun_info->li.blk_len = read_32(p_u32); + return 0; +} + + +int page83_inquiry(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel) { + __u16 *p_u16; + __u16 length; + struct scsi_inquiry_page_83_hdr *p_hdr; + struct scsi_inquiry_p83_id_desc_hdr *p_desc; + struct afu_cmd *p_cmd = &p_afu->cmd[AFU_INIT_INDEX]; + + memset(&p_afu->buf[0], 0, sizeof(p_afu->buf)); + memset(&p_cmd->rcb.cdb[0], 0, sizeof(p_cmd->rcb.cdb)); + + p_cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_SUP_UNDERRUN | + SISL_REQ_FLAGS_HOST_READ); + p_cmd->rcb.port_sel = port_sel; + p_cmd->rcb.lun_id = p_lun_info->lun_id; + p_cmd->rcb.data_len = sizeof(p_afu->buf); + p_cmd->rcb.data_ea = (__u64) &p_afu->buf[0]; + p_cmd->rcb.timeout = MC_DISCOVERY_TIMEOUT; + + p_cmd->rcb.cdb[0] = 0x12; /* inquiry */ + p_cmd->rcb.cdb[1] = 0x1; /* EVPD bit */ + p_cmd->rcb.cdb[2] = 0x83; /* page# */ + p_u16 = (__u16*)&p_cmd->rcb.cdb[3]; + write_16(p_u16, sizeof(p_afu->buf)); /* allocaiton length */ + p_cmd->sa.host_use_b[1] = 0; /* reset retry cnt */ + + TRACE_3(LOG_INFO, p_afu, + "%s: sending cmd(0x%x) with RCB EA=%p data EA=0x%lx\n", + p_cmd->rcb.cdb[0], + &p_cmd->rcb, + p_cmd->rcb.data_ea); + + do { + send_cmd(p_afu, p_cmd); + wait_resp(p_afu, p_cmd); + } while (check_status(&p_cmd->sa)); + + if (p_cmd->sa.host_use_b[0] & B_ERROR) { + return -1; + } + + // inquiry success + p_hdr = (struct scsi_inquiry_page_83_hdr *)&p_afu->buf[0]; + length = read_16(&p_hdr->adtl_page_length); + if (length > (sizeof(p_afu->buf) - 4)) { + length = sizeof(p_afu->buf) - 4; + } + p_desc = (struct scsi_inquiry_p83_id_desc_hdr *)(p_hdr + 1); + /* loop through data searching for LUN WWID entry */ + while (length >= sizeof(*p_desc)) { + if (p_desc->prot_code == TEXAN_PAGE_83_DESC_PROT_CODE && + p_desc->assoc_id == TEXAN_PAGE_83_ASSC_ID_LUN_WWID && + p_desc->adtl_id_length >= 0x10) { /* NAA-6 id is 0x10 bytes */ + + memcpy(p_lun_info->li.wwid, (char*)(p_desc + 1), + sizeof(p_lun_info->li.wwid)); + return 0; + } + + length 
-= (sizeof(struct scsi_inquiry_p83_id_desc_hdr) + + p_desc->adtl_id_length); + + p_desc = (struct scsi_inquiry_p83_id_desc_hdr *) + ((char *)p_desc + + sizeof(struct scsi_inquiry_p83_id_desc_hdr) + + p_desc->adtl_id_length); + } + + return -1; +} + +// add write_same !!! +// must not use AFU_SYNC_INDEX when sending multiple cmds + + +// afu_sync can be called from interrupt thread and the main processing +// thread. Caller is responsible for any serialization. +// Also, it can be called even before/during discovery, so we must use +// a dedicated cmd not used by discovery. +// +// AFU takes only 1 sync cmd at a time. +// +int afu_sync(afu_t *p_afu, ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + __u8 mode) { + __u32 *p_u32; + __u16 *p_u16; + struct afu_cmd *p_cmd = &p_afu->cmd[AFU_SYNC_INDEX]; + + memset(&p_cmd->rcb.cdb[0], 0, sizeof(p_cmd->rcb.cdb)); + + p_cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; + p_cmd->rcb.port_sel = 0x0; /* NA */ + p_cmd->rcb.lun_id = 0x0; /* NA */ + p_cmd->rcb.data_len = 0x0; + p_cmd->rcb.data_ea = 0x0; + p_cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; + + p_cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ + p_cmd->rcb.cdb[1] = mode; + p_u16 = (__u16*)&p_cmd->rcb.cdb[2]; + write_16(p_u16, ctx_hndl_u); /* context to sync up */ + p_u32 = (__u32*)&p_cmd->rcb.cdb[4]; + write_32(p_u32, res_hndl_u); /* res_hndl to sync up */ + + send_cmd(p_afu, p_cmd); + wait_resp(p_afu, p_cmd); + + if (p_cmd->sa.ioasc != 0 || + (p_cmd->sa.host_use_b[0] & B_ERROR)) // B_ERROR is set on timeout + { + return -1; + } + + return 0; +} + +/* Periodic functions are called from sig_rx and can only do simple + * MMIOs that 1) do not block/wait and 2) do not need p_afu->mutex. + * + * Needing to lock the mutex can cause sig_rx to wait for another + * thread and delay/deadlock timer delivery. If locking is + * absolutely necessary in periodic processing, these functions + * must be moved to another thread. 
+ * + */ +void periodic_hb() +{ + int i; + afu_t *p_afu; + __u64 hb_read; + + for (i = 0; i < gb.p_ini->nelm; i++) { + p_afu = &gb.p_afu_a[i].afu; + hb_read = read_64(&p_afu->p_afu_map->global.regs.afu_hb); + p_afu->hb++; + if (hb_read != p_afu->hb) { + TRACE_2(LOG_ERR, p_afu, "%s: hb_cnt 0x%lx, expected 0x%lx\n", + hb_read, p_afu->hb); + p_afu->hb = hb_read; // re-sync + } + } +} + +void periodic_fc() +{ + int i; + afu_t *p_afu; + + for (i = 0; i < gb.p_ini->nelm; i++) { + p_afu = &gb.p_afu_a[i].afu; + + for (i = 0; i < NUM_FC_PORTS; i++) { + // clear CRC error cnt + read_64(&p_afu->p_afu_map->global.fc_regs[i][FC_CNT_CRCERR/8]); + } + } +} + + +/* Note about timers: + * + * 1. All timers use the same signal. Since posix queues 1 signal per timer, + * this is not a problem. + * 2. The signal must be a queued signal (one of realtime signals). If 2 + * timers expire at the same time, we want 2 signals queued. Using a + * traditional signal will result in only 1 signal to the process. + * 3. posix does not queue 1 signal per expiration of a timer, only 1 + * for any number of expirations. Since this code uses 1 shot timer, + * this fact is not applicable. + * 4. There can be a delay between expiry and when the signal is accepted. + * During that delay, the command may have completed. When sig_rx runs, + * it may mark the next command using the same afu_cmd as timedout. + * The net is that a timeout will be detected but on a different command + * than the one that almost timed out (but returned). Detection of a + * timeout is more important that pin pointing exactly which command + * timed out. The adapter must be reset on any timeout. + * 5. A single thread collects timer expiry for any number of AFU instances. + * This thread has the same function as the afu_intr_rx threads. + * A command can return via the RRQ or via a timeout. + * 6. Same thread also performs simple periodic tasks across all AFUs. 
+ * + * NOTE: sig_rx must not block (on events) or sleep, else timer delivery + * can delay/deadlock. + */ +void *sig_rx(void *arg) { + sigset_t *p_set = (sigset_t *) arg; + siginfo_t sig; + struct afu_cmd *p_cmd; + + while (1) { + if (sigwaitinfo(p_set, &sig) > 0) { + /* anyone can send us a signal, so make sure it is from a + timer expiry */ + if (sig.si_code != SI_TIMER) { + continue; + } + + if (sig.si_value.sival_ptr == &gb.timer_hb) { + periodic_hb(); + } + else if (sig.si_value.sival_ptr == &gb.timer_fc) { + periodic_fc(); + } + else { + /* cmd that timed out */ + p_cmd = (struct afu_cmd*) sig.si_value.sival_ptr; + + trace_1(LOG_ERR, "command timeout, opcode 0x%x\n", + p_cmd->rcb.cdb[0]); + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT); + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + } + } + } + + return NULL; +} + +// rep = 0 for 1 shot timer. +void timer_start(timer_t timer, time_t sec, int rep) +{ + struct itimerspec it; + + memset(&it, 0, sizeof(it)); + it.it_value.tv_sec = sec; /* it_interval zeroed for 1 shot timer */ + if (rep) { + it.it_interval.tv_sec = sec; + } + timer_settime(timer, 0, &it, NULL); +} + + +void timer_stop(timer_t timer) +{ + struct itimerspec it; + + memset(&it, 0, sizeof(it)); /* zeroed it_value stops the timer */ + timer_settime(timer, 0, &it, NULL); +} + +// ascii_buf must be "char[33] ascii_buf" +void print_wwid(__u8 *p_wwid, char *ascii_buf) { + int i; + + for (i = 0; i < 16; i++) { + sprintf(ascii_buf, "%02x", (unsigned int) p_wwid[i]); + ascii_buf += 2; + } + + *ascii_buf = 0; +} + +void usage(char *prog) { + fprintf(stderr, "usage-a: %s [-l LUN_ID] [-v level] /dev/cxl/afu0.0m ....\n", prog); + fprintf(stderr, " : %s -l 0x1000000000000 /dev/cxl/afu0.0m\n", prog); + fprintf(stderr, "usage-b: %s [-i ini_file] [-v level]\n", prog); + fprintf(stderr, " : %s -i myafu.ini\n", prog); + fprintf(stderr, " specify any number of master AFU paths in 
usage-a\n"); + fprintf(stderr, " LUN_ID is in hex and must match report luns\n"); + fprintf(stderr, " level is trace level in decimal from 0(least) to 8(all)\n"); +} + +// the caller to validate the returned struct +// +// need: domain sock dir? +// +struct capikv_ini *parse_cmd_line(int argc, char **argv) +{ + int get_char; + __u64 lun_id = 0; /* default lun_id to use */ + int nafu; + int nbytes; + int i; + char *ini_path = NULL; + int fd; + struct stat sbuf; + void *map; + struct capikv_ini_elm *p_elm; + struct capikv_ini *p_ini; + + while ((get_char = getopt(argc, argv, "i:l:v:h")) != EOF) + { + switch (get_char) + { + case 'l' : /* LUN_ID to use in hex */ + sscanf(optarg, "%lx", &lun_id); + break; + + case 'v' : /* trace level in decimal */ + sscanf(optarg, "%d", &trc_lvl); + break; + + case 'i' : /* path to init file */ + ini_path = optarg; + break; + + case 'h' : /* usage help */ + usage(argv[0]); + exit(0); + + default: + usage(argv[0]); + exit(-1); + } + } + + if (ini_path == NULL) { + nafu = argc - optind; /* number of afus specified in cmd line */ + + nbytes = sizeof(*p_ini) + + ((nafu > 1) ? (nafu - 1)*sizeof(*p_elm) : 0); + + p_ini = (struct capikv_ini *) malloc(nbytes); + if (p_ini == NULL) { + trace_1(LOG_ERR, "cannot allocate %d bytes\n", nbytes); + return NULL; + } + + memset(p_ini, 0, nbytes); + p_ini->sini_marker = 0x53494e49; + p_ini->nelm = nafu; + p_ini->size = sizeof(*p_elm); + + for (i = 0; i < nafu; i++) { + p_elm = &p_ini->elm[i]; + + // for this mode, the user enters the master dev path. 
+ // also assume wwpns are already programmed off-line and + // master should leave them alone + // + p_elm->elmd_marker = 0x454c4d44; + strcpy(&p_elm->afu_dev_pathm[0], argv[i + optind]); + p_elm->lun_id = lun_id; + } + } + else { + fd = open(ini_path, O_RDONLY); + if (fd < 0) { + trace_2(LOG_ERR, "open of %s failed, errno %d\n", ini_path, errno); + return NULL; + } + + if (fstat(fd, &sbuf) != 0) { + trace_2(LOG_ERR, "fstat failed on %s, errno %d\n", ini_path, errno); + close(fd); + return NULL; + } + + map = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (map == MAP_FAILED) { + trace_1(LOG_ERR, "mmap failed, errno %d\n", errno); + close(fd); + return NULL; + } + + p_ini = (struct capikv_ini *) map; + } + + return p_ini; +} + + +int +main(int argc, char *argv[]) +{ + int i; + int j; + char ascii_buf[32+1]; + int rc; + afu_t *p_afu; + lun_info_t *p_lun_info_first = NULL; + sigset_t set; + struct capikv_ini_elm *p_elm; + struct capikv_ini *p_ini; + struct afu_alloc *p_afu_a; + struct sigevent sigev; + pthread_t sig_thread; + blka_t blka; + + + openlog("mserv", LOG_PERROR, LOG_USER); + + p_ini = parse_cmd_line(argc, argv); + + /* some sanity checks */ + if (p_ini == NULL || + p_ini->nelm == 0 || + p_ini->sini_marker != 0x53494e49) { + trace_0(LOG_ERR, "bad input parameters, exiting...\n"); + usage(argv[0]); // additional errors already logged by parse_cmd_line() + exit(-1); + } + gb.p_ini = p_ini; + + sigemptyset(&set); + sigaddset(&set, SIGRTMIN); + /* signal mask is inherited by all threads created from now on */ + pthread_sigmask(SIG_BLOCK, &set, NULL); + + rc = posix_memalign((void**)&p_afu_a, 0x1000, + sizeof(struct afu_alloc) * p_ini->nelm); + if (rc != 0) { + trace_1(LOG_ERR, "cannot allocate AFU structs, rc %d\n", rc); + exit(-1); + } + gb.p_afu_a = p_afu_a; + + sigev.sigev_notify = SIGEV_SIGNAL; + sigev.sigev_signo = SIGRTMIN; /* must use a queued signal */ + + sigev.sigev_value.sival_ptr = &gb.timer_hb; + rc = timer_create(CLOCK_REALTIME, 
&sigev, &gb.timer_hb); + if (rc != 0) { + trace_1(LOG_ERR, "hb timer_create failed, errno %d\n", errno); + exit(-1); + } + + sigev.sigev_value.sival_ptr = &gb.timer_fc; + rc = timer_create(CLOCK_REALTIME, &sigev, &gb.timer_fc); + if (rc != 0) { + trace_1(LOG_ERR, "fc timer_create failed, errno %d\n", errno); + exit(-1); + } + + /* start thread to handle all timeouts. However, at this point it + is prepared to handle only command timer expiry during discovery. + Hold off starting the periodic timers till the end. + */ + pthread_create(&sig_thread, NULL, sig_rx, &set); + + p_elm = &p_ini->elm[0]; + for (i = 0; i < p_ini->nelm; i++) { + p_afu = &p_afu_a[i].afu; + rc = afu_init(p_afu, p_elm); + if (rc != 0) { + trace_2(LOG_ERR, "error instantiating afu %s, rc %d\n", + p_elm->afu_dev_pathm, rc); + exit(-1); + } + + pthread_create(&p_afu->rrq_thread, NULL, afu_rrq_rx, + p_afu); + pthread_create(&p_afu->err_thread, NULL, afu_err_rx, + p_afu); + + /* after creating afu_err_rx thread, unmask error interrupts */ + afu_err_intr_init(p_afu); + + // advance to next element using correct size + p_elm = (struct capikv_ini_elm*) (((char*)p_elm) + p_ini->size); + } + + /* discover the LUN specified in cmd line or cfg file */ + for (i = 0; i < p_ini->nelm; i++) { + p_afu = &p_afu_a[i].afu; + + for (j = 0; j < NUM_FC_PORTS; j++) { /* discover on each port */ + if ((rc = find_lun(p_afu, &p_afu->lun_info[j], 1u << j)) == 0 && + (rc = read_cap16(p_afu, &p_afu->lun_info[j], 1u << j)) == 0 && + (rc = page83_inquiry(p_afu, &p_afu->lun_info[j], 1u << j)) == 0) { + p_afu->lun_info[j].flags = LUN_INFO_VALID; + TRACE_3(LOG_NOTICE, p_afu, + "%s: found lun_id 0x%lx: max_lba 0x%lx, blk_len %d\n", + p_afu->lun_info[j].lun_id, + p_afu->lun_info[j].li.max_lba, + p_afu->lun_info[j].li.blk_len); + print_wwid(&p_afu->lun_info[j].li.wwid[0], ascii_buf); + TRACE_2(LOG_NOTICE, p_afu, + "%s: wwid %s, fc_port %d\n", ascii_buf, j); + } + else { + TRACE_2(LOG_ERR, p_afu, + "%s: error in LUN discovery: lun_id 
0x%lx, port %d\n", + p_afu->lun_info[j].lun_id, j); + continue; /* this is ok if FC cable is not connected etc */ + } + } + } + + /* verify all valid LUNs are the same, else bail out. */ + for (i = 0; i < p_ini->nelm; i++) { + p_afu = &p_afu_a[i].afu; + for (j = 0; j < NUM_FC_PORTS; j++) { + if (!(p_afu->lun_info[j].flags & LUN_INFO_VALID)) { + continue; + } + + if (p_lun_info_first == NULL) { + p_lun_info_first = &p_afu->lun_info[j]; + } + else if (memcmp(&p_lun_info_first->li, &p_afu->lun_info[j].li, + sizeof(p_lun_info_first->li))) { + TRACE_0(LOG_ERR, p_afu, + "%s: configuration error: different LUNs on FC ports\n"); + exit(1); + } + } + } + + /* make sure we have at least 1 valid LUN */ + if (p_lun_info_first == NULL) { + trace_0(LOG_ERR, "configuration error: no working LUNs found\n"); + exit(1); + } + + /* initialize block allocator and point from each AFU */ + memset(&blka, 0, sizeof(blka)); + pthread_mutex_init(&blka.mutex, NULL); + + blka.ba_lun.lun_id = p_lun_info_first->lun_id; + blka.ba_lun.lsize = p_lun_info_first->li.max_lba + 1; + blka.ba_lun.lba_size = p_lun_info_first->li.blk_len; + blka.ba_lun.au_size = MC_CHUNK_SIZE; + blka.nchunk = blka.ba_lun.lsize/MC_CHUNK_SIZE; + rc = ba_init(&blka.ba_lun); + + if (rc != 0) { + trace_1(LOG_ERR, "cannot init block_alloc, rc %d\n", rc); + exit(-1); + } + + for (i = 0; i < p_ini->nelm; i++) { + p_afu = &p_afu_a[i].afu; + p_afu->p_blka = &blka; + } + + /* When all went well, start IPC threads to take requests */ + for (i = 0; i < p_ini->nelm; i++) { + pthread_create(&p_afu_a[i].afu.ipc_thread, NULL, afu_ipc_rx, + &p_afu_a[i].afu); + } + + /* start timers to kick off periodic processing */ + timer_start(gb.timer_hb, MC_HB_PERIOD, 1); + timer_start(gb.timer_fc, MC_FC_PERIOD, 1); + + for (i = 0; i < p_ini->nelm; i++) { + pthread_join(p_afu_a[i].afu.ipc_thread, NULL); + pthread_join(p_afu_a[i].afu.rrq_thread, NULL); + pthread_join(p_afu_a[i].afu.err_thread, NULL); + afu_term(&p_afu_a[i].afu); + } + + 
timer_stop(gb.timer_hb); + timer_stop(gb.timer_fc); + + pthread_join(sig_thread, NULL); + + timer_delete(gb.timer_hb); + timer_delete(gb.timer_fc); + + closelog(); + + return 0; +} diff --git a/src/master/mserv.h b/src/master/mserv.h new file mode 100644 index 00000000..bbd3b6c5 --- /dev/null +++ b/src/master/mserv.h @@ -0,0 +1,476 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/mserv.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _MSERVE_H +#define _MSERVE_H + +#include +#include +#include +#include +#include + +#include +#ifndef _AIX +#include +#else +#include +#endif /* !_AIX */ +#include +#include + +/* + * Terminology: use afu (and not adapter) to refer to the HW. + * Adapter is the entire slot and includes PSL out of which + * only the AFU is visible to user space. + */ + +/* Chunk size parms: note sislite minimum chunk size is + 0x10000 LBAs corresponding to a NMASK or 16. 
+*/ +#define MC_RHT_NMASK 16 /* in bits */ +#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs, see mclient.h */ +#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */ +#define MC_CHUNK_OFF_MASK (MC_CHUNK_SIZE - 1) /* apply to LBA get offset + into a chunk */ + +/* Sizing parms: same context can be registered multiple times. + Therefore we allow MAX_CONNS > MAX_CONTEXT. +*/ +#define MAX_CONTEXT SURELOCK_MAX_CONTEXT /* num contexts per afu */ +#define MAX_RHT_PER_CONTEXT 16 /* num resource hndls per context */ +#define MAX_CONNS (MAX_CONTEXT*2) /* num client connections per AFU */ +#define MAX_CONN_TO_POLL 64 /* num fds to poll once */ +#define NUM_RRQ_ENTRY 16 /* for master issued cmds */ +#define NUM_CMDS 16 /* must be <= NUM_RRQ_ENTRY */ +#define NUM_FC_PORTS SURELOCK_NUM_FC_PORTS /* ports per AFU */ + + + +/* LXT tables are allocated dynamically in groups. This is done to + avoid a malloc/free overhead each time the LXT has to grow + or shrink. + + Based on the current lxt_cnt (used), it is always possible to + know how many are allocated (used+free). The number of allocated + entries is not stored anywhere. + + The LXT table is re-allocated whenever it needs to cross into + another group. 
+*/ +#define LXT_GROUP_SIZE 8 +#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */ + +/* port online retry intervals */ +#define FC_PORT_STATUS_RETRY_CNT 100 // 100 100ms retries = 10 seconds +#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 // microseconds + + +/* flags in IOA status area for host use */ +#define B_DONE 0x01 +#define B_ERROR 0x02 /* set with B_DONE */ +#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */ + +/* AFU command timeout values */ +#define MC_DISCOVERY_TIMEOUT 5 /* 5 secs */ +#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */ + +/* AFU command retry limit */ +#define MC_RETRY_CNT 5 /* sufficient for SCSI check and + certain AFU errors */ + +/* AFU command room retry limit */ +#define MC_ROOM_RETRY_CNT 10 + +/* AFU heartbeat periodic timer */ +#define MC_HB_PERIOD 5 /* 5 secs */ + +/* FC CRC clear periodic timer */ +#define MC_FC_PERIOD 300 /* 5 mins */ +#define MC_CRC_THRESH 100 /* threshold in 5 mins */ + +#define CL_SIZE 128 /* Processor cache line size */ +#define CL_SIZE_MASK 0x7F /* Cache line size mask */ + +struct scsi_inquiry_page_83_hdr +{ + __u8 peri_qual_dev_type; + __u8 page_code; + __u16 adtl_page_length; /* not counting 4 byte hdr */ + /* Identification Descriptor list */ +}; + +struct scsi_inquiry_p83_id_desc_hdr +{ + __u8 prot_code; /* Protocol Identifier & Code Set */ +#define TEXAN_PAGE_83_DESC_PROT_CODE 0x01u + __u8 assoc_id; /* PIV/Association/Identifier type */ +#define TEXAN_PAGE_83_ASSC_ID_LUN_WWID 0x03u + __u8 reserved; + __u8 adtl_id_length; + /* Identifier Data */ +}; + +/* + * A resource handle table (RHT) can be pointed to by multiple contexts. + * This happens when one context is duped to another. + * W/o dup, each context has its own resource handles that is visible + * only from that context. + * + * The rht_info refers to all resource handles of a context and not to + * a particular RHT entry or a single resource handle. 
+ */ +typedef struct rht_info +{ + sisl_rht_entry_t *rht_start; /* initialized at startup */ + int ref_cnt; /* num ctx_infos pointing to me */ +} rht_info_t; + + +/* Single AFU context can be pointed to by multiple client connections. + * The client can create multiple endpoints (mc_hndl_t) to the same + * (context + AFU). + */ +typedef struct ctx_info +{ + volatile struct sisl_ctrl_map *p_ctrl_map; /* initialized at startup */ + + /* The rht_info pointer is initialized when the context is first + registered, can be changed on dup. + */ + rht_info_t *p_rht_info; + + /* all dup'ed contexts are in a doubly linked circular list */ + struct ctx_info *p_forw; + struct ctx_info *p_next; + + int ref_cnt; /* num conn_infos pointing to me */ +} ctx_info_t; + +/* forward decls */ +struct capikv_ini_elm; +typedef struct afu afu_t; +typedef struct conn_info conn_info_t; +typedef struct mc_req mc_req_t; +typedef struct mc_resp mc_resp_t; +typedef int (*rx_func)(afu_t *p_afu, conn_info_t *p_conn_info, + mc_req_t *p_req, mc_resp_t *p_resp); + + +/* On accept, the server allocates a connection info. + A connection can be associated with at most one AFU context. 
+ */ +typedef struct conn_info +{ + int fd; /* accepted fd on server, -1 means entry is free */ + pid_t client_pid; /* client PID - trace use only */ + int client_fd; /* client's socket fd - trace use only */ + ctx_hndl_t ctx_hndl; /* AFU context this connection is bound to */ + __u8 mode; /* registration mode - see cblk.h */ + rx_func rx; /* IPC receive fcn depends on state */ + ctx_info_t *p_ctx_info;/* ptr to bound context + or NULL if not registered */ +} conn_info_t; + + +/* LUN discovery results are in lun_info */ +typedef struct lun_info +{ + __u64 lun_id; /* from cmd line/cfg file */ + __u32 flags; /* housekeeping */ + + struct { + __u8 wwid[16]; /* LUN WWID from page 0x83 (NAA-6) */ + __u64 max_lba; /* from read cap(16) */ + __u32 blk_len; /* from read cap(16) */ + } li; + +#define LUN_INFO_VALID 0x01 +} lun_info_t; + + +/* Block Allocator can be shared between AFUs */ +typedef struct blka +{ + ba_lun_t ba_lun; /* single LUN for SureLock */ + __u64 nchunk; /* number of chunks */ + pthread_mutex_t mutex; +} blka_t; + + +enum undo_level { + UNDO_NONE = 0, + UNDO_MLOCK, + UNDO_TIMER, + UNDO_AFU_OPEN, + UNDO_AFU_START, + UNDO_AFU_MMAP, + UNDO_OPEN_SOCK, + UNDO_BIND_SOCK, + UNDO_LISTEN, + UNDO_EPOLL_CREATE, + UNDO_EPOLL_ADD, + UNDO_AFU_ALL /* must be last */ +}; + +typedef struct afu +{ + /* Stuff requiring alignment goes first. */ + + /* + * Command & data for AFU commands. + * master sends only 1 command at a time except WRITE SAME. + * Therefore, only 1 data buffer shared by all commands suffice. 
+ */ + char buf[0x1000]; /* 4K AFU data buffer (page aligned) */ + __u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ (page aligned) */ + struct afu_cmd { + sisl_ioarcb_t rcb; /* IOARCB (cache line aligned) */ + sisl_ioasa_t sa; /* IOASA must follow IOARCB */ + pthread_mutex_t mutex; + pthread_cond_t cv; /* for signalling responses */ + timer_t timer; + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t) + + sizeof(pthread_mutex_t) + + sizeof(pthread_cond_t) + + sizeof(timer_t)) & CL_SIZE_MASK)]; + + } cmd[NUM_CMDS]; + +#define AFU_INIT_INDEX 0 // first cmd is used in init/discovery, + // free for other use thereafter +#define AFU_SYNC_INDEX (NUM_CMDS - 1) // last cmd is rsvd for afu sync + + /* Housekeeping data */ + char master_dev_path[MC_PATHLEN]; /* e. g. /dev/cxl/afu1.0m */ + conn_info_t conn_tbl[MAX_CONNS]; /* conn_tbl[0] is rsvd for listening */ + ctx_info_t ctx_info[MAX_CONTEXT]; + rht_info_t rht_info[MAX_CONTEXT]; + char *name; /* ptr to last component in master_dev_path, e.g. afu1.0m */ + pthread_mutex_t mutex; /* for anything that needs serialization + e. g. 
to access afu */ + pthread_mutex_t err_mutex; /* for signalling error thread */ + pthread_cond_t err_cv; + int err_flag; +#define E_SYNC_INTR 0x1 /* synchronous error interrupt */ +#define E_ASYNC_INTR 0x2 /* asynchronous error interrupt */ + + /* AFU Shared Data */ + sisl_rht_entry_t rht[MAX_CONTEXT][MAX_RHT_PER_CONTEXT]; + /* LXTs are allocated dynamically in groups */ + + /* AFU HW */ + int afu_fd; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; /* Linux cxl event buffer (interrupts) */ + volatile struct surelock_afu_map *p_afu_map; /* entire MMIO map */ + volatile struct sisl_host_map *p_host_map; /* master's sislite host map */ + volatile struct sisl_ctrl_map *p_ctrl_map; /* master's control map */ + + ctx_hndl_t ctx_hndl; /* master's context handle */ + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + __u64 room; + __u64 hb; + + /* client IPC */ + int listen_fd; + int epfd; + struct sockaddr_un svr_addr; +#ifndef _AIX + struct epoll_event events[MAX_CONN_TO_POLL]; /* ready events */ +#else + struct pollfd events[MAX_CONN_TO_POLL]; /* ready events */ + int num_poll_events; +#endif /* !_AIX */ + + /* LUN discovery: one lun_info per path */ + lun_info_t lun_info[NUM_FC_PORTS]; + + /* shared block allocator with other AFUs */ + blka_t *p_blka; + + /* per AFU threads */ + pthread_t ipc_thread; + pthread_t rrq_thread; + pthread_t err_thread; + +} __attribute__ ((aligned (0x1000))) afu_t; + +struct afu_alloc { + afu_t afu; + __u8 page_pad[0x1000 - (sizeof(afu_t) & 0xFFF)]; +}; + + +typedef struct asyc_intr_info { + __u64 status; + char *desc; + __u8 port; + __u8 action; +#define CLR_FC_ERROR 0x01 +#define LINK_RESET 0x02 +} asyc_intr_info_t; + + + +conn_info_t *alloc_connection(afu_t *p_afu, int fd); +void free_connection(afu_t *p_afu, conn_info_t *p_conn_info); +int afu_init(afu_t *p_afu, struct capikv_ini_elm *p_elm); +void undo_afu_init(afu_t *p_afu, enum undo_level level); +int afu_term(afu_t 
*p_afu); +void afu_err_intr_init(afu_t *p_afu); + + +void set_port_online(volatile __u64 *p_fc_regs); +void set_port_offline(volatile __u64 *p_fc_regs); +int wait_port_online(volatile __u64 *p_fc_regs, + useconds_t delay_us, + unsigned int nretry); +int wait_port_offline(volatile __u64 *p_fc_regs, + useconds_t delay_us, + unsigned int nretry); +int afu_set_wwpn(afu_t *p_afu, int port, + volatile __u64 *p_fc_regs, + __u64 wwpn); +void afu_link_reset(afu_t *p_afu, int port, + volatile __u64 *p_fc_regs); + + +int do_mc_register(afu_t *p_afu, + conn_info_t *p_conn_info, + __u64 challenge); + +int do_mc_unregister(afu_t *p_afu, + conn_info_t *p_conn_info); + +int do_mc_open(afu_t *p_afu, + conn_info_t *p_conn_info, + __u64 flags, + res_hndl_t *p_res_hndl); + +int do_mc_close(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl); + +int do_mc_size(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl, + __u64 new_size, + __u64 *p_act_new_size); + +int do_mc_xlate_lba(afu_t *p_afu, + conn_info_t* p_conn_info, + res_hndl_t res_hndl, + __u64 v_lba, + __u64 *p_p_lba); + +int do_mc_clone(afu_t *p_afu, + conn_info_t *p_conn_info, + ctx_hndl_t ctx_hndl_src, + __u64 challenge, + __u64 flags); + +int do_mc_dup(afu_t *p_afu, + conn_info_t *p_conn_info, + ctx_hndl_t ctx_hndl_cand, + __u64 challenge); + +int do_mc_stat(afu_t *p_afu, + conn_info_t *p_conn_info, + res_hndl_t res_hndl, + mc_stat_t *p_mc_stat); + +int do_mc_notify(afu_t *p_afu, + conn_info_t *p_conn_info, + mc_notify_t *p_mc_notify); + +int grow_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + __u64 delta, + __u64 *p_act_new_size); + +int shrink_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + __u64 delta, + __u64 *p_act_new_size); + +int clone_lxt(afu_t *p_afu, + ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, + sisl_rht_entry_t *p_rht_entry, + sisl_rht_entry_t *p_rht_entry_src); + + + +int 
xfer_data(int fd, int op, void *buf, ssize_t exp_size); + +int rx_mcreg(afu_t *p_afu, struct conn_info *p_conn_info, + mc_req_t *p_req, mc_resp_t *p_resp); +int rx_ready(afu_t *p_afu, struct conn_info *p_conn_info, + mc_req_t *p_req, mc_resp_t *p_resp); + + +asyc_intr_info_t *find_ainfo(__u64 status); +void afu_rrq_intr(afu_t *p_afu); +void afu_sync_intr(afu_t *p_afu); +void afu_async_intr(afu_t *p_afu); +void notify_err(afu_t *p_afu, int err_flag); + +void *afu_ipc_rx(void *arg); +void *afu_rrq_rx(void *arg); +void *afu_err_rx(void *arg); + +int mkdir_p(char *file_path); +void send_cmd(afu_t *p_afu, struct afu_cmd *p_cmd); +void wait_resp(afu_t *p_afu, struct afu_cmd *p_cmd); + +int find_lun(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel); +int read_cap16(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel); +int page83_inquiry(afu_t *p_afu, lun_info_t *p_lun_info, + __u32 port_sel); +int afu_sync(afu_t *p_afu, ctx_hndl_t ctx_hndl_u, + res_hndl_t res_hndl_u, __u8 mode); + +void print_wwid(__u8 *p_wwid, char *ascii_buf); + +void periodic_hb(); +void periodic_fc(); +void *sig_rx(void *arg); + +void timer_start(timer_t timer, time_t sec, int rep); +void timer_stop(timer_t timer); + +#endif /* _MSERVE_H */ diff --git a/src/master/test/fvt_master.C b/src/master/test/fvt_master.C new file mode 100644 index 00000000..d3fb5b44 --- /dev/null +++ b/src/master/test/fvt_master.C @@ -0,0 +1,511 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/fvt_master.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include + +extern "C" +{ + #include "mc_test.h" +} + +/* + * NOTE: + * IO request can be send by VLBA only + * and read/write opcode only + * so PLBA related TC became Error path + * because failing with afu rc : 0x21 + * Lun discovery also error path now + */ + +//mc_register tests. +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_reg_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REG)); +} + + +//mc_unregister tests. +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_unreg_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_UNREG)); +} + + +//mc_open tests. 
+//positive test 1 +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_open_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_OPEN)); +} +//Negative test 2 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_open_rdonly_io) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_OPEN_ERROR1)); +} +//Negative test 3 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_open_wronly_io) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_OPEN_ERROR2)); +} +//Negative test 4 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_open_null_io) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_OPEN_ERROR3)); +} + + +//mc_close tests +//Positive test 1 +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_close_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CLOSE)); +} +//Negative test 2 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_close_io) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLOSE_ERROR1)); +} + + +//mc_size tests. +//Positive test 1 +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_size_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_SIZE)); +} +//Negative test 2 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_error_null_vdisk) +{ + ASSERT_EQ(0x13, mc_test_engine(TEST_MC_SIZE_ERROR2)); +} +//Negative test 3 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_error_out_range) +{ + ASSERT_EQ(0x13, mc_test_engine(TEST_MC_SIZE_ERROR3)); +} +//Negative test 4 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_error_zero_vdisk) +{ + ASSERT_EQ(4,mc_test_engine(TEST_MC_SIZE_ERROR4)); +} + + +//mc_get_size tests +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_getsize_api) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_GETSIZE)); +} + + +//mc_xlate_lba tests +//Positive test 1 +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_xlt_api_io) +{ + ASSERT_EQ(0,mc_test_engine(TEXT_MC_XLAT_IO)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlt_api_io_v2p) +{ + ASSERT_EQ(0x21, mc_test_engine(TEXT_MC_XLAT_IO_V2P)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlt_api_io_p2v) +{ + ASSERT_EQ(0x21, mc_test_engine(TEXT_MC_XLAT_IO_P2V)); +} + + +//mc_hdup tests +//Positive test 1 +TEST(DISABLED_Master_FVT_Suite, 
G_Test_mc_hdup_api_org_io) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_HDUP_ORG_IO)); +} +//Positive test 2 +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_hdup_api_dup_io) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_HDUP_DUP_IO)); +} +//Negative test 3 +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_hdup_close_io) +{ + ASSERT_EQ(3,mc_test_engine(TEST_MC_HDUP_ERROR1)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_clone_rdwr) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CLONE_RDWR)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_clone_rd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CLONE_READ)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_clone_wr) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CLONE_WRITE)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_max_rh) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_RES_HNDL)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_aun_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_ONE_UNIT_SIZE)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_lun_size) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_LUN_SIZE)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_max_ctx_hndl) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_CTX_HNDL)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_max_ctx_n_res) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_CTX_N_RES)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_max_ctx_regress) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_CTX_HNDL2)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_allocate_complete_lun) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_MAX_SIZE)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_allocate_lun_max_ctx_prc_rch_thrd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CHUNK_REGRESS)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_max_ctx_max_res_both_afu) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CHUNK_REGRESS_BOTH_AFU)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_wo_mc_size) +{ + ASSERT_EQ(1,test_mc_xlate_error(1)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_invalid_rch) +{ + ASSERT_EQ(3,test_mc_xlate_error(3)); +} 
+TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_invalid_vlba) +{ + ASSERT_EQ(4,test_mc_xlate_error(4)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_mch_rch_diff) +{ + ASSERT_EQ(6,test_mc_xlate_error(6)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_invalid_mch) +{ + ASSERT_EQ(7,test_mc_xlate_error(7)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_xlate_after_mc_close) +{ + ASSERT_EQ(8,test_mc_xlate_error(8)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_vdisk_io) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_TEST_VDISK_IO)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_lun_discovery) +{ + ASSERT_EQ(0x21, mc_test_engine(TEST_MC_LUN_DISCOVERY)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_onectx_twothrd) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_ONE_CTX_TWO_THRD)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_rdwr_cln_rd) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_RDWR_CLN_RD)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_rdwr_cln_wr) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_RDWR_CLN_WR)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_rd_cln_rd) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_RD_CLN_RD)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_rd_cln_wr) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_RD_CLN_WR)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_wr_cln_rd) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_WR_CLN_RD)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_wr_cln_wr) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_WR_CLN_WR)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_rd_cln_rdwr) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_RD_CLN_RDWR)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_cln_opn_wr_cln_rdwr) +{ + ASSERT_EQ(0x5,mc_test_engine(TEST_MC_CLN_O_WR_CLN_RDWR)); +} + + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_reg_afu_dev) +{ + ASSERT_EQ(3, test_mc_reg_error(3)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_reg_invalid_dev) +{ + 
ASSERT_EQ(4,test_mc_reg_error(4)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_reg_invalid_ctx) +{ + ASSERT_EQ(5,test_mc_reg_error(5)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_reg_twice) +{ + ASSERT_EQ(7,test_mc_reg_error(7)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_reg_twice_diff_prc) +{ + ASSERT_EQ(8,test_mc_reg_error(8)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_two_ctx_two_thrd_r_wrsize) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_TWO_CTX_RD_WRTHRD)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_two_ctx_two_thrd_rw_plysize) +{ + ASSERT_EQ(0, mc_test_engine(TEST_MC_TWO_CTX_RDWR_SIZE)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_one_thd_rw_one_size) +{ + ASSERT_EQ(0, mc_test_engine(TEST_MC_ONE_CTX_RD_WRSIZE)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_plba_out_of_bound) +{ + ASSERT_EQ(0x21, mc_test_engine(TEST_MC_PLBA_OUT_BOUND)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_rw_close_res_hndl) +{ + int res = mc_test_engine(TEST_MC_RW_CLS_RSH); + if(res == 0x13 || res == 0x05) + res =1; + ASSERT_EQ(1, res); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_rw_unreg_mc_hndl) +{ + int res = mc_test_engine(TEST_MC_UNREG_MC_HNDL); + if(res == 0x13 || res == 0x21 || res == 0x3 || res == 0x5 || res == 0x4) + res =1; + ASSERT_EQ(1, res); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_clone_many_rht) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_CLONE_MANY_RHT)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_aun_wr_plba_rd_vlba) +{ + ASSERT_EQ(0x21, mc_test_engine(TEST_AUN_WR_PLBA_RD_VLBA)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_good_ctx_err_ctx) +{ + ASSERT_EQ(0,mc_test_engine(TEST_GOOD_CTX_ERR_CTX)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_good_ctx_err_ctx_unrg_mch) +{ + ASSERT_EQ(0,mc_test_engine(TEST_GOOD_CTX_ERR_CTX_UNREG_MCH)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_good_ctx_err_ctx_cls_rht) +{ + ASSERT_EQ(0,mc_test_engine(TEST_GOOD_CTX_ERR_CTX_CLS_RHT)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_afu_rc_invalid_opcode) +{ + 
ASSERT_EQ(0x21, test_mc_invalid_ioarcb(1)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_ioarcb_ea_null) +{ + //ASSERT_EQ(0x31, test_mc_invalid_ioarcb(2)); + int res = test_mc_invalid_ioarcb(2); + if(res == 0x0 || res == 0x31) + res =1; + ASSERT_EQ(1, res); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_ioarcb_invalid_flags) +{ + ASSERT_EQ(0x58, test_mc_invalid_ioarcb(3)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_ioarcb_invalid_lun_fc_port) +{ + ASSERT_EQ(0x21, test_mc_invalid_ioarcb(4)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_afu_rc_rht_invalid) +{ + ASSERT_EQ(0x5, test_mc_invalid_ioarcb(5)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_afu_rc_rht_out_of_bounds) +{ + ASSERT_EQ(0x3, test_mc_invalid_ioarcb(6)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_error_page_fault) +{ + //ASSERT_EQ(0x31, test_mc_invalid_ioarcb(7)); + ASSERT_EQ(0x0, test_mc_invalid_ioarcb(7)); + +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_ioarcb_invalid_ctx_id) +{ + ASSERT_EQ(0x21, test_mc_invalid_ioarcb(8)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_rc_flags_underrun) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(9)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_scsi_rc_check) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(11)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_ioarcb_d_len_0) +{ + ASSERT_EQ(0x2, test_mc_invalid_ioarcb(12)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_ioarcb_blk_len_0) +{ + ASSERT_EQ(0x55, test_mc_invalid_ioarcb(13)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_ioarcb_intr_prcs_ctx) +{ + ASSERT_EQ(0x21, test_mc_inter_prcs_ctx(1)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_ioarcb_intr_prcs_ctx_rsh_clsd) +{ + ASSERT_EQ(0x21, test_mc_inter_prcs_ctx(2)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_ioarcb_ea_alignmnt_16) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_IOARCB_EA_ALGNMNT_16)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_ioarcb_ea_alignmnt_128) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_IOARCB_EA_ALGNMNT_128)); +} + 
+TEST(DISABLED_Master_FVT_Suite, E_Test_mc_ioarcb_ea_invalid_alignmnt) +{ + ASSERT_EQ(3,mc_test_engine(TEST_MC_IOARCB_EA_INVLD_ALGNMNT)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_all_afu_devices) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_ALL_AFU_DEVICES)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_rwbuff_in_global) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RWBUFF_GLOBAL)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_rwbuff_in_shared_memory) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RWBUFF_SHM)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_rw_size_parallel_sync) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_RW_SIZE_PARALLEL)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_afu_rc_cap_violation) +{ + ASSERT_EQ(0x21,mc_test_engine(TEST_MC_AFU_RC_CAP_VIOLATION)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_good_path_afu_err_path_afu) +{ + ASSERT_EQ(0,mc_test_engine(MC_TEST_GOOD_ERR_AFU_DEV)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_invalid_mch) +{ + ASSERT_EQ(1,test_mc_size_error(1)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_invalid_rsh) +{ + ASSERT_EQ(2,test_mc_size_error(2)); +} + +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_after_mc_close) +{ + ASSERT_EQ(4,test_mc_size_error(4)); +} +TEST(DISABLED_Master_FVT_Suite, E_Test_mc_size_after_mc_unreg) +{ + ASSERT_EQ(5,test_mc_size_error(5)); +} +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_regress_resource) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_RESOURCE)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_regress_ctx_create_destroy) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_CTX_CRT_DSTR)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_regress_ctx_create_destroy_io) +{ + ASSERT_EQ(0,mc_test_engine(TEST_MC_REGRESS_CTX_CRT_DSTR_IO)); +} + +TEST(DISABLED_Master_FVT_Suite, G_Test_mc_size_regress) +{ + ASSERT_EQ(0, mc_test_engine(TEST_MC_SIZE_REGRESS)); +} diff --git a/src/master/test/makefile b/src/master/test/makefile new file mode 100644 index 00000000..9250a244 --- 
/dev/null +++ b/src/master/test/makefile @@ -0,0 +1,83 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/master/test/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +UNAME=$(shell uname) + +ROOTPATH = ../../.. +USER_DIR = . +SUBDIRS = +TESTDIR = ${ROOTPATH}/obj/tests + +#test code != production code, so allow warnings here. 
+ALLOW_WARNINGS = yes + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img +LIBPATHS = -L${ROOTPATH}/img +LINKLIBS = -lcflshcom -lcflsh_block -lmclient + +BTESTS= +BIN_TESTS=$(addprefix ${TESTDIR}/, ${BTESTS}) + +GTESTS = run_master_fvt +GTESTS_DIR = $(addprefix $(TESTDIR)/, $(GTESTS)) + +# AIX only +ifeq ($(UNAME),AIX) +LINKLIBS+=-lpthreads + +GTESTS64 = $(addsuffix 64, $(GTESTS)) +GTESTS64_DIR = $(addprefix $(TESTDIR)/, $(GTESTS64)) + +#Linux only +else +LINKLIBS+=-lpthread + +endif + +run_master_fvt_OFILES = fvt_master.o mc_test.o mc_test2.o \ + mc_test_engine.o mc_test_error.o \ + mc_test_io.o mc_test_util.o + +DEPS=$(addprefix $(TESTDIR)/, $(run_master_fvt_OFILES:.o=.dep)) + +CFLAGS += \ + -g \ + -D__FVT__\ + -I$(ROOTPATH)/src/master \ + -I$(ROOTPATH)/src/test/framework/googletest/googletest/include +CXXFLAGS+=$(CFLAGS) + +#VPATH += ${ROOTPATH}/src/master + +include ${ROOTPATH}/config.mk + +include $(ROOTPATH)/src/test/framework/gtest.objtests.mk + +unit: + -@if [[ -e /dev/cxl ]]; then \ + $(TESTDIR)/run_master_fvt --gtest_output=xml:$(TESTDIR)/master_fvt_results.xml; \ + else \ + echo "SKIPPING run_master_fvt"; \ + fi diff --git a/src/master/test/mc_signal.bash b/src/master/test/mc_signal.bash new file mode 100755 index 00000000..a7783edc --- /dev/null +++ b/src/master/test/mc_signal.bash @@ -0,0 +1,68 @@ +#!/bin/bash +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/master/test/mc_signal.bash $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +i=1 + +#Check if user is root +if [ $(whoami) != 'root' ] +then + echo "Signals should not kill mserv" + while [ "$i" -le 64 ] + do + echo "$i sending SIG"`kill -l $i`" to mserv process" + pkill -$i mserv + pidof mserv + if [ `echo $?` -eq 1 ] + then + echo "mserv killed by non root user with signal SIG"`kill -l $i` + exit 1 + fi + sleep 1 + i=$[$i+1] + done + +else + while [ "$i" -le 64 ]; + do + echo "$i sending SIG"`kill -l $i`" to mserv process" + pkill -$i mserv + echo "Waiting" + sleep 8 + pidof mserv + + if [ `echo $?` -eq 1 ] + then + echo "mserv not running" + ps -ef | grep msev | grep -v grep + exit 1 + else + echo "mserv running" + fi + + i=$[$i+1] + sleep 2 + done +fi diff --git a/src/master/test/mc_test.c b/src/master/test/mc_test.c new file mode 100644 index 00000000..4d1b3608 --- /dev/null +++ b/src/master/test/mc_test.c @@ -0,0 +1,1260 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" +#include +#include +#include + +#define MMIO_MAP_SIZE 64*1024 + +extern char master_dev_path[MC_PATHLEN]; +extern char afu_path[MC_PATHLEN]; + +int mc_permute(void *arg); + +extern int g_error; +extern pid_t pid; + +//Routine to call mc APIs in order. +int mc_permute(void *arg) +{ + ctx_p p_ctx = (ctx_p)arg; + int rc = 0; + mc_stat_t l_mc_stat; + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl_p->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + g_error = -1; + return -1; + } + + rc = mc_open(p_ctx->mc_hndl_p->mc_hndl, MC_RDWR, &p_ctx->mc_hndl_p->res_hndl_p->res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl rc %d\n", rc); + g_error = -1; + return -1; + } + + rc = mc_stat(p_ctx->mc_hndl_p->mc_hndl, p_ctx->mc_hndl_p->res_hndl_p->res_hndl, &l_mc_stat); + debug("Size of new lba = %lu\n", l_mc_stat.size); + + rc = mc_close(p_ctx->mc_hndl_p->mc_hndl, p_ctx->mc_hndl_p->res_hndl_p->res_hndl ); + if(rc != 0) { + fprintf(stderr, "mc_close() failed: Error closing res_hndl %d\n", p_ctx->mc_hndl_p->res_hndl_p->res_hndl); + g_error = -1; + return -1; + } + + rc = mc_unregister(p_ctx->mc_hndl_p->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_unregister() failed: Error unregistering for mc_hndl %p\n", p_ctx->mc_hndl_p->mc_hndl); + g_error = -1; + return -1; + } + + return 0; +} + +int mc_register_tst() +{ 
+ struct ctx myctx; + int rc = 0; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + rc = ctx_init(&myctx); + if(rc != 0) { + fprintf(stderr, "contex initialization failed.\n"); + mc_term(); + return -1; + } + + rc = mc_register(master_dev_path, myctx.ctx_hndl, (volatile __u64 *)myctx.p_host_map, &myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering ctx_hndl %d, rc %d\n", myctx.ctx_hndl, rc ); + mc_term(); + return -1; + } + + debug("mc_register() success: registered mc_hndl %p for ctx_hndl %d\n", myctx.mc_hndl, myctx.ctx_hndl); + + ctx_close(&myctx); + mc_term(); + return 0; +} + +int mc_unregister_tst() +{ + struct ctx myctx; + int rc = 0; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + rc = ctx_init(&myctx); + if(rc != 0) { + fprintf(stderr, "contex initialization failed.\n"); + mc_term(); + return -1; + } + + rc = mc_register(master_dev_path, myctx.ctx_hndl, (volatile __u64 *)myctx.p_host_map, &myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering mc_hndl for ctx_hndl %d, rc %d\n", myctx.ctx_hndl, rc ); + mc_term(); + return -1; + } + + rc = mc_unregister(myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_unregister() failed: Error unregistering mc_hndl %p\n", myctx.mc_hndl); + mc_term(); + return -1; + } + + debug("mc_unregister() success: unregistered mc_hndl for ctx_hndl %d\n", myctx.ctx_hndl); + + ctx_close(&myctx); + mc_term(); + return 0; +} + +int mc_open_tst(int cmd) +{ + struct ctx myctx; + pthread_t thread; + int rc = 0, res = 0; + __u64 size = 16; + __u64 actual_size; + __u64 start_lba = 0; + __u64 stride = 0x1000; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + pid=getpid(); + + rc = ctx_init(&myctx); + if(rc != 0) { + 
fprintf(stderr, "contex initialization failed.\n"); + mc_term(); + return -1; + } + + pthread_create(&thread, NULL, ctx_rrq_rx, &myctx); + + rc = mc_register(master_dev_path, myctx.ctx_hndl, (volatile __u64 *)myctx.p_host_map, &myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering mc-hndl for ctx_hndl %d, rc %d\n", myctx.ctx_hndl, rc ); + mc_term(); + return -1; + } + + //Negative test 4: Do IO without doing opening any resource. + if(cmd == 4) { + debug("Time to attempt IO without mc_open().\n"); + + rc = send_write(&myctx, start_lba, stride, pid, VLBA); + if(rc == 0) { + fprintf(stderr, "send_write() on read-only vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + rc = send_read(&myctx, start_lba, stride, VLBA); + if(rc == 0) { + fprintf(stderr, "send_read() on write-only vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + res = rc; + } + + //Positive test 1: Do mc_open with MC_RDWR flag + if(cmd == 1) { + rc = mc_open(myctx.mc_hndl, MC_RDWR, &myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl, rc); + return -1; + } + debug("mc_open() success: opened res_hndl %d for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + + res = 0; + } + + //Negative test 2: Do mc_open with MC_RDONLY and do IO read write. 
Expect write fail + if(cmd == 2) { + rc = mc_open(myctx.mc_hndl, MC_RDONLY, &myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl, rc); + return -1; + } + debug("mc_open() success: opened res_hndl %d for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + + //Setting some size + rc = mc_size(myctx.mc_hndl, myctx.res_hndl, size, &actual_size); + if(rc != 0) { + fprintf(stderr, "mc_size() failed: Unable to resize vdisk to %lu.\n", size); + mc_term(); + return -1; + } + + rc = mc_stat(myctx.mc_hndl, myctx.res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + debug("Size of new lba = %lu\n", actual_size); + + stride = (1 << l_mc_stat.nmask); + debug("%d : Time to do IO on 0x%lX sized vdisks.\n", pid, actual_size); + rc = send_write(&myctx, start_lba, stride, pid, VLBA); + if(rc == 0) { + fprintf(stderr, "send_write() on read-only vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + rc = send_read(&myctx, start_lba, stride, VLBA); + if(rc != 0) { + fprintf(stderr, "send_read() success: rc %d\n", rc); + mc_term(); + return -1; + } + + res = 0x5; + } + + //Negative test 3: mc_open with MC_WRONLY and do IO read write. Expect read fail. 
+ if(cmd == 3) { + rc = mc_open(myctx.mc_hndl, MC_WRONLY, &myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl, rc); + return -1; + } + debug("mc_open() success: opened res_hndl %d for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + + //Setting some size + rc = mc_size(myctx.mc_hndl, myctx.res_hndl, size, &actual_size); + if(rc != 0) { + fprintf(stderr, "mc_size() failed: Unable to resize vdisk to %lu.\n", size); + mc_term(); + return -1; + } + + debug("Size of new lba = %lu\n", actual_size); + + debug("Time to do IO on 0x%lX sized vdisks.\n", actual_size); + rc = send_write(&myctx, start_lba, stride, pid, VLBA); + if(rc != 0) { + fprintf(stderr, "send_write() failed: rc %d\n", rc); + mc_term(); + return -1; + } + + rc = send_read(&myctx, start_lba, stride, VLBA); + if(rc == 0) { + fprintf(stderr, "send_read() on write-only vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + res = 0x5; + } + + pthread_cancel(thread); + mc_close(myctx.mc_hndl,myctx.res_hndl); + mc_unregister(myctx.mc_hndl); + + ctx_close(&myctx); + mc_term(); + return res; +} + +int mc_close_tst(int cmd) +{ + struct ctx myctx; + pthread_t thread; + int rc = 0, res = 0; + __u64 size = 10; + __u64 actual_size; + __u64 start_lba = 0; + __u64 stride; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + rc = ctx_init(&myctx); + if(rc != 0) { + fprintf(stderr, "context initialization failed.\n"); + mc_term(); + return -1; + } + + pthread_create(&thread, NULL, ctx_rrq_rx, &myctx); + + rc = mc_register(master_dev_path, myctx.ctx_hndl, (volatile __u64 *)myctx.p_host_map, &myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering mc-hndl for ctx_hndl %d, rc %d\n", myctx.ctx_hndl, rc ); + mc_term(); + return -1; + } + + rc = mc_open(myctx.mc_hndl, MC_RDWR, &myctx.res_hndl); + if(rc != 0) { 
+ fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl, rc); + return -1; + } + debug("mc_open() success: opened res_hndl %d for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + + rc = mc_size(myctx.mc_hndl, myctx.res_hndl, size, &actual_size); + if(rc != 0) { + fprintf(stderr, "mc_size() failed: Unable to resize vdisk to %lu, rc %d\n",size, rc); + mc_term(); + return -1; + } + + rc = mc_stat(myctx.mc_hndl, myctx.res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + stride = (1 << l_mc_stat.nmask); + //Positive test 1: Checking mc_close. + if(cmd == 1) { + rc = mc_close(myctx.mc_hndl,myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_close() failed: rc %d\n", rc); + return -1; + } + debug("mc_close() success: res_hndl %d closed for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + res = 0; + } + + pid = getpid(); + //Negative test 2: Doing read write after freeing res_hndl. + if(cmd == 2) { + rc = mc_close(myctx.mc_hndl,myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_close() failed: rc %d\n", rc); + return -1; + } + debug("mc_close() success: res_hndl %d closed for mc_hndl %p\n", myctx.res_hndl, myctx.mc_hndl); + + debug("Doing IO on freed res_hndl %d\n", myctx.res_hndl); + rc = send_write(&myctx, start_lba, stride, pid, VLBA); + if(rc == 0) { + fprintf(stderr, "send_write() on freed res_hndl vdisk should fail, rc %d\n", rc); + mc_term(); + return -1; + } + + rc = send_read(&myctx, start_lba, stride, VLBA); + if(rc == 0) { + fprintf(stderr, "send_read() on freed res_hndl vdisk should fail, rc %d\n", rc); + mc_term(); + return -1; + } + + res = 0x5; + } + + pthread_cancel(thread); + mc_unregister(myctx.mc_hndl); + ctx_close(&myctx); + mc_term(); + return res; +} + +int mc_size_tst(int cmd) +{ + struct ctx myctx; + pthread_t thread; + int rc = 0, res = 0; + pid_t pid; + __u64 size = 0; + __u64 nlba = 0; + __u64 actual_size; + __u64 start_lba = 0; + __u64 stride = 0x1000; + __u64 chunk[] = {10, 15, 23, 45, 90, 55, 
40, 5, 20 }; + mc_stat_t l_mc_stat; + + int i = 0; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + rc = ctx_init(&myctx); + if(rc != 0) { + fprintf(stderr, "contex initialization failed.\n"); + mc_term(); + return -1; + } + + pthread_create(&thread, NULL, ctx_rrq_rx, &myctx); + + rc = mc_register(master_dev_path, myctx.ctx_hndl, (volatile __u64 *)myctx.p_host_map, &myctx.mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register() failed: Error registering mc-hndl for ctx_hndl %d, rc %d\n", myctx.ctx_hndl, rc ); + mc_term(); + return -1; + } + + rc = mc_open(myctx.mc_hndl, MC_RDWR, &myctx.res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl, rc); + mc_term(); + return -1; + } + + rc = mc_stat(myctx.mc_hndl, myctx.res_hndl, &l_mc_stat); + if(rc != 0) { + fprintf(stderr, "mc_stat() failed: rc %d\n",rc); + mc_term(); + return -1; + } + size = l_mc_stat.size; + if(size != 0) { + fprintf(stderr, "Wrong size value. Expected = 0, Output = %lu \n",size); + mc_term(); + return -1; + } + + debug("Size of new lba = %lu\n", size); + + pid=getpid(); + + //Negative test 2: Doing IO on a zero size vdisk + if(cmd == 2) { + debug("Doing IO on NULL sized vdisks.\n"); + rc = send_write(&myctx, 0, stride, pid, VLBA); + if(rc == 0) { + fprintf(stderr, "send_write() on NULL vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + rc = send_read(&myctx, 0, stride, VLBA); + if(rc == 0) { + fprintf(stderr, "send_read() on NULL vdisk should fail. rc %d\n", rc); + mc_term(); + return -1; + } + + return rc; + } + /*This loop does mc_size for various chunk sizes and performs in range + and out of range IO*/ + for(i = 0; imc_hndl); + if(rc == 0) { + fprintf(stderr, "mc_register() succes: Expecting failure. mc_register() with same values should fail for subsequent call but first. 
rc %d\n", rc); + mc_term(); + return -1; + }*/ + + pid = getpid(); + + //Duping the mc_hndl + rc = mc_hdup(myctx.mc_hndl_p[0].mc_hndl, &myctx.mc_hndl_p[1].mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_hdup() failed: Error duping mc_hndl %p for ctx_hndl %d, rc %d\n", + myctx.mc_hndl_p[0].mc_hndl, myctx.ctx_hndl, rc); + mc_term(); + return -1; + } + + //Opening two resources using original mc_hndl. + rc = mc_open(myctx.mc_hndl_p[0].mc_hndl, MC_RDWR, &myctx.mc_hndl_p[0].res_hndl_p[0].res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl_p[0].mc_hndl, rc); + mc_term(); + return -1; + } + + debug("res_hndl %d created for first mc_hndl %p\n", myctx.mc_hndl_p[0].res_hndl_p[0].res_hndl, myctx.mc_hndl_p[0].mc_hndl); + + rc = mc_open(myctx.mc_hndl_p[0].mc_hndl, MC_RDWR, &myctx.mc_hndl_p[0].res_hndl_p[1].res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl_p[0].mc_hndl, rc); + mc_term(); + return -1; + } + + debug("res_hndl %d created for first mc_hndl %p\n", myctx.mc_hndl_p[0].res_hndl_p[1].res_hndl, myctx.mc_hndl_p[0].mc_hndl); + + //Opening two more resources using duplicated mc_hndl + rc = mc_open(myctx.mc_hndl_p[1].mc_hndl, MC_RDWR, &myctx.mc_hndl_p[1].res_hndl_p[0].res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl_p[0].mc_hndl, rc); + mc_term(); + return -1; + } + + debug("res_hndl %d created for second mc_hndl %p\n", myctx.mc_hndl_p[1].res_hndl_p[0].res_hndl, myctx.mc_hndl_p[1].mc_hndl); + + rc = mc_open(myctx.mc_hndl_p[0].mc_hndl, MC_RDWR, &myctx.mc_hndl_p[1].res_hndl_p[1].res_hndl); + if(rc != 0) { + fprintf(stderr, "mc_open() failed: Error opening res_hndl for mc_hndl %p, rc %d\n", myctx.mc_hndl_p[0].mc_hndl, rc); + mc_term(); + return -1; + } + + debug("res_hndl %d created for second mc_hndl %p\n", myctx.mc_hndl_p[1].res_hndl_p[1].res_hndl, 
myctx.mc_hndl_p[0].mc_hndl); + + int i = 0; + int j = 0; + int k = 0; + + debug("Time to do IO\n"); + + while(i<2) + { + j = 0; + while(j<2){ + myctx.res_hndl = myctx.mc_hndl_p[i].res_hndl_p[j].res_hndl; + debug("Choosing res_hndl %d\n", myctx.res_hndl); + k = 0; + while(k<5) + { + //Size get size verification on original and duplicate mc_hndls + mc_size(myctx.mc_hndl_p[0].mc_hndl, myctx.res_hndl, chunk[k], &actual_size); + mc_stat(myctx.mc_hndl_p[1].mc_hndl, myctx.res_hndl, &l_mc_stat); + size = l_mc_stat.size; + + if(size != actual_size) { + fprintf(stderr, "different size values for res_hndl %d on original mc_hndl %p and duplicate mc_hndl %p\n", + myctx.res_hndl, myctx.mc_hndl_p[0].mc_hndl, myctx.mc_hndl_p[1].mc_hndl); + fprintf(stderr, "Expected size: 0x%lX actual size: 0x%lX\n", actual_size, size); + ctx_close(&myctx); + mc_term(); + return -1; + } + + debug("Expecte size 0x%lX actual size 0x%lX \n", actual_size, size); + + nlba = chunk[k] * (1 << l_mc_stat.nmask); + //Positive test 1: Allocate resource on both mc_hndl. Do write on original and read from duplicate. Compare. + //Positive test 2: Allocate resource on both mc_hndl. Do write on duplicate and read from original. Compare. + if(cmd == 1 || cmd == 2) { + //IO Loop + for(start_lba = 0; start_lba res_hndl_p = &res_hndl_a[i]; + pthread_create(&(threads_a[i]), NULL, (void *)&mc_permute, (void *)&p_ctx[i]); + if(rc) { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + //joining again + for(i = 0; i < MAX_OPENS; i++) { + pthread_join(threads_a[i], NULL); + } + + //Closing all opened contexts. 
+ for(i = 0; i < MAX_OPENS; i++) { + rc = pthread_create(&(threads_a[i]), NULL, (void *)&ctx_close_thread, (void *)&p_ctx[i]); + if(rc) { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + for(i = 0; i < MAX_OPENS; i++) { + pthread_join(threads_a[i], NULL); + } + rc = g_error; + g_error = 0; + + free(p_ctx); + free(threads_a); + + mc_term(); + + return rc; +} + +/* mc_open_close_tst initializes all possible contexts and then closes a random + * number of contexts. It then reopens same number of contexts and checks if any + * duplicate context Id is not generated. + * + * Inputs: + * NONE + * + * Output: + * 0 Success + * -1 Failure + */ + +int mc_open_close_tst() +{ + struct ctx *ctx_a; + bool ctx_hndl_a[MAX_OPENS+1]; //Keep track of which context is open/close. + int ctx_idx_a[MAX_OPENS+1]; //Stores indexes of ctx_a. + bool not_stress = true; + int loop = 5; + int i = 0; + int rc = 0; + int j=1; + + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + not_stress = false; + } + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init() failed.\n"); + return -1; + } + + debug("mc_init() success.\n"); + + rc = posix_memalign((void **)&ctx_a, 0x1000, sizeof(struct ctx)*MAX_OPENS+1); + if(rc != 0) { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + + memset(ctx_idx_a, 0, sizeof(ctx_idx_a)); + memset(ctx_hndl_a, 0, sizeof(ctx_hndl_a)); + + if(not_stress) { + while(--loop >= 0) { + for(i = 0; i < MAX_OPENS/2; i++) { + rc = ctx_init(&ctx_a[i]); + CHECK_RC(rc , "ctx_init"); + } + for(i = 0; i < MAX_OPENS/2; i++) { + ctx_close(&ctx_a[i]); + CHECK_RC(rc, "ctx_close"); + } + } + free(ctx_a); + return 0; + } + + while(j <= MAX_OPENS) { + ctx_init(&ctx_a[j]); + if(ctx_hndl_a[ctx_a[j].ctx_hndl]) { + fprintf(stderr, "Duplicate context Id generated %d \n", ctx_a[j].ctx_hndl); + mc_term(); + return -1; + } + + ctx_hndl_a[ctx_a[j].ctx_hndl] = true; + debug("context generated 
%d\n", ctx_a[j].ctx_hndl); + j++; + } + + //Number of contexts to be closed. + srand(time(NULL)); + int num_ctx = rand()%MAX_OPENS+1; + + debug("Will close %d contexts now.\n", num_ctx); + + j=0; + while(j < num_ctx) { + i = rand()%MAX_OPENS+1; //random index of the context to be closed. + //Preventing multiple generation of same number. + while(!ctx_hndl_a[ctx_a[i].ctx_hndl]) { + i = rand()%MAX_OPENS+1; + } + + ctx_idx_a[j] = i; + int ctx_hndl_temp = ctx_a[i].ctx_hndl; + ctx_close(&ctx_a[i]); + ctx_hndl_a[ctx_hndl_temp] = false; + debug("context closed %d\n", ctx_hndl_temp); + + j++; + } + + j = 0; + while(j < num_ctx) { + i = ctx_idx_a[j]; + ctx_init(&ctx_a[i]); + if(ctx_hndl_a[ctx_a[i].ctx_hndl]) { + fprintf(stderr, "Duplicate context generated %d\n",ctx_a[i].ctx_hndl); + mc_term(); + return -1; + } + + ctx_hndl_a[ctx_a[i].ctx_hndl] = true; + debug("Context re-generated %d\n", ctx_a[i].ctx_hndl); + j++; + } + + j=1; + //free everything. + while(j <= MAX_OPENS) { + int ctx_hndl_temp = ctx_a[j].ctx_hndl; + ctx_close(&ctx_a[j]); + ctx_hndl_a[ctx_hndl_temp] = false; + debug("context closed %d\n", ctx_hndl_temp); + + j++; + } + + free(ctx_a); + mc_term(); + return 0; +} diff --git a/src/master/test/mc_test.h b/src/master/test/mc_test.h new file mode 100755 index 00000000..cb34c954 --- /dev/null +++ b/src/master/test/mc_test.h @@ -0,0 +1,343 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +#ifndef __MC_H__ +#define __MC_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +// not implemented in AIX +#ifndef WCOREDUMP +#define WCOREDUMP(x) ((x) & 0x80) +#endif + +#define MAX_TRY_WAIT 100 +#define MC_BLOCK_DELAY_ROOM 1000 + +#define ENABLE_HEXDUMP 0 //0 for disable hexdump +#define MMIO_MAP_SIZE 64*1024 + +#define LBA_BLK 8 //1 for 4K blk size & 8 for 512 BLK + +//Flags to be used while sending +//send_read & send_write +#define PLBA 0x0 +#define VLBA 0x1 +#define NO_XLATE 0x2 + +//max allow ctx handler +#define MAX_OPENS 507 + +//max allow RHT per context +#define MAX_NUM_THREADS 16 /*hardcoded 16 MAX_RHT_CONTEXT*/ +#define MAX_RES_HANDLE 16 /*hardcoded 16 MAX_RHT_CONTEXT */ + +#define B_DONE 0x01 +#define B_ERROR 0x02 +#define NUM_RRQ_ENTRY 16 + +#define NUM_CMDS 16 /* max is NUM_RRQ_ENTRY */ + +#define CL_SIZE 128 /* Processor cache line size */ +#define CL_SIZE_MASK 0x7F /* Cache line size mask */ + +#define DEBUG 0 //0 disable, 1 some level, 2 complete + +#define CHECK_RC(rc, msg);\ + if(rc){\ + fprintf(stderr,"Failed @ %s:%d:%s rc = %d on %s\n",\ + __FILE__, __LINE__, __func__,rc, msg);\ + return rc;}\ + +#define CHECK_RC_EXIT(rc, msg);\ + if(rc){\ + fprintf(stderr,"Failed @ %s:%d:%s rc = %d on %s\n",\ + __FILE__, __LINE__, __func__,rc, msg);\ + exit(rc);}/ + +#define debug\ 
+ if(DEBUG >= 1) printf + +#define debug_2\ + if(DEBUG ==2) printf + + //fprintf(stderr, "\n%s:%d:%s:", __FILE__, __LINE__, __func__); +struct res_hndl_s { + res_hndl_t res_hndl; + __u64 size; +}; + +struct mc_hndl_s { + mc_hndl_t mc_hndl; + struct res_hndl_s *res_hndl_p; +}; + +//Context structure. +struct ctx { + /* Stuff requiring alignment go first. */ + + /* Command & data for AFU commands issued by test. */ + char rbuf[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + char wbuf[NUM_CMDS][0x1000]; // 4K write data buffer (page aligned) + __u64 rrq_entry[NUM_RRQ_ENTRY]; // 128B RRQ (page aligned) + + struct afu_cmd { + sisl_ioarcb_t rcb; // IOARCB (cache line aligned) + sisl_ioasa_t sa; // IOASA follows RCB + pthread_mutex_t mutex; + pthread_cond_t cv; + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t) + + sizeof(pthread_mutex_t) + + sizeof(pthread_cond_t)) & CL_SIZE_MASK)]; + } cmd[NUM_CMDS]; + + // AFU interface + int afu_fd; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; + volatile struct sisl_host_map *p_host_map; + ctx_hndl_t ctx_hndl; + + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + + // MC client interface + mc_hndl_t mc_hndl; + res_hndl_t res_hndl; + struct mc_hndl_s *mc_hndl_p; //For handling multiple mc_hndl and res_hndl +} __attribute__ ((aligned (0x1000))); + +typedef struct ctx * ctx_p; + +struct ctx_alloc { + struct ctx ctx; + __u8 page_pad[0x1000 - (sizeof(struct ctx) & 0xFFF)]; +}; + +struct pthread_alloc { + pthread_t rrq_thread; +}; + +struct rwbuf{ + char wbuf[NUM_CMDS][0x2000]; //8K for EA alignment + char rbuf[NUM_CMDS][0x2000]; //8K for EA alignment +}__attribute__ ((aligned (0x1000))); + +struct rwshmbuf{ + char wbuf[1][0x1000]; + char rbuf[1][0x1000]; +}__attribute__ ((aligned (0x1000))); + +typedef +enum { + TEST_MC_REG =1, + TEST_MC_UNREG, + TEST_MC_OPEN, + TEST_MC_OPEN_ERROR1, + TEST_MC_OPEN_ERROR2, + TEST_MC_OPEN_ERROR3, + 
TEST_MC_CLOSE, + TEST_MC_CLOSE_ERROR1, + TEST_MC_SIZE, + TEST_MC_SIZE_ERROR2, + TEST_MC_SIZE_ERROR3, + TEST_MC_SIZE_ERROR4, + TEST_MC_GETSIZE, + TEXT_MC_XLAT_IO, + TEXT_MC_XLAT_IO_V2P, + TEXT_MC_XLAT_IO_P2V, + TEST_MC_HDUP_ORG_IO, + TEST_MC_HDUP_DUP_IO, + TEST_MC_HDUP_ERROR1, + TEST_MC_DUP, + TEST_MC_CLONE_RDWR, + TEST_MC_CLONE_READ, + TEST_MC_CLONE_WRITE, + TEST_MC_CLN_O_RDWR_CLN_RD, + TEST_MC_CLN_O_RDWR_CLN_WR, + TEST_MC_CLN_O_RD_CLN_RD, + TEST_MC_CLN_O_RD_CLN_WR, + TEST_MC_CLN_O_WR_CLN_RD, + TEST_MC_CLN_O_WR_CLN_WR, + TEST_MC_CLN_O_RD_CLN_RDWR, + TEST_MC_CLN_O_WR_CLN_RDWR, + TEST_MC_MAX_SIZE, + TEST_MC_MAX_CTX_N_RES, + TEST_MC_MAX_RES_HNDL, + TEST_MC_MAX_CTX_HNDL, + TEST_MC_MAX_CTX_HNDL2, + TEST_MC_CHUNK_REGRESS, + TEST_MC_CHUNK_REGRESS_BOTH_AFU, + TEST_MC_SIGNAL, + TEST_ONE_UNIT_SIZE, + TEST_MC_LUN_SIZE, + TEST_MC_PLBA_OUT_BOUND, + TEST_MC_INVALID_OPCODE, + TEST_MC_IOARCB_EA_NULL, + TEST_MC_IOARCB_INVLD_FLAG, + TEST_MC_IOARCB_INVLD_LUN_FC, + TEST_MC_ONE_CTX_TWO_THRD, + TEST_MC_ONE_CTX_RD_WRSIZE, + TEST_MC_TWO_CTX_RD_WRTHRD, + TEST_MC_TWO_CTX_RDWR_SIZE, + TEST_MC_LUN_DISCOVERY, + TEST_MC_TEST_VDISK_IO, + TEST_MC_RW_CLS_RSH, + TEST_MC_UNREG_MC_HNDL, + TEST_MC_RW_CLOSE_CTX, + TEST_MC_CLONE_MANY_RHT, + TEST_AUN_WR_PLBA_RD_VLBA, + TEST_GOOD_CTX_ERR_CTX, + TEST_GOOD_CTX_ERR_CTX_CLS_RHT, + TEST_GOOD_CTX_ERR_CTX_UNREG_MCH, + TEST_MC_IOARCB_EA_ALGNMNT_16, + TEST_MC_IOARCB_EA_ALGNMNT_128, + TEST_MC_MNY_CTX_ONE_RRQ_C_NULL, + TEST_MC_ALL_AFU_DEVICES, + TEST_MC_IOARCB_EA_INVLD_ALGNMNT, + MC_TEST_RWBUFF_GLOBAL, + MC_TEST_RW_SIZE_PARALLEL, + MC_TEST_RWBUFF_SHM, + MC_TEST_GOOD_ERR_AFU_DEV, + TEST_MC_AFU_RC_CAP_VIOLATION, + TEST_MC_REGRESS_RESOURCE, + TEST_MC_REGRESS_CTX_CRT_DSTR, + TEST_MC_REGRESS_CTX_CRT_DSTR_IO, + TEST_MC_SIZE_REGRESS, +}mc_test_t; + + +int mc_max_open_tst(); +int mc_open_close_tst(); +int mc_register_tst(); +int mc_unregister_tst(); +int mc_open_tst(int); +int mc_size_tst(int); +int mc_xlate_tst(int); +int mc_hdup_tst(int cmd); +int mc_max_vdisk_thread(); 
+int test_mc_clone_api(__u32 flags); +int test_mc_clone_error(__u32 oflg, __u32 cnflg); +int test_mc_max_size(); +int test_max_ctx_n_res(); +int mc_test_engine(mc_test_t); +int test_one_aun_size(); +int test_mc_clone_read(); +int test_mc_clone_write(); +int test_mc_lun_size(int cmd); +int test_mc_dup_api(); +int mc_close_tst(); +int mc_test_chunk_regress(int cmd); +int mc_test_chunk_regress_long(); +int mc_test_chunk_regress_both_afu(); +int test_mc_xlate_error(int); +int test_vdisk_io(); +int ctx_init(struct ctx *p_ctx); +int ctx_init_thread(void *); +int test_lun_discovery(int cmd); +int test_onectx_twothrd(int cmd); +int test_mc_reg_error(int cmd); +int test_two_ctx_two_thrd(int cmd); +int test_mc_invalid_ioarcb(int cmd); +int test_rw_close_hndl(int cmd); +int test_mc_clone_many_rht(); +int test_good_ctx_err_ctx(int cmd); +int test_mc_ioarcb_ea_alignment(int cmd); +int check_mc_null_params(int cmd); +int test_many_ctx_one_rrq_curr_null(); +int test_all_afu_devices(); +int mc_test_rwbuff_global(); +int test_mc_rwbuff_shm(); +int test_mc_rw_size_parallel(); +int test_mc_good_error_afu_dev(); +int test_mc_regress_ctx_crt_dstr(int cmd); +int test_mc_size_error(int cmd); +int test_mc_null_params(int cmd); +int test_mc_inter_prcs_ctx(int cmd); +int mc_test_ctx_regress(int cmd); +void ctx_close(struct ctx *p_ctx); +void ctx_close_thread(void *); +int get_fvt_dev_env(); + +int test_init(struct ctx *p_ctx); +void *ctx_rrq_rx(void *arg); +int send_write(struct ctx *p_ctx, __u64 start_lba, + __u64 stride, __u64 data,__u32 flags); +int send_single_write(struct ctx *p_ctx, __u64 vlba, __u64 data); +int send_read(struct ctx *p_ctx, __u64 start_lba, __u64 stride, __u32 flags); +int send_single_read(struct ctx *p_ctx, __u64 vlba); +int rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba); +int rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba); +int cmp_buf_cloned(__u64* p_buf, unsigned int len); +int rw_cmp_single_buf(struct ctx *p_ctx, __u64 vlba); +int send_cmd(struct ctx 
*p_ctx); +int wait_resp(struct ctx *p_ctx); +int wait_single_resp(struct ctx *p_ctx); +void fill_buf(__u64* p_buf, unsigned int len, __u64 data); +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len); +int send_report_luns(struct ctx *p_ctx, __u32 port_sel, + __u64 **lun_ids,__u32 *nluns); +int send_read_capacity(struct ctx *p_ctx, __u32 port_sel, + __u64 lun_id, __u64 *lun_capacity, __u64 *blk_len); +int check_status(sisl_ioasa_t *p_ioasa); +void send_single_cmd(struct ctx *p_ctx); +int send_rw_rcb(struct ctx *p_ctx, struct rwbuf *p_rwb, + __u64 start_lba, __u64 stride, + int align, int where); +int send_rw_shm_rcb(struct ctx *p_ctx, struct rwshmbuf *p_rwb, + __u64 vlba); +void hexdump(void *data, long len, const char *hdr); + +#endif diff --git a/src/master/test/mc_test2.c b/src/master/test/mc_test2.c new file mode 100644 index 00000000..fc51448f --- /dev/null +++ b/src/master/test/mc_test2.c @@ -0,0 +1,2276 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test2.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" +#include +#include + +extern char master_dev_path[MC_PATHLEN]; +extern char afu_path[MC_PATHLEN]; + +extern pid_t pid; +extern __u8 rrq_c_null; +extern int dont_displa_err_msg; +/*each thread can modify this value incase of any failure */ +static int g_error =0; + +/* + * * Serialization is required for a mc_handle + * * which is shared by multiple threads. + * + */ +static pthread_mutex_t mutex; +static pthread_mutex_t counter=PTHREAD_MUTEX_INITIALIZER; +static int count=0; + +void* test_mc_api(void *arg); +int do_basic_setting(struct ctx *p_ctx); + +void* test_mc_api(void *arg) +{ + struct ctx *p_ctx = (struct ctx*)arg; + mc_hndl_t mc_hndl = p_ctx->mc_hndl; + int rc=0; + res_hndl_t res_handl; + __u64 size ; + __u64 actual_size=0; + __u64 plba=0; + __u64 nlba, st_lba; + __u64 stride; + mc_stat_t l_mc_stat; + + pthread_t pthread_id1 =pthread_self(); + unsigned int pthread_id =(unsigned int)pthread_id1; + size = (rand()%10+1)*16; + + pthread_mutex_lock(&mutex); + rc = mc_open(mc_hndl, MC_RDWR, &res_handl); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "thread : 0x%x:mc_open: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + pthread_mutex_lock(&counter); + count++; + pthread_mutex_unlock(&counter); + + pthread_mutex_lock(&mutex); + rc = mc_size(mc_hndl, res_handl, size,&actual_size); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "thread : 0x%x:mc_size: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + + pthread_mutex_lock(&mutex); + rc = mc_stat(mc_hndl, res_handl, &l_mc_stat); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "thread : 0x%x:mc_stat: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + size = l_mc_stat.size; + if(size != actual_size) + { + fprintf(stderr,"thread : 0x%x:size mismatched: %lu : %lu\n", pthread_id,size,actual_size); + g_error = -1; + return NULL; + } + + nlba = size * 
(1 << l_mc_stat.nmask); + pthread_mutex_lock(&mutex); + rc = mc_xlate_lba(mc_hndl, res_handl, nlba-1,&plba); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "thread : 0x%x:mc_xlate_lba: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + + stride = 1 << l_mc_stat.nmask; + for(st_lba = 0; st_lba < nlba; st_lba +=(NUM_CMDS*stride)) + { + pthread_mutex_lock(&mutex); + p_ctx->res_hndl = res_handl; + debug("res hnd: %d send write for 0X%lX \n",res_handl, st_lba); + + rc = send_write(p_ctx, st_lba, stride, pthread_id1, VLBA); + rc += send_read(p_ctx, st_lba, stride,VLBA); + rc += rw_cmp_buf(p_ctx, st_lba); + + pthread_mutex_unlock(&mutex); + if(rc) { + g_error = rc; + break; + } + } + + pthread_mutex_lock(&mutex); + rc = mc_close(mc_hndl, res_handl); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "thread : 0x%x:mc_close: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + return 0; +} + +int mc_max_vdisk_thread() +{ + struct ctx_alloc p_ctx_a; + struct pthread_alloc *p_thread_a; + struct ctx *p_ctx = &(p_ctx_a.ctx); + pthread_mutexattr_t mattr; + pthread_t thread; + int rc = 0; + int i; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + //Allocating structures for pthreads. + p_thread_a = (struct pthread_alloc *) malloc(sizeof(struct pthread_alloc) * MAX_NUM_THREADS); + if(p_thread_a == NULL) { + fprintf(stderr, " Can not allocate thread structs, errno %d\n", errno); + return -1; + } + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context initialization failed, errno %d\n", errno); + return -1; + } + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + debug("master_dev_path : %s\n",master_dev_path); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, + &p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. 
Error registering ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + debug_2("%d : ctx hand & mc hnd: %d & %p\n", + pid, p_ctx->ctx_hndl, p_ctx->mc_hndl); + + //initialize the mutex + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + //create threads + for(i=0;i< MAX_NUM_THREADS; ++i) + { + rc = pthread_create(&(p_thread_a[i].rrq_thread), NULL, &test_mc_api, (void *)p_ctx); + if(rc) { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + //destroy mutexattr + pthread_mutexattr_destroy(&mattr); + //joining + for(i=0;i< MAX_NUM_THREADS; ++i) + { + pthread_join(p_thread_a[i].rrq_thread, NULL); + } + + //destroy the mutex + pthread_mutex_destroy(&mutex); + + pthread_cancel(thread); + debug_2("%d : ctx hand & mc hnd: %d & %p\n", + pid, p_ctx->ctx_hndl, p_ctx->mc_hndl); + rc = mc_unregister(p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %p\n", p_ctx->mc_hndl); + return -1; + } + + ctx_close(p_ctx); + mc_term(); + //free allocated space + free(p_thread_a); + rc = g_error; + g_error =0; + return rc; +} + +int do_basic_setting(struct ctx *p_ctx) +{ + int rc; + rc = ctx_init(p_ctx); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + return -1; + } + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map, + &p_ctx->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering ctx_hndl %d, rc %d\n", + p_ctx->ctx_hndl, rc); + return -1; + } + return 0; +} + +int test_mc_clone_api(__u32 flags) +{ + int rc; + struct ctx testctx; + mc_hndl_t mc_hndl_old; + mc_hndl_t mc_hndl; + pid_t child_pid; + __u64 chunks =512; + __u64 newchunks = 0X100; + __u64 actual_size; + __u64 stride; + __u64 st_lba; + __u64 nlba; + struct ctx *p_ctx = &testctx; + pthread_t thread; + mc_stat_t l_mc_stat; + + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + 
if(do_basic_setting(p_ctx) != 0) { + return -1; + } + + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &(p_ctx->res_hndl)); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + chunks, &actual_size); + if (rc != 0 || chunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + pid = getpid(); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX vlba\n",st_lba); + return rc; + } + } + pthread_cancel(thread); + child_pid = fork(); + if(child_pid == 0) + { + mc_init(); + //child process + mc_hndl_old = p_ctx->mc_hndl; + + if(do_basic_setting(p_ctx) != 0) + { + exit(-1); + } + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + //do mc_clone + rc = mc_clone(p_ctx->mc_hndl, mc_hndl_old, flags); + if (rc != 0) { + fprintf(stderr, "error cloning rc %d\n", rc); + exit(-1); + } + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + // close parent inherited interfaces + rc = mc_unregister(mc_hndl_old); + //ctx_close(p_ctx); + rc = mc_stat(p_ctx->mc_hndl,p_ctx->res_hndl,&l_mc_stat); + if (rc != 0) { + fprintf(stderr, "error in mc_stat rc %d\n", rc); + exit(-1); + } + actual_size = l_mc_stat.size; + if(chunks != actual_size) + { + fprintf(stderr, "size mismatch rc %d\n", rc); + exit(-1); + } + + if(flags & MC_RDONLY ){ //RDWR & RDONLY both works + debug("%d : Do read for clone cmp \n", getpid()); + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = 
send_read(p_ctx, st_lba, stride, VLBA); + if(rc != 0) { + fprintf(stderr,"send read failed 0X%lX vlba\n",st_lba); + exit(rc); + } + rc = rw_cmp_buf_cloned(p_ctx, st_lba); + if(rc){ + exit(rc); + } + } + } + pid = getpid(); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + newchunks,&actual_size); + debug("pid:%u newchunks:%lu actual_size: %lu\n",pid,newchunks,actual_size); + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = newchunks * (1 << l_mc_stat.nmask);; + if(flags == MC_RDWR) { + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)){ + rc = send_write(p_ctx, st_lba, stride, pid,VLBA); + if(rc){ + fprintf(stderr,"send write failed @ 0X%lX vlba\n",st_lba); + exit(rc); + } + rc = send_read(p_ctx, st_lba, stride,VLBA); + if(rc){ + fprintf(stderr,"send read failed @ 0X%lX vlba\n",st_lba); + exit(rc); + } + rc = rw_cmp_buf(p_ctx, st_lba); + if(rc){ + exit(rc); + } + } + } + if(flags == MC_WRONLY) { //WRONLY & RDWR both works + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)){ + rc = send_write(p_ctx, st_lba, stride, pid,VLBA); + if(rc){ + fprintf(stderr,"send write failed @ 0X%lX vlba\n",st_lba); + exit(rc); + } + } + } + newchunks = 0; + debug("Calling again mc_size with 0 value\n"); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + 0,&actual_size); + + debug("pid:%u newchunks:%lu actual_size: %lu\n",pid,newchunks,actual_size); + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + exit(0); + } + else + { + //let child process done cloning + sleep(2); + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + newchunks,&actual_size); + if (rc != 0 || newchunks != 
actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + + pid = getpid(); + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + nlba = newchunks * (1 << l_mc_stat.nmask);; + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)){ + rc = send_write(p_ctx, st_lba, stride, pid,VLBA); + if(rc){ + fprintf(stderr,"send write failed @ 0X%lX vlba\n",st_lba); + return rc; + } + rc = send_read(p_ctx, st_lba, stride,VLBA); + if(rc){ + fprintf(stderr,"send read failed @ 0X%lX vlba\n",st_lba); + return rc; + } + rc = rw_cmp_buf(p_ctx, st_lba); + if(rc){ + exit(rc); + return rc; + } + } + + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + debug("pid = %u, cpid = %u\n", pid, child_pid); + fflush(stdout); + child_pid = wait(&rc); + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + } + debug("%d terminated, signalled %s, signo %d\n", + child_pid, WIFSIGNALED(rc) ? 
"yes" : "no", WTERMSIG(rc)); + fflush(stdout); + } + ctx_close(p_ctx); + mc_term(); + return rc; +} + +int test_mc_max_size() +{ + int rc; + struct ctx testctx; + pthread_t thread; + __u64 chunks =0x1000; + __u64 actual_size; + __u64 max_size = 0; + __u64 rnum; + __u64 lun_size; + __u64 stride = 0x1000; //4K + __u64 st_lba = 0; + int loop_stride = 1000; + __u64 nlba; + mc_stat_t l_mc_stat; + bool is_stress = false; + + struct ctx *p_ctx = &testctx; + unsigned int i; + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + chunks = 1; //increment one by one + loop_stride = 10; + is_stress = true; + } + + pid = getpid(); + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if(do_basic_setting(p_ctx) != 0) + { + return -1; + } + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &(p_ctx->res_hndl)); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + //allocate max allow size for a vdisk + while(1) + { + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + chunks, &actual_size); + if(rc != 0) + { + fprintf(stderr,"mc_size failed rc=%d\n",rc); + return -1; + } + if(chunks != actual_size) + { + debug("now reaching extreme..chunk(0X%lX) act(0X%lX)\n", chunks,actual_size); + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if(rc != 0) + { + fprintf(stderr,"mc_stat failed rc = %d\n",rc); + return -1; + } + max_size = l_mc_stat.size; + break; + } + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if(rc != 0) + { + fprintf(stderr,"mc_stat failed rc = %d\n",rc); + return -1; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + debug("chunk(0X%lX)lba (0X%lX) i/o@(0X%lX)\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid, VLBA); + if(rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride, VLBA); + if(rc) break; + rc = 
rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if(rc) break; + if(is_stress) + chunks++; + else + chunks += 0x1000; + } + + if(max_size == 0) + { + debug("lets check more chunk can be allocated\n"); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + chunks+1, &actual_size); + debug("new chunk =0X%lX & LBAs = 0X%lX\n", + actual_size, actual_size*(1 << l_mc_stat.nmask)); + fprintf(stderr, "some errors happend\n"); + return -1; + } + debug("OK, I got the lun size 0X%lX & nlba 0X%lX\n", + max_size, max_size*(1 << l_mc_stat.nmask)); + lun_size = (max_size*(1 << l_mc_stat.nmask)*(l_mc_stat.blk_len)/(1024*1024*1024)); + + + nlba = max_size * (1 << l_mc_stat.nmask); + if(is_stress) { + printf("%d : now do IO till 0X%lX lbas\n", pid, nlba-1); + for(st_lba = 0; st_lba < nlba; st_lba += NUM_CMDS * stride) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, st_lba, stride, VLBA); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, st_lba); + CHECK_RC(rc, "rw_cmp_buf"); + } + } + //allocate & dallocate max_size + chunks = max_size; + while(chunks > 0) + { + chunks = chunks/2; + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + chunks, &actual_size); + if(rc != 0 || chunks != actual_size) + { + fprintf(stderr,"mc_size api failed.. rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if(rc != 0 || chunks != actual_size) + { + fprintf(stderr,"mc_stat api failed.. 
rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + actual_size = l_mc_stat.size; + if(actual_size == 0) break; + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("chunk(0X%lX)lba (0X%lX) i/o@(0X%lX)\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid, VLBA); + if(rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride, VLBA); + if(rc) break; + rc = rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if(rc) break; + } + + for(i=0;i<=max_size;i+=loop_stride) + { + rnum = rand()% max_size +1; + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + rnum, &actual_size); + + if((rc != 0)||(rnum != actual_size)) + { + fprintf(stderr,"mc_size api failed.. rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if((rc != 0 )|| (rnum != actual_size)) + { + fprintf(stderr,"mc_stat api failed.. 
rc=%d\n",rc); + fprintf(stderr,"expected & actual : %lu & %lu\n",chunks,actual_size); + return -1; + } + actual_size = l_mc_stat.size; + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("chunk(0X%lX)lba (0X%lX) i/o@(0X%lX)\n", + actual_size,nlba, nlba-(NUM_CMDS*stride)); + rc = send_write(p_ctx, nlba-(NUM_CMDS*stride), stride, pid, VLBA); + if(rc) break; + rc += send_read(p_ctx, nlba-(NUM_CMDS*stride), stride, VLBA); + if(rc) break; + rc = rw_cmp_buf(p_ctx, nlba-(NUM_CMDS*stride)); + if(rc) break; + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + printf("LUN size is :%lu GB\n",lun_size); + return rc; +} + +void *max_ctx_res(void *arg) +{ + int rc; + res_hndl_t my_res_hndl[MAX_RES_HANDLE]; + struct ctx *p_ctx = (struct ctx *)arg; + int i; + pthread_t pthread_id = pthread_self(); + __u64 size = 16; + __u64 actual_size; + __u64 nlba, st_lba; + __u64 stride; + mc_stat_t l_mc_stat; + + pid = getpid(); + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + g_error =-1; + return NULL; + } + + //pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. 
ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + g_error =-1; + return NULL; + } + debug("thread:%lx ctx hand %d & mc hnd:%p\n", pthread_id,p_ctx->ctx_hndl, p_ctx->mc_hndl); + + //open max allowed res for a context + size = (rand()%10+1)*16; + for(i=0;imc_hndl,MC_RDWR,&my_res_hndl[i]); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + debug("ctx:%d res hndl:%u\n",p_ctx->ctx_hndl,my_res_hndl[i]); + } + + for(i=0;imc_hndl,my_res_hndl[i],size,&actual_size); + if(rc != 0) { + fprintf(stderr, "thread : %lx:mc_size: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + rc = mc_stat(p_ctx->mc_hndl,my_res_hndl[i], &l_mc_stat); + if(rc != 0) { + fprintf(stderr, "thread : %lx:mc_stat: failed,rc %d\n", pthread_id,rc); + g_error = -1; + return NULL; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + nlba = 0; //NO IO here + for(st_lba = 0;st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + if((rc != 0) && (actual_size == 0)){ + printf("%d : Fine, IO @(0X%lX) but range is(0X%lX)\n",pid, st_lba, nlba-1); + size = 16; + break; + }else { + fprintf(stderr,"%d : Send write failed @ (0X%lX) LBA\n", pid, st_lba); + g_error = -1; + return NULL; + } + rc = send_read(p_ctx, st_lba, stride, VLBA); + rc += rw_cmp_buf(p_ctx, st_lba); + if(rc){ + g_error = -1; + return NULL; + } + } + + debug("ctx:%d res_hand:%d size:%lu\n",p_ctx->ctx_hndl,my_res_hndl[i],actual_size); + size += 16; + } + + for(i=0;imc_hndl,my_res_hndl[i]); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + } + + rc = mc_unregister(p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %p\n", p_ctx->mc_hndl); + g_error = -1; + return NULL; + } + debug("mc unregistered for ctx:%d\n",p_ctx->ctx_hndl); + return 0; +} + +int test_max_ctx_n_res() +{ 
+ int rc; + int i; + pthread_t threads[MAX_OPENS]; + struct ctx *p_ctx; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + + debug("mc_init success.\n"); + + rc = posix_memalign((void **)&p_ctx, 0x1000, sizeof(struct ctx)*MAX_OPENS); + if(rc != 0) + { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + //Creating threads for ctx_init + for(i = 0; i < MAX_OPENS; i++) { + rc = pthread_create(&threads[i],NULL, (void *)&max_ctx_res, (void *)&p_ctx[i]); + if(rc) { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + free(p_ctx); + return -1; + } + } + + //joining + for(i = 0; i < MAX_OPENS; i++) { + pthread_join(threads[i], NULL); + } + for(i = 0; i < MAX_OPENS; i++) { + ctx_close(&p_ctx[i]); + } + free(p_ctx); + rc = g_error; + g_error = 0; + return rc; +} + +int test_one_aun_size() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 aun = 1; + __u64 actual_size; + __u64 nlba; + __u64 size; + __u64 stride = 0x1000; + pthread_t thread; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. 
ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + debug("ctx hand %d & mc hnd:%p\n", p_ctx->ctx_hndl, p_ctx->mc_hndl); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR,&p_ctx->res_hndl); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + return -1; + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, aun, &actual_size); + if(rc != 0) { + fprintf(stderr,"mc_size: failed,rc %d\n", rc); + return -1; + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if((rc != 0) || (aun != l_mc_stat.size)) + { + fprintf(stderr,"mc_get_size failed rc =%d: %lu : %lu\n", rc,aun,actual_size); + return -1; + } + debug("mc_stat:\nblk_len = 0X%X\nnmask = 0X%X\nsize = 0X%lX\nflags = 0X%lX\n", + l_mc_stat.blk_len, l_mc_stat.nmask, l_mc_stat.size, l_mc_stat.flags); + //printf("ctx:%d res_hand:%d vdisk unit: %lu\n",p_ctx->ctx_hndl,p_ctx->res_hndl,actual_size); + nlba = aun*(1 << l_mc_stat.nmask); + size = nlba*(l_mc_stat.blk_len); + debug("ONE AUN = %lu(0x%lx) LBAs and One AUN size =%lu(0x%lx)Bytes\n",nlba,nlba,size,size); + debug("ONE AUN = %lu MB\n",size/(1024*1024)); + + pid = getpid(); + rc = send_single_write(p_ctx, nlba-1, pid); + rc = send_single_read(p_ctx, nlba-1); + stride = LBA_BLK; + rc = send_write(p_ctx, nlba/2, stride, pid, VLBA); + rc += send_read(p_ctx, nlba/2, stride, VLBA); + rc += rw_cmp_buf(p_ctx, nlba/2); + CHECK_RC(rc, "IO"); + + pthread_cancel(thread); + rc = mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + if(rc != 0) { + fprintf(stderr,"mc_close failed rc=%d\n",rc); + return -1; + } + rc = mc_unregister(p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr,"mc_unregister failed rc=%d\n",rc); + return -1; + } + ctx_close(p_ctx); + return 0; +} + +int test_mc_clone_error(__u32 oflg, __u32 cnflg) +{ + int rc,rc1,rc2; + struct ctx testctx; + mc_hndl_t mc_hndl_old; + mc_hndl_t mc_hndl; + pid_t child_pid; + pid_t pid; + __u64 chunks =16; + __u64 newchunks = 32; + __u64 actual_size; + __u64 stride = 0x1000; + __u64 nlba=0; + 
__u64 st_lba=0; + struct ctx *p_ctx = &testctx; + pthread_t thread; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if(do_basic_setting(p_ctx) != 0) + { + return -1; + } + + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + rc = mc_open(p_ctx->mc_hndl, oflg, &(p_ctx->res_hndl)); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + chunks, &actual_size); + if (rc != 0 || chunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + + pid = getpid(); + if(oflg & MC_WRONLY) {//true for MC_WRONLY & MC_RDWR + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX vlba\n",st_lba); + return rc; + } + } + } + + pthread_cancel(thread); + child_pid = fork(); + if(child_pid == 0) + { + //child process + mc_hndl_old = p_ctx->mc_hndl; + if(do_basic_setting(p_ctx) != 0) + { + exit(-1); + } + + //do mc_clone + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + + rc = mc_clone(p_ctx->mc_hndl, mc_hndl_old, cnflg); + if (rc != 0) { + fprintf(stderr, "error cloning rc %d\n", rc); + exit(-1); + } + + // close parent inherited interfaces + rc = mc_unregister(mc_hndl_old); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + rc = mc_stat(p_ctx->mc_hndl,p_ctx->res_hndl,&l_mc_stat); + if (rc != 0) { + fprintf(stderr, "error in mc_stat rc %d\n", rc); + exit(-1); + } + pid = getpid(); + //printf("pid:%u chunks:%lu actual_size: %lu\n",pid,chunks,actual_size); + actual_size = l_mc_stat.size; + if(chunks != actual_size) + { + fprintf(stderr, "size mismatch rc %d\n", rc); + exit(-1); + } + + nlba = chunks * (1 << 
l_mc_stat.nmask); + st_lba = nlba/2; + if(cnflg == MC_RDONLY){ + rc = send_write(p_ctx, st_lba, stride, pid,VLBA); + if(rc == 0){ + fprintf(stderr,"write should fail MC_RDONLY clone 0X%lX vlba\n",st_lba); + exit(-1); + } + g_error = rc; + } + else if(cnflg == MC_WRONLY) + { + rc = send_read(p_ctx,st_lba,stride,VLBA); + if(rc == 0){ + fprintf(stderr,"read should fail for MC_WRONLY clone 0X%lX vlba\n",st_lba); + exit(-1); + } + g_error = rc; + } + else if(cnflg == MC_RDWR) + { + rc1 = send_write(p_ctx, st_lba, stride, pid,VLBA); + rc2 = send_read(p_ctx,st_lba,stride,VLBA); + if((rc1 == 0) && (rc2 == 0)) + { + fprintf(stderr,"Read/Write should fail @ 0X%lX vlba\n",st_lba); + exit(-1); + } + if(rc1) + g_error = rc1; + else + g_error = rc2; + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + newchunks,&actual_size); + //printf("pid:%u newchunks:%lu actual_size: %lu\n",pid,newchunks,actual_size); + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + newchunks = 0; + //printf("Calling again mc_size with 0 value\n"); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + 0,&actual_size); + + //printf("pid:%u newchunks:%lu actual_size: %lu\n",pid,newchunks,actual_size); + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + exit(-1); + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + exit(g_error); + } + else + { + //let child process done cloning + sleep(2); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, + newchunks,&actual_size); + if (rc != 0 || newchunks != actual_size) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + debug("pid:%u newchunks:%lu actual_size: %lu\n",pid,newchunks,actual_size); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + debug("pid = %u, cpid = %u\n", pid, child_pid); + fflush(stdout); + child_pid = wait(&rc); + if 
(WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + } + debug("%d terminated, signalled %s, signo %d\n", + child_pid, WIFSIGNALED(rc) ? "yes" : "no", WTERMSIG(rc)); + fflush(stdout); + } + ctx_close(p_ctx); + mc_term(); + return rc; +} + +void *exploit_chunk(void *arg) +{ + struct ctx *p_ctx = (struct ctx*)arg; + mc_hndl_t mc_hndl = p_ctx->mc_hndl; + int rc=0; + int i; + res_hndl_t res_handl; + __u64 size = 16; + __u64 actual_size=0; + __u64 plba=0; + __u64 st_lba; + __u64 nlba; + int myloop = 5; + int inner_loop =2; + __u64 stride; + mc_stat_t l_mc_stat; + bool is_regress = false; + + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + myloop = 100; + inner_loop = 1000; + is_regress = true; + debug("%d : %s : %d :Regress Outerloop: %d & inner loop:%d\n", + pid, __func__, __LINE__, myloop, inner_loop); + } + while(myloop > 0){ + pthread_mutex_lock(&mutex); + rc = mc_open(mc_hndl, MC_RDWR, &res_handl); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + debug_2("%d : rsh %d started\n", pid, res_handl); + for(i = 0; i< inner_loop;i++){ + pthread_mutex_lock(&mutex); + rc = mc_size(mc_hndl, res_handl, size,&actual_size); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_size: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + pthread_mutex_lock(&mutex); + rc = mc_stat(mc_hndl, res_handl, &l_mc_stat); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_stat: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + if(is_regress) + stride = 0x1000; + + debug_2("%d : R/W started for rsh %d from 0X0 to 0X%lX\n", + pid, res_handl, nlba-1); + for(st_lba = 0; st_lba < nlba; st_lba += NUM_CMDS*stride) { + debug_2("%d : start lba 0X%lX total lba 0X%lX rsh %d\n", + pid, st_lba, 
nlba,res_handl); + pthread_mutex_lock(&mutex); + p_ctx->res_hndl = res_handl; + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + if(rc) { + mc_stat(mc_hndl, res_handl, &l_mc_stat); + if(size == 0 || (l_mc_stat.size * (1 << l_mc_stat.nmask)) <= st_lba) { + printf("%d : Fine, send write(0X%lX) was out of bounds, MAX LBAs(0X%lX)\n", + pid, st_lba, size * (1 << l_mc_stat.nmask)); + pthread_mutex_unlock(&mutex); + break; + }else { + g_error = -1; + fprintf(stderr,"%d : chunk(0X%lX)IO failed rsh %d st_lba(0X%lX) range(0X%lX)\n", + pid, size, res_handl, st_lba, nlba-1); + pthread_mutex_unlock(&mutex); + return NULL; + } + }else { + rc = send_read(p_ctx, st_lba, stride,VLBA); + rc += rw_cmp_buf(p_ctx, st_lba); + pthread_mutex_unlock(&mutex); + if(rc){ + g_error = -1; + return NULL; + } + } + } + debug_2("%d : R/W done for rsh %d from 0X0 to 0X%lX\n", + pid, res_handl, nlba-1); + size = (rand()%10+1)*16; + } + pthread_mutex_lock(&mutex); + rc = mc_stat(mc_hndl, res_handl, &l_mc_stat); + pthread_mutex_unlock(&mutex); + size= l_mc_stat.size; + if(size != 0){ + pthread_mutex_lock(&mutex); + rc = mc_xlate_lba(mc_hndl, res_handl, 0,&plba); + pthread_mutex_unlock(&mutex); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_xlate_lba: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + } + pthread_mutex_lock(&mutex); + rc = mc_close(mc_hndl, res_handl); + pthread_mutex_unlock(&mutex); + debug_2("%d : now closing rsh %d\n", pid, res_handl); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_close: failed,rc %d\n", p_ctx->ctx_hndl,rc); + g_error = -1; + return NULL; + } + myloop--; + debug("%d : %d loop remains was rsh %d\n", pid, myloop, res_handl); + } + return 0; +} +int chunk_regress() +{ + struct ctx_alloc p_ctx_a; + struct pthread_alloc *p_thread_a; + struct ctx *p_ctx = &(p_ctx_a.ctx); + pthread_mutexattr_t mattr; + pthread_t thread; + int rc = 0; + int i; + + pid = getpid(); + debug("%d : afu=%s and master=%s\n",pid, afu_path, master_dev_path); + + 
if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("%d : mc_init success.\n", pid); + + //Allocating structures for pthreads. + p_thread_a = (struct pthread_alloc *) malloc(sizeof(struct pthread_alloc) * MAX_NUM_THREADS); + if(p_thread_a == NULL) { + fprintf(stderr, " Can not allocate thread structs, errno %d\n", errno); + return -1; + } + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context initialization failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, + &p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. Error registering ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + debug_2("%d : ctx hand & mc hnd: %d & %p\n", + pid, p_ctx->ctx_hndl, p_ctx->mc_hndl); + + //initialize the mutex + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + //create threads + for(i=0;i< MAX_NUM_THREADS; ++i) + { + rc = pthread_create(&(p_thread_a[i].rrq_thread), NULL, &exploit_chunk, (void *)p_ctx); + if(rc) { + fprintf(stderr, "Error creating thread %d, errno %d\n", i, errno); + return -1; + } + } + + //destroy mutexattr + pthread_mutexattr_destroy(&mattr); + //joining + for(i=0;i< MAX_NUM_THREADS; ++i) + { + pthread_join(p_thread_a[i].rrq_thread, NULL); + } + + //destroy the mutex + pthread_mutex_destroy(&mutex); + + pthread_cancel(thread); + rc = mc_unregister(p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "mc_unregister failed for mc_hdl %p\n", p_ctx->mc_hndl); + return -1; + } + + ctx_close(p_ctx); + mc_term(); + //free allocated space + free(p_thread_a); + rc = g_error; + g_error =0; + debug("%d : I am returning %d\n", pid, rc); + return rc; +} + +int mc_size_regress_internal() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=32; + __u64 actual_size=0; + __u64 nlba; + __u64 stride = LBA_BLK; + __u32 i; + 
int mc_size_regrss_l = 10; + mc_stat_t l_mc_stat; + pthread_t thread; + pid = getpid(); + + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + mc_size_regrss_l = 8000; + } + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + for(i = 1 ; i <= mc_size_regrss_l; i++) { + chunks = rand()%100 +1; + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + if(actual_size) { + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("%d : IO from 0X%lX to 0X%lX\n", + pid, nlba-LBA_BLK*NUM_CMDS, nlba-1); + rc = send_write(p_ctx, nlba-LBA_BLK*NUM_CMDS, stride, pid,VLBA); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, nlba-LBA_BLK*NUM_CMDS, stride,VLBA); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, nlba-LBA_BLK*NUM_CMDS); + CHECK_RC(rc, "send_read"); + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,0, &actual_size); + CHECK_RC(rc, "mc_size"); + if( i % 1000 == 0) + printf("%d : loop %d(%d) done...\n", pid, i, mc_size_regrss_l); + } + pthread_cancel(thread); + return 0; +} + +int mc_test_chunk_regress(int cmd) +{ + int rc; + int i; + pid_t pid1; + int max_p= 2; + + pid = getpid(); + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + max_p = MAX_OPENS; + debug("%d : Do %s Regress for %d context processes\n", + pid, __func__, max_p); + if(4 == cmd) + debug("mc_size api(0 to value & viceversa) Regress 1000 loops...\n"); + } + for(i = 0; i< max_p;i++) + { + if(fork() == 0) + { + debug("%d process created...\n",i+1); + usleep(1000); + 
if(1 == cmd) // chunk regress + rc = chunk_regress(); + else if(4 == cmd) //mc_size regress + rc = mc_size_regress_internal(); + else //ctx regress create &destroy with io & wo io + rc = mc_test_ctx_regress(cmd); + + if(rc) + debug("%d : exiting with rc = %d\n", pid, rc); + + fflush(stdout); + exit(rc); + } + } + + while((pid1 = waitpid(-1,&rc,0))) + { + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + if(rc) + g_error = -1; + } + + if(WIFSIGNALED(rc)) { + debug("%d : killed by %d signal\n", pid1, WTERMSIG(rc)); + if(WCOREDUMP(rc)) + fprintf(stderr, "%d : was core dupmed ...\n", pid1); + } + + debug("pid %d exited with rc = %d\n", pid1, rc); + + if(pid1 == -1) + break; + } + fflush(stdout); + rc = g_error; + g_error = 0; + return rc; +} + +int mc_test_chunk_regress_long() +{ + int rc; + int i; + int lrun=2; + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + lrun = 100; + printf("%d : Do %s Regress loop : %d\n", + pid, __func__, lrun); + fflush(stdout); + } + pid = getpid(); + for(i = 1; i <= lrun; i++){ + debug("Loop %d(%d) started...\n", i, lrun); + rc = mc_test_chunk_regress(1); + debug("Loop %d(%d) done...\n", i, lrun); + if(i%10 == 0){ + printf("Loop %d(%d) done...\n", i, lrun); + fflush(stdout); + } + if(rc){ + fprintf(stderr, "Loop %d is failed with rc = %d\n", i, rc); + break; + } + } + return rc; +} +int mc_test_chunk_regress_both_afu() +{ + int rc; + int i; + pid_t pid; + int max_p = 4; + char l_afu[MC_PATHLEN]; + char l_master[MC_PATHLEN]; + char buffer[MC_PATHLEN]; + char *str; + char *afu_1 = "0.0s"; + char *afu_2 = "1.0s"; + strcpy(l_afu, afu_path); + strcpy(l_master, master_dev_path); + for(i = 0; i < max_p; i++) { + if(i%2){ + strcpy(afu_path, l_afu); + strcpy(master_dev_path, l_master); + } + else { + str = strstr(l_afu, afu_1); + if(str == NULL) {//ENV var set with 1.0 + strncpy(buffer, l_afu, strlen(l_afu)-strlen(afu_2)); + buffer[strlen(l_afu)-strlen(afu_2)]='\0'; + strcat(buffer, afu_1); + }else { + strncpy(buffer, 
l_afu, strlen(l_afu)-strlen(afu_1)); + buffer[strlen(l_afu)-strlen(afu_1)]='\0'; + strcat(buffer, afu_2); + } + strcpy(afu_path, buffer); + strncpy(master_dev_path, afu_path, strlen(afu_path)-1); + master_dev_path[strlen(afu_path)-1] ='\0'; + strcat(master_dev_path, "m"); + } + if(fork() == 0) { + rc =chunk_regress(); + exit(rc); + } + } + + while((pid = waitpid(-1, &rc, 0))) { + if(rc != 0) { + g_error = -1; + } + debug("pid %d exited. \n", pid); + fflush(stdout); + + if(pid == -1) { + break; + } + } + + rc = g_error; + g_error = 0; + + return rc; +} + +int test_mc_lun_size(int cmd) +{ + int rc; + int rc1 =0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 max_chunks = 0xFFFFFF0000000000; + __u64 actual_size; + __u64 nlba; + __u64 size; + __u64 plba = 0; + __u64 stride; + __u64 st_lba =0; + pthread_t thread; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. 
ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + debug("ctx hand %d & mc hnd:%p\n", p_ctx->ctx_hndl, p_ctx->mc_hndl); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR,&p_ctx->res_hndl); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + return -1; + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, max_chunks, &actual_size); + if(rc != 0) { + fprintf(stderr,"mc_size: failed,rc %d\n", rc); + return -1; + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl,&l_mc_stat); + if(rc != 0) + { + fprintf(stderr,"mc_get_size failed rc =%d\n", rc); + return -1; + } + //printf("ctx:%d res_hand:%d vdisk unit: %lu\n",p_ctx->ctx_hndl,p_ctx->res_hndl,actual_size); + stride = 1 << l_mc_stat.nmask; + actual_size = l_mc_stat.size; + nlba = actual_size * (1 << l_mc_stat.nmask); + size = nlba * l_mc_stat.blk_len; + debug("LUN = %lu(0x%lx) LBAs and One LUN size =%lu(0x%lx)Bytes\n",nlba,nlba,size,size); + printf("LUN = %lu GB\n",size/(1024*1024*1024)); + + if(1 == cmd) { //Test PLBA out of range + plba = nlba; + pid = getpid(); + rc1 = send_write(p_ctx, plba, stride, pid, NO_XLATE); + }else if(2 == cmd) { + //start writing plba + nlba = (1 << l_mc_stat.nmask); + //nlba = 10 * (1 << l_mc_stat.nmask); + for(st_lba = 0; st_lba < nlba; st_lba++) { + rc = send_write(p_ctx, st_lba, stride, st_lba, NO_XLATE); + if(rc) { + fprintf(stderr,"send write failed for 0X%lX PLBA\n",st_lba); + return rc; + } + } + //now start reading vlba + for(st_lba = 0; st_lba < nlba; st_lba++) { + rc = send_read(p_ctx, st_lba, stride, VLBA); + CHECK_RC(rc, "send_read"); + rc = mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, st_lba, &plba); + CHECK_RC(rc, "mc_xlate_lba"); + + if(memcmp((__u64*)&p_ctx->rbuf[0][0], &plba, sizeof(__u64))) + { + fprintf(stderr, "miscompare of 0X%lX & data \n",plba); + return -1; + } + } + } + pthread_cancel(thread); + rc = mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + if(rc != 0) { + fprintf(stderr,"mc_close failed rc=%d\n",rc); + return -1; + } + 
rc = mc_unregister(p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr,"mc_unregister failed rc=%d\n",rc); + return -1; + } + ctx_close(p_ctx); + if(1 == cmd){ + return rc1; + } + return 0; +} + +int test_mc_dup_api() +{ + int rc; + struct ctx *p_ctx; + struct ctx *main_p_ctx; + struct ctx *dup_p_ctx; + res_hndl_t my_res_hndl[MAX_RES_HANDLE]; + __u64 size = 1; + __u64 actual_size; + int i; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = posix_memalign((void **)&p_ctx, 0x1000, sizeof(struct ctx)*2); + if(rc != 0) + { + fprintf(stderr, "Can not allocate ctx structs, errno %d\n", errno); + return -1; + } + + main_p_ctx = &p_ctx[0]; + dup_p_ctx = &p_ctx[1]; + + rc = ctx_init(main_p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + rc = ctx_init(dup_p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + + rc = mc_register(master_dev_path, main_p_ctx->ctx_hndl, + (volatile __u64 *)main_p_ctx->p_host_map,&main_p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. ctx_hndl %d, rc %d\n",main_p_ctx->ctx_hndl, rc ); + return -1; + } + debug("ctx hand %d & mc hnd:%p\n", main_p_ctx->ctx_hndl, main_p_ctx->mc_hndl); + + rc = mc_dup(main_p_ctx->mc_hndl,dup_p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_dup: failed. 
rc %d\n",rc ); + ctx_close(main_p_ctx); + ctx_close(dup_p_ctx); + mc_term(); + return -1; + } + dup_p_ctx->mc_hndl = main_p_ctx->mc_hndl; + + for(i = 0; i< MAX_RES_HANDLE; i++) + { + rc = mc_open(main_p_ctx->mc_hndl,MC_RDWR,&my_res_hndl[i]); + if(rc != 0) { + fprintf(stderr, "mc_open: failed,rc %d\n",rc); + return -1; + } + rc = mc_size(main_p_ctx->mc_hndl,my_res_hndl[i],size,&actual_size); + if(rc != 0) { + fprintf(stderr, "mc_size: failed,rc %d\n", rc); + return -1; + } + if(size != actual_size) + { + fprintf(stderr,"ctx: %d:size mismatched: %lu : %lu\n", main_p_ctx->ctx_hndl,size,actual_size); + return -1; + } + + } + for(i = 0; i< MAX_RES_HANDLE; i++) + { + rc = mc_close(main_p_ctx->mc_hndl,my_res_hndl[i]); + if(rc != 0) { + fprintf(stderr, "mc_open: failed,rc %d\n",rc); + return -1; + } + } + mc_unregister(main_p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr,"mc_unregister failed rc=%d\n",rc); + return -1; + } + + ctx_close(main_p_ctx); + ctx_close(dup_p_ctx); + mc_term(); + return 0; + +} + +int test_mc_clone_many_rht() +{ + int rc; + int i; + struct ctx testctx; + mc_hndl_t mc_hndl_old; + mc_hndl_t mc_hndl; + pid_t child_pid; + __u64 chunks =16; + __u64 actual_size =0; + __u64 stride=0; + __u64 st_lba; + __u64 nlba; + struct ctx *p_ctx = &testctx; + pthread_t thread; + res_hndl_t my_rsh[MAX_RES_HANDLE]; + res_hndl_t RES_CLOSED = 0xFFFF; + res_hndl_t closed[5] = {1,5,8,10,14}; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if(do_basic_setting(p_ctx) != 0) { + return -1; + } + + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + for(i = 0; i < MAX_RES_HANDLE; i++) + { + chunks = (i+1)*16; + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &my_rsh[i]); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return rc; + } + rc = mc_size(p_ctx->mc_hndl, my_rsh[i], 
chunks, &actual_size); + if (rc !=0) { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return rc; + } + } + + for(i = 0; i < 5; i++) + { + rc = mc_close(p_ctx->mc_hndl, my_rsh[closed[i]]); + if(rc) { + fprintf(stderr, "error close res_hndl rc %d\n", rc); + return rc; + } + my_rsh[closed[i]] = RES_CLOSED; + } + + pid = getpid(); + for(i = 0; i < MAX_RES_HANDLE; i++) { + if(RES_CLOSED == my_rsh[i]){ + continue; + } + rc = mc_stat(p_ctx->mc_hndl, my_rsh[i], &l_mc_stat); + if(rc) { + fprintf(stderr, "mc_stat failed, rc = %d\n",rc); + return -1; + } + actual_size = l_mc_stat.size; + stride = (1 << l_mc_stat.nmask); + nlba = actual_size * (1 << l_mc_stat.nmask); + p_ctx->res_hndl = my_rsh[i]; + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX vlba\n",st_lba); + return rc; + } + } + } + pthread_cancel(thread); + child_pid = fork(); + if(child_pid == 0) + { + //child process + mc_hndl_old = p_ctx->mc_hndl; + if(do_basic_setting(p_ctx) != 0) + { + exit(-1); + } + mc_hndl = p_ctx->mc_hndl; + rc = mc_hdup(mc_hndl, &p_ctx->mc_hndl); + mc_unregister(mc_hndl); + + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + //do mc_clone + rc = mc_clone(p_ctx->mc_hndl, mc_hndl_old, MC_RDWR); + if (rc != 0) { + fprintf(stderr, "error cloning rc %d\n", rc); + exit(-1); + } + + // close parent inherited interfaces + rc = mc_unregister(mc_hndl_old); + for(i = 0; i < MAX_RES_HANDLE; i++) { + if(RES_CLOSED == my_rsh[i]){ + continue; + } + rc = mc_stat(p_ctx->mc_hndl, my_rsh[i], &l_mc_stat); + if(rc) { + fprintf(stderr, "mc_stat failed, rc = %d\n",rc); + exit(-1); + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + p_ctx->res_hndl = my_rsh[i]; + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_read(p_ctx, st_lba, stride, VLBA); + rc += rw_cmp_buf_cloned(p_ctx, st_lba); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX 
vlba\n",st_lba); + exit(rc); + } + } + } + + pthread_cancel(thread); + for(i = 0; i < MAX_RES_HANDLE; i++) { + if(RES_CLOSED == my_rsh[i]){ + continue; + } + mc_close(p_ctx->mc_hndl, my_rsh[i]); + } + mc_unregister(p_ctx->mc_hndl); + exit(0); + } + else + { + //let child process done cloning + sleep(2); + //lets open all + chunks = 16; + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + for(i = 0; i < 5; i++) { + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if(rc) { + fprintf(stderr, "mc open failed, rc = %d\n", rc); + return -1; + } + closed[i]=p_ctx->res_hndl; + rc = mc_size(p_ctx->mc_hndl,p_ctx->res_hndl,chunks,&actual_size); + if(rc) { + fprintf(stderr, "mc size failed, rc = %d\n", rc); + return -1; + } + chunks = (i+1)*16; + } + + for(i = 0; i < MAX_RES_HANDLE; i++) { + if(RES_CLOSED == my_rsh[i]){ + continue; + } + p_ctx->res_hndl = my_rsh[i]; + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + if(rc) { + fprintf(stderr, "mc_stat failed, rc = %d\n",rc); + exit(-1); + } + actual_size = l_mc_stat.size; + nlba = actual_size * (1 << l_mc_stat.nmask); + + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + rc += send_read(p_ctx, st_lba, stride, VLBA); + rc += rw_cmp_buf(p_ctx, st_lba); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX vlba\n",st_lba); + return rc; + } + } + } + for(i = 0; i < 5; i++) { + p_ctx->res_hndl = closed[i]; + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + if(rc) { + fprintf(stderr, "mc_stat failed, rc = %d\n",rc); + exit(-1); + } + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + rc += send_read(p_ctx, st_lba, stride, VLBA); + rc += rw_cmp_buf(p_ctx, st_lba); + if(rc != 0) + { + fprintf(stderr,"send write failed 0X%lX vlba\n",st_lba); + return rc; + } + } + } + + pthread_cancel(thread); + for(i = 0; i < 
MAX_RES_HANDLE; i++) { + if(RES_CLOSED == my_rsh[i]){ + continue; + } + mc_close(p_ctx->mc_hndl, my_rsh[i]); + } + for(i = 0; i < 5; i++) { + mc_close(p_ctx->mc_hndl, closed[i]); + } + mc_unregister(p_ctx->mc_hndl); + debug("pid = %u, cpid = %u\n", pid, child_pid); + fflush(stdout); + child_pid = wait(&rc); + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + } + debug("%d terminated, signalled %s, signo %d\n", + child_pid, WIFSIGNALED(rc) ? "yes" : "no", WTERMSIG(rc)); + fflush(stdout); + } + ctx_close(p_ctx); + mc_term(); + return rc; +} + +int ctx_null_rrq() +{ + struct ctx myctx; + struct ctx *p_ctx = &myctx; + int rc; + __u64 chunks=16; + __u64 actual_size=0; + __u64 st_lba; + __u64 stride; + __u64 nlba; + mc_stat_t l_mc_stat; + pthread_t thread; + + pid = getpid(); + rc = mc_init(); + CHECK_RC(rc, "mc_init"); + + rrq_c_null =1; //make hrrq_current = NULL + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx_init"); + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + if(chunks != actual_size) + { + CHECK_RC(1, "doesn't have enough chunk space"); + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = (1 << l_mc_stat.nmask); + for(st_lba =0; st_lba < nlba; st_lba +=(NUM_CMDS * stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, st_lba, stride,VLBA); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, st_lba); + CHECK_RC(rc, "rw_cmp_buf"); + } + pthread_cancel(thread); + rc = mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + debug("%d : properly exiting now\n", 
getpid()); + fflush(stdout); + return rc; +} +int test_many_ctx_one_rrq_curr_null() +{ + int rc = 0; + pid_t pid; + int max_p= 3; + int i; + pid_t er_pid; + for(i = 0; i< max_p;i++) { + if(fork() == 0) { + debug("%d : good process created\n", getpid()); + rc = chunk_regress(); + debug("%d : good process exiting now\n", getpid()); + exit(rc); + } + //sleep(1); + } + //now create another ctx with null rrq + er_pid = fork(); + if(er_pid == 0) { + debug("%d : ctx_null_rrq process created\n", getpid()); + rc = ctx_null_rrq(); + debug("%d : ctx_null_rrq process exiting now \n", getpid()); + exit(rc); + } + + while((pid = waitpid(-1,&rc,0))) { + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + if((rc != 0)&& (er_pid != pid)){ //ignore error process rc + g_error = -1; + } + } + debug("pid %d exited with rc =%d \n", pid, rc); + + if(pid == -1) + break; + } + rc = g_error; + g_error = 0; + fflush(stdout); + return rc; +} + +int test_all_afu_devices() +{ + int rc; + pid_t mypid; + int i = 0; + char afu_dev[MC_PATHLEN]; + int afu_1 = 4; //length of 0.0s or 1.0s + int fd; + + strncpy(afu_dev, afu_path, strlen(afu_path)-afu_1); + afu_dev[strlen(afu_path)-afu_1]='\0'; + while(1) + { + sprintf(afu_path, "%s%d.0s", afu_dev, i); + sprintf(master_dev_path, "%s%d.0m", afu_dev, i); + fd = open(afu_path, O_RDWR); + if(fd < 0) { + debug("No more afu devices left\n"); + break; + } + close(fd); + if(fork() == 0){ + printf("Running IO on AFU %s\n", afu_path); + rc = chunk_regress(); + exit(rc); + } + i++; + } + + while((mypid = waitpid(-1,&rc,0))) + { + if(rc != 0){ + g_error = -1; + } + debug("pid %d exited...\n", mypid); + + if(mypid == -1) { + break; + } + } + rc = g_error; + g_error = 0; + fflush(stdout); + return rc; +} + +int test_mix_in_out_bound_lba() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + int myloop = 2; + __u64 chunks=256; + __u64 actual_size=0; + __u64 st_lba,nlba; + __u64 stride; + mc_stat_t l_mc_stat; + pthread_t thread; + char *str = getenv("LONG_RUN"); 
+ if((str != NULL) && !strcmp(str, "TRUE")) { + myloop = 100; + } + + pid = getpid(); + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + debug("%d : chunk(0X%lX) & lba range(0X%lX)\n", pid, actual_size, nlba-1); + while(myloop-- > 0) { + for(st_lba =0; st_lba < nlba; st_lba += (NUM_CMDS * stride)) { + //in bound + send_write(p_ctx, st_lba, stride, pid, VLBA); + send_read(p_ctx, st_lba, stride,VLBA); + //out bound + send_write(p_ctx, nlba +st_lba, stride, pid, VLBA); + send_read(p_ctx, nlba +st_lba, stride,VLBA); + } + } + pthread_cancel(thread); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + fflush(stdout); + return 0; +} + +/* +* Function : test_mc_good_error_afu_dev +* return : 0 success else failure +* +* Run Good path test case like Chunk regress on one AFU dev +* And Error path test case on another AFU dev +* Make sure good path test case should run smoothly +* Doesn't bother about error test cases +*/ +int test_mc_good_error_afu_dev() +{ + int rc = 0;; + int status=0; + int i; + int lloop = 5; + char buffer[MC_PATHLEN]; + char *str; + char *afu_1 = "0.0s"; + char *afu_2 = "1.0s"; + int j; + + char *str1 = getenv("LONG_RUN"); + if((str1 != NULL) && !strcmp(str1, "TRUE")) { + lloop = 100; + } + pid = getpid(); + debug("%d : Good path on %s\n", pid, afu_path); + //now create a child process & do error path + if(fork() == 0) + { 
+ dont_displa_err_msg = 1; + pid = getpid(); + str = strstr(afu_path, afu_1); + if(str == NULL) {//ENV var set with 1.0 + strncpy(buffer, afu_path, strlen(afu_path)-strlen(afu_2)); + buffer[strlen(afu_path)-strlen(afu_2)]='\0'; + strcat(buffer, afu_1); + }else { + strncpy(buffer, afu_path, strlen(afu_path)-strlen(afu_1)); + buffer[strlen(afu_path)-strlen(afu_1)]='\0'; + strcat(buffer, afu_2); + } + + strcpy(afu_path, buffer); + strncpy(master_dev_path, afu_path, strlen(afu_path)-1); + master_dev_path[strlen(afu_path)-1]='\0'; + strcat(master_dev_path, "m"); + debug("%d : Error path on afu %s and master %s\n", + pid, afu_path, master_dev_path); + debug("%d: error path process started..\n", pid); + for(i = 0; i < lloop; i++) { + debug("%d : starting loop %d(%d)\n", pid, i , lloop); + rc = test_mix_in_out_bound_lba(); + + for(j=1; j<=13; j++) { + rc = test_mc_invalid_ioarcb(j); + } + rc = mc_test_engine(TEST_MC_RW_CLS_RSH); + rc = mc_test_engine(TEST_MC_UNREG_MC_HNDL); + } + debug("%d: error path process exited..\n", pid); + exit(rc); + } else { + debug("%d: Good path process started..\n", pid); + for(i = 0; i < lloop; i++) { + rc += chunk_regress(); + } + debug("%d: Good path process exited..\n", pid); + wait(&status); + } + return rc; +} + +int mc_test_ctx_regress(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=32; + __u64 actual_size=0; + __u64 nlba; + __u64 stride = LBA_BLK; + mc_stat_t l_mc_stat; + pthread_t thread; + + pid = getpid(); + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, 
"mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + if(3 == cmd){ + nlba = actual_size * (1 << l_mc_stat.nmask); + rc = send_write(p_ctx, nlba-LBA_BLK*NUM_CMDS, stride, pid,VLBA); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, nlba-LBA_BLK*NUM_CMDS, stride,VLBA); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, nlba-LBA_BLK*NUM_CMDS); + CHECK_RC(rc, "rw_cmp_buf"); + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + return rc; +} + +int test_mc_regress_ctx_crt_dstr(int cmd) +{ + int rc; + int i; + int lrun=10; + char *str = getenv("LONG_RUN"); + if((str != NULL) && !strcmp(str, "TRUE")) { + lrun = 8000; + printf("%s : %d : Regress loop : %d\n", __func__, __LINE__, lrun); + } + for(i = 1; i <= lrun; i++){ + debug("Loop %d(%d) started...\n", i, lrun); + if(1 == cmd) //mc_regress_ctx_crt_dstr without io + rc = mc_test_chunk_regress(2); + else //mc_regress_ctx_crt_dstr with io + rc = mc_test_chunk_regress(3); + + debug("Loop %d(%d) done...\n", i, lrun); + if(i%100 == 0) { + printf("%d : Loop %d(%d) done...\n", getpid(), i, lrun); + } + if(rc){ + fprintf(stderr, "Loop %d is failed with rc = %d\n", i, rc); + break; + } + } + return rc; +} diff --git a/src/master/test/mc_test_engine.c b/src/master/test/mc_test_engine.c new file mode 100644 index 00000000..70c8194e --- /dev/null +++ b/src/master/test/mc_test_engine.c @@ -0,0 +1,331 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test_engine.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" + +int test1() +{ + return -1; +} + +int mc_test_engine(mc_test_t test_name) +{ + int rc = 0; + if(get_fvt_dev_env()) return -1; + + if(fork() == 0){ + switch(test_name) + { + case TEST_MC_REG: + rc = mc_register_tst(); + break; + + case TEST_MC_UNREG: + rc = mc_unregister_tst(); + break; + + case TEST_MC_OPEN: + rc = mc_open_tst(1); + break; + + case TEST_MC_OPEN_ERROR1: + rc = mc_open_tst(2); + break; + + case TEST_MC_OPEN_ERROR2: + rc = mc_open_tst(3); + break; + + case TEST_MC_OPEN_ERROR3: + rc = mc_open_tst(4); + break; + + case TEST_MC_CLOSE: + rc = mc_close_tst(1); + break; + + case TEST_MC_CLOSE_ERROR1: + rc = mc_close_tst(2); + break; + + case TEST_MC_SIZE: + rc = mc_size_tst(1); + break; + + case TEST_MC_SIZE_ERROR2: + rc = mc_size_tst(2); + break; + + case TEST_MC_SIZE_ERROR3: + rc = mc_size_tst(3); + break; + + case TEST_MC_SIZE_ERROR4: + rc = mc_size_tst(4); + break; + + case TEST_MC_GETSIZE: + rc = mc_size_tst(0); + break; + + case TEXT_MC_XLAT_IO: + rc = mc_xlate_tst(1); + break; + + case TEXT_MC_XLAT_IO_V2P: + rc = mc_xlate_tst(2); + break; + + case TEXT_MC_XLAT_IO_P2V: + rc = mc_xlate_tst(3); + break; + + case TEST_MC_HDUP_ORG_IO: + rc = mc_hdup_tst(1); + break; + + case TEST_MC_HDUP_DUP_IO: + rc = mc_hdup_tst(2); + break; + + case TEST_MC_HDUP_ERROR1: + rc = mc_hdup_tst(3); + break; + + case TEST_MC_DUP: + rc = test_mc_dup_api(); + break; + + case TEST_MC_CLONE_RDWR: + rc = test_mc_clone_api(MC_RDWR); + break; + + case TEST_MC_CLONE_READ: + rc = 
test_mc_clone_api(MC_RDONLY); + break; + + case TEST_MC_CLONE_WRITE: + rc = test_mc_clone_api(MC_WRONLY); + break; + + case TEST_MC_CLN_O_RDWR_CLN_RD: + rc = test_mc_clone_error(MC_RDWR,MC_RDONLY); + break; + + case TEST_MC_CLN_O_RDWR_CLN_WR: + rc = test_mc_clone_error(MC_RDWR,MC_WRONLY); + break; + + case TEST_MC_CLN_O_RD_CLN_RD: + rc = test_mc_clone_error(MC_RDONLY,MC_RDONLY); + break; + + case TEST_MC_CLN_O_RD_CLN_WR: + rc = test_mc_clone_error(MC_RDWR,MC_WRONLY); + break; + + case TEST_MC_CLN_O_WR_CLN_RD: + rc = test_mc_clone_error(MC_WRONLY,MC_RDONLY); + break; + + case TEST_MC_CLN_O_WR_CLN_WR: + rc = test_mc_clone_error(MC_WRONLY,MC_WRONLY); + break; + + case TEST_MC_CLN_O_RD_CLN_RDWR: + rc = test_mc_clone_error(MC_RDONLY, MC_RDWR); + break; + + case TEST_MC_CLN_O_WR_CLN_RDWR: + rc = test_mc_clone_error(MC_WRONLY, MC_RDWR); + break; + + case TEST_MC_MAX_SIZE: + rc = test_mc_max_size(); + break; + + case TEST_MC_MAX_CTX_N_RES: + rc = test_max_ctx_n_res(); + break; + + case TEST_MC_MAX_RES_HNDL: + rc = mc_max_vdisk_thread(); + break; + + case TEST_MC_MAX_CTX_HNDL: + rc = mc_max_open_tst(); + break; + + case TEST_MC_MAX_CTX_HNDL2: + rc = mc_open_close_tst(); + break; + + case TEST_MC_CHUNK_REGRESS: + rc = mc_test_chunk_regress(1); + break; + + case TEST_MC_SIZE_REGRESS: + rc = mc_test_chunk_regress(4); + break; + + case TEST_MC_CHUNK_REGRESS_BOTH_AFU: + rc = mc_test_chunk_regress_both_afu(); + break; + + case TEST_MC_SIGNAL: + rc = test1(); + break; + + case TEST_ONE_UNIT_SIZE: + rc = test_one_aun_size(); + break; + case TEST_MC_LUN_SIZE: + rc = test_mc_lun_size(0); + break; + + case TEST_MC_PLBA_OUT_BOUND: + rc = test_mc_lun_size(1); + break; + + case TEST_MC_ONE_CTX_TWO_THRD: + rc = test_onectx_twothrd(1); + break; + + case TEST_MC_ONE_CTX_RD_WRSIZE: + rc = test_onectx_twothrd(2); + break; + + case TEST_MC_TWO_CTX_RD_WRTHRD: + rc = test_two_ctx_two_thrd(1); + break; + + case TEST_MC_TWO_CTX_RDWR_SIZE: + rc = test_two_ctx_two_thrd(2); + break; + + case 
TEST_MC_TEST_VDISK_IO: + rc = test_vdisk_io(); + break; + + case TEST_MC_LUN_DISCOVERY: + rc = test_lun_discovery(1); + break; + + case TEST_MC_AFU_RC_CAP_VIOLATION: + rc = test_lun_discovery(2); + break; + + case TEST_MC_RW_CLS_RSH: + rc = test_rw_close_hndl(1); + break; + + case TEST_MC_UNREG_MC_HNDL: + rc = test_rw_close_hndl(2); + break; + + case TEST_MC_RW_CLOSE_CTX: + rc = test_rw_close_hndl(3); + break; + + case TEST_MC_CLONE_MANY_RHT: + rc = test_mc_clone_many_rht(); + break; + + case TEST_AUN_WR_PLBA_RD_VLBA: + rc = test_mc_lun_size(2); + break; + + case TEST_GOOD_CTX_ERR_CTX_CLS_RHT: + rc = test_good_ctx_err_ctx(1); + break; + + case TEST_GOOD_CTX_ERR_CTX_UNREG_MCH: + rc = test_good_ctx_err_ctx(2); + break; + + case TEST_GOOD_CTX_ERR_CTX: + rc = test_good_ctx_err_ctx(3); + break; + + case TEST_MC_IOARCB_EA_ALGNMNT_16: + rc = test_mc_ioarcb_ea_alignment(1); + break; + + case TEST_MC_IOARCB_EA_ALGNMNT_128: + rc = test_mc_ioarcb_ea_alignment(2); + break; + + case TEST_MC_IOARCB_EA_INVLD_ALGNMNT: + rc = test_mc_ioarcb_ea_alignment(3); + break; + + + case TEST_MC_MNY_CTX_ONE_RRQ_C_NULL: + rc = test_many_ctx_one_rrq_curr_null(); + break; + + case TEST_MC_ALL_AFU_DEVICES: + rc = test_all_afu_devices(); + break; + + case MC_TEST_RWBUFF_GLOBAL: + rc = mc_test_rwbuff_global(); + break; + + case MC_TEST_RW_SIZE_PARALLEL: + rc = test_mc_rw_size_parallel(); + break; + + case MC_TEST_RWBUFF_SHM: + rc = test_mc_rwbuff_shm(); + break; + + case MC_TEST_GOOD_ERR_AFU_DEV: + rc = test_mc_good_error_afu_dev(); + break; + + case TEST_MC_REGRESS_RESOURCE: + rc = mc_test_chunk_regress_long(); + break; + + case TEST_MC_REGRESS_CTX_CRT_DSTR: + rc = test_mc_regress_ctx_crt_dstr(1); + break; + + case TEST_MC_REGRESS_CTX_CRT_DSTR_IO: + rc = test_mc_regress_ctx_crt_dstr(2); + break; + + default: + rc = -1; + break; + } + exit(rc); + } + wait(&rc); + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + } + return rc; +} diff --git a/src/master/test/mc_test_error.c 
b/src/master/test/mc_test_error.c new file mode 100644 index 00000000..5e69edfa --- /dev/null +++ b/src/master/test/mc_test_error.c @@ -0,0 +1,874 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test_error.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" +#include + +extern char master_dev_path[MC_PATHLEN]; +extern char afu_path[MC_PATHLEN]; + +extern pid_t pid; +extern __u64 lun_id; +extern __u64 fc_port; +extern uint8_t rc_flags; +extern int dont_displa_err_msg; + +typedef int(myfunc)(int); + +int child_mc_xlate_error(int cmd); +int child_mc_reg_error(int cmd); +int check_mc_null_params(int cmd); +int mc_invalid_ioarcb(int cmd); +int child_mc_size_error(int cmd); +int mc_test_inter_prcs_ctx_int(int cmd); + +int test_mc_error(myfunc test1,int cmd) +{ + + int rc; + if(get_fvt_dev_env()) return -1; + + if(fork() ==0) + { + rc = test1(cmd); + exit(rc); + } + wait(&rc); + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + } + return rc; + +} + +int test_mc_xlate_error(int cmd) +{ + return test_mc_error(&child_mc_xlate_error, cmd); +} +int test_mc_reg_error(int cmd) +{ + return test_mc_error(&child_mc_reg_error, cmd); +} + +int test_mc_null_params(int cmd) +{ + return test_mc_error(&check_mc_null_params, cmd); +} + +int test_mc_invalid_ioarcb(int cmd) +{ + return test_mc_error(&mc_invalid_ioarcb, cmd); +} + +int test_mc_size_error(int cmd) +{ + return test_mc_error(&child_mc_size_error, cmd); +} +int test_mc_inter_prcs_ctx(int cmd) +{ + return test_mc_error(&mc_test_inter_prcs_ctx_int, cmd); +} +int child_mc_xlate_error(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + int invalid=0; + __u64 plba; + __u64 size; + mc_stat_t l_mc_stat; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "mc_register: failed. 
ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR,&p_ctx->res_hndl); + if(rc != 0) { + fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc); + return -1; + } + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + if(1 == cmd) //without mc_size + { + rc = mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, 0,&plba); + rc = rc ? 1:0; + } + else + { + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1,&size); + if(2 == cmd) //MCH NULL + { + rc = mc_xlate_lba(NULL,p_ctx->res_hndl,0,&plba); + debug("MCH NULL rc = %d\n",rc); + rc = rc ? 2:0; + } + else if(3 == cmd) //invalid RCH + { + rc = mc_xlate_lba(p_ctx->mc_hndl,(p_ctx->res_hndl +4),0,&plba); + rc = rc ? 3:0; + } + else if(4 == cmd) //invalid VLBA + { + rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,((1 << l_mc_stat.nmask)+5),&plba); + rc = rc ? 4:0; + } + else if(5 == cmd) //NULL to plba + { + rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,0,NULL); + rc = rc ? 5:0; + } + else if(6 == cmd) //diff MCH(no mc_open) & RCH with mc_size + { + struct ctx tctx; + struct ctx *p_tctx= &tctx; + rc = ctx_init(p_tctx); + rc = mc_register(master_dev_path, p_tctx->ctx_hndl, + (volatile __u64 *)p_tctx->p_host_map,&p_tctx->mc_hndl); + rc = mc_open(p_tctx->mc_hndl,MC_RDWR,&p_tctx->res_hndl); + rc = mc_xlate_lba(p_tctx->mc_hndl,p_ctx->res_hndl,0,&plba); + rc = rc ? 6:0; + mc_close(p_tctx->mc_hndl,p_tctx->res_hndl); + mc_unregister(p_tctx->mc_hndl); + ctx_close(p_tctx); + } + else if(7 == cmd) //invaliud MCH + { + rc = mc_xlate_lba((mc_hndl_t)&invalid,p_ctx->res_hndl,0,&plba); + rc = rc ? 7:0; + } + + } + + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + if(8 == cmd) //after mc_close + { + rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,0,&plba); + rc = rc ? 
8:0; + } + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + return rc; +} + +int child_mc_reg_error(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 *map=(__u64 *)0xabcdf; + __u64 actual_size=0; + __u64 stride; + __u64 st_lba =0; + __u64 nlba; + mc_hndl_t new_mc_hndl, dup_mc_hndl; + int rc1, rc2, rc3, rc4, rc5; + pthread_t thread; + mc_stat_t l_mc_stat; + __u64 size = 128; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pid = getpid(); + if(1 == cmd) //mc_reg with NULL MMIOP + { + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + NULL,&p_ctx->mc_hndl); + if(rc) return rc; + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if(rc) return rc; + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if(rc) return rc; + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size); + if(rc) return rc; + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + if(rc) return rc; + pid = getpid(); + stride = (1 << l_mc_stat.nmask); + st_lba = (actual_size * (1 << l_mc_stat.nmask))-1; + rc = send_write(p_ctx, st_lba ,stride, pid, VLBA); + if(rc) return rc; + rc = send_read(p_ctx, st_lba ,stride, VLBA); + if(rc) return rc; + rc = rw_cmp_buf(p_ctx, st_lba); + rc = rc ? 1:0; + } + else if(2 == cmd) //NULL device path + { + rc = mc_register(NULL, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + rc = rc ? 2:0; + } + else if(3 == cmd) //with afu_path device + { + rc = mc_register(afu_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + rc = rc ? 
3:0; + } + else if(4 == cmd) //with invalid device path + { + rc = mc_register("/dev/cxl/afu50.0m", p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + rc = rc ? 4:0; + } + else if(5 == cmd) //with invalid ctx hndl(not assigned) + { + debug("actual ctx hndl :%d\n", p_ctx->ctx_hndl); + p_ctx->ctx_hndl = p_ctx->ctx_hndl + 4; + debug("invalid ctx hndl :%d\n", p_ctx->ctx_hndl); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + rc = rc ? 5:0; + } + else if(6 == cmd) //with invalid mmap adress + { + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)map,&p_ctx->mc_hndl); + rc = rc ? 6:0; + } + else if(7 == cmd) //twice mc_reg + { + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl); + CHECK_RC(rc, "mc_register"); + + rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "mc_open"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, size, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + + //twice mc_register on same ctx + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map, &new_mc_hndl); + //send write on 1st mc hndl + rc1 = send_single_write(p_ctx, 0, pid); + //do mc_size & open on old mc_reg + rc2 = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + rc3 = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, size, &actual_size); + rc4 = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + rc5 = mc_hdup(p_ctx->mc_hndl, &dup_mc_hndl); + debug("mc_hdup rc is : %d\n", rc5); + + //now do mc_unreg on old one + rc = mc_unregister(p_ctx->mc_hndl); + CHECK_RC(rc, "mc_unregister"); + //do everything on new mc hndl + p_ctx->mc_hndl = new_mc_hndl; + rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "mc_open"); + + rc = mc_size(p_ctx->mc_hndl, 
p_ctx->res_hndl, size, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + stride = 1 << l_mc_stat.nmask; + for(st_lba = 0; st_lba < nlba; st_lba += (stride * NUM_CMDS)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + CHECK_RC(rc, "send_write"); + } + if(rc1 && rc2 && rc3 && rc4 && rc5) { + rc = 7; + } + pthread_cancel(thread); + mc_unregister(p_ctx->mc_hndl); + } + else if(8 == cmd) //mc_reg twice from 2 diff process + { + + if(fork() == 0) {//mc_reg in child process as well + pid = getpid(); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl); + sleep(1); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if(!rc) { + fprintf(stderr, "%d : mc_open should fail rc = %d\n", pid, rc); + exit(-1); + } + else { + debug("%d : mc_open failed as expectd\n", pid); + } + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size); + if(!rc) { + fprintf(stderr, "%d : mc_size should fail rc = %d\n", pid, rc); + exit(-1); + } + else { + debug("%d : mc_size failed as expectd\n", pid); + } + rc = rc ? 8:0; + exit(rc); + } + else + { + sleep(1); //let child proc cal mc_reg 1str + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl); + CHECK_RC(rc, "mc_register"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "mc_open"); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size); + CHECK_RC(rc, "mc_mc_size"); + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + st_lba = (actual_size * (1 << l_mc_stat.nmask))-1; + rc += send_single_write(p_ctx, st_lba, pid); + wait(&rc); + pthread_cancel(thread); + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + rc = rc ? 
8:0; + } + mc_unregister(p_ctx->mc_hndl); + } + } + + ctx_close(p_ctx); + if(9 == cmd) //mc_reg with closed ctx + { + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + printf("calling mc_reg api after ctx close..\n"); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl); + rc = rc ? 9:0; + } + + mc_term(); + return rc; +} + +void fill_send_write(struct ctx *p_ctx, __u64 vlba, + __u64 data, __u64 stride, __u32 flags) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 plba; + int i; + + for(i = 0 ; i < NUM_CMDS; i++) { + vlba = i * stride; + + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64),data); + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + if(flags & VLBA){ + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); // write(16) Virtual LBA + }else { + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = fc_port; // either FC port + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_HOST_WRITE; + if(flags & NO_XLATE){ + plba = vlba; + } + else { + (void)mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, vlba, &plba); + } + write_64(p_u64, plba); // physical LBA# + } + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } +} +void fill_send_read(struct ctx *p_ctx, __u64 vlba, + __u32 flags) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 plba; + + memset(&p_ctx->rbuf[0][0], 0, sizeof(p_ctx->rbuf[0])); + + memset(&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = 
(__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + if (flags & VLBA){ + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_64(p_u64, vlba); // Read(16) Virtual LBA + debug("send read for vlba =0x%lX\n",vlba); + + } + else + { + p_ctx->cmd[0].rcb.lun_id = lun_id; + p_ctx->cmd[0].rcb.port_sel = fc_port; // either FC port + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_HOST_READ; + if(flags & NO_XLATE){ + plba = vlba; + } + else { + (void)mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, vlba, &plba); + } + write_64(p_u64, plba); // physical LBA# + debug("send read for plba =0x%lX\n",plba); + } + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; +} +int mc_invalid_ioarcb(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=16; + __u64 actual_size=0; + __u64 vlba =0; + __u32 *p_u32; + __u64 stride; + pthread_t thread; + mc_stat_t l_mc_stat; + int i; + + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + debug("mc_init success :%d\n",rc); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + stride = 1 << l_mc_stat.nmask; + + pid = getpid(); + vlba = (actual_size * (1 << l_mc_stat.nmask))-1; + fill_send_write(p_ctx, vlba, pid, 
stride, VLBA); + for(i = 0; i < NUM_CMDS; i++) { + if (1 == cmd){ //invalid upcode + debug("invalid upcode(0xFA) action = %d\n",cmd); + p_ctx->cmd[i].rcb.cdb[0] = 0xFA; + }else if (2 == cmd) {//EA = NULL + debug("EA = NULL action = %d\n",cmd); + p_ctx->cmd[i].rcb.data_ea = (__u64)NULL; + }else if(3 == cmd){ //invalid flgas + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags); + }else if(5 == cmd) {//SISL_AFU_RC_RHT_INVALID + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2; + }else if( 6 == cmd) {//SISL_AFU_RC_RHT_OUT_OF_BOUNDS + p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE; + }else if(7 == cmd) { //invalid address for page fault + debug("setting EA = 0x1234 to generate error page fault\n"); + p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234; + }else if(8 == cmd) { //invalid ctx_id + debug("%d : sending invalid ctx id\n", pid); + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10; + }else if(9 == cmd) { //test flag underrun + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2; + }else if(10 == cmd) {// test flag overrun + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]) +2; + }else if(11 == cmd) { //rc scsi_rc_check + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, LBA_BLK +1); + }else if(12 == cmd) { //data len 0 in ioarcb + p_ctx->cmd[i].rcb.data_len = 0; + }else if(13 == cmd) { //NUM BLK to write 0 + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 0); + } + } + //send_single_cmd(p_ctx); + send_cmd(p_ctx); + //rc = wait_single_resp(p_ctx); + rc = wait_resp(p_ctx); + if( cmd >= 9 && cmd <= 13) { + if(!rc_flags) { + if(!dont_displa_err_msg) + fprintf(stderr, "%d : Expecting rc flags non zero\n", pid); + rc = -1; + } + } + if(4 == cmd) {//invalid fc port & lun id + debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd); + fill_send_write(p_ctx, vlba, pid, stride, PLBA); + for(i = 0; i < NUM_CMDS; i++) { + 
p_ctx->cmd[i].rcb.lun_id = 0x12000; + p_ctx->cmd[i].rcb.port_sel = 0xff; + } + //send_single_cmd(p_ctx); + send_cmd(p_ctx); + rc = wait_resp(p_ctx); + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + return rc; +} +int test_mc_invalid_opcode() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=10; + __u64 actual_size=0; + __u64 vlba =0; + __u64 *p_u64; + __u32 *p_u32; + mc_stat_t l_mc_stat; + pthread_t thread; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + pid = getpid(); + vlba = (actual_size * (1 << l_mc_stat.nmask))-1; + fill_buf((__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->wbuf[0])/sizeof(__u64),pid); + + memset(&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); // write(16) Virtual LBA + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->wbuf[0]); + 
p_ctx->cmd[0].rcb.cdb[0] = 0xFA; // invalid opcode + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +int check_mc_null_params(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 size = 10; + __u64 actual_size=0; + + if(mc_init() !=0 ) { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + rc = ctx_init(p_ctx); + if(rc != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + if(1 == cmd) { //mc_reg with NULL MCH + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,NULL); + rc = rc ? 1:0; + } + else { + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n", + p_ctx->ctx_hndl, rc ); + return -1; + } + if(2 == cmd){ + rc = mc_unregister(NULL); + rc = rc ? 2:0; + } + else if(3 == cmd){ //mc_open NULL + rc = mc_open(NULL, MC_RDWR, &p_ctx->res_hndl); + rc = rc ? 3:0; + } + else if (4 == cmd) { + rc = mc_hdup(NULL, p_ctx->mc_hndl); + rc = rc ? 4:0; + } + else { + rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl); + if(5 == cmd) { + rc = mc_close(NULL, p_ctx->res_hndl); + rc = rc ? 5:0; + }else if( 6 == cmd) { + rc = mc_size(NULL, p_ctx->res_hndl, size, &actual_size); + rc = rc ? 6:0; + }else if(7 == cmd) { + rc = mc_clone(NULL, p_ctx->mc_hndl, MC_RDWR); + rc = rc ? 
7:0; + } + } + } + return rc; +} + +int child_mc_size_error(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 size=0; + int invalid=0; + + pid = getpid(); + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + if(1 == cmd) { //invalid MCH + rc = mc_size((mc_hndl_t)&invalid, p_ctx->res_hndl,1,&size); + rc = rc ? 1:0; + } else if( 2 == cmd) { //invalid RSH + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl+20,1,&size); + rc = rc ? 2:0; + } else if(3 == cmd) { //NULL size + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, NULL); + rc = rc ? 3:0; + } else if(4 == cmd) { //after mc_close + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, &size); + rc = rc ? 4:0; + } else if(5 == cmd) { //after mc_unregister + mc_unregister(p_ctx->mc_hndl); + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, &size); + rc = rc ? 
5:0; + } + ctx_close(p_ctx); + mc_term(); + return rc; +} + +int init_mc(struct ctx *p_ctx, res_hndl_t *res_hndl) +{ + int rc; + __u64 chunks=16; + __u64 actual_size=0; + + rc = mc_init(); + CHECK_RC(rc, "mc_init failed"); + debug("mc_init success :%d\n",rc); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "Context init failed"); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, *res_hndl, chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + return 0; +} + +/* + * create two ctx process & 2 resource handler each ctx + * use diff ctx handler in diff process, get another process + * ctx handler through PIPE. 
+ */ +int mc_test_inter_prcs_ctx_int(int cmd) +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + res_hndl_t res_hndl; + ctx_hndl_t ctx_hndl; + int pdes[2]; + pid_t cpid; + pthread_t thread; + __u64 stride = 0x1000; + int i; + //create pipe, child open for write + // parent open for read + + pipe(pdes); + cpid = fork(); + if( 0 == cpid) { //child one running + pid = getpid(); + debug("%d : child do init_mc \n", pid); + rc = init_mc(p_ctx, &res_hndl); + if(rc) { + fprintf(stderr, "%d : exiting due to init_mc\n:", pid); + exit(rc); + } + //do write into pipe & wait until parent kill me + close(pdes[0]); //close read des + write(pdes[1], &p_ctx->ctx_hndl, sizeof(ctx_hndl_t)); + while(1); + } else { //parent + close(pdes[1]); //close write des + //lets child do there work & wait for me + sleep(1); + pid = getpid(); + rc = init_mc(p_ctx, &res_hndl); + if(rc) { + kill(cpid, SIGKILL); + return rc; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + read(pdes[0], &ctx_hndl, sizeof(ctx_hndl_t)); + fill_send_write(p_ctx, 0, pid, stride, VLBA); + //set another process ctx + debug("%d : use child(%d)process ctx hndl: %d\n", pid, cpid, ctx_hndl); + for(i = 0; i< NUM_CMDS; i++) { + p_ctx->cmd[i].rcb.ctx_id = ctx_hndl; + } + if(2 == cmd) { + //another test is to close one of my ctx res hndl + //and use child ctx handler here + //(child has opened 2 res handler) + mc_close(p_ctx->mc_hndl, res_hndl); + debug("%d : close res_hndl(%d) but child (%d)has opened\n", + pid, res_hndl, cpid); + for(i = 0; i< NUM_CMDS; i++) { + p_ctx->cmd[i].rcb.res_hndl = res_hndl; + } + } + send_cmd(p_ctx); + rc = wait_resp(p_ctx); + kill(cpid, SIGKILL); + pthread_cancel(thread); + } + return rc; +} diff --git a/src/master/test/mc_test_io.c b/src/master/test/mc_test_io.c new file mode 100644 index 00000000..e6f5dc59 --- /dev/null +++ b/src/master/test/mc_test_io.c @@ -0,0 +1,1080 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/master/test/mc_test_io.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" +#include +#include +#include + +extern char master_dev_path[MC_PATHLEN]; +extern char afu_path[MC_PATHLEN]; + +extern pid_t pid; +extern int dont_displa_err_msg; + +static pthread_mutex_t mutex; +static pthread_cond_t condv; + +static int g_error; +static __u32 W_READ = 0; +static __u32 W_WRITE = 0; +static int count=50; + +//global allocation of read/write buffer +static struct ctx gs_ctx; + +int N_done; +__u64 clba; + +void *write_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 nlba; + __u64 stride; + __u64 st_lba; + __u64 chunk; + mc_stat_t l_mc_stat; + int rc; + + pid = getpid(); + pthread_mutex_lock(&mutex); + rc = mc_stat(p_ctx->mc_hndl,p_ctx->res_hndl,&l_mc_stat); + pthread_mutex_unlock(&mutex); + if(rc) { + g_error = rc; + fprintf(stderr, "%d : %s : %d : mc_stat failed\n", + pid, __func__, __LINE__); + return NULL; + } + actual_size = l_mc_stat.size; + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = (1 << l_mc_stat.nmask); + st_lba = nlba/2; + while(count > 0) + { + if(count%2){ + chunk = actual_size - 1; + } + 
else{ + chunk =actual_size +1; + } + pthread_mutex_lock(&mutex); + debug("write st_lba= 0x%lX,nlba=0x%lX\n",st_lba,nlba); + send_write(p_ctx, st_lba, stride, pid,VLBA); + clba = st_lba; + W_READ = 1; + (void)mc_size(p_ctx->mc_hndl,p_ctx->res_hndl, chunk, &actual_size); + pthread_cond_signal(&condv); + pthread_mutex_unlock(&mutex); + nlba = actual_size * (1 << l_mc_stat.nmask);; + st_lba = nlba/2; + debug("chunk 0x%lX, nlba 0x%lX, st_lba 0x%lX\n",actual_size,nlba,st_lba); + debug("Loop.. %d remaining............\n",count); + count--; + } + if(rc != 0) + { + g_error = rc; + } + return 0; +} + + +void *play_size(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 nlba; + __u64 st_lba; + __u64 w_chunk; + int rc; + pid = getpid(); + mc_stat_t l_mc_stat; + + pthread_mutex_lock(&mutex); + rc =mc_stat(p_ctx->mc_hndl,p_ctx->res_hndl, &l_mc_stat); + w_chunk = l_mc_stat.size; + pthread_mutex_unlock(&mutex); + while(count > 0) + { + pthread_mutex_lock(&mutex); + while(W_WRITE != 1){ + pthread_cond_wait(&condv,&mutex); + } + + rc = mc_size(p_ctx->mc_hndl,p_ctx->res_hndl, w_chunk, &actual_size); + if(actual_size == 0) + { + debug("%d : chunk size reduced to 0, now increase by 128\n", pid); + w_chunk = 128; + rc = mc_size(p_ctx->mc_hndl,p_ctx->res_hndl, w_chunk, &actual_size); + } + debug("%d :mc_size done 0X%lX\n",pid, actual_size); + nlba = actual_size * (1 << l_mc_stat.nmask); + if(count % 2){ + st_lba = nlba/2; + w_chunk = actual_size/2+1; + } + else { + st_lba = nlba -1; + w_chunk = actual_size-1; + } + clba = st_lba; + W_WRITE = 0; + debug("%d : clba 0X%lX: lba range:0X%lX\n",pid, clba, nlba-1); + pthread_mutex_unlock(&mutex); + debug("%d : chunk 0x%lX, nlba 0x%lX, st_lba 0x%lX\n",pid, actual_size,nlba,st_lba); + debug("%d : Loop.. 
%d remaining ............\n",pid, count); + count--; + if(rc != 0) + { + g_error = rc; + fprintf(stderr,"%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + pthread_exit(0); + } + } + return 0; +} + +void *rw_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + int rc = 0; + __u64 llba =clba; + __u64 actual_size; + mc_stat_t l_mc_stat; + + pid = getpid(); + while(count-- > 0){ + debug("%d :about to process 0X%lX lba\n",pid,llba); + pthread_mutex_lock(&mutex); + llba =clba; + debug("%d : Writing for 0X%lX\n",pid,llba); + mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + actual_size = l_mc_stat.size; + debug("%d : actual lba range is 0X%lX\n", + pid, actual_size * (1 << l_mc_stat.nmask)-1); + rc = send_single_write(p_ctx, llba, pid); + W_WRITE = 1; + pthread_cond_signal(&condv); + pthread_mutex_unlock(&mutex); + if(rc && (llba >= actual_size * (1 << l_mc_stat.nmask))) { + printf("%d : expected one, write(0X%lX) and range(0X%lX)\n", + pid, llba, actual_size * (1 << l_mc_stat.nmask)-1); + rc = 0; + } + if(rc != 0) { + g_error = rc; + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + //return NULL; + } + + pthread_mutex_lock(&mutex); + mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + actual_size = l_mc_stat.size; + debug("%d : reading for 0X%lX\n",pid, llba); + debug("%d : actual lba range is 0X%lX\n", + pid, actual_size*(1 << l_mc_stat.nmask)-1); + rc = send_single_read(p_ctx, llba); + if(rc && llba >= actual_size*(1 << l_mc_stat.nmask)) { + printf("%d : expected one, read (0X%lX) and range(0X%lX)\n", + pid, llba, actual_size*(1 << l_mc_stat.nmask)-1); + rc = 0; + }else { + rc = rw_cmp_single_buf(p_ctx, llba); + } + pthread_mutex_unlock(&mutex); + if(rc != 0) { + g_error = rc; + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + //return NULL; + } + } + return 0; +} + +void *read_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 stride; + int rc; + 
mc_stat_t l_mc_stat; + + pthread_mutex_lock(&mutex); + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + pthread_mutex_unlock(&mutex); + if(rc) { + g_error = rc; + fprintf(stderr, "read_io mc_stat failed\n"); + return NULL; + } + stride = (1 << l_mc_stat.nmask); + while(count-- > 0){ + pthread_mutex_lock(&mutex); + while ( W_READ != 1){ + pthread_cond_wait(&condv,&mutex); + } + W_READ = 0; + send_read(p_ctx, clba, stride,VLBA); + rc = rw_cmp_buf(p_ctx, clba); + pthread_mutex_unlock(&mutex); + if(rc != 0) + { + g_error = rc; + return NULL; + } + } + return 0; +} + +int test_onectx_twothrd(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + mc_stat_t l_mc_stat; + pthread_t thread; + pthread_t rhthread[2]; + + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + //int i; + __u64 chunks=512; + __u64 actual_size; + + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + pthread_condattr_init(&cattr); + pthread_cond_init(&condv, &cattr); + if(test_init(p_ctx) != 0) + { + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + if(1 == cmd) { //a thread write_io & another read_io + pthread_create(&rhthread[0], NULL, write_io, p_ctx); + pthread_create(&rhthread[1], NULL, read_io, p_ctx); + } + else if(2 == cmd) { //a thread rw & another mc size + clba = (actual_size * (1 << l_mc_stat.nmask))/2; + pthread_create(&rhthread[0], NULL, rw_io, p_ctx); + 
pthread_create(&rhthread[1], NULL, play_size, p_ctx); + } + //pthread_create(&rhthread[1], NULL, inc_dec_size, p_ctx); + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); + + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + g_error = 0; + return rc; +} + +int test_two_ctx_two_thrd(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread[2]; + + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + __u64 chunks=480; + __u64 actual_size; + mc_stat_t l_mc_stat; + + pthread_mutexattr_init(&mattr); + pthread_mutex_init(&mutex, &mattr); + + pthread_condattr_init(&cattr); + pthread_cond_init(&condv, &cattr); + int i; + for(i = 0;i < 2;i++) { + if(fork() == 0) {//child process + if(test_init(p_ctx) != 0) + { + exit(-1); + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "ctx req failed,ctx=%d,rc %d\n", + p_ctx->ctx_hndl, rc); + exit(-1); + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + if(1 == cmd) { //a thread write_io & another read_io + pthread_create(&rhthread[0], NULL, write_io, p_ctx); + pthread_create(&rhthread[1], NULL, read_io, p_ctx); + } + else if(2 == cmd) { //a thread rw & another mc size + clba = (actual_size * (1 << l_mc_stat.nmask))/2; + pthread_create(&rhthread[0], NULL, rw_io, p_ctx); + pthread_create(&rhthread[1], NULL, play_size, p_ctx); + } + + 
pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); + + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + debug("%d : I am exiting from here .....rc = %d\n", pid, rc); + exit(rc); + } + } + while((pid = waitpid(-1,&rc,0))) + { + debug("%d : wait is over for me............rc = %d\n", pid, rc); + if(rc != 0){ + g_error = -1; + } + if(pid == -1) { + break; + } + } + rc = g_error; + g_error=0; + return rc; +} + +int test_lun_discovery(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + int fc_port =0; + __u64 *lun_ids; + __u32 n_luns; + int port=2; + int i; + __u64 lun_cap,blk_len; + + if(test_init(p_ctx) != 0) + { + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + // Send Report LUNs to get the list of LUNs and LUN ID + for(i =1; i <= port;i++) { + rc = send_report_luns(p_ctx, i, &lun_ids,&n_luns); + if(rc) { + fprintf(stderr, "Report LUNs failed on FC Port %d\n", i); + } + else{ + fc_port = i; + break; + } + } + if(rc || n_luns == 0) + { + ctx_close(p_ctx); + return rc; + } + debug("Report Lun success, num luns= 0x%x\n",n_luns); + for(i = 0; i< n_luns;i++) + { + rc = send_read_capacity(p_ctx,fc_port,lun_ids[i],&lun_cap, &blk_len); + if(rc != 0) + { + fprintf(stderr,"Read capacity failed,lun id =0x%lX, rc = %d\n",lun_ids[i],rc); + break; + } + debug("LUN id = 0x%lX Capacity = 0x%lX Blk len = 0x%lX\n", + lun_ids[i],lun_cap,blk_len); + } + free(lun_ids); + pthread_cancel(thread); + ctx_close(p_ctx); + return rc; + +} + +int test_vdisk_io() +{ + int rc; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + + __u64 chunk = 256; + __u64 nlba; + __u64 actual_size; + __u64 st_lba =0; + __u64 stride; + mc_stat_t l_mc_stat; + + + if(mc_init() != 0) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } 
+ debug("mc_init success.\n"); + + if(ctx_init(p_ctx) != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl); + if (rc != 0) { + fprintf(stderr, "error registering(%s) ctx_hndl %d, rc %d\n", + master_dev_path, p_ctx->ctx_hndl, rc); + return -1; + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, chunk, &actual_size); + if(rc != 0 || actual_size < 1) //might be chunk want to allocate whole lun + { + fprintf(stderr, "error sizing res_hndl rc %d\n", rc); + return -1; + } + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + pid = getpid(); + + stride = (1 << l_mc_stat.nmask); + nlba = l_mc_stat.size * (1 << l_mc_stat.nmask); + debug("%d : st_lba = 0X0 and range lba = 0X%lX\n", pid, nlba-1); + for (st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS*stride)) { + send_write(p_ctx, st_lba, stride, pid,VLBA); + send_read(p_ctx, st_lba, stride,VLBA); + rc = rw_cmp_buf(p_ctx, st_lba); + if(rc){ + fprintf(stderr,"buf cmp failed for vlba 0x%lX,rc =%d\n", + st_lba,rc); + break; + } + } + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + return rc; +} + +void *only_rw_io(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 stride = 0x1000; //4K + int rc; + __u64 nlba = clba; + __u64 st_lba; + + pid = getpid(); + while(1) { + for(st_lba =0; st_lba < nlba; st_lba += (NUM_CMDS * stride)){ + //rc = send_single_write(p_ctx,st_lba,pid); + rc = send_write(p_ctx,st_lba,stride,pid, VLBA); + if(rc != 0) { + g_error = rc; + if(!dont_displa_err_msg) + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + 
return NULL; + } + //rc = send_single_read(p_ctx, st_lba); + rc = send_read(p_ctx, st_lba, stride, VLBA); + if(rc != 0) { + g_error = rc; + if(!dont_displa_err_msg) + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + //rc = rw_cmp_single_buf(p_ctx, st_lba); + rc = rw_cmp_buf(p_ctx, st_lba); + if(rc != 0) { + g_error = rc; + if(!dont_displa_err_msg) + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + } + if(N_done) { + break; + } + } + return 0; +} + +int test_rw_close_hndl(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread; + mc_stat_t l_mc_stat; + __u64 chunks=128; + __u64 actual_size; + + if(test_init(p_ctx) != 0) + { + return -1; + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) + { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n",p_ctx->ctx_hndl, rc ); + return -1; + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + return -1; + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + clba = actual_size * (1 << l_mc_stat.nmask); + pthread_create(&rhthread, NULL, only_rw_io, p_ctx); + sleep(1); + + if(1 == cmd) { //while IO close RHT + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + }else if(2 == cmd) { //while IO unreg MC HNDL + mc_unregister(p_ctx->mc_hndl); + }else if(3 == cmd) { //While IO ctx close + munmap((void*)p_ctx->p_host_map, 0x10000); + close(p_ctx->afu_fd); + //ctx_close(p_ctx); + } + + N_done = 1; //tell pthread that -ve test performed + sleep(1); + pthread_cancel(rhthread); + //pthread_join(rhthread, NULL); + N_done = 0; + + 
pthread_cancel(thread); + // do proper closing + if(1 == cmd) { + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + } else if (2 == cmd) { + ctx_close(p_ctx); + } + mc_term(); + rc = g_error; + g_error = 0; + return rc; +} +int test_good_ctx_err_ctx(int cmd) +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread; + mc_stat_t l_mc_stat; + + __u64 chunks=128; + __u64 actual_size; + + pid_t mypid = fork(); + //let both process do basic things + if(test_init(p_ctx) != 0) + { + exit(-1); + } + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + if(rc != 0) { + fprintf(stderr, "ctx _reg failed, ctx_hndl %d,rc %d\n", + p_ctx->ctx_hndl, rc ); + exit(-1); + } + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + if (rc != 0) { + fprintf(stderr, "error opening res_hndl rc %d\n", rc); + exit(-1); + } + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + clba = actual_size * (1 << l_mc_stat.nmask); + pthread_create(&rhthread, NULL, only_rw_io, p_ctx); + + if(mypid == 0) { //child process do err ctx + debug("child pid is :%d\n",getpid()); + sleep(1); //let thrd do some io + printf("error ctx pid is %d\n",getpid()); + if(1 == cmd) { //while IO close RHT + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + }else if(2 == cmd) { //while IO unreg MC HNDL + mc_unregister(p_ctx->mc_hndl); + }else if(3 == cmd) { //While IO ctx close + //munmap((void*)p_ctx->p_host_map, 0x10000); + close(p_ctx->afu_fd); + //ctx_close(p_ctx); + } + sleep(1); + N_done = 1; //tell pthread that -ve test performed + pthread_cancel(rhthread); + //pthread_join(rhthread, NULL); + N_done = 0; + debug("%d : exiting with rc = %d\n", pid, g_error); + pthread_cancel(thread); + exit(g_error); + } + else { + debug("Good 
ctx pid is : %d\n",getpid()); + sleep(2); //main process sleep but thrd keep running + N_done = 1; // + pthread_join(rhthread, NULL); + N_done = 0; + + // do proper closing + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + debug("%d : waiting for child process %d\n", pid, mypid); + wait(&rc); + } + rc = g_error; + g_error = 0; + mc_term(); + return rc; +} + +int test_mc_ioarcb_ea_alignment(int cmd) +{ + int rc; + int a; + struct rwbuf *p_rwb; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=128; + __u64 actual_size=0; + __u64 st_lba, nlba; + __u64 stride = 0x1000; + int offset; + pthread_t thread; + int max; + mc_stat_t l_mc_stat; + + + if(1 == cmd) //16 byte ea alignment + offset = 16; + else if(2 == cmd) //128 byte ea alignment + offset = 128; + else //invalid ea alignment + offset = 5; + + max = offset * 10; //try for next 10 offset + pid = getpid(); + rc = posix_memalign((void **)&p_rwb, 0x1000, sizeof( struct rwbuf )); + CHECK_RC(rc, "rwbuf allocation failed"); + debug("initial buf address : %p\n",p_rwb); + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + if(chunks != actual_size) + { + CHECK_RC(1, "doesn't have enough chunk space"); + } + st_lba = 0; + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("EA alignment from begining of 4K\n"); + for(a=offset; a <= max; a+=offset){ + debug("send alignment offset : %u\n",a); + rc = 
send_rw_rcb(p_ctx, p_rwb, st_lba, stride, a, 0); + if(rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba/2, stride, a, 0); + if(rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba-(NUM_CMDS * stride), stride, a, 0); + if(rc) break; + } + //CHECK_RC(rc, "send_rw_rcb"); + debug("EA alignment from end of a 4K\n"); + for(a=offset; a <= max; a+=offset){ + debug("send alignment offset from last : %u\n", a); + rc = send_rw_rcb(p_ctx, p_rwb, st_lba, stride, a, 1); + if(rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba/2, stride, a, 1); + if(rc) break; + rc = send_rw_rcb(p_ctx, p_rwb, nlba-(NUM_CMDS * stride), stride, a, 1); + if(rc) break; + } + pthread_cancel(thread); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + free(p_rwb); + mc_term(); + if(rc!=0 && cmd == 3) + return 3; + return rc; +} + +int mc_test_rwbuff_global() +{ + int rc; + struct ctx *p_ctx = &gs_ctx; + __u64 chunks=256; + __u64 actual_size=0; + __u64 st_lba; + __u64 stride; + __u64 nlba; + pthread_t thread; + mc_stat_t l_mc_stat; + + + pid = getpid(); + + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + stride = (1 << l_mc_stat.nmask); + for(st_lba = 0; st_lba < nlba; st_lba += (NUM_CMDS * stride)) { + rc = send_write(p_ctx, st_lba, stride, pid, VLBA); + CHECK_RC(rc, "send_write"); + rc = send_read(p_ctx, st_lba, stride,VLBA); + CHECK_RC(rc, "send_read"); + rc = rw_cmp_buf(p_ctx, st_lba); + CHECK_RC(rc, "rw_cmp_buf"); + } + 
pthread_cancel(thread); + rc = mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + return rc; +} + +void *only_play_size(void *arg) +{ + struct ctx *p_ctx = (struct ctx *)arg; + __u64 actual_size; + __u64 w_chunk; + int rc; + int myloop = count * 10; + mc_stat_t l_mc_stat; + + rc =mc_stat(p_ctx->mc_hndl,p_ctx->res_hndl, &l_mc_stat); + w_chunk = l_mc_stat.size; + while(myloop-- > 0) + { + w_chunk +=128; + debug("%d : doing mc size from 0X%lX to 0X%lX\n", pid, actual_size, w_chunk); + rc = mc_size(p_ctx->mc_hndl,p_ctx->res_hndl, w_chunk, &actual_size); + if(rc != 0) { + g_error = rc; + N_done = 1; + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + w_chunk -=128; + debug("%d : doing mc size from 0X%lX to 0X%lX\n", pid, actual_size, w_chunk); + rc = mc_size(p_ctx->mc_hndl,p_ctx->res_hndl, w_chunk, &actual_size); + if(rc != 0) { + g_error = rc; + N_done = 1; + fprintf(stderr, "%d : failed here %s:%d:%s\n",pid, + __FILE__,__LINE__,__func__); + return NULL; + } + } + N_done = 1; //now tell other thread, i m done + return 0; +} + +int test_mc_rw_size_parallel() +{ + int rc=0; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + pthread_t thread; + pthread_t rhthread[2]; + mc_stat_t l_mc_stat; + int i; + + __u64 chunks=64; + __u64 actual_size; + + for(i =0 ;i < 4; i++){ + if(fork() == 0) { + sleep(1); //lets all process get created + rc = test_init(p_ctx); + CHECK_RC(rc, "test init"); + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "mc_register"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "mc_open"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + clba = (actual_size * (1 << l_mc_stat.nmask)); + 
pthread_create(&rhthread[0], NULL, only_rw_io, p_ctx); + pthread_create(&rhthread[1], NULL, only_play_size, p_ctx); + + pthread_join(rhthread[0], NULL); + pthread_join(rhthread[1], NULL); + + pthread_cancel(thread); + mc_close(p_ctx->mc_hndl,p_ctx->res_hndl); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + rc = g_error; + exit(rc); + } + } + while((pid = waitpid(-1,&rc,0))) + { + if (WIFEXITED(rc)) { + rc = WEXITSTATUS(rc); + if(rc != 0){ + g_error = -1; + } + } + debug("pid %d exited...\n",pid); + + if(pid == -1) { + break; + } + } + rc = g_error; + g_error = 0; + return rc; +} + +int test_mc_rwbuff_shm() +{ + int rc = 0; + struct rwshmbuf l_rwb; + struct rwshmbuf *p_rwb; + struct ctx myctx; + struct ctx *p_ctx = &myctx; + __u64 chunks=256; + __u64 actual_size=0; + __u64 st_lba; + __u64 nlba; + __u64 stride = 0x1000; + pthread_t thread; + pid_t cpid; + mc_stat_t l_mc_stat; + + int shmid; + key_t key=2345; + char *shm; + pid = getpid(); + + if((shmid = shmget(key,sizeof(struct rwshmbuf), IPC_CREAT | 0666)) < 0){ + fprintf(stderr, "shmget failed\n"); + return -1; + } + + if((shm = shmat(shmid, NULL, 0)) == (char *)-1) { + fprintf(stderr, "shmat failed\n"); + return -1; + } + debug("%d : shared region created\n",pid); + //lets create a child process to keep reading shared area + cpid = fork(); + if(cpid == 0){ + pid = getpid(); + if((shmid = shmget(key,sizeof(struct rwshmbuf), IPC_CREAT | 0666)) < 0){ + fprintf(stderr, "shmget failed\n"); + exit(-1); + } + + if((shm = shmat(shmid, NULL, 0)) == (char *)-1) { + fprintf(stderr, "shmat failed\n"); + exit(-1); + } + debug("%d: child started accessing shared memory...\n",pid); + while(1) { + memcpy(&l_rwb, shm, sizeof(struct rwshmbuf)); + } + } + + p_rwb = (struct rwshmbuf *)shm; + + rc =mc_init(); + CHECK_RC(rc, "mc_init failed"); + + rc = ctx_init(p_ctx); + CHECK_RC(rc, "ctx init failed"); + + pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx); + rc = mc_register(master_dev_path, p_ctx->ctx_hndl, + 
(volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); + CHECK_RC(rc, "ctx reg failed"); + + rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); + CHECK_RC(rc, "opening res_hndl"); + + rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size); + CHECK_RC(rc, "mc_size"); + + rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat); + CHECK_RC(rc, "mc_stat"); + + nlba = actual_size * (1 << l_mc_stat.nmask); + debug("%d : started IO where rwbuf in shared memory lba range(0X%lX)\n", pid, nlba-1); + for(st_lba =0; st_lba < nlba; st_lba += stride) { + rc = send_rw_shm_rcb(p_ctx, p_rwb, st_lba); + CHECK_RC(rc, "send_rw_rcb"); + } + debug("%d : IO is done now \n", pid); + + debug("%d : now time to kill child %d \n", pid, cpid); + kill(cpid, SIGKILL); + + shmdt(shm); + pthread_cancel(thread); + mc_unregister(p_ctx->mc_hndl); + ctx_close(p_ctx); + mc_term(); + return 0; +} diff --git a/src/master/test/mc_test_util.c b/src/master/test/mc_test_util.c new file mode 100644 index 00000000..55ef53ec --- /dev/null +++ b/src/master/test/mc_test_util.c @@ -0,0 +1,1110 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/mc_test_util.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "mc_test.h" +#include +#include + +char master_dev_path[MC_PATHLEN]; +char afu_path[MC_PATHLEN]; + +int dont_displa_err_msg = 0; //0 means display error msg +pid_t pid; +__u8 rrq_c_null = 0; //if 1, then hrrq_current = NULL + +#define DATA_SEED 0xdead000000000000ull + +__u64 lun_id = 0x0; +__u32 fc_port = 0x1; //will find the actual one + +int g_error=0; +uint8_t rc_flags; +//set master dev path & afu device path from env +int get_fvt_dev_env() +{ + char *fvt_dev = getenv("FVT_DEV"); + if(NULL == fvt_dev) { + fprintf(stderr, "FVT_DEV ENV var NOT set, Please set...\n"); + return -1; + } + strcpy(afu_path, fvt_dev); + strncpy(master_dev_path, afu_path, strlen(afu_path)-1); + master_dev_path[strlen(afu_path)-1]='\0'; + strcat(master_dev_path, "m"); + return 0; +} + +int ctx_init_thread(void *args) +{ + return ctx_init((ctx_p)args); +} + +void ctx_close_thread(void *args) +{ + ctx_close((ctx_p)args); +} + +int ctx_init(struct ctx *p_ctx) +{ + int rc=0; + void *map; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int i; + pid_t mypid; + + memset(p_ctx, 0, sizeof(struct ctx)); + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + for(i = 0; i < NUM_CMDS; i++) { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } + + // open non-master device + p_ctx->afu_fd = open(afu_path, O_RDWR); + if(p_ctx->afu_fd < 0) { + fprintf(stderr, "open() failed: device %s, errno %d\n", afu_path, errno); + g_error = -1; + return -1; + } + + p_ctx->work.flags = CXL_START_WORK_NUM_IRQS; + p_ctx->work.num_interrupts = 4; // use num_interrupts from AFU desc + + rc = ioctl(p_ctx->afu_fd,CXL_IOCTL_START_WORK, &p_ctx->work); + if(rc != 0) { + fprintf(stderr, "ioctl() failed: start command failure on AFU, errno %d\n", errno); + g_error = -1; + return -1; + } + + rc = 
ioctl(p_ctx->afu_fd,CXL_IOCTL_GET_PROCESS_ELEMENT, &p_ctx->ctx_hndl); + if(rc != 0) { + fprintf(stderr, "ioctl() failed: get process element failure on AFU, errno %d\n", errno); + g_error = -1; + return -1; + } + + // mmap 64KB host transport MMIO space of this context + map = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED, p_ctx->afu_fd, 0); + if(map == MAP_FAILED) { + fprintf(stderr, "mmap() failed: errno %d\n", errno); + g_error = -1; + return -1; + } + + p_ctx->p_host_map = (volatile struct sisl_host_map *)map; + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + if(rrq_c_null) + p_ctx->p_hrrq_curr = NULL; + else + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + + p_ctx->toggle = 1; + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) { + p_ctx->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } + + // set up RRQ in AFU + write_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + write_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + + mypid = getpid(); + debug("%d : ctx_init() success: p_host_map %p, ctx_hndl %d, rrq_start %p\n", + mypid, p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + pthread_mutexattr_destroy(&mattr); + pthread_condattr_destroy(&cattr); + return 0; +} + +void ctx_close(struct ctx *p_ctx) +{ + munmap((void*)p_ctx->p_host_map, 0x10000); + close(p_ctx->afu_fd); +} + +void clear_waiting_cmds(struct ctx *p_ctx) +{ + int i; + struct afu_cmd *p_cmd; + if(DEBUG) { + fprintf(stderr, + "%d : Clearing all waiting cmds, some error occurred\n", + getpid()); + } + for(i = 0 ; i < NUM_CMDS; i++) { + p_cmd = &p_ctx->cmd[i]; + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + } +} +void ctx_rrq_intr(struct ctx *p_ctx) { + struct afu_cmd 
*p_cmd; + + // process however many RRQ entries that are ready + while ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + p_cmd = (struct afu_cmd*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use_b[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else { /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + } +} + +void ctx_sync_intr(struct ctx *p_ctx) { + __u64 reg; + __u64 reg_unmasked; + + reg = read_64(&p_ctx->p_host_map->intr_status); + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + if (reg_unmasked == 0) { + if(!dont_displa_err_msg) { + fprintf(stderr, + "%d: spurious interrupt, intr_status 0x%016lx, ctx %d\n", + pid, reg, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return; + } + + if (reg_unmasked == SISL_ISTATUS_PERM_ERR_RCB_READ) { + if(!dont_displa_err_msg) { + fprintf(stderr, "exiting on SISL_ISTATUS_PERM_ERR_RCB_READ\n"); + } + clear_waiting_cmds(p_ctx); + pthread_exit(NULL); + // ok - this is a signal to stop this thread + } + else { + if(!dont_displa_err_msg) { + fprintf(stderr, + "%d: unexpected interrupt, intr_status 0x%016lx, ctx %d, exiting test...\n", + pid, reg, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return ; + } + + write_64(&p_ctx->p_host_map->intr_clear, reg_unmasked); + + return; +} +void *ctx_rrq_rx(void *arg) { + struct cxl_event *p_event; + int len; + struct ctx *p_ctx = (struct ctx*) arg; + + while (1) { + // + // read afu fd & block on any interrupt + len = read(p_ctx->afu_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + + if (len < 0) { + if(!dont_displa_err_msg) + fprintf(stderr, "afu has been reset, exiting...\n"); + clear_waiting_cmds(p_ctx); + return NULL; + } + + p_event = (struct cxl_event 
*)&p_ctx->event_buf[0]; + while (len >= sizeof(p_event->header)) { + if (p_event->header.type == CXL_EVENT_AFU_INTERRUPT) { + switch(p_event->irq.irq) { + case SISL_MSI_RRQ_UPDATED: + ctx_rrq_intr(p_ctx); + break; + + case SISL_MSI_SYNC_ERROR: + ctx_sync_intr(p_ctx); + break; + + default: + if(!dont_displa_err_msg) { + fprintf(stderr, "%d: unexpected irq %d, ctx %d, exiting test...\n", + pid, p_event->irq.irq, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return NULL; + break; + } + + } + else if (p_event->header.type == CXL_EVENT_DATA_STORAGE) { + if(!dont_displa_err_msg) + fprintf(stderr, "failed, exiting on CXL_EVENT_DATA_STORAGE\n"); + clear_waiting_cmds(p_ctx); + return NULL; + // this is a signal to terminate this thread + } + else { + if(!dont_displa_err_msg) { + fprintf(stderr, "%d: unexpected event %d, ctx %d, exiting test...\n", + pid, p_event->header.type, p_ctx->ctx_hndl); + } + clear_waiting_cmds(p_ctx); + return NULL; + } + + len -= p_event->header.size; + p_event = (struct cxl_event *) + (((char*)p_event) + p_event->header.size); + } + } + + return NULL; +} +void *ctx_rrq_rx_old(void *arg) +{ + struct cxl_event *p_event; + struct ctx *p_ctx = (struct ctx *)arg; + struct afu_cmd *p_cmd; + + while(1){ + // read afu fd block on RRQ written interrupt + p_event = (struct cxl_event *)p_ctx->event_buf; + read(p_ctx->afu_fd, p_event, sizeof(p_ctx->event_buf)); + if(p_event->header.type == CXL_EVENT_AFU_INTERRUPT){ + debug_2("%d : recd afu_intr %d\n", pid, p_event->irq.irq); + fflush(stdout); + } + + + // process however many RRQ entries that are ready + while((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + //printf("now coming inside the second loop...\n"); + //printf("0X%lX\n",*p_ctx->p_hrrq_curr); + p_cmd = (struct afu_cmd*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if 
(p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + //printf("p_ctx->p_hrrq_curr++..\n"); + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else{/* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + //printf("else part p_ctx->toggle = %u\n", p_ctx->toggle); + } + } + } + return NULL; +} + + +// len in __u64 +void fill_buf(__u64* p_buf, unsigned int len,__u64 value) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) { + p_buf[i] = value; + p_buf[i + 1] = data++; + } +} + +// Send Report LUNs SCSI Cmd to CAPI Adapter +int send_report_luns(struct ctx *p_ctx, __u32 port_sel, + __u64 **lun_ids, __u32 *nluns) +{ + __u32 *p_u32; + __u64 *p_u64, *lun_id; + int len; + int rc; + + memset(&p_ctx->rbuf[0], 0, sizeof(p_ctx->rbuf)); + memset(&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[0].rcb.port_sel = port_sel; + p_ctx->cmd[0].rcb.lun_id = 0x0; + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + p_ctx->cmd[0].rcb.timeout = 10; /* 10 Secs */ + + p_ctx->cmd[0].rcb.cdb[0] = 0xA0; /* report lun */ + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[6]; + write_32(p_u32, sizeof(p_ctx->rbuf[0])); /* allocation length */ + + p_ctx->cmd[0].sa.host_use_b[1] = 0; /* reset retry cnt */ + do { + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if(rc) return rc; + } while(check_status(&p_ctx->cmd[0].sa)); + + + if (p_ctx->cmd[0].sa.host_use_b[0] & B_ERROR) { + return -1; + } + + // report luns success + len = read_32((__u32*)&p_ctx->rbuf[0][0]); + p_u64 = (__u64*)&p_ctx->rbuf[0][8]; /* start of lun list */ + + *nluns = len/8; + lun_id = (__u64 *)malloc((*nluns * sizeof(__u64))); + + if (lun_id == NULL) { + fprintf(stderr, "Report LUNs: ENOMEM\n"); + } else { + *lun_ids = lun_id; + + while (len) { + *lun_id = 
read_64(p_u64++); + lun_id++; + len -= 8; + } + } + + return 0; +} +// Send Read Capacity SCSI Cmd to the LUN +int send_read_capacity(struct ctx *p_ctx, __u32 port_sel, + __u64 lun_id, __u64 *lun_capacity, __u64 *blk_len) +{ + __u32 *p_u32; + __u64 *p_u64; + int rc; + + memset(&p_ctx->rbuf[0], 0, sizeof(p_ctx->rbuf)); + memset(&p_ctx->cmd[0].rcb.cdb[0], 0, sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[0].rcb.port_sel = port_sel; + p_ctx->cmd[0].rcb.lun_id = lun_id; + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0]; + p_ctx->cmd[0].rcb.timeout = 10; /* 10 Secs */ + + p_ctx->cmd[0].rcb.cdb[0] = 0x9E; /* read cap(16) */ + p_ctx->cmd[0].rcb.cdb[1] = 0x10; /* service action */ + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, sizeof(p_ctx->rbuf[0])); /* allocation length */ + + send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if(rc){ + return rc; + } + + p_u64 = (__u64*)&p_ctx->rbuf[0][0]; + *lun_capacity = read_64(p_u64); + + p_u32 = (__u32*)&p_ctx->rbuf[0][8]; + *blk_len = read_32(p_u32); + + return 0; +} + +// len in __u64 +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len) +{ + return memcmp(p_buf1, p_buf2, len*sizeof(__u64)); +} + +int rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba) { + int i; + //char buf[32]; + //int read_fd, write_fd; + for (i = 0; i < NUM_CMDS; i++) { + if (cmp_buf((__u64*)&p_ctx->rbuf[i][0], (__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) { + printf("%d: miscompare at start_lba 0X%lX\n", + pid, start_lba); + + /* + sprintf(buf, "read.%d", pid); + read_fd = open(buf, O_RDWR|O_CREAT); + sprintf(buf, "write.%d", pid); + write_fd = open(buf, O_RDWR|O_CREAT); + + write(read_fd, &p_ctx->rbuf[i][0], sizeof(p_ctx->rbuf[i])); + write(write_fd, &p_ctx->wbuf[i][0], sizeof(p_ctx->wbuf[i])); + + close(read_fd); + close(write_fd); + */ + 
hexdump(&p_ctx->rbuf[i][0],0x20,"Read buf"); + hexdump(&p_ctx->wbuf[i][0],0x20,"Write buf"); + + return -1; + } + } + return 0; +} + +int send_write(struct ctx *p_ctx, __u64 start_lba, + __u64 stride,__u64 data,__u32 flags) +{ + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba, plba; + int rc; + + for(i = 0; i< NUM_CMDS; i++){ + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64),data); + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + vlba = start_lba + i*stride; + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + if(flags & VLBA){ + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); // write(16) Virtual LBA + debug_2("%d : send write vlba..: 0X%lX\n",(int)data,vlba); + } + else + { + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = fc_port; // either FC port + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_HOST_WRITE; + if(flags & NO_XLATE){ + plba = vlba; + } + else { + (void)mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, vlba, &plba); + } + write_64(p_u64, plba); // physical LBA# + debug_2("%d : send write plba..: 0x%lX\n",(int)data,plba); + } + + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[i][0]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[i]); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + hexdump(&p_ctx->wbuf[i][0],0x20,"Writing"); + } + //send_single_cmd(p_ctx); + rc = send_cmd(p_ctx); + if(rc) return rc; + rc = wait_resp(p_ctx); + return rc; +} + +void send_single_cmd(struct ctx *p_ctx) { + + p_ctx->cmd[0].sa.host_use_b[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + /* make memory updates visible to AFU before MMIO */ + asm volatile ( "lwsync" : : ); + + // write IOARRIN + 
write_64(&p_ctx->p_host_map->ioarrin, (__u64)&p_ctx->cmd[0].rcb); +} + +int send_cmd(struct ctx *p_ctx) { + int cnt = NUM_CMDS; + int wait_try=MAX_TRY_WAIT; + int p_cmd = 0; + int i; + __u64 room; + + /* make memory updates visible to AFU before MMIO */ + asm volatile ( "lwsync" : : ); + + while (cnt) { + room = read_64(&p_ctx->p_host_map->cmd_room); + //room = NUM_CMDS +1; //p_ctx->p_host_map->cmd_room; + if(0 == room) { + usleep(MC_BLOCK_DELAY_ROOM); + wait_try--; + } + if(0 == wait_try) { + fprintf(stderr, "%d : send cmd wait over %d cmd remain\n", + pid, cnt); + return -1; + } + for (i = 0; i < room; i++) { // add a usleep here if room=0 ? + // write IOARRIN + write_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[p_cmd++].rcb); + wait_try = MAX_TRY_WAIT; //each cmd give try max time + if (cnt-- == 1) break; + } + } + return 0; +} + +int wait_resp(struct ctx *p_ctx) +{ + int i; + int rc =0; + int p_rc = 0; + __u64 *p_u64; + + for (i = 0; i < NUM_CMDS; i++) { + pthread_mutex_lock(&p_ctx->cmd[i].mutex); + while (p_ctx->cmd[i].sa.host_use[0] != B_DONE) { + pthread_cond_wait(&p_ctx->cmd[i].cv, &p_ctx->cmd[i].mutex); + } + pthread_mutex_unlock(&p_ctx->cmd[i].mutex); + + if (p_ctx->cmd[i].sa.ioasc) { + rc_flags = p_ctx->cmd[i].sa.rc.flags; + rc = p_ctx->cmd[i].sa.rc.afu_rc | + p_ctx->cmd[i].sa.rc.scsi_rc | + p_ctx->cmd[i].sa.rc.fc_rc; + if(!dont_displa_err_msg) { + hexdump(&p_ctx->cmd[i].sa.sense_data,0x20,"Sense data Writing"); + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + if(p_rc != rc) { + debug("%d : Request was failed 0X%lX vlba\n", pid, read_64(p_u64)); + printf("%d : IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid, + p_ctx->cmd[i].sa.rc.flags,p_ctx->cmd[i].sa.rc.afu_rc, + p_ctx->cmd[i].sa.rc.scsi_rc, p_ctx->cmd[i].sa.rc.fc_rc); + p_rc = rc; + } + } + } + } + return rc; +} + +int wait_single_resp(struct ctx *p_ctx) +{ + int rc =0; + + pthread_mutex_lock(&p_ctx->cmd[0].mutex); + while (p_ctx->cmd[0].sa.host_use[0] != B_DONE) { + 
pthread_cond_wait(&p_ctx->cmd[0].cv, &p_ctx->cmd[0].mutex); + } + pthread_mutex_unlock(&p_ctx->cmd[0].mutex); + + if (p_ctx->cmd[0].sa.ioasc) { + if(!dont_displa_err_msg) { + //hexdump(&p_ctx->cmd[0].sa.sense_data,0x20,"Sense data Writing"); + printf("%d:IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid, + p_ctx->cmd[0].sa.rc.flags,p_ctx->cmd[0].sa.rc.afu_rc, + p_ctx->cmd[0].sa.rc.scsi_rc, p_ctx->cmd[0].sa.rc.fc_rc); + } + rc_flags = p_ctx->cmd[0].sa.rc.flags; + rc = p_ctx->cmd[0].sa.rc.afu_rc | + p_ctx->cmd[0].sa.rc.scsi_rc | + p_ctx->cmd[0].sa.rc.fc_rc; + return rc; + } + return rc; +} + +int send_read(struct ctx *p_ctx, __u64 start_lba, + __u64 stride, __u32 flags) +{ + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + __u64 plba; + int rc; + + for (i = 0; i < NUM_CMDS; i++) { + memset(&p_ctx->rbuf[i][0], 0, sizeof(p_ctx->rbuf[i])); + + vlba = start_lba + i*stride; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + if (flags & VLBA){ + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + write_64(p_u64, vlba); // Read(16) Virtual LBA + debug_2("%d : send read for vlba : 0X%lX\n",pid,vlba); + } + else { + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = fc_port; // either FC port + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_HOST_READ; + if(flags & NO_XLATE){ + plba = vlba; + } + else { + (void)mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, vlba, &plba); + } + write_64(p_u64, plba); // physical LBA# + debug_2("%d :send read for plba =0x%lX\n",pid,plba); + } + + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbuf[i]); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbuf[i][0]; + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means 
active + p_ctx->cmd[i].sa.ioasc = 0; + } + + //send_single_cmd(p_ctx); + rc = send_cmd(p_ctx); + rc = wait_resp(p_ctx); + return rc; +} + +// returns 1 if the cmd should be retried, 0 otherwise +// sets B_ERROR flag based on IOASA +int check_status(sisl_ioasa_t *p_ioasa) +{ + // delete urun !!! + if (p_ioasa->ioasc == 0 || + (p_ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN)) { + return 0; + } + else { + p_ioasa->host_use_b[0] |= B_ERROR; + } + + if (p_ioasa->host_use_b[1]++ < 5) { + if (p_ioasa->rc.afu_rc == 0x30) { + // out of data buf + // #define all, to add the 2nd case!!! + // do we delay ? + return 1; + } + + if (p_ioasa->rc.scsi_rc) { + // retry all SCSI errors + // but if busy, add a delay + return 1; + } + } + + return 0; +} + +int test_init(struct ctx *p_ctx) +{ + if(mc_init() != 0) + { + fprintf(stderr, "mc_init failed.\n"); + return -1; + } + debug("mc_init success.\n"); + + if(ctx_init(p_ctx) != 0) + { + fprintf(stderr, "Context init failed, errno %d\n", errno); + return -1; + } + return 0; +} + +/* + * NAME: hexdump + * + * FUNCTION: Display an array of type char in ASCII, and HEX. This function + * adds a caller definable header to the output rather than the fixed one + * provided by the hexdump function. 
+ * + * EXECUTION ENVIRONMENT: + * + * This routine is ONLY AVAILABLE IF COMPILED WITH DEBUG DEFINED + * + * RETURNS: NONE + */ +void +hexdump(void *data, long len, const char *hdr) +{ + + int i,j,k; + char str[18]; + char *p = (char *)data; + if(!ENABLE_HEXDUMP) + return; + + i=j=k=0; + fprintf(stderr, "%s: length=%ld\n", hdr?hdr:"hexdump()", len); + + /* Print each 16 byte line of data */ + while (i < len) + { + if (!(i%16)) /* Print offset at 16 byte bndry */ + fprintf(stderr,"%03x ",i); + + /* Get next data byte, save ascii, print hex */ + j=(int) p[i++]; + if (j>=32 && j<=126) + str[k++] = (char) j; + else + str[k++] = '.'; + fprintf(stderr,"%02x ",j); + + /* Add an extra space at 8 byte bndry */ + if (!(i%8)) + { + fprintf(stderr," "); + str[k++] = ' '; + } + + /* Print the ascii at 16 byte bndry */ + if (!(i%16)) + { + str[k] = '\0'; + fprintf(stderr," %s\n",str); + k = 0; + } + } + + /* If we didn't end on an even 16 byte bndry, print ascii for partial + * line. */ + if ((j = i%16)) { + /* First, space over to ascii region */ + while (i%16) + { + /* Extra space at 8 byte bndry--but not if we + * started there (was already inserted) */ + if (!(i%8) && j != 8) + fprintf(stderr," "); + fprintf(stderr," "); + i++; + } + /* Terminate the ascii and print it */ + str[k]='\0'; + fprintf(stderr," %s\n",str); + } + fflush(stderr); + + return; +} + +int rw_cmp_buf_cloned(struct ctx *p_ctx, __u64 start_lba) { + int i; + for (i = 0; i < NUM_CMDS; i++) { + if (cmp_buf_cloned((__u64*)&p_ctx->rbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) { + printf("%d: clone miscompare at start_lba 0X%lX\n", + pid, start_lba); + return -1; + } + } + return 0; +} + +// len in __u64 +int cmp_buf_cloned(__u64* p_buf, unsigned int len) +{ + static __u64 data = DATA_SEED; + int i; + + for (i = 0; i < len; i += 2) { + if (!(p_buf[i] == pid && p_buf[i + 1] == data++)) { + return -1; + } + } + return 0; +} + +int send_rw_rcb(struct ctx *p_ctx, struct rwbuf *p_rwb, + __u64 start_lba, __u64 
stride, + int align, int where) +{ + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + int rc; + int i; + __u32 ea; + pid = getpid(); + if(0 == where)//begining + ea = align; + else //from end of the block + ea = 0x1000 - align; + for(i = 0; i< NUM_CMDS; i++){ + debug("%d : EA = %p with 0X%X alignment\n",pid, &p_rwb->wbuf[i][ea], ea); + fill_buf((__u64*)&p_rwb->wbuf[i][ea], + sizeof(p_rwb->wbuf[i])/(2*sizeof(__u64)),pid); + memset(&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + vlba = start_lba + i*stride; + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); + debug_2("%d : send write for 0X%lX\n", pid, vlba); + + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_rwb->wbuf[i][ea]; + + p_ctx->cmd[i].rcb.data_len = sizeof(p_rwb->wbuf[i])/2; + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + hexdump(&p_rwb->wbuf[i][ea],0x20,"Write buf"); + } + + rc = send_cmd(p_ctx); + if(rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if(rc) return rc; + //fill send read + + for(i = 0; i< NUM_CMDS; i++){ + memset(&p_rwb->rbuf[i][ea], 0, sizeof(p_rwb->rbuf[i])/2); + + vlba = start_lba + i*stride; + memset(&p_ctx->cmd[i].rcb.cdb[0],0,sizeof(p_ctx->cmd[i].rcb.cdb)); + + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl; + write_64(p_u64, vlba); + debug_2("%d : send read for 0X%lX\n", pid, vlba); + + p_ctx->cmd[i].rcb.data_len = sizeof(p_rwb->rbuf[i])/2; + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_rwb->rbuf[i][ea]; + + p_u32 = 
(__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + rc = send_cmd(p_ctx); + if(rc) return rc; + //send_single_cmd(p_ctx); + rc = wait_resp(p_ctx); + if(rc) return rc; + //do cmp r/w buf + for (i = 0; i < NUM_CMDS; i++) { + vlba = start_lba + i*stride; + if (cmp_buf((__u64*)&p_rwb->rbuf[i][ea], (__u64*)&p_rwb->wbuf[i][ea], + sizeof(p_ctx->rbuf[i])/(2 * sizeof(__u64)))) { + printf("%d: miscompare at start_lba 0X%lX\n", + pid, vlba); + hexdump(&p_rwb->rbuf[i][ea],0x20,"Read buf"); + hexdump(&p_rwb->wbuf[i][ea],0x20,"Write buf"); + return -1; + } + } + return 0; +} + +int send_rw_shm_rcb(struct ctx *p_ctx, struct rwshmbuf *p_rwb, + __u64 vlba) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + fill_buf((__u64*)&p_rwb->wbuf[0][0], + sizeof(p_rwb->wbuf[0])/(sizeof(__u64)),pid); + memset(&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); + debug_2("%d : send write for 0X%lX\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_rwb->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_rwb->wbuf[0]); + p_ctx->cmd[0].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if(rc) return rc; + + memset(&p_rwb->rbuf[0][0], 0, sizeof(p_rwb->rbuf[0])); + + memset(&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + 
p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_64(p_u64, vlba); + debug_2("%d : send read for 0X%lX\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_len = sizeof(p_rwb->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_rwb->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + if(rc) return rc; + //do cmp r/w buf + + if (cmp_buf((__u64*)&p_rwb->rbuf[0][0], (__u64*)&p_rwb->wbuf[0][0], + sizeof(p_rwb->rbuf[0])/(sizeof(__u64)))) { + printf("%d: miscompare at start_lba 0X%lX\n", + pid, vlba); + hexdump(&p_rwb->rbuf[0][0],0x20,"Read buf"); + hexdump(&p_rwb->wbuf[0][0],0x20,"Write buf"); + return -1; + } + return 0; +} + +int send_single_write(struct ctx *p_ctx, __u64 vlba, __u64 data) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + fill_buf((__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->wbuf[0])/(sizeof(__u64)), data); + memset(&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + write_64(p_u64, vlba); + debug_2("%d : send write for 0X%lX\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->wbuf[0][0]; + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->wbuf[0]); + p_ctx->cmd[0].rcb.cdb[0] = 0x8A; // write(16) + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +int send_single_read(struct ctx *p_ctx, __u64 vlba) +{ + __u64 *p_u64; + __u32 *p_u32; + int rc; + + memset(&p_ctx->rbuf[0][0], 0, sizeof(p_ctx->rbuf[0])); + + 
memset(&p_ctx->cmd[0].rcb.cdb[0],0,sizeof(p_ctx->cmd[0].rcb.cdb)); + + p_ctx->cmd[0].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[0].rcb.cdb[2]; + + p_ctx->cmd[0].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL; + p_ctx->cmd[0].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ; + p_ctx->cmd[0].rcb.res_hndl = p_ctx->res_hndl; + write_64(p_u64, vlba); + debug_2("%d : send read for 0X%lX\n", pid, vlba); + + p_ctx->cmd[0].rcb.data_len = sizeof(p_ctx->rbuf[0]); + p_ctx->cmd[0].rcb.data_ea = (__u64) &p_ctx->rbuf[0][0]; + + p_u32 = (__u32*)&p_ctx->cmd[0].rcb.cdb[10]; + + write_32(p_u32, LBA_BLK); + + p_ctx->cmd[0].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[0].sa.ioasc = 0; + + send_single_cmd(p_ctx); + rc = wait_single_resp(p_ctx); + return rc; +} + +int rw_cmp_single_buf(struct ctx *p_ctx, __u64 vlba) +{ + if (cmp_buf((__u64*)&p_ctx->rbuf[0][0], (__u64*)&p_ctx->wbuf[0][0], + sizeof(p_ctx->rbuf[0])/(sizeof(__u64)))) { + printf("%d: miscompare at start_lba 0X%lX\n", + pid, vlba); + hexdump(&p_ctx->rbuf[0][0],0x20,"Read buf"); + hexdump(&p_ctx->wbuf[0][0],0x20,"Write buf"); + return -1; + } + return 0; +} diff --git a/src/master/test/run_master_fvt.C b/src/master/test/run_master_fvt.C new file mode 100644 index 00000000..032def4b --- /dev/null +++ b/src/master/test/run_master_fvt.C @@ -0,0 +1,43 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/master/test/run_master_fvt.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/** + ******************************************************************************* + * \file + * \brief + * \ingroup + ******************************************************************************/ +#include + +//DO NOT ADD TEST CASES IN THIS FILE + +class Master_FVT_Suite : public testing::Test +{ + void SetUp() + { + } + void TearDown() + { + } +}; diff --git a/src/provisioning/library/makefile b/src/provisioning/library/makefile new file mode 100644 index 00000000..4a3f0070 --- /dev/null +++ b/src/provisioning/library/makefile @@ -0,0 +1,48 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/provisioning/library/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +ROOTPATH = ../../.. 
+ +CFLAGS += -g + +MODULE = prov + +ALLOW_WARNINGS = yes + +OBJS = provafu.o provutil.o provvpd.o + +OBJS64 = provafu.64o provutil.64o provvpd.64o + +EXPFLAGS = -bexpall + +UNAME=$(shell uname) +ifeq ($(UNAME),AIX) +MODLIBS = -lcxl +else +MODLIBS = -lcxl +endif + + +include ${ROOTPATH}/config.mk diff --git a/src/provisioning/library/provafu.c b/src/provisioning/library/provafu.c new file mode 100644 index 00000000..7abe198a --- /dev/null +++ b/src/provisioning/library/provafu.c @@ -0,0 +1,680 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/library/provafu.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include //errno parsing on kernel calls +#include //provides capi support +#include //needed for PRIx64 +#include //needed for mmap! 
+ +#include //mmio's with byte swap +#include +#include +#include +#include +#include + + +void * mmap_afu_wwpn_registers(int afu_master_fd) +{ + //Locals + void *l_ret_ptr = NULL; + + //Code + TRACEI("Attempting to mmap 0x%016X bytes of problem state.\n", FC_PORT_MMAP_SIZE); + l_ret_ptr = mmap(NULL, FC_PORT_MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, + afu_master_fd, 0); + TRACEV("MMAP AFU Master complete\n"); + if (l_ret_ptr == MAP_FAILED) { + TRACEE("Unable to mmap problem state regs. errno = %d (%s)\n", errno, strerror(errno)); + l_ret_ptr = NULL; + } + + return l_ret_ptr; +} + +void munmap_afu_wwpn_registers(void *ps_regs) +{ + if (munmap(ps_regs, FC_PORT_MMAP_SIZE)) + perror("munmap_afu_psa_registers"); +} + + + +#define MAX_PORT_STATUS_RETRY_COUNT 20 //20 500ms retries = 10 seconds +#define PORT_STATUS_RETRY_INTERVAL_US 500000 //microseconds +#define FC_MTIP_STATUS_MASK_OFFLINE 0x00000010 //bit 4 set, bit 5 cleared +#define FC_MTIP_STATUS_MASK_ONLINE 0x00000020 //bit 4 cleared, bit 5 set +bool wait_port_state(const uint8_t* i_afu_psa_addr, const int i_port_offset, const bool i_online) +{ + //Locals + bool l_rc = false; + uint8_t i = 0; + uint32_t mmio_data32 = 0; + uint32_t l_target_status = FC_MTIP_STATUS_MASK_OFFLINE; + if(i_online) + { + l_target_status = FC_MTIP_STATUS_MASK_ONLINE; + } + TRACEV("Waiting for port status mask equal to 0x%08x\n", l_target_status); + //Code + for(i=0; i= MAX_WWPNS_PER_ADAPTER) + { + TRACEE("Port number %d is not a valid port number. We only support up to %d numbers.\n", i_port, MAX_WWPNS_PER_ADAPTER); + l_rc = false; + break; + } + + l_fc_port_offset = FC_PORT_BASE_OFFSET + (i_port * FC_PORT_REG_SIZE); + TRACEV("Target reg bank for port %d = 0x%08x\n",i_port, l_fc_port_offset); + + TRACEI("Attempting to open device '%s'\n", i_afu_path); + + /* now that the AFU is started, lets set config options */ + if ((afu_master_h = cxl_afu_open_dev(i_afu_path)) < 0) { + TRACEE("Unable to open AFU Master cxl device. 
errno = %d (%s)\n", errno, strerror(errno)); + l_rc = false; + break; + } + TRACEV("Opened %p, now attempting to get FD\n", afu_master_h); + afu_master_fd = cxl_afu_fd(afu_master_h); + TRACEV("Got FD! = %d\n", afu_master_fd); + + if (cxl_afu_attach(afu_master_h, + 0)) //no WEQ needed + { + TRACEE("Call to cxl_afu_attach failed. errno = %d (%s)\n", errno, strerror(errno)); + l_rc = false; + break; + } + + + afu_psa_addr = mmap_afu_wwpn_registers(afu_master_fd); + if (!afu_psa_addr) { + TRACEE("Error attempting to map AFU problem state registers. errno = %d (%s)\n", errno, strerror(errno)); + l_rc = false; + break; + } + + //Loopback enables a special echo mode and checks tx and rx counts to make sure + //the phy works correctly. The test is relatively short. + TRACEI("Enabling loopback mode for adapter %s, port %d\n", i_afu_path, i_port); + //first read back all test register counts. The AFU does not zero them + l_init_txcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_TXCNT + l_fc_port_offset]); + l_init_errcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_ERRCNT + l_fc_port_offset]); + l_init_passcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_PASSCNT + l_fc_port_offset]); + + + //enable the test + mmio_data = in_mmio64((__u64*)&afu_psa_addr[FC_CONFIG2 + l_fc_port_offset]); + mmio_data |= (uint64_t)0x01 << 40; //set ECHO generator for this port (bit 40) + out_mmio64((__u64*)&afu_psa_addr[FC_CONFIG2 + l_fc_port_offset], mmio_data); + + TRACEI("Waiting %"PRIu64" microseconds for the test to complete...\n", i_test_time_us); + usleep(i_test_time_us); + TRACEI("Disabling loopback mode.\n"); + mmio_data = in_mmio64((__u64*)&afu_psa_addr[FC_CONFIG2 + l_fc_port_offset]); + mmio_data &= ~((uint64_t)0x01 << 40); //clear ECHO generator for this port (bit 40) + out_mmio64((__u64*)&afu_psa_addr[FC_CONFIG2 + l_fc_port_offset], mmio_data); + + TRACEI("Waiting for the link to quiesce...\n"); + usleep(LOOPBACK_TEST_QUIET_PERIOD);//wait for quiesce and any final timeouts + 
+ //check test results from HW - subtract out initial readings + l_txcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_TXCNT + l_fc_port_offset]) - l_init_txcount; + l_errcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_ERRCNT + l_fc_port_offset]) - l_init_errcount; + l_passcount = in_mmio64((__u64*)&afu_psa_addr[FC_LOOPBACK_PASSCNT + l_fc_port_offset]) - l_init_passcount; + + if((l_txcount == 0) || + (l_errcount != 0) || + (l_txcount != l_passcount)) + { + TRACED("Loopback diagnostic failure detected. AFU: %s, port: %d, Tx Count: %"PRIu64", Pass Count: %"PRIu64", Error Count: %"PRIu64"\n", i_afu_path, i_port, l_txcount, l_passcount, l_errcount); + l_rc = false; + } + else + { + TRACEV("Loopback test passed. AFU: %s, port: %d, Tx Count: %"PRIu64", Pass Count: %"PRIu64", Error Count: %"PRIu64"\n", i_afu_path, i_port, l_txcount, l_passcount, l_errcount); + l_rc = true; + } + + //done! + munmap_afu_wwpn_registers((void *) afu_psa_addr); + cxl_afu_free(afu_master_h); + + + } while (0); + + return l_rc; + +} + + + +#define LOOPBACK_TEST_TIME_MIN 1000000l //uS units - 1 second test time +#define LOOPBACK_TEST_TIME_MAX 86400000000l //uS units - 24 hrs test time +bool provLoopbackTest(const prov_adapter_info_t* i_adapter, uint64_t i_test_time_us) +{ + //Locals + bool l_rc = true; //rely on l_rc defaulting to true. only write it to false on error + bool l_portrc = true; + uint8_t l_curr_port = 0; + char l_afu_path[DEV_PATH_LENGTH] = {0}; + //Code + do + { + if(i_adapter == NULL) + { + TRACEE("Error! invalid args detected.\n"); + l_rc = false; + break; + } + if((i_test_time_us < LOOPBACK_TEST_TIME_MIN) || (i_test_time_us > LOOPBACK_TEST_TIME_MAX)) + { + TRACEE("Error! 
Test time of %"PRIu64" is invalid.\n", i_test_time_us); + l_rc = false; + break; + } + snprintf(l_afu_path, sizeof(l_afu_path), "/dev/cxl/%sm", i_adapter->afu_name); + + TRACEI("Running loopback for %s\n", l_afu_path); + for(l_curr_port=0; l_curr_port < MAX_WWPNS_PER_ADAPTER; l_curr_port++) + { + l_portrc = loopbackDiag(l_afu_path, l_curr_port, i_test_time_us); + if(l_portrc == false) + { + TRACEI("Loopback failed.\n"); + l_rc = false; //any single port failure makes the entire test fail + } + } + + } while (0); + + + return l_rc; +} + +//Current goal: clean up set_afu_master_psa_registers() - this code +//needs to handle (correctly) finding the AFU device path, +//opening it, etc. +//once initialize_wwpn can work for a single port, we will iterate across the wwpn +//structures sequentially programming each port for all AFUs. + +bool initialize_wwpn(char* i_afu_path, const AFU_PORT_ID i_port, const uint64_t i_wwpn) +{ + //Locals + struct cxl_afu_h *afu_master_h = NULL; + int afu_master_fd = 0; + uint8_t *afu_psa_addr = NULL; + uint32_t mmio_data32 = 0; + uint64_t mmio_data = 0; + int l_fc_port_offset = 0; //todo: replace all references with "l_fc_port_offset" + bool l_rc = 0; + + //Code + do + { + if(i_port >= MAX_WWPNS_PER_ADAPTER) + { + TRACEE("Port number %d is not a valid port number. We only support up to %d numbers.\n", i_port, MAX_WWPNS_PER_ADAPTER); + l_rc = false; + break; + } + if(i_wwpn == 0) + { + TRACEE("WWPN for %s port %d is zero and therefore invalid.\n", i_afu_path, i_port); + l_rc = false; + break; + } + + l_fc_port_offset = FC_PORT_BASE_OFFSET + (i_port * FC_PORT_REG_SIZE); + TRACEV("Target reg bank for port %d = 0x%08x\n",i_port, l_fc_port_offset); + + TRACEI("Attempting to open device '%s'\n", i_afu_path); + + /* now that the AFU is started, lets set config options */ + if ((afu_master_h = cxl_afu_open_dev(i_afu_path)) < 0) { + TRACEE("Unable to open AFU Master cxl device. 
errno = %d (%s)\n", errno, strerror(errno)); + l_rc = false; + break; + } + TRACEV("Opened %p, now attempting to get FD\n", afu_master_h); + afu_master_fd = cxl_afu_fd(afu_master_h); + TRACEV("Got FD! = %d\n", afu_master_fd); + + if (cxl_afu_attach(afu_master_h, + 0)) //no WEQ needed + { + TRACEE("Call to cxl_afu_attach failed. errno = %d (%s)\n", errno, strerror(errno)); + l_rc = false; + break; + } + + + afu_psa_addr = mmap_afu_wwpn_registers(afu_master_fd); + if (!afu_psa_addr) { + TRACEE("Error attempting to map AFU problem state registers. errno = %d (%s)\n", errno, strerror(errno)); + l_rc = -1; + break; + } + + //Take port offline + + //TRACED("Bringing port offline\n"); + + + //get status bits for debug + //TODO: maybe wrap up this print into a nice macro... + mmio_data32 = in_mmio32((__u64*)&afu_psa_addr[FC_MTIP_STATUS + l_fc_port_offset]); + TRACEV("FC_MTIP_STATUS (0x%08X): 0x%08X\n", FC_MTIP_STATUS + l_fc_port_offset, mmio_data32); + mmio_data32 = in_mmio32((__u64*)&afu_psa_addr[FC_MTIP_CMDCONFIG + l_fc_port_offset]); + TRACEV("FC_MTIP_CMDCONFIG (0x%08X): 0x%08X\n", FC_MTIP_CMDCONFIG + l_fc_port_offset, mmio_data32); + mmio_data32 &= ~0x20; // clear ON_LINE + mmio_data32 |= 0x40; // set OFF_LINE + TRACEV("FC_MTIP_CMDCONFIG: Proposed: 0x%08X\n", mmio_data32); + out_mmio32((__u64*)&afu_psa_addr[FC_MTIP_CMDCONFIG + l_fc_port_offset], mmio_data32); + + //wait for the port to be offline + l_rc = wait_port_state(afu_psa_addr, l_fc_port_offset, false); + if(l_rc == false) + { + TRACEE("Port not offline in time. Aborting.\n"); + l_rc = -1; + break; + } + + //now we know we are offline, so write the PN... 
+ + + //read out the current PN + //PN register is 64-bit as per spec + mmio_data = in_mmio64((__u64*)&afu_psa_addr[FC_PNAME + l_fc_port_offset]); + TRACEV("FC_PNAME: (0x%08X): 0x%"PRIx64"\n",FC_PNAME + l_fc_port_offset, mmio_data); + TRACEI("Current Port Name is 0x%"PRIx64"\n", mmio_data); + + mmio_data = i_wwpn; + TRACEI("New Port Name will be 0x%"PRIx64"\n", mmio_data); + + out_mmio64((__u64*)&afu_psa_addr[FC_PNAME + l_fc_port_offset], mmio_data); + + //bring the port back online + //read control bits... + mmio_data32 = in_mmio32((__u64*)&afu_psa_addr[FC_MTIP_CMDCONFIG + l_fc_port_offset]); + TRACEV("FC_MTIP_CMDCONFIG (0x%08X): 0x%08X\n", FC_MTIP_CMDCONFIG + l_fc_port_offset, mmio_data32); + mmio_data32 |= 0x20; // set ON_LINE + mmio_data32 &= ~0x40; // clear OFF_LINE + //TODO: ask todd - should we explicitly-set other bits? I needed to force the port into FC mode (set bit 1) + //net result is we write 0x23 to the lowest byte + TRACEV("FC_MTIP_CMDCONFIG: Proposed: 0x%08X\n", mmio_data32); + out_mmio32((__u64*)&afu_psa_addr[FC_MTIP_CMDCONFIG + l_fc_port_offset], mmio_data32); + + //wait for the port to be online + l_rc = wait_port_state(afu_psa_addr, l_fc_port_offset, true); + if(l_rc == false) + { + TRACEE("Port not online in time.\n"); + l_rc = -1; + break; + } + + //done! + munmap_afu_wwpn_registers((void *) afu_psa_addr); + cxl_afu_free(afu_master_h); + + + + TRACEI("Successfully programmed WWPN!\n"); + l_rc = true; + + } while (0); + + return l_rc; + +} + + + +bool provInitAdapter(const prov_adapter_info_t* i_adapter) +{ + //Locals + bool l_rc = false; + uint64_t l_curr_wwpn = 0; + uint8_t l_curr_port = 0; + char l_afu_path[DEV_PATH_LENGTH] = {0}; + //Code + do + { + if(i_adapter == NULL) + { + TRACEE("Error! 
invalid args detected.\n"); + l_rc = false; + break; + } + snprintf(l_afu_path, sizeof(l_afu_path), "/dev/cxl/%sm", i_adapter->afu_name); + TRACED("Setting up adapter '%s'...\n", l_afu_path); + + + TRACEI("Initializing WWPN Data\n"); + for(l_curr_port=0; l_curr_port < MAX_WWPNS_PER_ADAPTER; l_curr_port++) + { + l_curr_wwpn = strtoul(i_adapter->wwpn[l_curr_port],NULL,16); + TRACEV("Initing AFU '%s' port %d\n", l_afu_path, l_curr_port); + l_rc = initialize_wwpn(l_afu_path, l_curr_port, l_curr_wwpn); + if(l_rc == false) + { + TRACEE("Error occurred while initializing WWPN Data.\n"); + break; + } + } + + //finished! + TRACED("SUCCESS: Initialization Complete!\n"); + } while (0); + + + return l_rc; +} + +void provGetAFUs() +{ + //Locals + struct cxl_afu_h *l_afu; + char *l_pci_path = NULL; + int l_rc = 0; + + //Code + cxl_for_each_afu(l_afu) + { + TRACEI("AFU found: '%s'\n", cxl_afu_devname(l_afu)); + l_rc = cxl_afu_sysfs_pci(&l_pci_path, l_afu); + TRACEI("sysfs rc: %d\n", l_rc); + TRACEI("sysfs path: '%s'\n", l_pci_path); + free(l_pci_path); + } +} + + + +#define MAX_VPD_SIZE 512 + +bool provGetAllAdapters(prov_adapter_info_t* o_info, int* io_num_adapters) +{ + //Locals + struct cxl_afu_h *l_afu; + char *l_pci_path = NULL; + bool l_rc = 0; + //prov_adapter_info_t* l_curr_adapter_info = NULL; + int l_num_adapters = 0; + + //Code + cxl_for_each_afu(l_afu) + { + if(l_num_adapters >= *io_num_adapters) + { + TRACEE("Warning: io_num_adapters = %d, and we have at least one more adapter. 
Flagging this as an error.\n", *io_num_adapters); + l_rc = false; + break; + } + TRACEI("AFU found: '%s'\n", cxl_afu_devname(l_afu)); + l_rc = cxl_afu_sysfs_pci(&l_pci_path, l_afu); + TRACEI("sysfs rc: %d\n", l_rc); + TRACEI("sysfs path: '%s'\n", l_pci_path); + l_rc = provGetAdapterInfo(l_pci_path, &o_info[l_num_adapters]); + if(l_rc == true) + { + strncpy(o_info[l_num_adapters].afu_name, cxl_afu_devname(l_afu), sizeof(o_info[l_num_adapters].afu_name)); + l_num_adapters++; + } + else + { + TRACEE("Failure occurred parsing adapter info for '%s'\n",l_pci_path); + l_rc = false; + break; + } + + } + + //all exit paths return the same way + if(l_rc == false) + { + *io_num_adapters = 0; + } + else + { + *io_num_adapters = l_num_adapters; + } + return l_rc; +} + + +bool provGetAdapterInfo(const char* i_pci_path, prov_adapter_info_t* o_info) +{ + //Locals + bool l_rc = true; + FILE *l_vpd_file = NULL; + char l_pci_path[DEV_PATH_LENGTH]; + uint8_t l_vpd_buffer[MAX_VPD_SIZE]; + int l_kw_length = 0; + size_t n=0; + + //Code + do + { + TRACEI("Reading WWPN data...\n"); + if((i_pci_path == NULL) || + (strlen(i_pci_path) == 0) || + (strlen(i_pci_path) > DEV_PATH_LENGTH) || + (o_info == NULL)) + { + TRACEI("Found null or invalid parm!\n"); + l_rc = false; + break; + } + + bzero(l_pci_path, DEV_PATH_LENGTH); + bzero(l_vpd_buffer, MAX_VPD_SIZE); + bzero(o_info, sizeof(prov_adapter_info_t)); + + //generate the proper VPD path to get VPD + snprintf(l_pci_path, DEV_PATH_LENGTH, "%s/%s", i_pci_path, "vpd"); + + TRACEV("Opening up '%s'\n", l_pci_path); + + l_vpd_file = fopen(l_pci_path, "rb"); + if (l_vpd_file) + { + n = fread(l_vpd_buffer, 1, MAX_VPD_SIZE, l_vpd_file); + } + else + { + TRACEI("Unable to read file. Do you have permission to open '%s' ?", l_pci_path); + l_rc = false; + break; + // error opening file + } + + if(n < MAX_VPD_SIZE) + { + l_rc = false; + TRACEI("Warning: Buffer underrun. 
This indicates a potential VPD format problem.\n"); + break; + } + TRACEV("Searching for V5 and V6 KW data...\n"); + l_kw_length = 16; + l_rc = provFindVPDKw("V5", l_vpd_buffer, n, (uint8_t*)o_info->wwpn[0],&l_kw_length); + if(l_rc == false) + { + TRACEE("Error: Unable to find Port name VPD for Port 1 (VPD KW V5)"); + break; + } + l_kw_length = 16; + l_rc = provFindVPDKw("V6", l_vpd_buffer, n, (uint8_t*)o_info->wwpn[1],&l_kw_length); + if(l_rc == false) + { + TRACEE("Error: Unable to find Port name VPD for Port 1 (VPD KW V5)"); + break; + } + //set up the output var + strcpy(o_info->afu_name, "unknown"); + strncpy(o_info->pci_path, i_pci_path, DEV_PATH_LENGTH); + //TODO - need fns to get VPD data here and fill it in. + + //put data in successfully! + l_rc = true; + + } while (0); + //close the file on all paths, if it was opened + if(l_vpd_file) + { + fclose(l_vpd_file); + l_vpd_file = NULL; + } + return l_rc; +} + +uint8_t provGetAllWWPNs(prov_wwpn_info_t* io_wwpn_info, uint16_t *io_num_wwpns) +{ + //Locals + struct cxl_afu_h *l_afu; + char *l_pci_path = NULL; + char l_afu_path[DEV_PATH_LENGTH]; + int l_rc = 0; + uint16_t l_total_wwpns = 0; + uint16_t l_adapter_wwpn_count = 0; + prov_adapter_info_t l_adapter_info; + //Code + do + { + TRACEI("Getting WWPNs\n"); + if((io_num_wwpns == 0) || io_wwpn_info == NULL) + { + TRACEI("Buffer is too small, or null!"); + break; + } + cxl_for_each_afu(l_afu) + { + bzero(l_afu_path, sizeof(l_afu_path)); + l_pci_path = NULL; + TRACEI("AFU found: '%s'\n", cxl_afu_devname(l_afu)); + //TODO: why do i have to build this myself? should we be + //always appending the "master" context on the end here? 
+ snprintf(l_afu_path, sizeof(l_afu_path), "/dev/cxl/%sm", cxl_afu_devname(l_afu)); + l_rc = cxl_afu_sysfs_pci(&l_pci_path, l_afu); + TRACEI("sysfs rc: %d\n", l_rc); + TRACEI("sysfs path: '%s'\n", l_pci_path); + TRACEI("afu path: '%s'\n", l_afu_path); + l_rc = provGetAdapterInfo(l_pci_path, &l_adapter_info); + //free the buffer so we can reallocate it next time + if(l_rc == false) + { + //skip - invalid data came back + TRACEI("Invalid VPD found for this adapter. skipping it.\n"); + continue; + } + for(l_adapter_wwpn_count = 0; l_adapter_wwpn_count < MAX_WWPNS_PER_ADAPTER; l_adapter_wwpn_count++) + { + TRACEI("Got a wwpn: '%s'\n", l_adapter_info.wwpn[l_adapter_wwpn_count]); + //io_wwpn_info[l_total_wwpns].afu_path + + //zero out the next available element + bzero(&io_wwpn_info[l_total_wwpns], sizeof(prov_wwpn_info_t)); + strncpy(io_wwpn_info[l_total_wwpns].pci_path, l_pci_path, DEV_PATH_LENGTH); + strncpy(io_wwpn_info[l_total_wwpns].afu_path, l_afu_path, DEV_PATH_LENGTH); + io_wwpn_info[l_total_wwpns].port_id = l_adapter_wwpn_count; + strncpy(io_wwpn_info[l_total_wwpns].wwpn, l_adapter_info.wwpn[l_adapter_wwpn_count], WWPN_BUFFER_LENGTH); + l_total_wwpns++; + } + free(l_pci_path); + l_pci_path = NULL; + + } + if(l_total_wwpns == 0) + { + TRACEI("No wwpns found!\n"); + } + + + } while (0); + + *io_num_wwpns = l_total_wwpns; + + return 0; + +} diff --git a/src/provisioning/library/provafu.h b/src/provisioning/library/provafu.h new file mode 100644 index 00000000..2fe23c3a --- /dev/null +++ b/src/provisioning/library/provafu.h @@ -0,0 +1,58 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/library/provafu.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/** + * @file provafu.h + * @brief Contains all internal headers for provisioning afu logic + * + * + */ + +#ifndef _PROVAFU_H +#define _PROVAFU_H +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Structures */ +/*----------------------------------------------------------------------------*/ + + + + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +bool provGetAdapterInfo(const char* i_pci_path, prov_adapter_info_t* o_info); + + +#endif //_PROV_H diff --git a/src/provisioning/library/provutil.c b/src/provisioning/library/provutil.c new file mode 100755 index 00000000..9c71e84b --- /dev/null +++ b/src/provisioning/library/provutil.c @@ -0,0 +1,147 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/provisioning/library/provutil.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +int32_t g_traceE = 1; /* error traces */ +int32_t g_traceI = 0; /* informative 'where we are in code' traces */ +int32_t g_traceF = 0; /* function exit/enter */ +int32_t g_traceV = 0; /* verbose trace...lots of information */ +/*----------------------------------------------------------------------------*/ +/* Struct / Typedef */ +/*----------------------------------------------------------------------------*/ + 
/**
 * @brief Hex-dump a buffer through TRACED, 16 bytes per line.
 *
 * Each line shows a crude ASCII rendering of the 16 bytes (digits and
 * upper-case letters only; anything else prints as '.') in columns 0-15,
 * followed by the hex bytes starting at column 'indent'. The "[0x%.4x]"
 * prefix printed with each line is the offset AFTER that line's last byte
 * (the first line of a 32-byte buffer is labeled 0x0010) -- NOTE(review):
 * looks intentional but confirm against consumers of this trace output.
 *
 * @param buffer        bytes to dump (read only)
 * @param buffer_length number of bytes to dump
 */
void prov_pretty_print(
    uint8_t *buffer,
    uint32_t buffer_length)
{
    /*------------------------------------------------------------------------*/
    /* Local Variables */
    /*------------------------------------------------------------------------*/
    uint32_t offset = 0;
    uint32_t i;
    uint32_t j;
    char line_to_print[100]; //80 visible columns + NUL, with slack for the "\n"
    char sub_string[100];
    char char_to_print;
    uint32_t indent = 23;           //column where the hex bytes start (first line)
    uint32_t secondary_indent = 23; //same column for all subsequent lines

    /*-------------------------------------------------------------------------*/
    /* Code */
    /*-------------------------------------------------------------------------*/

    sub_string[0] = 0;
    line_to_print[0] = 0;

    //pre-fill the line template with spaces so short lines print cleanly
    for (j = 0; j < 80; j++)
    {
        line_to_print[j] = 0x20;
    }
    line_to_print[80] = 0;

    TRACED("UPPER-CASE ASCII HEX \n");
    for (i = 0; i < buffer_length; i++)
    {
        // print byte in hex format (3 chars: leading space + 2 hex digits)
        sprintf (sub_string, " %.2x", buffer[i]);

        if (i < 16)
        {
            strncpy (&line_to_print[indent + (i % 16 * 3)], sub_string, 3);
        }
        else
        {
            strncpy (&line_to_print[secondary_indent + (i % 16 * 3)],
                     sub_string, 3);
        }

        // Only print letters in ASCII format
        char_to_print = buffer[i];

        if (((char_to_print >= 0x30) && (char_to_print <= 0x39)) ||
            ((char_to_print >= 65) && (char_to_print < 65 + 26)))
        {
            // do nothing. Its a number or letter.
        }
        else
        {
            char_to_print = '.'; //mask anything outside [0-9A-Z]
        }

        line_to_print[0 + (i % 16)] = char_to_print; //change 60 to 0

        offset++;

        //a full 16-byte line is complete: emit it and reset the template
        if ((i % 16) == 15)
        {
            strcat (line_to_print, "\n");
            TRACED("[0x%.4x] %s",offset, line_to_print);

            for (j = 0; j < 80; j++)
            {
                line_to_print[j] = 0x20;
            }
            line_to_print[80] = 0;

            //NOTE(review): this stamps the offset into columns 0-3, which the
            //ASCII characters above then overwrite on the next pass --
            //presumably leftover from an older layout (see "change 60 to 0");
            //confirm before relying on it.
            sprintf (sub_string, "%.4x", offset);
            strncpy (&line_to_print[0], sub_string, 4);
        }

    }

    /* Make sure any partial last lines get printed.
     */
    if (((i - 1) % 16) != 15)
    {
        strcat (line_to_print, "\n");
        TRACED("[0x%.4x] %s", offset, line_to_print);
    }
}
Return the KW found, the data, and the length of the data. + Tell me where to start for the next segment of the buffer. + + In a separate loop, iterate until we exceed the size of the buffer, or reach + a stop condition. + */ + +bool provFindVPDKw(const char* i_kw, const uint8_t* i_vpd_buffer, size_t i_vpd_buffer_length, uint8_t* o_kwdata, int* io_kwdata_length) +{ + //Locals + bool l_rc = false; + bool l_found_kw = false; + prov_pci_vpd_header_t* l_vpd_header = NULL; + int l_section_length = 0; + uint8_t* l_buffer_ptr = NULL; + char l_curr_kw_name[KWNAME_SZ + 1] = {0}; //+1 b/c we want a terminating null + char l_curr_kw_data[KWDATA_SZ] = {0}; + char l_vpd_name[KWDATA_SZ] = {0}; + int l_vpd_name_sz = 0; + prov_pci_vpd_segment_t* l_vpd_section = NULL; + const uint8_t* l_end_of_buffer = &i_vpd_buffer[i_vpd_buffer_length]; //get the address of the end of the buffer. note this is the 1st byte PAST the end of the array + uint8_t l_curr_kw_sz = 0; + //Code + TRACEV("Entry\n"); + do + { + if((i_kw == NULL) || (i_vpd_buffer == NULL) || + (i_vpd_buffer_length == 0) || + (o_kwdata == NULL) || (io_kwdata_length == NULL)) + { + TRACEE("Invalid or null Args. Unable to parse VPD structures.\n"); + l_rc = false; + break; + } + + //hope for the best + l_vpd_header = (prov_pci_vpd_header_t*) i_vpd_buffer; + + //validate if we have a real PCI VPD or not + //we expect read-only data to come first + if(l_vpd_header->pci_eyecatcher != PCI_FORMAT_EYECATCHER) + { + TRACEE("This doesn't appear to be valid VPD. " + "PCI eyecatcher = 0x%02x, expected 0x%02x.\n", + l_vpd_header->pci_eyecatcher, PCI_FORMAT_EYECATCHER); + l_rc = false; + break; + } + l_vpd_name_sz = PROV_CONVERT_UINT8_ARRAY_UINT16(l_vpd_header->name_sz[0], + l_vpd_header->name_sz[1]); + TRACEV("Got apparently-valid eyecatcher data. Name size is %d.\n", l_vpd_name_sz); + + if(l_vpd_name_sz > KWDATA_SZ) { + TRACEI("Warning: Trimming KW Name down to %d bytes. 
Original was %d\n", KWDATA_SZ, l_vpd_name_sz); + l_vpd_name_sz = KWDATA_SZ; + } + bzero(l_vpd_name, sizeof(l_vpd_name)); + strncpy(l_vpd_name, l_vpd_header->name, l_vpd_name_sz); + TRACEV("Parsing VPD for '%s'\n", l_vpd_name); + + //get the address of the VPD section that follows the name by relying on + //the fact that the name section is an "array" in the struct, and that we can + //index into the array for the length of the KW. For example - a 0-length + //name would technically mean that the "name" byte of the struct represents + //the next segment of data. A 1-byte name would get the 2nd byte after, etc. + l_vpd_section = (prov_pci_vpd_segment_t*)&l_vpd_header->name[l_vpd_name_sz]; + + l_section_length = PROV_CONVERT_UINT8_ARRAY_UINT16(l_vpd_section->segment_sz[0], + l_vpd_section->segment_sz[1]); + TRACEV("Got %d bytes of RO section data.\n", l_section_length); + //set up the pointer to the beginning of the keyword data + l_buffer_ptr = l_vpd_section->keywords; + //l_buffer_pt + while((l_buffer_ptr < l_end_of_buffer) && + (*l_buffer_ptr != PCI_DATA_ENDTAG)) + { + bzero(l_curr_kw_name, sizeof(l_curr_kw_name)); + bzero(l_curr_kw_data, sizeof(l_curr_kw_data)); + l_curr_kw_sz = 0; + + if(*l_buffer_ptr == PCI_RW_DATA_EYECATCHER) + { + uint8_t lo = *l_buffer_ptr++; + uint8_t hi = *l_buffer_ptr++; + + l_section_length = PROV_CONVERT_UINT8_ARRAY_UINT16(lo, + hi); + TRACEI("RW Data section found of length %d bytes, starting a new section.\n", l_section_length); + continue; //new section found, so continue processing + + } + + //get the name of the KW + its size + l_curr_kw_name[0] = *l_buffer_ptr++; + l_curr_kw_name[1] = *l_buffer_ptr++; + l_curr_kw_sz = *l_buffer_ptr++; + TRACEV("Current KW: '%s' size = %d\n",l_curr_kw_name, l_curr_kw_sz); + + //copy the data out. note this may copy zero bytes if the KW is zero + //length (which seems to be allowed by the spec). + memcpy(l_curr_kw_data, l_buffer_ptr, l_curr_kw_sz); + + + //check to see if we found the desired KW! 
+ if(0 == strcmp(i_kw, l_curr_kw_name)) + { + l_found_kw = true; + break; + } + + //advance the pointer by the size of the KW and loop again... + l_buffer_ptr+=l_curr_kw_sz; + + } //end inner while that is searching the buffer for KW data + + if(l_found_kw) + { + TRACEV("Found VPD for keyword '%s' length %d\n",l_curr_kw_name, l_curr_kw_sz); + if(*io_kwdata_length < l_curr_kw_sz) + { + TRACED("Output buffer %d is too small for keyword '%s' data. We need at least %d bytes.\n", *io_kwdata_length, l_curr_kw_name, l_curr_kw_sz); + l_rc = false; + break; + } + else + { + TRACEV("Copying data to output buffer...\n"); + *io_kwdata_length = l_curr_kw_sz; + memcpy(o_kwdata, l_curr_kw_data, l_curr_kw_sz); + l_rc = true; + } + } + + } while (0); + //all paths exit via the same return path + if(l_rc == false) + { + //set the output size to 0 for consistency + *io_kwdata_length = 0; + } + return l_rc; +} diff --git a/src/provisioning/library/provvpd.h b/src/provisioning/library/provvpd.h new file mode 100644 index 00000000..4c0309ab --- /dev/null +++ b/src/provisioning/library/provvpd.h @@ -0,0 +1,80 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/library/provvpd.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#ifndef PROVVPD_H +#define PROVVPD_H + +#include +#include + +//assume little-endian data +#define PROV_CONVERT_UINT8_ARRAY_UINT16(lo,hi) \ +(((hi)<<8) | (lo)) + +#define KWDATA_SZ 256 //max size of a VPD buffer + +//PCI Spec 2.1 section 6.4 specifies the structure of this data. However, the +//book "PCI System Architecture" explains it much more clearly. +//See tables 19-27, 19-28, 19-29, and 19-30 of the book for a good explanation. + +//Note! The structres below can't / won't tell the total length of the combined +//read/write AND read-only data of the VPD! + +//Note! The RO section may be followed by a RW section with its own length. +//Finding a null byte does not necessarily mean we've reached the end of the VPD +//for the adapter. The RO section may be zero-padded. In theory we must scan +//forward until we find either the PCI_RW_DATA_EYECATCHER or the PCI_DATA_ENDTAG. +#define PCI_FORMAT_EYECATCHER 0x82 +#define PCI_RO_DATA_EYECATCHER 0x90 +#define PCI_RW_DATA_EYECATCHER 0x91 +#define PCI_DATA_ENDTAG 0x78 +typedef struct __attribute__((__packed__))prov_pci_vpd_header +{ + char pci_eyecatcher; //must be 0x82 + uint8_t name_sz[2]; //length of the name field. byte 0 is lo, byte 1 is hi. + char name[1]; + +}prov_pci_vpd_header_t; + +typedef struct __attribute__((__packed__))prov_pci_vpd_segment +{ + char segment_eyecatcher; //must be 0x90 or 0x91 + uint8_t segment_sz[2]; //TOTAL length of the fields. byte 0 is lo, byte 1 is hi. + uint8_t keywords[1]; //variable length VPD data! +}prov_pci_vpd_segment_t; + +#define KWNAME_SZ 3 +typedef struct __attribute__((__packed__))prov_pci_vpd_kwdata +{ + char kwname[KWNAME_SZ]; + uint8_t kw_sz; + uint8_t kwdata[1]; //variable length! 
+}prov_pci_vpd_kwdata_t; + + + +bool provFindVPDKw(const char* i_kw, const uint8_t* i_vpd_buffer, size_t i_vpd_buffer_length, uint8_t* o_kwdata, int* io_kwdata_length); + +#endif diff --git a/src/provisioning/makefile b/src/provisioning/makefile new file mode 100644 index 00000000..f57d0d4a --- /dev/null +++ b/src/provisioning/makefile @@ -0,0 +1,36 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/provisioning/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +ROOTPATH = ../.. +UNAME=$(shell uname) + +.NOTPARALLEL: + +ifeq ($(UNAME),AIX) +SUBDIRS = +else +SUBDIRS = library.d tool.d +endif + +include ${ROOTPATH}/config.mk diff --git a/src/provisioning/tool/afuinject.c b/src/provisioning/tool/afuinject.c new file mode 100644 index 00000000..35c28a91 --- /dev/null +++ b/src/provisioning/tool/afuinject.c @@ -0,0 +1,303 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/tool/afuinject.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +struct arguments g_args = {0}; +int32_t g_traceE = 1; +int32_t g_traceI; +int32_t g_traceF; +int32_t g_traceV; + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +const char * argp_program_version = "afuinject 1.0\n"; +const char * argp_program_bug_address = "tmpacyga@us.ibm.com"; +char doc[] = "\nafuinject -- AFU register reader/writer\n\v"; + +/*----------------------------------------------------------------------------*/ +/* Struct / Typedef */ +/*----------------------------------------------------------------------------*/ +enum argp_char_options { + AFUINJ_DEVICE = 'd', + AFUINJ_READ = 'r', + AFUINJ_WRITE = 'w', + AFUINJ_REGISTER = 'R', + AFUINJ_DATA = 'D', + AFUINJ_DEBUG = 'v', +}; + 
+static struct argp_option options[] = { + {"device", AFUINJ_DEVICE, "device path", OPTION_ARG_OPTIONAL, + "Specify device to read/write from."}, + {"read", AFUINJ_READ, 0, 0, "Read from a register."}, + {"write", AFUINJ_WRITE, 0, 0, "Write to a register."}, + {"register", AFUINJ_REGISTER, "register offset", OPTION_ARG_OPTIONAL, + "Register offset to read/write from."}, + {"data", AFUINJ_DATA, "data to write", OPTION_ARG_OPTIONAL, + "Data to write to register."}, + {"debug", AFUINJ_DEBUG, "", 0, "Internal trace level for tool."}, + {0} +}; + +static struct argp argp = {options, parse_opt, 0, doc}; + +/*----------------------------------------------------------------------------*/ +/* Functions */ +/*----------------------------------------------------------------------------*/ +error_t parse_opt(int key, char * arg, struct argp_state * state) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + switch (key) + { + case AFUINJ_DEVICE: + if((arg != NULL) && (strlen(arg) != 0)) { + strncpy(g_args.target_device, arg, sizeof(g_args.target_device)); + TRACEI("Setting target device to: '%s'\n", g_args.target_device); + } + g_args.device = 1; + break; + + case AFUINJ_READ: + g_args.read = 1; + break; + + case AFUINJ_WRITE: + g_args.write = 1; + break; + + case AFUINJ_REGISTER: + if((arg != NULL) && (strlen(arg) != 0)) { + strncpy(g_args.register_offset, arg, sizeof(g_args.register_offset)); + TRACEI("Using register offset: '%s'\n", g_args.register_offset); + } + g_args.reg = 1; + break; + + case AFUINJ_DATA: + if((arg != NULL) && (strlen(arg) != 0)) { + strncpy(g_args.data, arg, sizeof(g_args.data)); + TRACEI("Using data: '%s'\n", g_args.data); + } + g_args.data_to_write = 
1; + break; + + case AFUINJ_DEBUG: + g_args.verbose = atoi(arg); + if (g_args.verbose >= 1) + g_traceI = 1; + if (g_args.verbose >= 2) + g_traceF = 1; + if (g_args.verbose >= 3) + g_traceV = 1; + TRACEI("Set verbose level to %d.\n", g_args.verbose); + break; + + default: + return (ARGP_ERR_UNKNOWN); + } + + return (0); +} + +void * mmap_afu_registers(int afu_master_fd) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + void * l_ret_ptr = NULL; + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + TRACEI("Attempting to mmap 0x%016zx bytes of problem state.\n", //%zx prints a size_t gracefully + sizeof(struct surelock_afu_map)); + l_ret_ptr = mmap(NULL, sizeof(struct surelock_afu_map), PROT_READ|PROT_WRITE, MAP_SHARED, + afu_master_fd, 0); + TRACEI("MMAP AFU Master complete.\n"); + if (l_ret_ptr == MAP_FAILED) { + TRACEE("Unable to mmap problem state regs. 
errno = %d (%s).\n", + errno, strerror(errno)); + l_ret_ptr = NULL; + } + + return l_ret_ptr; +} + +void munmap_afu_registers(void * ps_regs) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + if(munmap(ps_regs, sizeof(struct surelock_afu_map))) + perror("munmap_afu_registers"); +} + +uint8_t * get_afu_psa_addr(char * i_afu_path) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + struct cxl_afu_h * afu_master_h = NULL; + int afu_master_fd = 0; + uint8_t * afu_psa_addr = NULL; + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + if ((afu_master_h = cxl_afu_open_dev(i_afu_path)) < 0) { + TRACEE("Unable to open AFU Master cxl device. errno = %d (%s).\n", + errno, strerror(errno)); + return NULL; + } + + TRACEI("Opened %p, now attempting to get FD.\n", afu_master_h); + afu_master_fd = cxl_afu_fd(afu_master_h); + TRACEI("Got FD! = %d\n", afu_master_fd); + + if (cxl_afu_attach(afu_master_h, 0)) { + TRACEE("Call to cxl_afu_attach failed. errno = %d (%s)\n", + errno, strerror(errno)); + return NULL; + } + + afu_psa_addr = mmap_afu_registers(afu_master_fd); + + if (!afu_psa_addr) { + TRACEE("Error attempting to map AFU problem state registers. 
\ + errno = %d (%s)\n", errno, strerror(errno)); + return NULL; + } + + return afu_psa_addr; +} + +int write_afu_register(uint8_t * afu_psa_addr, int reg, uint64_t mmio_data) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + out_mmio64((__u64*)&afu_psa_addr[reg], mmio_data); + TRACED("Wrote '0x%"PRIx64"' to regist '0x%08x'.\n", mmio_data, reg); + + return 0; +} + +int read_afu_register(uint8_t * afu_psa_addr, int reg) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + uint64_t mmio_data = 0; + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + mmio_data = in_mmio64((__u64*)&afu_psa_addr[reg]); + TRACED("Register '0x%08x' contains '0x%"PRIx64"'.\n", reg, mmio_data); + + return 0; +} + +int main(int argc, char * argv[]) { + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + uint8_t * afu_psa_addr = NULL; + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + memset(&g_args, 0, sizeof(g_args)); + argp_parse(&argp, argc, argv, ARGP_IN_ORDER, 0, &g_args); + + if(g_args.device) { + afu_psa_addr = get_afu_psa_addr(g_args.target_device); + + if(!afu_psa_addr) { + TRACEE("Error getting afu_psa_addr.\n"); + return -1; + } + + if(g_args.reg) { + if(g_args.read) { + 
read_afu_register(afu_psa_addr, + (int)strtol(g_args.register_offset, NULL, 0)); + } + + else if(g_args.write) { + if(g_args.data_to_write) { + write_afu_register(afu_psa_addr, + (int)strtol(g_args.register_offset, NULL, 0), + (uint64_t)strtol(g_args.data, NULL, 0)); + } + else + TRACED("Need to specify data '-D/--data', exiting.\n"); + } + else { + TRACED("Need to specify a read or write \ + '-r/--read or -w/--write', exiting.\n"); + } + } + else + TRACED("Need to specify register '-R/--register', exiting.\n"); + + munmap_afu_registers((void *)afu_psa_addr); + } + else + TRACED("Need to specify device '-d/--device', exiting.\n"); + + return 0; +} diff --git a/src/provisioning/tool/afuinject.h b/src/provisioning/tool/afuinject.h new file mode 100644 index 00000000..bc11e735 --- /dev/null +++ b/src/provisioning/tool/afuinject.h @@ -0,0 +1,73 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/tool/afuinject.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
/**
 * @brief Parsed command-line state for afuinject, filled in by parse_opt().
 *
 * The int16_t fields act as "option seen" flags (0/1); the char arrays keep
 * the raw CLI strings, which main() later converts with strtol().
 */
struct arguments
{
    int16_t device;              // nonzero once -d/--device was seen
    char target_device[128];     // path of the CXL device to operate on
    int16_t read;                // nonzero: perform a register read (-r)
    int16_t write;               // nonzero: perform a register write (-w)
    int16_t reg;                 // nonzero once -R/--register was seen
    char register_offset[128];   // register offset as the raw CLI string
    int16_t data_to_write;       // nonzero once -D/--data was seen
    char data[128];              // write data as the raw CLI string
    int16_t verbose;             // trace level set by -v/--debug
};
+# +# $Source: src/provisioning/tool/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +UNAME=$(shell uname) + +ROOTPATH = ../../.. +USER_DIR = . +SUBDIRS = +TESTDIR = ${ROOTPATH}/obj/tests + +ALLOW_WARNINGS = yes + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img +LIBPATHS = -L${ROOTPATH}/img +LINKLIBS = -lprov -lcxl + +PGMDIR = ${ROOTPATH}/obj/programs +TESTDIR = ${ROOTPATH}/obj/tests + +PGMS = provtool afuinject +PROGRAMS = $(addprefix ${PGMDIR}/, ${PGMS}) + + + +provtool_OFILES = + +all: $(BIN_TESTS) +test: $(BIN_TESTS) + +# AIX only +ifeq ($(UNAME),AIX) + + +CXXFLAGS +=$(CFLAGS) +LINKLIBS += +GTESTS_DIR = + +#Linux only +else +LINKLIBS+= + +endif + +include ${ROOTPATH}/config.mk diff --git a/src/provisioning/tool/provtool.c b/src/provisioning/tool/provtool.c new file mode 100644 index 00000000..da2b151d --- /dev/null +++ b/src/provisioning/tool/provtool.c @@ -0,0 +1,384 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/provisioning/tool/provtool.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/*! + * @file provtool.c + * @brief Provisioning utility tooling + */ + + +/*----------------------------------------------------------------------------*/ +/* Includes */ +/*----------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ +const char *argp_program_version = "provtool 1.0\n"; +const char *argp_program_bug_address = "IBM Support"; +char doc[] = +"\nprovtool -- Provisioning for CAPI KV Solution\n\v"; + +const char tpmd_session_entry[] = "provtool"; + + + +/*----------------------------------------------------------------------------*/ +/* Struct / Typedef */ +/*----------------------------------------------------------------------------*/ + +// +enum argp_char_options { + + // Note that we need to be careful to not re-use char's + PROV_LIST_AFUS = 'l', + PROV_QUERY_AFU = 'q', + PROV_WWPNS_READ = 'w', + PROV_DEBUG = 'D', + PROV_WWPNS_PGM = 'W', + 
PROV_LOOPBACK = 'L', + + +}; + +static struct argp_option options[] = { + //{"query_afu", PROV_QUERY_AFU, 0, 0, "Query AFU for version information"}, + {"wwpn-read", PROV_WWPNS_READ, "device path", OPTION_ARG_OPTIONAL, "Show all WWPNs in VPD"}, + {"wwpn-pgm", PROV_WWPNS_PGM, "device path", OPTION_ARG_OPTIONAL, "Program WWPNs"}, + {"debug", PROV_DEBUG, "", 0, "Internal trace level for tool"}, + {"loopback", PROV_LOOPBACK, "", OPTION_ARG_OPTIONAL, "Run loopback diagnostics on all present adapters. Time defaults to 1 second"}, + {0} +}; + + +static struct argp argp = { options, parse_opt, 0, doc }; + + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +struct arguments g_args = {0}; +extern int32_t g_traceE; /* error traces */ +extern int32_t g_traceI; /* informative 'where we are in code' traces */ +extern int32_t g_traceF; /* function exit/enter */ +extern int32_t g_traceV; /* verbose trace...lots of information */ + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + + + +error_t parse_opt (int key, + char *arg, + struct argp_state *state) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + static int32_t last_command = -1; + //int32_t l_temp = 0; + + /*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + switch (key) + { + case PROV_LIST_AFUS: + g_args.list_afus = 1; + break; + + case PROV_WWPNS_READ: + if((arg != NULL) && (strlen(arg) != 0)) + { + strncpy(g_args.target_adapter, arg, sizeof(g_args.target_adapter)); + TRACEV("Setting target adapter to: 
'%s'\n", g_args.target_adapter); + } + g_args.wwpn_read = 1; + break; + + case PROV_WWPNS_PGM: + if((arg != NULL) && (strlen(arg) != 0)) + { + strncpy(g_args.target_adapter, arg, sizeof(g_args.target_adapter)); + TRACEV("Setting target adapter to: '%s'\n", g_args.target_adapter); + } + g_args.wwpn_program = 1; + break; + + case PROV_LOOPBACK: + g_args.loopback = 1; + g_args.loopback_time = LOOPBACK_TEST_DEFAULT_S; //choose a default value and then check to see if we have an arg + if((arg != NULL) && (strlen(arg) != 0)) + { + g_args.loopback_time = atoi(arg); + if((g_args.loopback_time < LOOPBACK_TEST_MIN_S) || (g_args.loopback_time > LOOPBACK_TEST_MAX_S)) + { + TRACED("Invalid loopback test time of %d. Defaulting to %d\n", g_args.loopback_time, LOOPBACK_TEST_DEFAULT_S); + g_args.loopback_time = LOOPBACK_TEST_DEFAULT_S; + } + } + break; + + case PROV_DEBUG: + g_args.verbose = atoi (arg); + TRACEI ("Set verbose level to %d\n", g_args.verbose); + if (g_args.verbose >= 1) + g_traceI = 1; + if (g_args.verbose >= 2) + g_traceF = 1; + if (g_args.verbose >= 3) + g_traceV = 1; + break; + + case 0 : + + if((last_command == PROV_QUERY_AFU) && + ((strncmp(arg,"/",1))) && // They may put input dir/file + ((strncmp(arg,".",1)))) // after comp so look for it + { + TRACEI("Also Look for component %s\n",arg); + last_command = PROV_QUERY_AFU; + goto exit_no_set; + + } + + break; + + default: + return (ARGP_ERR_UNKNOWN); + } + + last_command = -1; + TRACEF ("Last cmd\n"); + return (0); + +exit_no_set: + return(0); + +} + + +int main (int argc, char *argv[]) +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + int32_t rc = 0; + + /*-------------------------------------------------------------------------*/ + /* Code */ + /*-------------------------------------------------------------------------*/ + + memset(&g_args,0,sizeof(g_args)); + + argp_parse 
(&argp, argc, argv, ARGP_IN_ORDER, 0, &g_args); + + + if(g_args.loopback) + { + TRACEV("Calling prov_get_all_adapters\n"); + bool l_success = prov_loopback(); + if(!l_success) + { + rc = 1; //arbitrary non-zero RC + } + } + + if(g_args.list_afus) + { + TRACEV("Calling prov_get_all_adapters\n"); + prov_get_all_adapters(); + } + + if(g_args.wwpn_read) + { + TRACEV("Calling prov_get_wwpns\n"); + prov_get_wwpns(); + } + if(g_args.wwpn_program) + { + TRACEV("Calling prov_pgm_wwpns\n"); + prov_pgm_wwpns(); + } + + return(rc); +} + +#define MAX_NUM_ADAPTERS 4 + +void prov_get_all_adapters() +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + prov_adapter_info_t l_adapters[MAX_NUM_ADAPTERS]; + int l_num_adapters = MAX_NUM_ADAPTERS; + bool l_rc = false; + int l_curr_adapter = 0; + int l_curr_wwpn = 0; + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + + bzero(l_adapters, sizeof(l_adapters)); + + l_rc = provGetAllAdapters(l_adapters, &l_num_adapters); + if(l_rc == false) + { + TRACEE("Error occurred in call to provGetAllAdapters\n"); + } + else + { + TRACEV("Got back %d adapters\n", l_num_adapters); + for(l_curr_adapter = 0; l_curr_adapter < l_num_adapters; l_curr_adapter++) + { + TRACED("Adapter '%s'\n", l_adapters[l_curr_adapter].afu_name); + TRACED("PCI Path '%s'\n", l_adapters[l_curr_adapter].pci_path); + for(l_curr_wwpn = 0; l_curr_wwpn < MAX_WWPNS_PER_ADAPTER; l_curr_wwpn++) + { + TRACEI(" Port %d WWPN: 0x%s\n", l_curr_wwpn, l_adapters[l_curr_adapter].wwpn[l_curr_wwpn]); + } + } + } +} + + +void prov_pgm_wwpns() +{ + + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + prov_adapter_info_t 
l_adapters[MAX_NUM_ADAPTERS]; + int l_num_adapters = MAX_NUM_ADAPTERS; + bool l_rc = false; + int l_curr_adapter = 0; + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + + bzero(l_adapters, sizeof(l_adapters)); + + l_rc = provGetAllAdapters(l_adapters, &l_num_adapters); + if(l_rc == false) + { + TRACEE("Error occurred in call to provGetAllAdapters. This is likely due to missing or invalid adapter VPD.\n"); + } + else + { + TRACEV("Got back %d adapters\n", l_num_adapters); + for(l_curr_adapter = 0; l_curr_adapter < l_num_adapters; l_curr_adapter++) + { + l_rc = provInitAdapter(&l_adapters[l_curr_adapter]); + if(l_rc == false) + { + TRACEE("Error init'ing adapter '%s'\n",l_adapters[l_curr_adapter].afu_name); + } + } + } + +} + + +#define TIME_1S_US 1000000 //1million uS in a second +bool prov_loopback() +{ + + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + prov_adapter_info_t l_adapters[MAX_NUM_ADAPTERS]; + int l_num_adapters = MAX_NUM_ADAPTERS; + bool l_rc = true; + bool l_adapter_rc = false; + int l_curr_adapter = 0; + uint64_t l_loopback_time_us = 0; + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + + bzero(l_adapters, sizeof(l_adapters)); + + l_rc = provGetAllAdapters(l_adapters, &l_num_adapters); + if(l_rc == false) + { + printf("ERROR: Error occurred in call to provGetAllAdapters. 
This is likely due to missing or invalid adapter VPD.\n"); + } + else + { + printf("Running Loopback Diagnostic Tests...\n"); + l_loopback_time_us = (uint64_t)g_args.loopback_time * TIME_1S_US; + TRACEV("Running time for test in microseconds: %"PRIu64"\n",l_loopback_time_us); + TRACEV("Got back %d adapters\n", l_num_adapters); + for(l_curr_adapter = 0; l_curr_adapter < l_num_adapters; l_curr_adapter++) + { + l_adapter_rc = provLoopbackTest(&l_adapters[l_curr_adapter], l_loopback_time_us); + const char *l_result = (l_adapter_rc == true) ? "SUCCESS" : "FAIL"; + printf("Adapter '%s' result: %s\n",l_adapters[l_curr_adapter].afu_name, l_result); + if(l_adapter_rc == false) + { + //if any test fails, all tests fail + l_rc = false; + } + } + } + return l_rc; +} + +void prov_get_wwpns() +{ + /*------------------------------------------------------------------------*/ + /* Local Variables */ + /*------------------------------------------------------------------------*/ + prov_wwpn_info_t wwpn_array[32]; + uint16_t num_wwpns = 32; + int i = 0; + + /*------------------------------------------------------------------------*/ + /* Code */ + /*------------------------------------------------------------------------*/ + provGetAllWWPNs(wwpn_array, &num_wwpns); + TRACED("Number of WWPNs read back: %d\n", num_wwpns); + for(i=0; i +#include +#include +#include + +/*----------------------------------------------------------------------------*/ +/* Constants */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------*/ +/* Defines */ +/*----------------------------------------------------------------------------*/ + +#define TRACE_ENABLED +//Loopback test constants are defined in seconds for UI but internally we use microseconds +#define LOOPBACK_TEST_DEFAULT_S 1 //1 second +#define LOOPBACK_TEST_MIN_S 1 //1 second +#define LOOPBACK_TEST_MAX_S 86400 //24 hours +/*@}*/ // Ending 
tag for external constants in doxygen + +/*----------------------------------------------------------------------------*/ +/* Enumerations */ +/*----------------------------------------------------------------------------*/ +/** + * \defgroup ExternalEnum External Enumerations + */ +/*@{*/ // Special tag to say everything between it and the ending + // brace is a part of the external enum module in doxygen. + + +struct arguments +{ + int16_t list_afus; + int16_t wwpn_read; + int16_t wwpn_program; + char target_adapter[128]; + bool read_wwpn_from_afu; + int16_t verbose; + int16_t loopback; + int32_t loopback_time; +}; + +/*@}*/ // Ending tag for external structure module in doxygen + + +/*----------------------------------------------------------------------------*/ +/* Globals */ +/*----------------------------------------------------------------------------*/ +extern struct arguments g_args; + +/*----------------------------------------------------------------------------*/ +/* Function Prototypes */ +/*----------------------------------------------------------------------------*/ + +error_t parse_opt (int key, + char *arg, + struct argp_state *state); + + +uint32_t convert_to_binary (uint8_t **output_buffer, + uint32_t *output_buffer_length, + char *input_buffer); + +void static inline toupper_string(char *text) +{ + size_t j; + for(j=0;j < strlen(text);j++) + { + text[j] = toupper(text[j]); + } +} + +void prov_get_wwpns(); + +void prov_pgm_wwpns(); + +void prov_get_all_adapters(); + +bool prov_loopback(); + +#endif //_PROVTOOL_H + diff --git a/src/test/afu.c b/src/test/afu.c new file mode 100644 index 00000000..751adfac --- /dev/null +++ b/src/test/afu.c @@ -0,0 +1,285 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/test/afu.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#ifndef _MACOSX +#include +#endif /* !_MACOSX */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#undef AFU_PS_REGS_SIZE +#define AFU_PS_REGS_SIZE (64*1024*512)+0x20000 + +#ifndef _MACOSX +// MMIO write, 32 bit +static inline void out_be32 (__u64 *addr, __u32 val) +{ +#ifdef TARGET_ARCH_PPC64EL + __u32 zero = 0; + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + __asm__ __volatile__ ("sync; stw%U0%X0 %1, %0" + : "=m" (*addr) : "r" (val) : "memory"); +#endif +} + +// MMIO write, 64 bit +static inline void out_be64 (__u64 *addr, __u64 val) +{ +#ifdef TARGET_ARCH_PPC64EL + __u64 zero = 0; + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +#else + __asm__ __volatile__ ("sync; std%U0%X0 %1, %0" + : "=m" (*addr) : "r" (val) : "memory"); +#endif +} + +// MMIO read, 32 bit +static inline __u32 in_be32 (__u64 *addr) +{ + __u32 ret; +#ifdef TARGET_ARCH_PPC64EL + __u32 zero = 0; + asm volatile ( "lwbrx %0, %1, %2" : "=r"(ret) : "r"(zero), "r"(addr) 
); +#else + __asm__ __volatile__ ("sync; lwz%U1%X1 %0, %1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr) : "memory"); +#endif + return ret; + +} + +// MMIO read, 64 bit +static inline __u64 in_be64 (__u64 *addr) +{ + __u64 ret; +#ifdef TARGET_ARCH_PPC64EL + __u64 zero = 0; + asm volatile ( "ldbrx %0, %1, %2" : "=r"(ret) : "r"(zero), "r"(addr) ); +#else + __asm__ __volatile__ ("sync; ld%U1%X1 %0, %1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr) : "memory"); +#endif + return ret; +} +#else +// MMIO write, 32 bit +static inline void out_be32 (__u64 *addr, __u32 val) +{ + return 0; +} + +// MMIO write, 64 bit +static inline void out_be64 (__u64 *addr, __u64 val) +{ + return 0; +} + +// MMIO read, 32 bit +static inline __u32 in_be32 (__u64 *addr) +{ + + return 0; +} + +// MMIO read, 64 bit +static inline __u64 in_be64 (__u64 *addr) +{ + return 0; +} +#endif /* !_MACOSX */ + +// mmap AFU MMIO registers +static void mmap_problem_state_registers (struct afu *afu) +{ + void *ret; + debug ("Mapping AFU problem state registers...\n"); + ret = mmap (NULL, AFU_PS_REGS_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, + afu->fd, 0); + if (ret == MAP_FAILED) { + perror ("mmap_problem_state_registers"); + afu->ps_addr = NULL; + return; + } + afu->ps_addr = ret; +} + +// munmap AFU MMIO registers +static void munmap_problem_state_registers (struct afu *afu) +{ + if (afu->ps_addr) { + debug ("Unmapping AFU problem state registers...\n"); + if (munmap (afu->ps_addr, AFU_PS_REGS_SIZE)) + perror ("munmap_problem_state_registers"); + debug ("Done unmapping AFU problem state registers\n"); + afu->ps_addr = NULL; + } +} + +// Create and open AFU device then map MMIO registers +struct afu *afu_map () +{ + char *dev_name = "/dev/cxl/afu0.0m"; + struct afu *afu = (struct afu *) malloc (sizeof (struct afu)); + if (!afu) { + perror ("malloc"); + return NULL; + } + debug ("Allocated memory at 0x%016lx for AFU\n", (__u64) afu); + + memset(afu, 0, sizeof(*afu)); + afu->work.num_interrupts = 4; + 
afu->work.flags = CXL_START_WORK_NUM_IRQS; + + debug ("Creating and opening AFU file descriptor %s...\n", dev_name); + afu->fd = create_and_open_dev (dev_name, "capi", 1); + if (afu->fd < 0) { + perror ("create_and_open_dev"); + afu_unmap (afu); + return NULL; + } + + // attach the process before mmap + afu_start(afu); + if (afu->started == 0) { + perror ("afu_start failed"); + afu_unmap (afu); + return NULL; + } + + // map this context's problem space MMIO (SIS-Lite regs) + mmap_problem_state_registers (afu); + if (!afu->ps_addr) { + perror ("mmap_problem_state_registers"); + afu_unmap (afu); + return NULL; + } + + printf ("Problem state registers mapped to %p\n", afu->ps_addr); + + return afu; +} + +// Unmap AFU device +void afu_unmap (struct afu *afu) +{ + if (afu) { + munmap_problem_state_registers (afu); + if (afu->fd >= 0) { + debug ("Closing AFU file descriptor...\n"); + close (afu->fd); + } + } +} + +// Set WED address and have PSL send reset and start to AFU +void afu_start (struct afu *afu) +{ + /* Set WED in PSL and send start command to AFU */ + debug ("Sending WED address 0x%016lx to PSL...\n", (__u64) afu->work.work_element_descriptor); + + if (ioctl (afu->fd, CXL_IOCTL_START_WORK, &afu->work) == 0) { + debug ("Start command succeeded on AFU\n"); + afu->started = 1; + } + else { + debug ("Start command to AFU failed\n"); + } + + if (ioctl (afu->fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &afu->process_element) == 0) { + debug ("Get process element succeeded on AFU\n"); + afu->started = 1; + } + else { + debug (" Get process element to AFU failed\n"); + } + + +} + +// MMIO write based on AFU offset, 32-bit +void afu_mmio_write_sw (struct afu *afu, unsigned offset, __u32 value) +{ + __u64 addr = 4 * (__u64) offset; + out_be32 (afu->ps_addr + addr, value); + debug ("Wrote 0x%08x to AFU register offset %x\n", value, offset); +} + +// MMIO write based on AFU offset, 64-bit +void afu_mmio_write_dw (struct afu *afu, unsigned offset, __u64 value) +{ + __u64 addr = 4 * 
(__u64) (offset & ~0x1); // Force 8byte align + out_be64 (afu->ps_addr + addr, value); + // debug ("Wrote 0x%016lx to AFU register offset %x\n", value, offset); +} + +// MMIO read based on AFU offset, 32-bit +void afu_mmio_read_sw (struct afu *afu, unsigned offset, __u32 *value) +{ + __u64 addr = 4 * (__u64) offset; + *value = in_be32 (afu->ps_addr + addr); + debug ("Read 0x%08x from AFU register offset %x\n", *value, offset); +} + +// MMIO read based on AFU offset, 64-bit +void afu_mmio_read_dw (struct afu *afu, unsigned offset, __u64 *value) +{ + __u64 addr = 4 * (__u64) (offset & ~0x1); // Force 8byte align + *value = in_be64 (afu->ps_addr + addr); + debug ("Read 0x%016lx from AFU register offset %x\n", *value, offset); +} + +// Wait for AFU to complete job +void afu_wait (struct afu *afu) +{ + if (afu->started) { + debug ("Waiting for AFU to finish...\n"); + struct pollfd poll_list = { afu->fd, POLLIN, 0}; + int ret; + + ret = poll (&poll_list, 1, 5000); + if (ret == 0) + printf ("Poll timed out waiting on interrupt.\n"); + + /* For now, assume a non-zero response is a real interrupt + * later, maybe check regs / fd, loop for events, etc. + */ + debug ("AFU finished\n"); + } +} diff --git a/src/test/asyncstress.c b/src/test/asyncstress.c new file mode 100644 index 00000000..670de9a6 --- /dev/null +++ b/src/test/asyncstress.c @@ -0,0 +1,358 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/asyncstress.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __inline__ uint64_t getticks(void) +{ + unsigned int x, x0, x1; + do { + __asm__ __volatile__ ("mftbu %0" : "=r"(x0)); + __asm__ __volatile__ ("mftb %0" : "=r"(x)); + __asm__ __volatile__ ("mftbu %0" : "=r"(x1)); + } while (x0 != x1); + + return (((uint64_t)x0) << 32) | x; +} +static __inline__ double elapsed(uint64_t t1, uint64_t t0) +{ + return (double)t1 - (double)t0; +} +double time_diff(struct timeval x , struct timeval y) +{ + double x_ms , y_ms , diff; + + x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec; + y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec; + + diff = (double)y_ms - (double)x_ms; + + return diff; +} + +double time_per_tick(int n, int del) { + int i; + + double *td = malloc(n * sizeof(double)); + double *tv = malloc(n * sizeof(double)); + + struct timeval tvs; + struct timeval tve; + + uint64_t ts; + uint64_t te; + + for (i=0; i\n"); + printf("-d 'name of device, usually /dev/cxl/af0.0s or /dev/cxl/afu1.0s'\n"); + printf("-b '# of blocks to run the test over, i.e. 
lba=0..b-1\n"); + printf("-n '# of ops to run (combination of reads and writes total'\n"); + printf("-a '# of outstanding ops to maintain'\n"); + printf("-r '# for read ratio, read ratio is r/(r+w)'\n"); + printf("-w '# for write ratio, write ratio is w/(r+w)'\n"); + printf("-t '# threads (not implemented yet)'\n"); + printf("-v '0/1 to use virtual or physical lba (not implemented'\n"); + printf("-o '0/1 to retire ops inorder/any order'\n"); + printf("-c '0/1 record history (not implemented'\n"); + printf("-p 'arbitrary name echoed in the stats line to help identify run stats\n"); + exit(1); + default: + printf("Unknown arg '%s'\n", argv[a]); + exit(1); + } + } + } + printf("asyncstress -d %s -n %d -a %d -t %d -b %d -v %d -h %d\n", + dev_name, num_ops, num_asyncs, num_threads, num_blocks, virt_blks, history); + + double ns_per_tick = time_per_tick(1000, 100); + // printf("%g ns/tick\n", ns_per_tick); + + + rc = cblk_init(NULL,0); + + if (rc) { + + fprintf(stderr,"cblk_init failed with rc = %d and errno = %d\n", + rc,errno); + exit(1); + + } + int id = cblk_open(dev_name,num_asyncs,O_RDWR,ext,virt_blks ? 
CBLK_OPN_VIRT_LUN : 0); + if (id==NULL_CHUNK_ID) { + fprintf(stderr,"Device open failed errno = %d\n", errno); + + cblk_term(NULL,0); + exit(errno); + } else { + printf("Device open success\n"); + } + + int i; + uint64_t status; + size_t lsize; + rc = cblk_get_lun_size(id, &lsize, 0); + printf("lsize = %ld, rc, = %d\n", lsize, rc); + if (rc) {fprintf(stderr, "cblk_get_lun_size failed, rc=%d\n", errno); exit(errno);} + + rc = cblk_set_size(id, num_blocks, 0); + printf("bsize = %d, rc, = %d\n", num_blocks, rc); + if (rc) {fprintf(stderr, "cblk_set_size failed, rc=%d\n", errno); exit(errno);} + + int *wait_order = malloc(num_asyncs * sizeof(int)); + int wait_pos = 0; + int wait_next = 0; + + int *rtag_buf = malloc(num_asyncs * sizeof(int)); + void **rbuf = malloc(num_asyncs * sizeof(void *)); + + int *buf_stack = malloc(num_asyncs * sizeof(int)); + + int *op_type = malloc(num_asyncs * sizeof(int)); + uint64_t *op_start = malloc(num_asyncs * sizeof(uint64_t)); + uint64_t *op_stop = malloc(num_asyncs * sizeof(uint64_t)); + + + uint64_t rop_min = 0x7fffffffffffffff; + uint64_t rop_max = 0; + uint64_t rop_sum = 0; + uint64_t rop_cnt = 0; + uint64_t wop_min = 0x7fffffffffffffff; + uint64_t wop_max = 0; + uint64_t wop_sum = 0; + uint64_t wop_cnt = 0; + + for (i=0; iwop_max) wop_max = op_diff; + if (op_diffrop_max) rop_max = op_diff; + if (op_diff +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __inline__ uint64_t getticks(void) +{ + unsigned int x, x0, x1; + do { + __asm__ __volatile__ ("mftbu %0" : "=r"(x0)); + __asm__ __volatile__ ("mftb %0" : "=r"(x)); + __asm__ __volatile__ ("mftbu %0" : "=r"(x1)); + } while (x0 != x1); + + return (((uint64_t)x0) << 32) | x; +} +static __inline__ double elapsed(uint64_t t1, uint64_t t0) +{ + return (double)t1 - (double)t0; +} +double time_diff(struct timeval x , struct timeval y) +{ + double x_ms , y_ms , diff; + + x_ms = 
(double)x.tv_sec*1000000 + (double)x.tv_usec; + y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec; + + diff = (double)y_ms - (double)x_ms; + + return diff; +} + +double time_per_tick(int n, int del) { + int i; + + double *td = malloc(n * sizeof(double)); + double *tv = malloc(n * sizeof(double)); + + struct timeval tvs; + struct timeval tve; + + uint64_t ts; + uint64_t te; + + for (i=0; i\n"); + printf("-d 'name of device, usually /dev/cxl/af0.0s or /dev/cxl/afu1.0s'\n"); + printf("-b '# of blocks to run the test over, i.e. lba=0..b-1\n"); + printf("-n '# of ops to run (combination of reads and writes total'\n"); + printf("-a '# of outstanding ops to maintain'\n"); + printf("-r '# for read ratio, read ratio is r/(r+w)'\n"); + printf("-w '# for write ratio, write ratio is w/(r+w)'\n"); + printf("-t '# threads (not implemented yet)'\n"); + printf("-v '0/1 to use virtual or physical lba (not implemented'\n"); + printf("-o '0/1 to retire ops inorder/any order'\n"); + printf("-c 'collect statistics in file 'fname''\n"); + printf("-p 'arbitrary name echoed in the stats line to help identify run stats\n"); + exit(1); + default: + printf("Unknown arg '%s'\n", argv[a]); + exit(1); + } + } + } + printf("asyncstress -d %s -n %d -a %d -t %d -b %d -v %d -c %s\n", + dev_name, num_ops, num_asyncs, num_threads, num_blocks, virt_blks, collect); + + double ns_per_tick = time_per_tick(1000, 100); + printf("%g ns/tick\n", ns_per_tick); + + rc = cblk_init(NULL,0); + + if (rc) { + + fprintf(stderr,"cblk_init failed with rc = %d and errno = %d\n", + rc,errno); + exit(1); + + } + int id = cblk_open(dev_name,num_asyncs,O_RDWR,ext,virt_blks ? 
CBLK_OPN_VIRT_LUN : 0); + if (id==NULL_CHUNK_ID) { + fprintf(stderr,"Device open failed errno = %d\n", errno); + + cblk_term(NULL,0); + exit(errno); + } else { + printf("Device open success\n"); + } + + int *st_type = NULL; + uint64_t *st_start = NULL; + uint64_t *st_issue = NULL; + uint64_t *st_result = NULL; + uint64_t *st_retire = NULL; + if (collect) { + st_type = malloc(num_ops * sizeof(int)); + st_start = malloc(num_ops * sizeof(uint64_t)); + st_issue = malloc(num_ops * sizeof(uint64_t)); + st_result = malloc(num_ops * sizeof(uint64_t)); + st_retire = malloc(num_ops * sizeof(uint64_t)); + bzero(st_type, num_ops * sizeof(int)); + bzero(st_start, num_ops * sizeof(uint64_t)); + bzero(st_issue, num_ops * sizeof(uint64_t)); + bzero(st_result, num_ops * sizeof(uint64_t)); + bzero(st_retire, num_ops * sizeof(uint64_t)); + } + + int i; + uint64_t status; + size_t lsize; + rc = cblk_get_lun_size(id, &lsize, 0); + printf("lsize = %ld, rc, = %d\n", lsize, rc); + + rc = cblk_set_size(id, num_blocks, 0); + printf("bsize = %d, rc, = %d\n", num_blocks, rc); + + int *wait_order = malloc(num_asyncs * sizeof(int)); + int wait_pos = 0; + int wait_next = 0; + + int *rtag_buf = malloc(num_asyncs * sizeof(int)); + void **rbuf = malloc(num_asyncs * sizeof(void *)); + + int *buf_stack = malloc(num_asyncs * sizeof(int)); + + int *op_type = malloc(num_asyncs * sizeof(int)); + uint64_t *op_start = malloc(num_asyncs * sizeof(uint64_t)); + uint64_t *op_stop = malloc(num_asyncs * sizeof(uint64_t)); + uint64_t *op_num = malloc(num_asyncs * sizeof(uint64_t)); + + uint64_t rop_min = 0x7fffffffffffffff; + uint64_t rop_max = 0; + uint64_t rop_sum = 0; + uint64_t rop_cnt = 0; + uint64_t wop_min = 0x7fffffffffffffff; + uint64_t wop_max = 0; + uint64_t wop_sum = 0; + uint64_t wop_cnt = 0; + + for (i=0; iwop_max) wop_max = op_diff; + if (op_diffrop_max) rop_max = op_diff; + if (op_diff +#include +#include +#include +#include +#include +#ifndef _MACOSX +#include +#endif /* !_MACOS */ +#include 
+#ifdef _OS_INTERNAL +#include +#else +#include +#endif +#include + + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#define MAX_NUM_THREADS 4096 + +#define MAX_OPENS 520 + + +typedef struct blk_thread_s { + pthread_t ptid; + chunk_id_t chunk_id; +} blk_thread_t; + + +blk_thread_t blk_thread[MAX_NUM_THREADS]; + +blk_thread_t alt_blk_thread[MAX_NUM_THREADS]; + +static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER; +static uint32_t thread_count = 0; + +/* TODO: ??This data buf size may need to change */ + +#define DATA_BUF_SIZE 4096 + +/* + * Global variables declared in this module... + */ +static int aresult_wait_flag; /* indicates the a flag was passed on command + * line which indicates to tell cblk_aresult + * to wait. + */ +static int bflag; /* indicates the b flag was passed on command + * line which indicates a block number was + * specified. + */ +static int no_bg_thread_flag; /* indicates the B flag was passed on command + * line which indicates to not have the block + * library used background threads. + */ +static int shared_contxt_flag; /* indicates the C flag was passed on command + * line which indicates to share contexts. + */ +static int cflag; /* indicates the c flag was passed on command + * line which indicates a cmd_type was + * specified. + */ +static int dif_blk_flag; /* indicates the d flag was passed on command + * line which indicates each thread should + * use a different range of blocks. + */ +static int each_thread_vlun_flag;/* indicates the e flag was passed on command + * line which indicates each thread should + * use its own virtual lun. + */ +static int fflag; /* indicates the f flag was passed on command + * line which indicates a filename is + * specified + */ + +static int fork_rerun_flag; /* indicates the z flag was passed on command + * line which indicates to run the operation + * then fork a child to rerun the same operation. 
+ */ +static int hflag; /* help flag */ + +static int inc_flag; /* indicates the i flag was passed on command + * line which indicates adapter/device name + * is specified + */ +static int lflag; /* indicates the l flag was passed on command + * line which indicates adapter/device name + * is specified + */ +static int mpio_flag; /* indicates the m flag was passed on command + * line which indicates enable MPIO fail over + */ +static int nflag; /* indicates the n flag was passed on command + * line which indicates the number of loops + * or times to issue requests + */ +static int open_flag; /* indicates the o flag was passed on command + * line which indicates the number of opens + * to do for this chunk before attempting an I/O. + */ +static int pool_flag; /* indicates the p flag was passed on command + * line which indicates what command pool + * size should be for this chunk. + */ +static int reserve_flag; /* indicates the r flag was passed on command + * line which indicates to allow the disk + * driver to use normal reserve policy.. + */ +static int break_reserve_flag; /* indicates the r flag was passed on command + * line which indicates to allow the disk + * driver to break resources and then + * use normal reserve policy.. + */ +static int size_flag; /* indicates the s flag was passed on command + * line which indicates what size to usses for + * a chunk. + */ +static int poll_status_flag; /* indicates the S flag was passed on command + * line which indicates poll on user status + * area. + */ + +static int thread_flag; /* indicates the t flag was passed on command + * line which indicates that we are to run + * multiple threads. + */ + +static int timeout_flag; /* indicates the T flag was passed on command + * line which indicates that we have a user + * specified time out value. 
+ */ + +static int verbose_flag; /* verbose mode flag */ + + +static int aresult_next_flag; /* indicates the x flag was passed on command + * line which indicates to tell cblk_aresult + * to return on the next command that completes. + */ + +static int wait_async_issue_flag;/* indicates the w flag was passed on command + * line which indicates that we are to wait + * to issue the async I/O + */ + +static int virtual_lun_flag; /* virtual lun flag */ + +static char *device_name = NULL; /* point to the arg for -l flag */ +static char *alt_device_name = NULL; /* point to the arg for -C flag */ +static char *filename = NULL; /* point to the arg for -f flag */ +static int num_loops = 1; +static uint64_t block_number = 0; +static size_t size = 0; +static uint64_t timeout = 0; +static int num_opens = 1; +static int num_threads = 0; +static int pool_size = 0; + +FILE *file = NULL; + +typedef +enum { + TRANSP_READ_CMD = 0x1, /* Read Operation */ + TRANSP_WRITE_CMD = 0x2, /* Write Operation */ + TRANSP_GET_SIZE = 0x3, /* Get Size */ + TRANSP_SET_SIZE = 0x4, /* Set Size */ + TRANSP_GET_LUN_SIZE = 0x5, /* Get Lun size */ + TRANSP_AREAD_CMD = 0x6, /* Async Read Operation */ + TRANSP_AWRITE_CMD = 0x7, /* Async Write Operation */ + TRANSP_WRITE_READ = 0x8, /* Performan write/read compare */ + TRANSP_AWRITE_AREAD = 0x9, /* Performan async write/read compare */ + TRANSP_MWRITE_MREAD = 0xa, /* Performan mix of write/read */ + /* with async write/read compare */ + TRANSP_GET_STATS = 0xb, /* Get Statistics */ + TRANSP_WRT_FRK_RD = 0xc, /* Write fork read */ + TRANSP_AREAD_NEXT = 0xd, /* Issue a collection of async reads */ + /* then wait for next commands to */ + /* complete. 
*/ + TRANSP_LWRITE_LREAD = 0xe, /* Perform listio write/read compare */ + TRANSP_LAST_CMD = 0xf, /* Not valid command */ +} transp_scsi_cmd_t; + +transp_scsi_cmd_t cmd_type = TRANSP_READ_CMD; // Default to READ + +/* + * NAME: Usage + * + * FUNCTION: print usage message and returns + * + * + * INPUTS: + * argc -- INPUT; the argc parm passed into main(). + * argv -- INPUT; the argv parm passed into main(). + * + * RETURNS: + * 0: Success + * -1: Error + * + */ +static void +usage(void) +{ + + fprintf(stderr,"\n"); + + fprintf(stderr,"Usage: blk_test -l device [-n num_loops] [-c cmd_type ] [-f filename ] [-b block_number] [-o num_opens ] [-p pool_size ] [-s size ] [-t num_threads ] [-T time_out ] [-h] [-a ] [-B ] [-C shared_device] [-i] [-e] [-m] [-S] [-u] [-v][-z]\n\n"); + fprintf(stderr," where:\n"); + fprintf(stderr," -a indicates to cblk_aresult should be called to wait for completion\n"); + fprintf(stderr," -b block number (default is 0 if b flag is not specified) \n"); + fprintf(stderr," -B Do not allow back ground threads in block library\n"); + fprintf(stderr," -c cmd_type which is a number\n"); + fprintf(stderr," defined as following:\n"); + fprintf(stderr," 1 - read\n"); + fprintf(stderr," 2 - write\n"); + fprintf(stderr," 3 - get_size\n"); + fprintf(stderr," 4 - set_size \n"); + fprintf(stderr," 5 - get_lun_size\n"); + fprintf(stderr," 6 - async read\n"); + fprintf(stderr," 7 - async write\n"); + fprintf(stderr," 8 - write read compare\n"); + fprintf(stderr," 9 - async write async read compare\n"); + fprintf(stderr," 10 - mix write read compare with async write async read compare\n"); + fprintf(stderr," 11 - get_statistics\n"); + fprintf(stderr," 12 - write fork read (single threaded only)\n"); + fprintf(stderr," 13 - burst async reads then wait for all to complete(single threaded only)\n"); + fprintf(stderr," 14 - listio write listio read compare\n"); + fprintf(stderr," -C enable shared context\n"); + fprintf(stderr," -d indicates to use different block 
ranges for each thread\n"); + fprintf(stderr," -e each thread uses its own virtual lun\n"); + fprintf(stderr," -f filename (for input data to be used on writes)\n"); + fprintf(stderr," -h help (this usage)\n"); + fprintf(stderr," -i increment flag: indicates block numbers should be incrementd on each loop\n"); + fprintf(stderr," -l logical device name\n"); + fprintf(stderr," -m Enable MPIO fail over\n"); + fprintf(stderr," -n Number of loops to run\n"); + fprintf(stderr," -o Number of opens before doing I/O\n"); + fprintf(stderr," -p Pool Size (number of commands in command pool)\n"); + fprintf(stderr," -s size (number of blocks for chunk in hex)\n"); + fprintf(stderr," -S For async I/O poll on user specified status field\n"); + fprintf(stderr," -t Number of threads\n"); + fprintf(stderr," -T Time out in microseconds (hex) for cblk_listio\n"); + fprintf(stderr," -u Virtual Lun for entire process. NOTE -e is a virtual lun for each thread\n"); + fprintf(stderr," -w wait to issue async I/O\n"); + fprintf(stderr," -x indicates to cblk_aresult should be called to return on the next command\n"); + fprintf(stderr," -z One operation to completion, then fork and have child do same operation again.\n"); + fprintf(stderr," -v verbose mode\n"); + + + return; +} + + +/* + * NAME: parse_args + * + * FUNCTION: The parse_args() routine parses the command line arguments. + * The arguments are read, validated, and stored in global + * variables. + * + * + * INPUTS: + * argc -- INPUT; the argc parm passed into main(). + * argv -- INPUT; the argv parm passed into main(). + * + * RETURNS: + * 0: Success + * -1: Error + * + */ + +static int +parse_args(int argc, char **argv) +{ + extern int optind; + extern char *optarg; + int rc,c; + int len,number; + + + rc = len = c = number =0; + + /* Init flags... 
*/ + aresult_wait_flag = FALSE; + aresult_next_flag = FALSE; + bflag = FALSE; + cflag = FALSE; + fflag = FALSE; + hflag = FALSE; + lflag = FALSE; + nflag = FALSE; + fork_rerun_flag = FALSE; + size_flag = FALSE; + thread_flag = FALSE; + timeout_flag = FALSE; + verbose_flag = FALSE; + each_thread_vlun_flag = FALSE; + virtual_lun_flag = FALSE; + poll_status_flag = FALSE; + shared_contxt_flag = FALSE; + mpio_flag = FALSE; + reserve_flag = FALSE; + break_reserve_flag = FALSE; + num_loops = 1; + + + + /* + * Get parameters + */ + while ((c = getopt(argc,argv,"b:c:C:f:l:n:p:o:s:t:T:aBdehimrRSuvwxz")) != EOF) + { + switch (c) + { + case 'a' : + aresult_wait_flag = TRUE; + break; + case 'b' : + if (optarg) { + + block_number = strtoul(optarg,NULL,16); + bflag = TRUE; + } else { + + + fprintf(stderr,"-b flag requires a block number be supplied\n"); + + rc = EINVAL; + } + break; + case 'B' : + no_bg_thread_flag = TRUE; + break; + case 'c' : + if (optarg) { + + cmd_type = atoi(optarg); + + if ((cmd_type < TRANSP_READ_CMD) || + (cmd_type > TRANSP_LAST_CMD)) { + fprintf(stderr,"Invalid cmd_tyupe for -c flag\n"); + + usage(); + rc = -1; + } else { + cflag = TRUE; + } + } else { + + + fprintf(stderr,"-c flag requires a value to be supplied\n"); + + } + break; + case 'C' : + alt_device_name = optarg; + if (alt_device_name) { + shared_contxt_flag = TRUE; + } else { + + fprintf(stderr,"-C flag requires a logical name \n"); + rc = EINVAL; + + } + break; + case 'd' : + dif_blk_flag = TRUE; + break; + case 'e' : + each_thread_vlun_flag = TRUE; + break; + case 'f' : + filename = optarg; + if (filename) { + fflag = TRUE; + } else { + + fprintf(stderr,"-f flag requires a filename \n"); + rc = EINVAL; + + } + break; + + case 'h' : + hflag = TRUE; + break; + case 'i' : + inc_flag = TRUE; + break; + case 'l' : + device_name = optarg; + if (device_name) { + lflag = TRUE; + } else { + + fprintf(stderr,"-l flag requires a logical name \n"); + rc = EINVAL; + + } + break; + case 'm' : +#ifndef 
_AIX + fprintf(stderr,"-m flag is not supported on this operating system\n"); + rc = EINVAL; +#else + mpio_flag = TRUE; +#endif + + break; + case 'n' : + if (optarg) { + + num_loops = atoi(optarg); + nflag = TRUE; + } else { + + + fprintf(stderr,"-n flag requires a number of loops value to be supplied\n"); + rc = EINVAL; + + } + break; + case 'o' : + if (optarg) { + + num_opens = atoi(optarg); + + if (num_opens > MAX_OPENS) { + fprintf(stderr,"Number of opens %d exceedds the largest value supported %d\n",MAX_OPENS, num_opens); + + rc = EINVAL; + } + open_flag = TRUE; + } else { + + + fprintf(stderr,"-o flag requires a number of op value to be supplied\n"); + + } + break; + case 'p' : + if (optarg) { + + pool_size = atoi(optarg); + pool_flag = TRUE; + } else { + + + fprintf(stderr,"-p flag requires a pool size value to be supplied\n"); + + } + break; + case 'r' : +#ifndef _AIX + fprintf(stderr,"-r flag is not supported on this operating system\n"); + rc = EINVAL; +#else + reserve_flag = TRUE; +#endif + + break; + case 'R' : +#ifndef _AIX + fprintf(stderr,"-R flag is not supported on this operating system\n"); + rc = EINVAL; +#else + break_reserve_flag = TRUE; +#endif + break; + case 's' : + if (optarg) { + + size = strtoul(optarg,NULL,16); + size_flag = TRUE; + } else { + + + fprintf(stderr,"-s flag requires a size be supplied\n"); + + } + break; + case 'S' : + poll_status_flag = TRUE; + break; + case 't' : + if (optarg) { + + num_threads = atoi(optarg); + + if (num_threads > MAX_NUM_THREADS) { + + num_threads = MAX_NUM_THREADS; + + fprintf(stderr,"-number of threads exceeds programs upper limit, reducing number to %d\n",num_threads); + } + thread_flag = TRUE; + } else { + + + fprintf(stderr,"-t flag requires a number of threads value to be supplied\n"); + } + break; + case 'T' : + if (optarg) { + + timeout = strtoul(optarg,NULL,16); + timeout_flag = TRUE; + } else { + + + fprintf(stderr,"-T flag requires a value be supplied\n"); + + } + break; + case 'u' : + 
virtual_lun_flag = TRUE; + break; + case 'v' : + verbose_flag = TRUE; + break; + case 'w' : + wait_async_issue_flag = TRUE; + break; + case 'x' : + aresult_next_flag = TRUE; + break; + case 'z' : + fork_rerun_flag = TRUE; + break; + default: + usage(); + break; + }/*switch*/ + }/*while*/ + + + if (!lflag) { + fprintf(stderr,"The -l flag is required to specify a device name\n"); + usage(); + rc = EINVAL; + } + + if (thread_flag && (cmd_type == TRANSP_WRT_FRK_RD)) { + + fprintf(stderr,"The -c cmd_type of %d does not support the -t flag \n",TRANSP_WRT_FRK_RD); + usage(); + rc = EINVAL; + } + + if ((pool_size == 0) && (cmd_type == TRANSP_AREAD_NEXT)) { + + fprintf(stderr,"The -c cmd_type of %d pool size be specified (-p flag) \n",TRANSP_AREAD_NEXT); + usage(); + rc = EINVAL; + } + if (thread_flag && (cmd_type == TRANSP_AREAD_NEXT)) { + + fprintf(stderr,"The -c cmd_type of %d does not support the -t flag \n",TRANSP_AREAD_NEXT); + usage(); + rc = EINVAL; + } + + if (thread_flag && (aresult_next_flag)) { + + fprintf(stderr,"The -x does not support the -t flag \n"); + usage(); + rc = EINVAL; + } + + if (poll_status_flag && + (cmd_type != TRANSP_AREAD_CMD) && + (cmd_type != TRANSP_AWRITE_CMD) && + (cmd_type != TRANSP_AWRITE_AREAD) && + (cmd_type != TRANSP_MWRITE_MREAD)) { + + + fprintf(stderr,"The -S flag is not supported with this cmd_type. Instead it requires async I/O \n"); + usage(); + rc = EINVAL; + } + + if (timeout_flag && + (cmd_type != TRANSP_LWRITE_LREAD)) { + + + fprintf(stderr,"The -T flag is not supported with this cmd_type. Instead it requires listio I/O \n"); + usage(); + rc = EINVAL; + } + + if ((cmd_type == TRANSP_SET_SIZE) && + (!size_flag)) { + + fprintf(stderr,"The -c cmd_type of 4 requires the -s flag \n"); + usage(); + rc = EINVAL; + } + return (rc); + + +}/*parse_args*/ + +/* + ******************************************************************* + * NAME : run_loop + * DESCRIPTION: + * Run commands in a loop num_loops times. 
+ * + * PARAMETERS: + * + * GLOBALS ACCESSED: + * + * RETURNS: + * nothing. + ******************************************************************* + */ +void *run_loop(void *data) +{ + void *ret_code = NULL; + int rc = 0; + int tag; + int i; + int exit_loop = FALSE; + blk_thread_t *blk_data = data; + uint32_t local_thread_count = 0; + void *data_buf = NULL; + void *comp_data_buf = NULL; + size_t num_blocks; + uint64_t blk_number; + uint64_t status; + int bytes_read = 0; + transp_scsi_cmd_t local_cmd_type; + chunk_stats_t stats; + int async_flags = 0; + int aresult_flags = 0; + int open_flags = 0; + int active_cmds; + pid_t pid; + cblk_arw_status_t arw_status; + chunk_ext_arg_t ext = 0; + cblk_io_t cblk_issue_io; + cblk_io_t *cblk_issue_ary[1]; + cblk_io_t cblk_complete_io; + cblk_io_t *cblk_complete_ary[1]; + int complete_items; + + + bzero(&arw_status,sizeof(arw_status)); + + pthread_mutex_lock(&completion_lock); + local_thread_count = thread_count++; + + if (dif_blk_flag) { + /* + * Each thread is using a different + * block number range. + */ + blk_number = block_number + (num_loops * thread_count); + + } else { + blk_number = block_number; + } + + if (aresult_wait_flag) { + + aresult_flags = CBLK_ARESULT_BLOCKING; + } + if ((aresult_next_flag) && (!thread_flag)) { + + /* + * For multiple threads all reading/writing + * we can not allow one thread's completion + * to be seen by another thread, since it + * did not issue the request. + */ + + aresult_flags |= CBLK_ARESULT_NEXT_TAG; + } + + pthread_mutex_unlock(&completion_lock); + + if (wait_async_issue_flag) { + + async_flags = CBLK_ARW_WAIT_CMD_FLAGS; + + } + + if (poll_status_flag) { + + async_flags |= CBLK_ARW_USER_STATUS_FLAG; + } + + /* + * Align data buffer on page boundary. 
+ */ + if ( posix_memalign((void *)&data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + return (ret_code); + + } + + errno = 0; + + if (cmd_type == TRANSP_MWRITE_MREAD) { + + /* + * Use both synchronous and asynchronouse + * I/O. To facilitate this have even threads + * use synchronouse and odd ones to use asynchronous + * I/O. + */ + + if (local_thread_count % 2) { + local_cmd_type = TRANSP_AWRITE_AREAD; + } else { + local_cmd_type = TRANSP_WRITE_READ; + } + + } else { + + local_cmd_type = cmd_type; + } + + + if (each_thread_vlun_flag) { + + /* + * If we are using a virtual lun for each + * thread then open the virtual lun now and + * set its size. + */ + + if (verbose_flag && !thread_flag) { + fprintf(stderr,"Calling cblk_open ...\n"); + } + + open_flags = CBLK_OPN_VIRT_LUN; + + if (no_bg_thread_flag) { + + open_flags |= CBLK_OPN_NO_INTRP_THREADS; + } + + if (shared_contxt_flag) { + + open_flags |= CBLK_OPN_SHARE_CTXT; + } + +#ifdef _AIX + + if (mpio_flag) { + + open_flags |= CBLK_OPN_MPIO_FO; + } + + if (reserve_flag) { + + open_flags |= CBLK_OPN_RESERVE; + } + + if (break_reserve_flag) { + + open_flags |= CBLK_OPN_FORCED_RESERVE; + } + + + +#endif + + + blk_data->chunk_id = cblk_open(device_name,pool_size,O_RDWR,ext,open_flags); + + if (blk_data->chunk_id == NULL_CHUNK_ID) { + + fprintf(stderr,"Open of (virtual lun) %s failed with errno = %d\n",device_name,errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_set_size for size = 0x%"PRIX64" ...\n",size); + + } + + + rc = cblk_set_size(blk_data->chunk_id,size,0); + + if (rc) { + fprintf(stderr,"cblk_set_size failed with rc = %d, and errno = %d \n",rc,errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + + } + + } + + + for (i =0; ichunk_id,data_buf,blk_number,1,0); + + + if (!thread_flag) { + printf("Read completed with rc = %d\n",rc); + } + + + if 
(verbose_flag) { + printf("Returned data is ...\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + } + break; + case TRANSP_AREAD_CMD: + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_aread for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_aread(blk_data->chunk_id,data_buf,blk_number,1,&tag,&arw_status,async_flags); + + if (rc > 0) { + fprintf(stderr,"cblk_aread succeeded for lba = 0x%lx, rc = %d, errno = %d\n",blk_number,rc,errno); + + + printf("Async read data completed ...\n"); + printf("Returned data is ...\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + + } else if (rc < 0) { + fprintf(stderr,"cblk_aread failed for lba = 0x%lx, rc = %d, errno = %d\n",blk_number,rc,errno); + + } else { + + + if (verbose_flag && !thread_flag) { + + if (poll_status_flag) { + fprintf(stderr,"Polling user status for tag = 0x%x ...\n",tag); + } else { + fprintf(stderr,"Calling cblk_aresult for tag = 0x%x ...\n",tag); + } + } + + while (TRUE) { + + if (poll_status_flag) { + + switch (arw_status.status) { + case CBLK_ARW_STATUS_SUCCESS: + rc = arw_status.blocks_transferred; + break; + case CBLK_ARW_STATUS_PENDING: + rc = 0; + break; + default: + rc = -1; + errno = arw_status.fail_errno; + } + + } else { + rc = cblk_aresult(blk_data->chunk_id,&tag, &status,aresult_flags); + } + if (rc > 0) { + + if (verbose_flag) { + printf("Async read data completed ...\n"); + printf("Returned data is ...\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + } + } else if (rc == 0) { + fprintf(stderr,"cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + usleep(300); + continue; + } else { + fprintf(stderr,"cblk_aresult completed for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + } + + break; + + } /* while */ + + } + break; + case TRANSP_WRITE_CMD: + + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. 
+ */ + + bytes_read = fread(data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(data_buf), ((getpid())%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_write for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_write(blk_data->chunk_id,data_buf,blk_number,1,0); + + if (!thread_flag) { + + printf("write completed with rc = %d\n",rc); + } + break; + case TRANSP_AWRITE_CMD: + + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. + */ + + bytes_read = fread(data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(data_buf), ((getpid())%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_awrite for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_awrite(blk_data->chunk_id,data_buf,blk_number,1,&tag,&arw_status,async_flags); + + if (rc) { + fprintf(stderr,"cblk_awrite failed for lba = 0x%lx, rc = %d, errno = %d\n",blk_number,rc,errno); + + } else { + + + if (verbose_flag && !thread_flag) { + + if (poll_status_flag) { + fprintf(stderr,"Polling user status for tag = 0x%x ...\n",tag); + } else { + fprintf(stderr,"Calling cblk_aresult for tag = 0x%x ...\n",tag); + } + } + + while (TRUE) { + + if (poll_status_flag) { + 
+ switch (arw_status.status) { + case CBLK_ARW_STATUS_SUCCESS: + rc = arw_status.blocks_transferred; + break; + case CBLK_ARW_STATUS_PENDING: + rc = 0; + break; + default: + rc = -1; + errno = arw_status.fail_errno; + } + + } else { + rc = cblk_aresult(blk_data->chunk_id,&tag, &status,aresult_flags); + } + + if (rc > 0) { + + if (verbose_flag && !thread_flag) { + printf("Async write data completed ...\n"); + } + } else if (rc == 0) { + fprintf(stderr,"cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + usleep(300); + continue; + } else { + fprintf(stderr,"cblk_aresult completed for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + + } + + break; + + } /* while */ + + } + break; + case TRANSP_GET_SIZE: + if (verbose_flag) { + fprintf(stderr,"Calling cblk_get_size ...\n"); + } + rc = cblk_get_size(blk_data->chunk_id,&num_blocks,0); + if (!thread_flag) { + + printf("Get_size returned rc = %d, with num_blocks = 0x%lx", + rc,num_blocks); + } + break; + case TRANSP_GET_LUN_SIZE: + if (verbose_flag) { + fprintf(stderr,"Calling cblk_get_lun_size ...\n"); + } + rc = cblk_get_lun_size(blk_data->chunk_id,&num_blocks,0); + if (!thread_flag) { + + printf("Get_lun_size returned rc = %d, with num_blocks = 0x%lx", + rc,num_blocks); + } + break; + case TRANSP_SET_SIZE: + if (verbose_flag) { + fprintf(stderr,"Calling cblk_set_size ...\n"); + } + rc = cblk_set_size(blk_data->chunk_id,size,0); + + if (!thread_flag) { + + printf("set_size returned rc = %d, with num_blocks = 0x%lx", + rc,size); + } + break; + case TRANSP_WRITE_READ: + + /* + * Perform write then read comparision test + */ + + + /* + * Align data buffer on page boundary. 
+ */ + if ( posix_memalign((void *)&comp_data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + cblk_close(blk_data->chunk_id,0); + return (ret_code); + + } + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. + */ + + bytes_read = fread(comp_data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(comp_data_buf), ((blk_number)%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_write for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_write(blk_data->chunk_id,comp_data_buf,blk_number,1,0); + + if (!thread_flag) { + + printf("write completed with rc = %d\n",rc); + } + + if (rc != 1) { + + + fprintf(stderr,"cblk_write failed for lba = 0x%"PRIX64" with rc = 0x%x errno = %d\n",blk_number,rc,errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_read for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + + if (!thread_flag) { + printf("Read completed with rc = %d\n",rc); + } + + if (rc != 1) { + + + fprintf(stderr,"cblk_read failed for lba = 0x%"PRIX64" with rc = 0x%x errno = %d\n",blk_number,rc, errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + if (rc) { + + pthread_mutex_lock(&completion_lock); + + fprintf(stderr,"Memcmp failed with rc = 0x%x, for blk_number = 0x%"PRIX64"\n",rc,blk_number); + + printf("Written data for blk_number = 
0x%"PRIX64":\n",blk_number); + dumppage(data_buf,DATA_BUF_SIZE); + printf("**********************************************************\n\n"); + printf("read data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(comp_data_buf,DATA_BUF_SIZE); + + + printf("**********************************************************\n\n"); + + pthread_mutex_unlock(&completion_lock); + + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + if (rc == 1) { + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + pthread_mutex_lock(&completion_lock); + if (rc) { + + + fprintf(stderr,"Memcmp for re-read failed\nDump of re-read data for blk_number = 0x%"PRIX64"\n",blk_number); + + dumppage(data_buf,DATA_BUF_SIZE); + } else { + + fprintf(stderr,"Memcmp for re-read successful for blk_number = 0x%"PRIX64"\n",blk_number); + } + + pthread_mutex_unlock(&completion_lock); + + } + + } else if ((verbose_flag) && (!thread_flag)) { + printf("Memcmp succeeded for blk_number = 0x%"PRIX64"\n",blk_number); + } + + + + free(comp_data_buf); + break; + case TRANSP_AWRITE_AREAD: + + /* + * Perform async write then async read comparision test + */ + + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&comp_data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + cblk_close(blk_data->chunk_id,0); + return (ret_code); + + } + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. 
+ */ + + bytes_read = fread(comp_data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(comp_data_buf), ((blk_number)%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_awrite for lba = 0x%"PRIX64" ...\n",blk_number); + + } + + + rc = cblk_awrite(blk_data->chunk_id,comp_data_buf,blk_number,1,&tag,&arw_status,async_flags); + + if (rc) { + fprintf(stderr,"cblk_awrite failed for lba = 0x%" PRIx64 ", rc = %d, errno = %d\n",blk_number,rc,errno); + free(comp_data_buf); + + free(data_buf); + return (ret_code); + + } else { + + + if (verbose_flag && !thread_flag) { + + if (poll_status_flag) { + fprintf(stderr,"Polling user status for tag = 0x%x ...\n",tag); + } else { + fprintf(stderr,"Calling cblk_aresult for tag = 0x%x ...\n",tag); + } + } + + while (TRUE) { + + if (poll_status_flag) { + + switch (arw_status.status) { + case CBLK_ARW_STATUS_SUCCESS: + rc = arw_status.blocks_transferred; + break; + case CBLK_ARW_STATUS_PENDING: + rc = 0; + break; + default: + rc = -1; + errno = arw_status.fail_errno; + } + + } else { + rc = cblk_aresult(blk_data->chunk_id,&tag, &status,aresult_flags); + } + + if (rc > 0) { + + if (verbose_flag && !thread_flag) { + printf("Async write data completed ...\n"); + } + } else if (rc == 0) { + if (!thread_flag) { + fprintf(stderr,"cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + } + usleep(300); + continue; + } else { + fprintf(stderr,"cblk_aresult completed (failed write) for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + + exit_loop = TRUE; + } + + break; + + } /* while */ + + } + + if (exit_loop) { + + 
break; + } + + + if (!thread_flag) { + + printf("write completed with rc = %d\n",rc); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_aread for lba = 0x%"PRIX64" ...\n",blk_number); + + } + + + + rc = cblk_aread(blk_data->chunk_id,data_buf,blk_number,1,&tag,&arw_status,async_flags); + + if (rc > 0) { + if (!thread_flag) { + fprintf(stderr,"cblk_aread succeeded for lba = 0x%" PRIx64 " , rc = %d, errno = %d\n",blk_number,rc,errno); + + + printf("Async read data completed ...\n"); + } + + } else if (rc < 0) { + fprintf(stderr,"cblk_aread failed for lba = 0x%" PRIx64 ", rc = %d, errno = %d\n",blk_number,rc,errno); + free(comp_data_buf); + + free(data_buf); + return (ret_code); + + } else { + + + if (verbose_flag && !thread_flag) { + + if (poll_status_flag) { + fprintf(stderr,"Polling user status for tag = 0x%x ...\n",tag); + } else { + fprintf(stderr,"Calling cblk_aresult for tag = 0x%x ...\n",tag); + } + } + + while (TRUE) { + + if (poll_status_flag) { + + switch (arw_status.status) { + case CBLK_ARW_STATUS_SUCCESS: + rc = arw_status.blocks_transferred; + break; + case CBLK_ARW_STATUS_PENDING: + rc = 0; + break; + default: + rc = -1; + errno = arw_status.fail_errno; + } + + } else { + rc = cblk_aresult(blk_data->chunk_id,&tag, &status,aresult_flags); + } + + if (rc > 0) { + + if (verbose_flag && !thread_flag) { + printf("Async read data completed ...\n"); + } + } else if (rc == 0) { + if (!thread_flag) { + fprintf(stderr,"cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + } + usleep(300); + continue; + } else { + fprintf(stderr,"cblk_aresult completed (failed read) for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + exit_loop = TRUE; + } + + break; + + } /* while */ + + } + + if (exit_loop) { + + break; + } + if (!thread_flag) { + printf("Read completed with rc = %d\n",rc); + } + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + if (rc) { + + 
pthread_mutex_lock(&completion_lock); + + fprintf(stderr,"Memcmp failed with rc = 0x%x, for blk_number = 0x%"PRIX64"\n",rc,blk_number); + + printf("Written data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(data_buf,DATA_BUF_SIZE); + printf("**********************************************************\n\n"); + printf("read data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(comp_data_buf,DATA_BUF_SIZE); + + printf("**********************************************************\n\n"); + pthread_mutex_unlock(&completion_lock); + + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + if (rc == 1) { + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + pthread_mutex_lock(&completion_lock); + if (rc) { + + + + fprintf(stderr,"Memcmp for re-read failed\nDump of re-read data for blk_number = 0x%"PRIX64"\n",blk_number); + + dumppage(data_buf,DATA_BUF_SIZE); + } else { + + fprintf(stderr,"Memcmp for re-read successful for blk_number = 0x%"PRIX64"\n",blk_number); + } + + pthread_mutex_unlock(&completion_lock); + } + + } else if ((verbose_flag) && (!thread_flag)) { + printf("Memcmp succeeded for blk_number = 0x%"PRIX64"\n",blk_number); + } + + + free(comp_data_buf); + break; + case TRANSP_GET_STATS: + + bzero (&stats, sizeof(stats)); + if (verbose_flag) { + fprintf(stderr,"Calling cblk_get_stats ...\n"); + } + rc = cblk_get_stats(blk_data->chunk_id,&stats,0); + if (!thread_flag) { + + printf("Get_stats returned rc = %d",rc); + + + hexdump(&stats,sizeof(stats),NULL); + } + break; + case TRANSP_WRT_FRK_RD: + + /* + * Perform write then fork and child read. comparision test + */ + + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&comp_data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + cblk_close(blk_data->chunk_id,0); + return (ret_code); + + } + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. 
+ */ + + bytes_read = fread(comp_data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(comp_data_buf), ((blk_number)%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_write for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_write(blk_data->chunk_id,comp_data_buf,blk_number,1,0); + + if (!thread_flag) { + + printf("write completed with rc = %d\n",rc); + } + + if (rc != 1) { + + + fprintf(stderr,"cblk_write failed for lba = 0x%"PRIX64" with rc = 0x%x errno = %d\n",blk_number,rc,errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + + if ((pid = fork()) < 0) { /* fork failed */ + perror("Fork failed \n"); + free(comp_data_buf); + + break; + } + else if (pid > 0) { /* parents fork call */ + + if (verbose_flag && !thread_flag) { + fprintf(stderr,"Fork succeeded \n"); + } + + /* + * Sleep for a while to allow child process to complete + */ + + sleep(10); + free(comp_data_buf); + + + break; + } + + /* + * Only the child process will reach this point + * ie fork = 0. 
+ */ + + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_clone_after_fork...\n"); + + } + rc = cblk_clone_after_fork(blk_data->chunk_id,O_RDONLY,0); + + + if (verbose_flag && !thread_flag) { + printf("clone completed with rc = %d, errno = %d\n",rc,errno); + } + + if (rc) { + + + fprintf(stderr,"cblk_chunk_clone failed with rc = 0x%x errno = %d\n",rc, errno); + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_read for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + + if (!thread_flag) { + printf("Read completed with rc = %d\n",rc); + } + + if (rc != 1) { + + + fprintf(stderr,"cblk_read failed for lba = 0x%"PRIX64" with rc = 0x%x errno = %d\n",blk_number,rc, errno); + + free(comp_data_buf); + + free(data_buf); + return (ret_code); + } + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + if (rc) { + + pthread_mutex_lock(&completion_lock); + + fprintf(stderr,"Memcmp failed with rc = 0x%x, for blk_number = 0x%"PRIX64"\n",rc,blk_number); + + printf("Written data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(data_buf,DATA_BUF_SIZE); + printf("**********************************************************\n\n"); + printf("read data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(comp_data_buf,DATA_BUF_SIZE); + + printf("**********************************************************\n\n"); + + pthread_mutex_unlock(&completion_lock); + + + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + if (rc == 1) { + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + pthread_mutex_lock(&completion_lock); + if (rc) { + + + fprintf(stderr,"Memcmp for re-read failed\nDump of re-read data for blk_number = 0x%"PRIX64"\n",blk_number); + + dumppage(data_buf,DATA_BUF_SIZE); + } else { + + fprintf(stderr,"Memcmp for re-read successful for blk_number = 0x%"PRIX64"\n",blk_number); + } 
+ + pthread_mutex_unlock(&completion_lock); + + } + + } else if ((verbose_flag) && (!thread_flag)) { + printf("Memcmp succeeded for blk_number = 0x%"PRIX64"\n",blk_number); + + } + + free(comp_data_buf); + + break; + case TRANSP_AREAD_NEXT: + + + if (!pool_size) { + + fprintf(stderr,"Pool size not specified\n"); + } + + active_cmds = 0; + while (active_cmds < pool_size) { + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_aread for lba = 0x%"PRIX64" ...\n",blk_number); + + } + rc = cblk_aread(blk_data->chunk_id,data_buf,blk_number,1,&tag,&arw_status,async_flags); + + if (rc > 0) { + fprintf(stderr,"cblk_aread succeeded for lba = 0x%lx, rc = %d, errno = %d\n",blk_number,rc,errno); + + + printf("Async read data completed ...\n"); + printf("Returned data is ...\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + + } else if (rc < 0) { + fprintf(stderr,"cblk_aread failed for lba = 0x%lx, rc = %d, errno = %d\n",blk_number,rc,errno); + + + } else { + + active_cmds++; + + if (inc_flag) { + + blk_number++; + i++; + + if (i == num_loops) { + + break; + } + } + + + } + + } /* while loop */ + + + /* + * Now wait for all queued requests to complete. 
+ */ + + aresult_flags |= CBLK_ARESULT_NEXT_TAG; + + while (active_cmds) { + + + if (verbose_flag && !thread_flag) { + fprintf(stderr,"Calling cblk_aresult...\n"); + } + + rc = cblk_aresult(blk_data->chunk_id,&tag, &status,aresult_flags); + + if (rc > 0) { + + active_cmds--; + if (verbose_flag) { + printf("Async read data completed for tag = 0x%x...\n",tag); + printf("Returned data is ...\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + } + } else if (rc == 0) { + fprintf(stderr,"cblk_aresult completed: command still active for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + usleep(300); + continue; + } else { + fprintf(stderr,"cblk_aresult completed for for tag = 0x%x, rc = %d, errno = %d\n",tag,rc,errno); + + + active_cmds--; + + } + + } /* while loop */ + + + break; + + case TRANSP_LWRITE_LREAD: + + /* + * Perform async write then async read comparision test + */ + + + + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&comp_data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + cblk_close(blk_data->chunk_id,0); + return (ret_code); + + } + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. 
+ */ + + bytes_read = fread(comp_data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(comp_data_buf), ((blk_number)%256), + DATA_BUF_SIZE); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_listio write for lba = 0x%"PRIX64" ...\n",blk_number); + + } + + bzero(&cblk_issue_io,sizeof(cblk_issue_io)); + + cblk_issue_io.request_type = CBLK_IO_TYPE_WRITE; + cblk_issue_io.buf = comp_data_buf; + cblk_issue_io.lba = blk_number, + cblk_issue_io.nblocks = 1; + + + bzero(&cblk_complete_io,sizeof(cblk_complete_io)); + + cblk_issue_ary[0] = &cblk_issue_io; + cblk_complete_ary[0] = &cblk_complete_io; + complete_items = 1; + + rc = cblk_listio(blk_data->chunk_id,cblk_issue_ary,1,NULL,0,NULL,0,cblk_complete_ary,&complete_items,timeout,0); + + + if (rc) { + fprintf(stderr,"cblk_listio write failed for lba = 0x%" PRIx64 ", rc = %d, errno = %d\n",blk_number,rc,errno); + free(comp_data_buf); + + free(data_buf); + return (ret_code); + + } else { + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_listio poll for write ...\n"); + + } + + while (TRUE) { + + complete_items = 1; + rc = cblk_listio(blk_data->chunk_id,NULL,0,NULL,0,cblk_issue_ary,1,cblk_complete_ary,&complete_items,timeout,0); + + + + + if (rc) { + + fprintf(stderr,"cblk_listio poll for write completed (failed write) for rc = %d, errno = %d\n",rc,errno); + + exit_loop = TRUE; + + } else { + + if (cblk_issue_io.stat.status == CBLK_ARW_STATUS_PENDING) { + + fprintf(stderr,"cblk_listio poll for write completed: command still active rc = %d, errno = %d\n",rc,errno); + usleep(300); + continue; + } else { + + if (verbose_flag && !thread_flag) { + + 
fprintf(stderr,"write listio completed with status = %d, errno = %d\n", + cblk_issue_io.stat.status,cblk_complete_io.stat.fail_errno); + + } + } + } + + break; + + } /* while */ + + } + + + if (exit_loop) { + + break; + } + + + if (!thread_flag) { + + printf("write completed with rc = %d\n",rc); + } + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_listio read for lba = 0x%"PRIX64" ...\n",blk_number); + + } + + + bzero(&cblk_issue_io,sizeof(cblk_issue_io)); + + cblk_issue_io.request_type = CBLK_IO_TYPE_READ; + cblk_issue_io.buf = data_buf; + cblk_issue_io.lba = blk_number, + cblk_issue_io.nblocks = 1; + + + bzero(&cblk_complete_io,sizeof(cblk_complete_io)); + + cblk_issue_ary[0] = &cblk_issue_io; + cblk_complete_ary[0] = &cblk_complete_io; + complete_items = 1; + + + + rc = cblk_listio(blk_data->chunk_id,cblk_issue_ary,1,NULL,0,NULL,0,cblk_complete_ary,&complete_items,timeout,0); + + + if (rc > 0) { + if (!thread_flag) { + fprintf(stderr,"cblk_listio read succeeded for lba = 0x%" PRIx64 " , rc = %d, errno = %d\n",blk_number,rc,errno); + + + printf("Async read data completed ...\n"); + } + + } else if (rc < 0) { + fprintf(stderr,"cblk_listio read failed for lba = 0x%" PRIx64 ", rc = %d, errno = %d\n",blk_number,rc,errno); + free(comp_data_buf); + + free(data_buf); + return (ret_code); + + } else { + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_listio poll ...\n"); + + } + + while (TRUE) { + + complete_items = 1; + rc = cblk_listio(blk_data->chunk_id,NULL,0,NULL,0,cblk_issue_ary,1,cblk_complete_ary,&complete_items,timeout,0); + + + + if (rc) { + + fprintf(stderr,"cblk_listio poll for read completed (failed write) for rc = %d, errno = %d\n",rc,errno); + + exit_loop = TRUE; + + } else { + + if (cblk_issue_io.stat.status == CBLK_ARW_STATUS_PENDING) { + + fprintf(stderr,"cblk_listio poll for read completed: command still active rc = %d, errno = %d\n",rc,errno); + usleep(300); + continue; + } else { + + if (verbose_flag 
&& !thread_flag) { + + fprintf(stderr,"read listio completed with status = %d, errno = %d\n", + cblk_issue_io.stat.status,cblk_complete_io.stat.fail_errno); + + } + } + } + + + break; + + } /* while */ + + } + + if (exit_loop) { + + break; + } + if (!thread_flag) { + printf("Read completed with rc = %d\n",rc); + } + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + if (rc) { + + pthread_mutex_lock(&completion_lock); + + fprintf(stderr,"Memcmp failed with rc = 0x%x, for blk_number = 0x%"PRIX64"\n",rc,blk_number); + + printf("Written data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(data_buf,DATA_BUF_SIZE); + printf("**********************************************************\n\n"); + printf("read data for blk_number = 0x%"PRIX64":\n",blk_number); + dumppage(comp_data_buf,DATA_BUF_SIZE); + + printf("**********************************************************\n\n"); + pthread_mutex_unlock(&completion_lock); + + rc = cblk_read(blk_data->chunk_id,data_buf,blk_number,1,0); + + if (rc == 1) { + + rc = memcmp(data_buf,comp_data_buf,DATA_BUF_SIZE); + + pthread_mutex_lock(&completion_lock); + if (rc) { + + + + fprintf(stderr,"Memcmp for re-read failed\nDump of re-read data for blk_number = 0x%"PRIX64"\n",blk_number); + + dumppage(data_buf,DATA_BUF_SIZE); + } else { + + fprintf(stderr,"Memcmp for re-read successful for blk_number = 0x%"PRIX64"\n",blk_number); + } + + pthread_mutex_unlock(&completion_lock); + } + + } else if ((verbose_flag) && (!thread_flag)) { + printf("Memcmp succeeded for blk_number = 0x%"PRIX64"\n",blk_number); + } + + + free(comp_data_buf); + break; + + default: + + fprintf(stderr,"Invalid local_cmd_type = %d\n",local_cmd_type); + i = num_loops; + } /* switch */ + + + if (inc_flag) { + + blk_number++; + } + + +/* ?? + if (rc) { + + break; + } +*/ + } + + if (each_thread_vlun_flag) { + + + /* + * If we are using a virtual lun per thread + * then close it now. 
+ */ + + + if (verbose_flag && !thread_flag) { + fprintf(stderr,"Calling cblk_close ...\n"); + } + + rc = cblk_close(blk_data->chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of chunk_id %d failed with errno = %d\n",blk_data->chunk_id,errno); + } + + } + + free(data_buf); + + return (ret_code); +} + +int main (int argc, char **argv) +{ + int rc = 0; /* Return code */ + int rc2 = 0; /* Return code */ + uint64_t file_len = 0; + chunk_id_t chunk_id = 0; + chunk_id_t alt_chunk_id = 0; + int flags = 0; + int i; + pid_t pid; + void *status; + chunk_ext_arg_t ext = 0; + + + /* parse the input args & handle syntax request quickly */ + rc = parse_args( argc, argv ); + if (rc) + { + return (0); + } + + + if ( hflag) + { + usage(); + return (0); + } + + if (fflag) { + + if (cmd_type == TRANSP_WRITE_CMD) { + /* + * If we are doing a write and + * filename is specified then this + * file will be input file for data + * we will write to the device. + */ + + /* Open file */ + file = fopen(filename, "rb"); + + if (!file) { + fprintf(stderr,"Failed to open filename = %s\n",filename); + + return errno; + } + + /* Get file length */ + + fseek(file, 0, SEEK_END); + file_len=ftell(file); + fseek(file, 0, SEEK_SET); + + if (file_len < DATA_BUF_SIZE) { + + fprintf(stderr,"Input file must be at least 4096 bytes in size\n"); + + fclose(file); + return 0; + } + + + + } else { + + fflag = FALSE; + } + } + + + rc = cblk_init(NULL,0); + if (rc) { + + fprintf(stderr,"cblk_init failed with errno = %d and rc = %d\n", + errno,rc); + } + + + if (!each_thread_vlun_flag) { + + if (virtual_lun_flag) { + + flags = CBLK_OPN_VIRT_LUN; + } + + if (no_bg_thread_flag) { + + flags |= CBLK_OPN_NO_INTRP_THREADS; + } + + if (shared_contxt_flag) { + + flags |= CBLK_OPN_SHARE_CTXT; + } + +#ifdef _AIX + + if (mpio_flag) { + + flags |= CBLK_OPN_MPIO_FO; + } + + if (reserve_flag) { + + flags |= CBLK_OPN_RESERVE; + } + + if (break_reserve_flag) { + + flags |= CBLK_OPN_FORCED_RESERVE; + } + + +#endif + + for (i = 
0; i < num_opens; i++) { + if (verbose_flag) { + fprintf(stderr,"Calling cblk_open ...\n"); + } + chunk_id = cblk_open(device_name,pool_size,O_RDWR,ext,flags); + } + + if (chunk_id == NULL_CHUNK_ID) { + + fprintf(stderr,"Open of %s failed with errno = %d\n",device_name,errno); + cblk_term(NULL,0); + return -1; + } + + + blk_thread[0].chunk_id = chunk_id; + + if (shared_contxt_flag) { + alt_chunk_id = cblk_open(alt_device_name,pool_size,O_RDWR,ext,flags); + if (alt_chunk_id == NULL_CHUNK_ID) { + + fprintf(stderr,"Open of %s failed with errno = %d\n",alt_device_name,errno); + cblk_close(chunk_id,0); + cblk_term(NULL,0); + return -1; + } + alt_blk_thread[0].chunk_id = alt_chunk_id; + } + + + if (virtual_lun_flag) { + + rc = cblk_set_size(chunk_id,size,0); + + if (rc) { + fprintf(stderr,"cblk_set_size failed with rc = %d, and errno = %d \n",rc,errno); + + rc = cblk_close(chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",device_name,errno); + } + + if (shared_contxt_flag) { + rc = cblk_close(alt_chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",alt_device_name,errno); + } + } + + + cblk_term(NULL,0); + return -1; + + } + + if (shared_contxt_flag) { + + rc = cblk_set_size(alt_chunk_id,size,0); + + if (rc) { + fprintf(stderr,"cblk_set_size failed with rc = %d, and errno = %d \n",rc,errno); + + rc = cblk_close(chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",device_name,errno); + } + + rc = cblk_close(alt_chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",alt_device_name,errno); + } + + + cblk_term(NULL,0); + return -1; + + } + + } + + } + + + } /* !each_thread_vlun_flag */ + + if ((thread_flag) && + (num_threads > 1)) { + + /* + * Create all threads here + */ + + + for (i=0; i< num_threads; i++) { + + blk_thread[i].chunk_id = chunk_id; + + rc = pthread_create(&blk_thread[i].ptid,NULL,run_loop,(void *)&blk_thread[i]); + + if (rc) { + + 
fprintf(stderr, "pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + /* + * If we fail to create this thread and we are sharing contexts, + * then do not create the shared context associated with this. + */ + continue; + + } + + if (shared_contxt_flag) { + + + alt_blk_thread[i].chunk_id = alt_chunk_id; + + rc = pthread_create(&alt_blk_thread[i].ptid,NULL,run_loop,(void *)&alt_blk_thread[i]); + + if (rc) { + + fprintf(stderr, "pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + } + + } + + + } + + + /* + * Wait for all threads to complete + */ + + + errno = 0; + + for (i=0; i< num_threads; i++) { + + rc = pthread_join(blk_thread[i].ptid,&status); + + if (rc) { + + fprintf(stderr, "pthread_join failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + } + + if (shared_contxt_flag) { + + rc = pthread_join(alt_blk_thread[i].ptid,&status); + + if (rc) { + + fprintf(stderr, "pthread_join failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + } + + + } + } + + + fprintf(stderr, "All threads exited\n"); + + } else { + run_loop(&blk_thread[0]); + + + if (shared_contxt_flag) { + run_loop(&alt_blk_thread[0]); + } + } + + if (fork_rerun_flag) { + + /* + * We need to rerun the same operations again on the + * child after a fork. + */ + + if ((pid = fork()) < 0) { /* fork failed */ + perror("Fork failed \n"); + + } + else if (pid > 0) { /* parents fork call */ + + if (verbose_flag && !thread_flag) { + fprintf(stderr,"Fork succeeded \n"); + } + + /* + * Let parent sleep long enough for child to start up. + */ + + sleep(2); + + + } else { + + /* + * Only the child process will reach this point + * ie fork = 0. 
+ */ + + + + if (verbose_flag && !thread_flag) { + + fprintf(stderr,"Calling cblk_clone_after_fork...\n"); + + } + rc = cblk_clone_after_fork(chunk_id,O_RDWR,0); + + + if (verbose_flag && !thread_flag) { + printf("clone completed with rc = %d, errno = %d\n",rc,errno); + } + + if (rc) { + + + fprintf(stderr,"cblk_chunk_clone failed with rc = 0x%x errno = %d\n",rc, errno); + + + } + + + if ((!rc) && (shared_contxt_flag)) { + + + rc2 = cblk_clone_after_fork(alt_chunk_id,O_RDWR,0); + + + if (verbose_flag && !thread_flag) { + printf("clone completed with rc = %d, errno = %d\n",rc,errno); + } + + if (rc2) { + + + fprintf(stderr,"cblk_chunk_clone failed with rc = 0x%x errno = %d\n",rc, errno); + + + } + } + + + if ((!rc) && (!rc2) && + (thread_flag) && + (num_threads > 1)) { + + /* + * Create all threads here + */ + + + for (i=0; i< num_threads; i++) { + + blk_thread[i].chunk_id = chunk_id; + + rc = pthread_create(&blk_thread[i].ptid,NULL,run_loop,(void *)&blk_thread[i]); + + if (rc) { + + fprintf(stderr, "pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + + /* + * If we fail to create this thread and we are sharing contexts, + * then do not create the shared context associated with this. 
+ */ + continue; + + } + + if (shared_contxt_flag) { + + + alt_blk_thread[i].chunk_id = alt_chunk_id; + + rc = pthread_create(&alt_blk_thread[i].ptid,NULL,run_loop,(void *)&alt_blk_thread[i]); + + if (rc) { + + fprintf(stderr, "pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + + } + + } + } + + + /* + * Wait for all threads to complete + */ + + + errno = 0; + + for (i=0; i< num_threads; i++) { + + rc = pthread_join(blk_thread[i].ptid,&status); + + if (rc) { + + fprintf(stderr, "pthread_join failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + + } + + if (shared_contxt_flag) { + + rc = pthread_join(alt_blk_thread[i].ptid,&status); + + if (rc) { + + fprintf(stderr, "pthread_join failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + + + } + + } + } + + + fprintf(stderr, "All threads exited after fork\n"); + + } else if (!rc) { + run_loop(&blk_thread[0]); + + if (shared_contxt_flag) { + run_loop(&alt_blk_thread[0]); + } + + + } + } + + } + + if (!each_thread_vlun_flag) { + if (verbose_flag) { + fprintf(stderr,"Calling cblk_close ...\n"); + } + rc = cblk_close(chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",device_name,errno); + } + + if (shared_contxt_flag) { + rc = cblk_close(alt_chunk_id,0); + + if (rc) { + + fprintf(stderr,"Close of %s failed with errno = %d\n",alt_device_name,errno); + } + } + + } + + + cblk_term(NULL,0); + + return 0; + +} diff --git a/src/test/capi_dev_nodes.c b/src/test/capi_dev_nodes.c new file mode 100644 index 00000000..735a6591 --- /dev/null +++ b/src/test/capi_dev_nodes.c @@ -0,0 +1,134 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/capi_dev_nodes.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + + +#include +#include +#include +#include +#include + +/* + * Parse /proc/devices to find the major number of the character device + * specified with name. + * + * Returns -1 on error or if name was not found. + */ +static int find_major_no(const char *name) +{ + const char *chardev_str = "Character devices:"; + FILE *fp; + char *line = NULL; + size_t len = 0; + int major = -1; + int in_char = 0; + int tmp_major; + char *tmp_name; + + fp = fopen("/proc/devices", "r"); + if (!fp) { + perror("find_major_no: Unable to open /proc/devices"); + return -1; + } +#if !defined(_AIX) && !defined(_MACOSX) + while (getline(&line, &len, fp) != -1) { + if (!strncmp(line, chardev_str, strlen(chardev_str))) + in_char = 1; + else if (in_char) { + /* %ms requires glibc >= 2.7 */ + if (sscanf(line, "%i %ms", &tmp_major, &tmp_name) < 2) { + in_char = 0; + continue; + } + if (!strncmp(tmp_name, name, strlen(name))) { + major = tmp_major; + free(tmp_name); + break; + } + free(tmp_name); + } + } +#endif /* !_AIX and !_MACOSX */ + if (major < 0) + fprintf(stderr, "Unable to find %s in /proc/devices\n", name); + + free(line); + fclose(fp); + return major; +} + +/* + * If the device specified by path does not exist it will attempt to create it + * by matching the major number of name from /proc/devices and the given minor + * number. 
+ * + * Note that if the device already exists this will NOT verify the major & + * minor numbers. + */ +int create_dev(const char *path, const char *name, const int minor) +{ + struct stat sb; + int result, major; + + result = stat(path, &sb); + if (result < 0) { + major = find_major_no(name); + if (major < 0) + return -1; +#if !defined(_AIX) && !defined(_MACOSX) + if (mknod(path, 0600 | S_IFCHR, makedev(major, minor))) { + perror("create_dev: Unable to create device"); + return -1; + } +#else + perror("create_dev: Unable to create device"); + return -1; +#endif + } + + return 0; +} + +/* + * Open the device specified by path, creating it if it doesn't exist with + * create_dev(name). + */ +int create_and_open_dev(const char *path, const char *name, const int minor) +{ + int fd; + + if (create_dev(path, name, minor)) + return -1; + + /* TODO: Enforce close on exec in driver: */ + fd = open(path, O_RDWR); + if (fd < 0) { + perror("create_and_open_dev: Unable to open device"); + return -1; + } + + return fd; +} diff --git a/src/test/capi_dev_nodes.h b/src/test/capi_dev_nodes.h new file mode 100644 index 00000000..35db1033 --- /dev/null +++ b/src/test/capi_dev_nodes.h @@ -0,0 +1,33 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/capi_dev_nodes.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _CAPI_DEV_NODES_H +#define _CAPI_DEV_NODES_H + + +int create_dev(const char *path, const char *name, const int minor); + +int create_and_open_dev(const char *path, const char *name, const int minor); + +#endif diff --git a/src/test/ffdc/afu.c b/src/test/ffdc/afu.c new file mode 100755 index 00000000..392a4ae9 --- /dev/null +++ b/src/test/ffdc/afu.c @@ -0,0 +1,267 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/ffdc/afu.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +//#define AFU_PS_REGS_SIZE (64*1024*1024) + +#include +#include + +#if __BYTE_ORDER == __BIG_ENDIAN +#ifndef htobe16 +#define htobe16(x) (x) +#endif +#ifndef htobe32 +#define htobe32(x) (x) +#endif +#ifndef htobe64 +#define htobe64(x) (x) +#endif +#else +#ifndef htobe16 +#define htobe16(x) __bswap_16(x) +#endif +#ifndef htobe32 +#define htobe32(x) __bswap_32(x) +#endif +#ifndef htobe64 +#define htobe64(x) __bswap_64(x) +#endif +#endif + + +// MMIO write, 32 bit +static inline void out_be32 (__u64 *addr, __u32 val) +{ + __asm__ __volatile__ ("sync; stw%U0%X0 %1, %0" + : "=m" (*addr) : "r" (val) : "memory"); +} + +// MMIO write, 64 bit +static inline void out_be64 (__u64 *addr, __u64 val) +{ + __asm__ __volatile__ ("sync; std%U0%X0 %1, %0" + : "=m" (*addr) : "r" (val) : "memory"); +} + +// MMIO read, 32 bit +static inline __u32 in_be32 (__u64 *addr) +{ + __u32 ret; + __asm__ __volatile__ ("sync; lwz%U1%X1 %0, %1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr) : "memory"); + return ret; +} + +// MMIO read, 64 bit +static inline __u64 in_be64 (__u64 *addr) +{ + __u64 ret; + __asm__ __volatile__ ("sync; ld%U1%X1 %0, %1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr) : "memory"); + return ret; +} + +// mmap AFU MMIO registers +static void mmap_problem_state_registers (struct afu *afu) +{ + void *ret; + debug0 ("Mapping AFU problem state registers...\n"); + ret = mmap (NULL, AFU_PS_REGS_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, + afu->fd, 0); + if (ret == MAP_FAILED) { + perror ("mmap_problem_state_registers"); + afu->ps_addr = NULL; + return; + } + afu->ps_addr = ret; +} + +// munmap AFU MMIO registers +static void munmap_problem_state_registers (struct afu *afu) +{ + if (afu->ps_addr) { + debug0 ("Unmapping AFU problem state registers...\n"); + if (munmap (afu->ps_addr, AFU_PS_REGS_SIZE)) + perror 
("munmap_problem_state_registers"); + debug0 ("Done unmapping AFU problem state registers\n"); + afu->ps_addr = NULL; + } +} + +// Create and open AFU device then map MMIO registers +struct afu *afu_map (char *afu_path) +{ + struct afu *afu = (struct afu *) malloc (sizeof (struct afu)); + if (!afu) { + perror ("malloc"); + return NULL; + } + debug ("Allocated memory at 0x%016lx for AFU\n", (__u64) afu); + + memset(afu, 0, sizeof(*afu)); + afu->work.num_interrupts = -1; + + fprintf(stderr,"Creating and opening AFU file descriptor %s\n",afu_path); + // afu->fd = create_and_open_dev ("/dev/cxl/afu0.0m", "capi", 1); + // afu->fd = open("/dev/cxl/afu0.0m", O_RDWR); + afu->fd = open(afu_path, O_RDWR); + + if (afu->fd < 0) { + perror ("create_and_open_dev"); + afu_unmap (afu); + return NULL; + } + + // attach the process before mmap + afu_start(afu); + if (afu->started == 0) { + perror ("afu_start failed"); + afu_unmap (afu); + return NULL; + } + printf("afu_start: context=%d\n",afu->process_element); fflush(stdout); + + // map this context's problem space MMIO (SIS-Lite regs) + mmap_problem_state_registers (afu); + if (!afu->ps_addr) { + perror ("mmap_problem_state_registers"); + afu_unmap (afu); + return NULL; + } + + printf ("Problem state registers mapped to %p\n", afu->ps_addr); + + return afu; +} + +// Unmap AFU device +void afu_unmap (struct afu *afu) +{ + if (afu) { + munmap_problem_state_registers (afu); + if (afu->fd >= 0) { + debug0 ("Closing AFU file descriptor...\n"); + close (afu->fd); + } + } +} + +// Set WED address and have PSL send reset and start to AFU +void afu_start (struct afu *afu) +{ + /* Set WED in PSL and send start command to AFU */ + debug ("Sending WED address 0x%016lx to PSL...\n", (__u64) afu->work.work_element_descriptor); + + if (ioctl (afu->fd, CXL_IOCTL_START_WORK, &afu->work) == 0) { + debug0 ("Start command succeeded on AFU\n"); + afu->started = 1; + } + else { + debug0 ("Start command to AFU failed\n"); + } + + if (ioctl (afu->fd, 
CXL_IOCTL_GET_PROCESS_ELEMENT,&(afu->process_element)) == 0) { + debug0 ("Get process element succeeded on AFU\n"); + } else { + debug0 ("Get process element failed on AFU\n"); + } +} + +// MMIO write based on AFU offset, 32-bit +void afu_mmio_write_sw (struct afu *afu, unsigned offset, __u32 value) +{ +#ifdef SLAVE_CONTEXT + if (offset < (512*0x4000)) offset = offset + (0x4000 * afu->process_element); +#endif + __u64 addr = 4 * (__u64) offset; + out_be32 (afu->ps_addr + addr, htobe32(value)); + debug ("Wrote 0x%08x to AFU register offset %x\n", value, offset); +} + +// MMIO write based on AFU offset, 64-bit +void afu_mmio_write_dw (struct afu *afu, unsigned offset, __u64 value) +{ +#ifdef SLAVE_CONTEXT + if (offset < (512*0x4000)) offset = offset + (0x4000 * afu->process_element); +#endif + __u64 addr = 4 * (__u64) (offset & ~0x1); // Force 8byte align + out_be64 (afu->ps_addr + addr, htobe64(value)); + debug ("Wrote 0x%016lx to AFU register offset %x\n", value, offset); +} + +// MMIO read based on AFU offset, 32-bit +void afu_mmio_read_sw (struct afu *afu, unsigned offset, __u32 *value) +{ +#ifdef SLAVE_CONTEXT + if (offset < (512*0x4000)) offset = offset + (0x4000 * afu->process_element); +#endif + __u64 addr = 4 * (__u64) offset; + *value = htobe32(in_be32 (afu->ps_addr + addr)); + debug ("Read 0x%08x from AFU register offset %x\n", *value, offset); +} + +// MMIO read based on AFU offset, 64-bit +void afu_mmio_read_dw (struct afu *afu, unsigned offset, __u64 *value) +{ +#ifdef SLAVE_CONTEXT + if (offset < (512*0x4000)) offset = offset + (0x4000 * afu->process_element); +#endif + __u64 addr = 4 * (__u64) (offset & ~0x1); // Force 8byte align + *value = htobe64(in_be64 (afu->ps_addr + addr)); + debug ("Read 0x%016lx from AFU register offset %x\n", *value, offset); +} + +// Wait for AFU to complete job +int afu_wait (struct afu *afu) +{ + if (afu->started) { + debug0 ("Waiting for AFU to finish...\n"); + struct pollfd poll_list = { afu->fd, POLLIN, 0}; + int
ret; + + ret = poll (&poll_list, 1, 5000); + if (ret == 0) + printf ("Poll timed out waiting on interrupt.\n"); + + /* For now, assume a non-zero response is a real interrupt + * later, maybe check regs / fd, loop for events, etc. + */ + debug0 ("AFU finished\n"); + } + return 0; +} diff --git a/src/test/ffdc/afu.h b/src/test/ffdc/afu.h new file mode 100755 index 00000000..f9af1b5b --- /dev/null +++ b/src/test/ffdc/afu.h @@ -0,0 +1,69 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/ffdc/afu.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _AFU_MEMCOPY_H_ +#define _AFU_MEMCOPY_H_ + +#include +#include +#define AFU_PS_REGS_SIZE 0x4000000 + + +#define DEBUG 0 +#define debug(fmt, ...)
do { if (DEBUG) fprintf(stderr,fmt,__VA_ARGS__); fflush(stderr);} while (0) +#define debug0(fmt) do { if (DEBUG) fprintf(stderr,fmt); fflush(stderr);} while (0) + +struct afu { + int fd; /* file descriptor */ + void *ps_addr; /* problem state registers */ + struct cxl_ioctl_start_work work; + __u32 process_element; + int started; /* AFU state */ +}; + +// Create and open AFU device then map MMIO registers +struct afu *afu_map (char *path); + +// Unmap AFU device +void afu_unmap (struct afu *afu); + +// Set WED address and have PSL send reset and start to AFU +void afu_start (struct afu *afu); + +// MMIO write based on AFU offset, 32-bit +void afu_mmio_write_sw (struct afu *afu, unsigned offset, __u32 value); + +// MMIO write based on AFU offset, 64-bit +void afu_mmio_write_dw (struct afu *afu, unsigned offset, __u64 value); + +// MMIO read based on AFU offset, 32-bit +void afu_mmio_read_sw (struct afu *afu, unsigned offset, __u32 *value); + +// MMIO read based on AFU offset, 64-bit +void afu_mmio_read_dw (struct afu *afu, unsigned offset, __u64 *value); + +// Wait for AFU to complete job +int afu_wait (struct afu *afu); + +#endif /* #define _AFU_MEMCOPY_H_ */ diff --git a/src/test/ffdc/afu_fc.h b/src/test/ffdc/afu_fc.h new file mode 100755 index 00000000..0eb25b69 --- /dev/null +++ b/src/test/ffdc/afu_fc.h @@ -0,0 +1,131 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/ffdc/afu_fc.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef AFU_FC_H +#define AFU_FC_H + + +// FC module register address offset for each port (byte address) + +#define FC_PORT0_OFFSET 0x2012000 +#define FC_PORT1_OFFSET 0x2013000 + +// FC module register offset (byte address) +// perl -ne 'if( /localparam\s+(\S+)\s*=\s*12.h([0-9a-f]+)/i ) { print "#define $1 0x$2\n";}' fc_module/src/ctms_fc_regs.v + +#define FC_MTIP_REV 0x000 +#define FC_MTIP_SCRATCH 0x008 +#define FC_MTIP_CMDCONFIG 0x010 +#define FC_MTIP_STATUS 0x018 +#define FC_MTIP_INITTIMER 0x020 +#define FC_MTIP_EVENTTIME 0x028 +#define FC_MTIP_CREDIT 0x030 +#define FC_MTIP_BB_SCN 0x038 +#define FC_MTIP_RX_SF 0x030 +#define FC_MTIP_TX_SE 0x048 +#define FC_MTIP_TX_SF 0x050 +#define FC_MTIP_RX_AE 0x058 +#define FC_MTIP_RX_AF 0x060 +#define FC_MTIP_TX_AE 0x068 +#define FC_MTIP_TX_AF 0x070 +#define FC_MTIP_FRMLEN 0x078 +#define FC_MTIP_SD_RCFG_CMD 0x100 +#define FC_MTIP_SD_RCFG_WRDAT 0x108 +#define FC_MTIP_SD_RCFG_RDDAT 0x110 +#define FC_MTIP_TX_FRM_CNT 0x200 +#define FC_MTIP_TX_CRC_ERR_CNT 0x208 +#define FC_MTIP_RX_FRM_CNT 0x210 +#define FC_MTIP_RX_CRC_ERR_CNT 0x218 +#define FC_MTIP_RX_LGTH_ERR_CNT 0x220 +#define FC_MTIP_FRM_DISC_CNT 0x228 + + +#define FC_PNAME 0x300 +#define FC_NNAME 0x308 +#define FC_PORT_ID 0x310 +#define FC_CONFIG 0x320 +#define FC_CONFIG2 0x328 +#define FC_STATUS 0x330 +#define FC_TIMER 0x338 +#define FC_E_D_TOV 0x340 +#define FC_ERROR 0x380 +#define FC_ERRCAP 0x388 +#define FC_ERRMSK 0x390 +#define FC_ERRINJ 0x3A0 +#define FC_TGT_D_ID 0x400 +#define FC_TGT_PNAME 
0x408 +#define FC_TGT_NNAME 0x410 +#define FC_TGT_LOGI 0x418 +#define FC_TGT_B2BCR 0x420 +#define FC_TGT_E_D_TOV 0x428 +#define FC_TGT_CLASS3 0x430 +#define FC_CNT_TXRDWR 0x518 +#define FC_CNT_LOGI 0x520 +#define FC_CNT_TXDATA 0x528 +#define FC_CNT_LINKERR 0x530 +#define FC_CNT_CRCERR 0x538 +#define FC_CNT_CRCERRRO 0x540 +#define FC_CNT_OTHERERR 0x548 +#define FC_CNT_TIMEOUT 0x550 +#define FC_CRC_THRESH 0x580 +#define FC_DBGDISP 0x600 +#define FC_DBGDATA 0x608 +#define FC_CNT_CRCTOT 0x610 +#define FC_CNT_AFURD 0x618 +#define FC_CNT_AFUWR 0x620 +#define FC_CNT_AFUABORT 0x628 +#define FC_CNT_RSPOVER 0x630 +#define FC_CNT_ABORT1 0x638 +#define FC_CNT_ABORT2 0x640 +#define FC_CNT_ABORT3 0x648 +#define FC_CNT_ABORT4 0x650 +#define FC_CNT_WBUFREL 0x658 +#define FC_CNT_RXRSP 0x660 + + + +// command encodes +// perl -ne 'if( /localparam\s+(\S+)\s*=\s*32.h([0-9a-f]+)/i ) { print "#define $1 0x$2\n";}' fc_module/src/ctms_fc_afu.inc +#define FCP_READ 0x01 +#define FCP_WRITE 0x02 +#define FCP_GSCSI_RD 0x03 +#define FCP_GSCSI_WR 0x04 +#define FCP_ABORT 0x05 + +// response interface encodes +#define FCP_RSP_GOOD 0x00 +#define FCP_RSP_CHECK 0x02 +#define FCP_RSP_BUSY 0x08 +#define FCP_RSP_CRCERR 0x51 +#define FCP_RSP_ABORTPEND 0x52 +#define FCP_RSP_BADREQ 0x53 +#define FCP_RSP_NOLOGI 0x54 +#define FCP_RSP_NOEXP 0x55 +#define FCP_RSP_INUSE 0x56 +#define FCP_RSP_LINKDOWN 0x57 +#define FCP_RSP_ABORTOK 0x58 +#define FCP_RSP_ABORTFAIL 0x59 +#define FCP_RSP_FCPERR 0x60 + +#endif diff --git a/src/test/ffdc/capi_dev_nodes.c b/src/test/ffdc/capi_dev_nodes.c new file mode 100755 index 00000000..0834d990 --- /dev/null +++ b/src/test/ffdc/capi_dev_nodes.c @@ -0,0 +1,127 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/test/ffdc/capi_dev_nodes.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include "capi_dev_nodes.h" + +#include +#include +#include +#include +#include + +/* + * Parse /proc/devices to find the major number of the character device + * specified with name. + * + * Returns -1 on error or if name was not found. 
+ */ +static int find_major_no(const char *name) +{ + const char *chardev_str = "Character devices:"; + FILE *fp; + char *line = NULL; + size_t len = 0; + int major = -1; + int in_char = 0; + int tmp_major; + char *tmp_name; + + fp = fopen("/proc/devices", "r"); + if (!fp) { + perror("find_major_no: Unable to open /proc/devices"); + return -1; + } + + while (getline(&line, &len, fp) != -1) { + if (!strncmp(line, chardev_str, strlen(chardev_str))) + in_char = 1; + else if (in_char) { + /* %ms requires glibc >= 2.7 */ + if (sscanf(line, "%i %ms", &tmp_major, &tmp_name) < 2) { + in_char = 0; + continue; + } + if (!strncmp(tmp_name, name, strlen(name))) { + major = tmp_major; + free(tmp_name); + break; + } + free(tmp_name); + } + } + if (major < 0) + fprintf(stderr, "Unable to find %s in /proc/devices\n", name); + + free(line); + fclose(fp); + return major; +} + +/* + * If the device specified by path does not exist it will attempt to create it + * by matching the major number of name from /proc/devices and the given minor + * number. + * + * Note that if the device already exists this will NOT verify the major & + * minor numbers. + */ +int create_dev(const char *path, const char *name, const int minor) +{ + struct stat sb; + int result, major; + + result = stat(path, &sb); + if (result < 0) { + major = find_major_no(name); + if (major < 0) + return -1; + if (mknod(path, 0600 | S_IFCHR, makedev(major, minor))) { + perror("create_dev: Unable to create device"); + return -1; + } + } + + return 0; +} + +/* + * Open the device specified by path, creating it if it doesn't exist with + * create_dev(name). 
+ */ +int create_and_open_dev(const char *path, const char *name, const int minor) +{ + int fd; + + // if (create_dev(path, name, minor)) + //return -1; + + /* TODO: Enforce close on exec in driver: */ + fd = open(path, O_RDWR | O_CLOEXEC); + if (fd < 0) { + fprintf(stderr,"create_and_open_dev: Unable to open device %s",path); + return -1; + } + + return fd; +} diff --git a/src/test/ffdc/capi_dev_nodes.h b/src/test/ffdc/capi_dev_nodes.h new file mode 100755 index 00000000..6c0b7d67 --- /dev/null +++ b/src/test/ffdc/capi_dev_nodes.h @@ -0,0 +1,32 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/ffdc/capi_dev_nodes.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _CAPI_DEV_NODES_H +#define _CAPI_DEV_NODES_H + +int create_dev(const char *path, const char *name, const int minor); + +int create_and_open_dev(const char *path, const char *name, const int minor); + +#endif diff --git a/src/test/ffdc/cxl_afu_dump2.c b/src/test/ffdc/cxl_afu_dump2.c new file mode 100644 index 00000000..0c9a3d0e --- /dev/null +++ b/src/test/ffdc/cxl_afu_dump2.c @@ -0,0 +1,120 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/ffdc/cxl_afu_dump2.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include "afu.h" +#include "afu_fc.h" +#ifdef SIM +#include "sim_pthread.h" +#else +#include +#endif + + +void print_64(FILE *f, int a, uint64_t v) { + if (v != 0xFFFFFFFFFFFFFFFF) + fprintf(f,"%x : %lx\n",a,v); +} + +void dump_range(FILE *f, struct afu *afu, int lwr, int upr) { + uint64_t r; + fprintf(f,"# mmio address range 0x%07x to 0x%07x\n",lwr, upr); + while (lwr < upr) { + // skip heartbeat reg otherwise mserv reports a mismatch + if( lwr != 0x2010020 ) { + afu_mmio_read_dw(afu,(lwr >> 2),(__u64 *)&r); + print_64(f,lwr,r); + } + lwr+=8; + } +} + +void dump_count(FILE *f, struct afu *afu, int base, int n) { + dump_range(f,afu,base,base+(n*8)); +} + +void dump_ctxt(FILE *f, struct afu* afu, int ctxt) { + int i; + uint64_t r; + dump_count(f,afu,0x10000*ctxt,16); +} + +void dump_cpc(FILE *f, struct afu* afu, int ctxt) { + dump_count(f,afu,0x2000000+(16*8*ctxt),16); +} +void dump_gbl(FILE *f, struct afu *afu) { + dump_range(f,afu,0x2010000,0x2012000); +} +void dump_afu_dbg(FILE *f, struct afu *afu){ + dump_range(f,afu,0x2040000,0x2060000); +} +void dump_fc_dbg(FILE *f, struct afu* afu, int fcp) { + dump_range(f,afu,0x2060000+(fcp*0x20000),0x2060000+(fcp*0x20000)+0x20000); +} + +void dump_perf (FILE *f, struct afu * afu) { + dump_range(f,afu,0x2018000,0x2020000); +} + +void print_version(FILE *f, struct afu *afu) +{ + uint64_t r; + afu_mmio_read_dw(afu,0x804200,&r); + if (r == -1l) { + fprintf(f, "# Version number not implemented\n"); + } + else { + char version[9]; + int i; + for (i=0; i<8; i++) { + version[7-i] = (r & 0xFFl); + r = r >> 8; + } + version[8] = 0; + fprintf(f,"# AFU Version = %s\n",version); + } +} + + +int main(int argc, char **argv) { + if (argc < 2) { + printf("usage: %s \n",argv[0]); + exit(2); + } + struct afu* afu = afu_map(argv[1]); + FILE *f = stdout; + print_version(f,afu); + int i; + for(i=0; i<512; i++) dump_ctxt(f,afu,i); + for(i=0; i<512; i++) dump_cpc(f,afu,i); + 
dump_gbl(f,afu); + dump_perf(f,afu); + dump_afu_dbg(f,afu); + dump_fc_dbg(f,afu,0); + dump_fc_dbg(f,afu,1); +} + diff --git a/src/test/ffdc/makefile b/src/test/ffdc/makefile new file mode 100644 index 00000000..27104378 --- /dev/null +++ b/src/test/ffdc/makefile @@ -0,0 +1,42 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/test/ffdc/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +UNAME=$(shell uname) + +# Where to find user code.(Relative path(from the makefile) preferred for portability) +ROOTPATH = ../../.. + +#test code != production code, so allow warnings here. +ALLOW_WARNINGS = yes + +include ${ROOTPATH}/config.mk + +#this application originates in surelock-afu/bringup/src and must be shipped for FFDC gathering +ifeq ($(UNAME),AIX) # need to have a valid ffdc mechanism for AIX... +else #Linux only +test: ${TESTDIR}/cxl_afu_dump +${TESTDIR}/cxl_afu_dump: + mkdir -p ${TESTDIR} + gcc -m64 -I . 
-I${ROOTPATH}/src/include -lpthread -o ${TESTDIR}/cxl_afu_dump -O0 afu.c capi_dev_nodes.c cxl_afu_dump2.c +endif diff --git a/src/test/framework/README.md b/src/test/framework/README.md new file mode 100644 index 00000000..8505be7d --- /dev/null +++ b/src/test/framework/README.md @@ -0,0 +1,14 @@ +# Test Framework # +This package relies on the [Google Test](https://github.com/google/googletest/) framework. + +It was last-tested with Google Test 1.7. Appropriate makefiles are included. + +To quickly get started: + +``` +$ pushd src/test/framework +$ git clone git@github.com:google/googletest.git +$ popd +``` + +Once an initial copy of google test is downloaded, "make tests" will automatically build and link appropriate objects. diff --git a/src/test/framework/gtest.objtests.mk b/src/test/framework/gtest.objtests.mk new file mode 100644 index 00000000..fdc7adc6 --- /dev/null +++ b/src/test/framework/gtest.objtests.mk @@ -0,0 +1,88 @@ +# IBM_PROLOG_BEGIN_TAG +# IBM_PROLOG_END +# A sample Makefile for building Google Test and using it in user +# tests. Please tweak it to suit your environment and project. You +# may want to move it to your project's root directory. +# +# SYNOPSIS: +# +# make [all] - makes everything. +# make TARGET - makes the given target. +# make clean - removes all files generated by make. + +# Please tweak the following variable definitions as needed by your +# project, except GTEST_HEADERS, which you can use in your own targets +# but shouldn't modify. + +# Points to the root of Google Test, relative to where this file is. +# Remember to tweak this if you move this file. +GTEST_DIR =$(ROOTPATH)/src/test/framework/googletest/googletest + +UNAME=$(shell uname) + +# Flags passed to the preprocessor. +# Set Google Test's header directory as a system directory, such that +# the compiler doesn't generate warnings in Google Test headers. 
+ifeq ($(UNAME),Linux) +CPPFLAGS += -isystem $(GTEST_DIR)/include +else +CPPFLAGS += -DOLD_ANSIC_AIX_VERSION -I $(GTEST_DIR)/include +endif + +# Flags passed to the C++ compiler. +#CXXFLAGS += -g -Wall -Wextra -pthread + + + +# All Google Test headers. Usually you shouldn't change this +# definition. +GTEST_HEADERS = $(GTEST_DIR)/include/gtest/*.h \ + $(GTEST_DIR)/include/gtest/internal/*.h + + +# Builds gtest.a and gtest_main.a. + +# Usually you shouldn't tweak such internal variables, indicated by a +# trailing _. +GTEST_SRCS_ = $(GTEST_DIR)/src/*.cc $(GTEST_DIR)/src/*.h $(GTEST_HEADERS) + +# For simplicity and to avoid depending on Google Test's +# implementation details, the dependencies specified below are +# conservative and not optimized. +GTEST_TARGETS = $(TESTDIR)/gtest-all.o \ + $(TESTDIR)/gtest_main.o \ + $(TESTDIR)/gtest.o + +ifeq ($(UNAME),AIX) +GTEST_TARGETS += $(TESTDIR)/64obj/gtest-all.o \ + $(TESTDIR)/64obj/gtest_main.o \ + $(TESTDIR)/64obj/gtest.o + +$(TESTDIR)/64obj/gtest-all.o : $(GTEST_SRCS_) + @mkdir -p ${TESTDIR}/64obj + $(CXX) -q64 $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest-all.cc -o $@ + +$(TESTDIR)/64obj/gtest_main.o : $(GTEST_SRCS_) + @mkdir -p ${TESTDIR}/64obj + $(CXX) -q64 $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest_main.cc -o $@ + +$(TESTDIR)/64obj/gtest.o : $(GTEST_SRCS_) + @mkdir -p ${TESTDIR}/64obj + $(CXX) -q64 $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest.cc -o $@ +endif + +$(TESTDIR)/gtest-all.o : $(GTEST_SRCS_) + $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest-all.cc -o $@ + + +$(TESTDIR)/gtest_main.o : $(GTEST_SRCS_) + $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest_main.cc -o $@ + +$(TESTDIR)/gtest.o : $(GTEST_SRCS_) + $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest.cc -o $@ diff --git a/src/test/makefile b/src/test/makefile new file mode 100644 index 00000000..effd550e 
--- /dev/null +++ b/src/test/makefile @@ -0,0 +1,99 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/test/makefile $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2014,2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG + +UNAME=$(shell uname) + +# Where to find user code.(Relative path(from the makefile) preferred for portability) +ROOTPATH = ../.. +USER_DIR = . +SUBDIRS = ffdc.d +TESTDIR = ${ROOTPATH}/obj/tests + +OBJS = afu.o capi_dev_nodes.o + +OBJS64 = afu.64o capi_dev_nodes.64o + +MODULE = afu +EXPFLAGS = -bexpall + +#test code != production code, so allow warnings here. 
+ALLOW_WARNINGS = yes + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ROOTPATH}/img +LIBPATHS = -L${ROOTPATH}/img +LINKLIBS = -lcflshcom -lcflsh_block -lafu -larkdb -larkalloc + + +BTESTS =transport_test blk_test pvtestauto xlate pblkread asyncstress asyncstress2 +BIN_TESTS=$(addprefix ${TESTDIR}/, ${BTESTS}) + +GTESTS = run_fvt +GTESTS_DIR = $(addprefix $(TESTDIR)/, $(GTESTS)) + +ifeq ($(UNAME),AIX) # AIX only +LINKLIBS+=-lpthreads + +BTESTS64 = $(addsuffix 64, ${BTESTS}) +BIN_TESTS64 = $(addprefix ${TESTDIR}/, ${BTESTS64}) +GTESTS64 = $(addsuffix 64, ${GTESTS}) +GTESTS64_DIR = $(addprefix $(TESTDIR)/, $(GTESTS64)) +BITS = 64 + +else #Linux only +LINKLIBS+=-lpthread -ludev +endif + +run_fvt_OFILES = \ + blk_api_tst.o fvt_block.o \ + fvt_kv_utils.o kv_utils_db.o fvt_kv_utils_async_cb.o fvt_kv_utils_sync_pth.o \ + fvt_kv_utils_ark_io.o fvt_trace.o \ + fvt_kv_tst_simple.o fvt_kv_tst_scenario.o fvt_kv_tst_sync_pth.o \ + fvt_kv_tst_async_cb.o fvt_kv_tst_errors.o fvt_kv_tst_sync_async.o + + +MODULE = afu +OBJS = afu.o capi_dev_nodes.o +OBJS64 = afu.64o capi_dev_nodes.64o + +CFLAGS += \ + -D__FVT__\ + -I$(ROOTPATH)/src/kv \ + -I$(ROOTPATH)/src/kv/test \ + -I$(ROOTPATH)/src/block \ + -I$(ROOTPATH)/src/common \ + -I$(ROOTPATH)/src/test/framework/googletest/googletest/include +CXXFLAGS+=$(CFLAGS) + +VPATH += \ + ${ROOTPATH}/src/kv \ + ${ROOTPATH}/src/kv/test \ + ${ROOTPATH}/src/block/test \ + +include ${ROOTPATH}/config.mk +include $(ROOTPATH)/src/test/framework/gtest.objtests.mk + +fvt: +# @-if [[ $(UNAME) != "AIX" ]]; then \ + $(TESTDIR)/run_fvt --gtest_output=xml:$(TESTDIR)/run_fvt.xml; diff --git a/src/test/mc.c b/src/test/mc.c new file mode 100755 index 00000000..cee61833 --- /dev/null +++ b/src/test/mc.c @@ -0,0 +1,303 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/test/mc.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include +#include +#include +#include + +/* + * Basic program to send IO to the AFU in physical LBA mode. + * Uses libafu.so so that it can be run on HW or in sim. + * Sim provides its own libafu.so. + */ + +#define USE_INTR 1 + +#ifdef SIM +#include "sim_pthread.h" +#else +#include +#endif + +#define MIN(a,b) ((a)<(b) ? (a) : (b)) + +#ifdef SIM +#define NUM_CMDS 8 +#else +#define NUM_CMDS 1024 +#endif + +#define B_DONE 0x01 +#define B_ERROR 0x02 + +// in libafu, the mapped address is to the base of the MMIO space, +// MY_OFFSET gets to the 64K space for my_ctxt. +// Offset is in 4byte words. 
+// +#define MY_OFFSET (0x4000*(my_ctxt)) +#define MY_CTRL (0x4000*512 + 0x20*(my_ctxt)) + +#ifdef TARGET_ARCH_PPC64EL +static inline void write_64(volatile __u64 *addr, __u64 val) +{ + __u64 zero = 0; + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +} + +static inline void write_32(volatile __u32 *addr, __u32 val) +{ + __u32 zero = 0; + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +} +#endif + +struct ctx { + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + int fd; +}; + +struct data_buf { + char buf[0x1000]; +} __attribute__ ((aligned (0x1000))); + + +struct hrrq { + __u64 entry[NUM_CMDS]; +}; + +sisl_iocmd_t cmd[NUM_CMDS]; +struct data_buf data[NUM_CMDS]; +struct hrrq rrq; +pthread_mutex_t mutex; +pthread_cond_t cv; +struct ctx ctx; +struct afu *p_afu; +int recv_fn_kill = 0; +__u16 my_ctxt; + +void send_cmd(sisl_iocmd_t *p_cmd); +void wait_resp(sisl_iocmd_t *p_cmd); + + +void *recv_fn(void *p_arg) { + struct cxl_event event; + + struct ctx *p_ctx = (struct ctx *) p_arg; + sisl_iocmd_t *p_cmd; + + // init + p_ctx->p_hrrq_start = &rrq.entry[0]; + p_ctx->p_hrrq_end = &rrq.entry[NUM_CMDS-1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle = 1; + p_ctx->fd = p_afu->fd; + + printf("startng receive function\n"); fflush(stdout); + while (recv_fn_kill == 0) { + // read fd here & block OR just poll +#ifdef SIM + sched_yield(); +#elif (USE_INTR) + read(p_ctx->fd, &event, sizeof(event)); + if (event.header.type ==CXL_EVENT_AFU_INTERRUPT) { + printf("recd afu_intr %d\n", event.irq.irq); fflush(stdout); + } +#else + sleep(1); +#endif + + while ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + p_cmd = (sisl_iocmd_t*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); +#ifdef SIM + printf("got response for ea=%p\n",p_cmd); fflush(stdout); +#endif + pthread_mutex_lock(&mutex); + p_cmd->sa.host_use[0] |= B_DONE; + pthread_cond_signal(&cv); + 
pthread_mutex_unlock(&mutex); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + /* advance to next RRQ entry */ + p_ctx->p_hrrq_curr++; + } + else { /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + } + } + return NULL; +} + +// usage: ./a.out num_cmds_active num_reps +// +int main(int argc, char **argv) +{ + int i; + pthread_t thread; + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int num_cmds = 1; + unsigned int num_reps = -1u; + __u64 mbox; + __u64 *p_u64; + __u32 *p_u32; + + printf("sizeof ioarcb=%zu ioasa=%zu cmd=%zu rrq=%zu start_work=%zu\n", + sizeof(sisl_ioarcb_t), + sizeof(sisl_ioasa_t), + sizeof(sisl_iocmd_t), + sizeof(rrq), + sizeof(struct cxl_ioctl_start_work)); + + fflush(stdout); + + if (argc > 1) num_cmds = MIN(NUM_CMDS, atoi(argv[1])); + if (argc > 2) num_reps = atoi(argv[2]); + + pthread_mutexattr_init(&mattr); + pthread_condattr_init(&cattr); + + pthread_mutex_init(&mutex, &mattr); + pthread_cond_init(&cv, &cattr); + + // afu_map opens Corsa adapter, attaches the process and maps the + base of the MMIO space. + if ((p_afu = afu_map()) == NULL) { + printf("Cannot open AFU\n"); + exit(1); + } + + my_ctxt = p_afu->process_element; + printf("Opened AFU. PE = %d\n",my_ctxt); fflush(stdout); + + // set up RRQ + // offset is in 4-byte words. 
 + afu_mmio_write_dw(p_afu, MY_OFFSET+10, (__u64)&rrq.entry[0]); // start_ea + afu_mmio_write_dw(p_afu, MY_OFFSET+12, (__u64)&rrq.entry[NUM_CMDS-1]); // end_ea + + afu_mmio_read_dw (p_afu, MY_CTRL+6, &mbox); // mbox_r + asm volatile ( "eieio" : : ); + afu_mmio_write_dw (p_afu, MY_CTRL+4, // ctx_cap + SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_WRITE_CMD | SISL_CTX_CAP_READ_CMD); + + + for (i = 0; i < NUM_CMDS; i++) { + cmd[i].rcb.data_ea = (__u64) &data[i]; + cmd[i].rcb.data_len = 0x1000; + cmd[i].rcb.msi = 0x2; // 0x0 for no interrupts + cmd[i].rcb.rrq = 0x0; + cmd[i].rcb.ctx_id = my_ctxt; + cmd[i].rcb.lun_id = 0x1000000000000ull; + cmd[i].rcb.port_sel = 0x3; // either FC port + cmd[i].rcb.cdb[0] = 0x88; // read(16) + + p_u64 = (__u64*)&cmd[i].rcb.cdb[2]; + p_u32 = (__u32*)&cmd[i].rcb.cdb[10]; + +#ifdef TARGET_ARCH_PPC64EL + write_64(p_u64, i*8); + write_32(p_u32, 8); +#else + *p_u64 = i*8; // LBA# + *p_u32 = 8; // blksz=512 & 8 LBAs for 4K +#endif + } + + recv_fn_kill = 0; + pthread_create(&thread, NULL, recv_fn, &ctx); + +#ifdef SIM + sched_yield(); +#endif + + while (num_reps) { + for (i = 0; i < num_cmds; i++) { + send_cmd(&cmd[i]); + } + printf("Sent %d cmds\n", num_cmds); + fflush(stdout); + + for (i = 0; i < num_cmds; i++) { + wait_resp(&cmd[i]); + printf("cmd %d: flags=0x%x, port=%d, afu_rc=0x%x, scsi_rc=0x%x, fc_rc=0x%x\n", + i, cmd[i].sa.rc.flags, cmd[i].sa.port, cmd[i].sa.rc.afu_rc, + cmd[i].sa.rc.scsi_rc, cmd[i].sa.rc.fc_rc); fflush(stdout); + } + printf("Recd %d cmds\n", num_cmds); + fflush(stdout); + num_reps--; + } + + recv_fn_kill = 1; // this may or may not terminate the recv thread + pthread_join(thread, NULL); + + // ask the afu to terminate + // afu_mmio_write_dw(p_afu,MY_OFFSET+4,0); + // + afu_unmap(p_afu); + + pthread_cond_destroy(&cv); + pthread_mutex_destroy(&mutex); + + pthread_condattr_destroy(&cattr); + pthread_mutexattr_destroy(&mattr); + + return 0; +} + + + +void send_cmd(sisl_iocmd_t *p_cmd) { + 
pthread_mutex_lock(&mutex); + p_cmd->sa.host_use[0] = 0; // 0 means active + p_cmd->sa.ioasc = 0; + + // make RCB visible to AFU before MMIO + asm volatile ( "lwsync" : : ); + + // write IOARRIN + afu_mmio_write_dw(p_afu, MY_OFFSET+8, (__u64) p_cmd); // IOARRIN + printf("Sent EA cmd 0x%p data 0x%" PRIx64 "\n", p_cmd, p_cmd->rcb.data_ea); + fflush(stdout); + + pthread_mutex_unlock(&mutex); +} + +void wait_resp(sisl_iocmd_t *p_cmd) { + pthread_mutex_lock(&mutex); + while (p_cmd->sa.host_use[0] != B_DONE) { + pthread_cond_wait(&cv, &mutex); + } + pthread_mutex_unlock(&mutex); +} + + + diff --git a/src/test/multi_process_perf b/src/test/multi_process_perf new file mode 100755 index 00000000..52638cc3 --- /dev/null +++ b/src/test/multi_process_perf @@ -0,0 +1,153 @@ +#!/bin/ksh + +if [[ $1 = "-h" ]] +then + echo "Usage: multi_process_perf [-csv] [libpath] [binpath]" + echo " ex: multi_process_perf" + echo " ex: multi_process_perf -csv" + echo " ex: multi_process_perf /.../surelock-sw/img /.../surelock-sw/obj/tests" + echo " ex: multi_process_perf -csv /.../surelock-sw/img /.../surelock-sw/obj/tests" + exit +fi + +_uname=$(uname -a|awk '{print $1}') +if [[ $(whoami) != root ]]; +then + echo "must be run as root" + exit +fi + +######## Set these DEVx vars to the correct luns for the best results ########## +######## 1 lun per FC port, across two cards ########## +DEVS=4 +if [[ $_uname = "AIX" ]] +then + DEV1=/dev/hdisk0 + DEV2=/dev/hdisk1 + DEV3=/dev/hdisk2 + DEV4=/dev/hdisk4 + _64=64 + for d in 0 1 2 3 + do + if [[ $(lsmpio -l hdisk$d|grep hdisk$d|wc -l) -ne 2 ]]; + then + echo "running to only one port for hdisk$d" + fi + done +else + # use only /dev/sg* devices + if [[ $(hostname) == cougar* ]] + then + DEV1=/dev/sg10 + DEV2=/dev/sg14 + DEV3=/dev/sg18 + DEV4=/dev/sg22 + elif [[ $(hostname) == p8tul2* ]] + then + DEV1=/dev/sg34 + DEV2=/dev/sg38 + DEV3=/dev/sg39 + DEV4=/dev/sg43 + fi + _64= +fi + +csv=0 +if [[ $1 = "-csv" ]] +then + csv=1 +fi + +if [[ ! 
-z $1 && $csv -ne 1 ]] +then + if [[ $_uname = "AIX" ]] + then + export LIBPATH=$1 + cmd="$2" + else + cmd="LD_LIBRARY_PATH=$1 $2" + fi + cmd_dir=$cmd +else + cmd_dir="/opt/ibm/capikv/test" +fi + +function ark +{ +echo "ARK Performance, using fvt_ark_perf_tool" +cnt=20 +for len in 100; do #len, 4096 131072 + for q in 1 8 16; do #QD, nasync, ops per ark + for j in 4 20; do #npool, ark threads + for ctx in 1 25 50; do #ctxts/processes (x's 4) + rm -f /tmp/out_p + c=0 + while [ $c -lt $ctx ]; do + cmd="FVT_DEV=$DEV1 $cmd_dir/fvt_ark_perf_tool$_64 -A -c 1 -j $q -n $j -k $len -v $len -s 90 >> /tmp/out_p" + eval $cmd& + cmd="FVT_DEV=$DEV2 $cmd_dir/fvt_ark_perf_tool$_64 -A -c 1 -j $q -n $j -k $len -v $len -s 90 >> /tmp/out_p" + eval $cmd& + cmd="FVT_DEV=$DEV3 $cmd_dir/fvt_ark_perf_tool$_64 -A -c 1 -j $q -n $j -k $len -v $len -s 90 >> /tmp/out_p" + eval $cmd& + cmd="FVT_DEV=$DEV4 $cmd_dir/fvt_ark_perf_tool$_64 -A -c 1 -j $q -n $j -k $len -v $len -s 90 >> /tmp/out_p" + eval $cmd& + let c=c+1 + PID=$! 
+ done + wait + iops=0 + for d in $(cat /tmp/out_p|awk -F "io/s:" '{print $2}'|grep sec|awk -F " secs" '{print $1}'); do ((iops+=$d)); done + if [[ $iops -gt 800000 ]]; then iops=800000; fi + if [[ $csv -eq 1 ]] + then + echo "2Devs,KV,$len,contexts,$(($ctx*$DEVS)),arkthPerContext,$j,queue,$q,iops,$iops" + else + printf "Processes:%-3d npool:%-2d QD:%-3d klen:%-2d vlen:%-4d IOPS:%-6d\n" $(($ctx*$DEVS)) $j $q $len $len $iops + fi + if [[ $iops -gt 700000 ]]; then break; fi + done + done + done +done +} + +function block +{ +echo "BLOCK Performance, using blockio" +for rd in 100 70; do #%reads + for q in 1 8 16; do #QD + for j in 1 25 50 #Processes + do + rm -f /tmp/out_p + c=0 + while [ $c -lt $j ]; do + cmd="$cmd_dir/blockio$_64 -d $DEV1 -q $q -r $rd -s 30 >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV2 -q $q -r $rd -s 30 >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV3 -q $q -r $rd -s 30 >> /tmp/out_p" + eval $cmd& + cmd="$cmd_dir/blockio$_64 -d $DEV4 -q $q -r $rd -s 30 >> /tmp/out_p" + eval $cmd& + let c=c+1 + PID=$! + done + wait + iops=0 + for d in $(cat /tmp/out_p|grep iops|awk '{print $10}'|awk -F : '{print $2}'); do ((iops+=$d)); done; + if [[ $iops -gt 800000 ]]; then iops=800000; fi + if [[ $csv -eq 1 ]] + then + echo "2Devs,QdPerJob,$q,JobPerDev,$j,Rd,$rd,Wt,$((100-$rd)),iops,$iops" + else + printf "Processes:%-3d QD:%-4d Rd:%-3d Wt:%-3d iops:%-6d\n" $(($j*$DEVS)) $q $rd $((100-$rd)) $iops + fi + if [[ $iops -gt 700000 ]]; then break; fi + done + done +done +} + +block +printf "\n" +ark diff --git a/src/test/pathlength_test.c b/src/test/pathlength_test.c new file mode 100644 index 00000000..52075645 --- /dev/null +++ b/src/test/pathlength_test.c @@ -0,0 +1,561 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. 
*/ +/* */ +/* $Source: src/test/pathlength_test.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + + +#define MAX_WAIT_CNT 1000 +#define MC_RHT_NMASK 16 /* in bits */ +#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs, see mclient.h */ +#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */ +#define MC_CHUNK_OFF_MASK (MC_CHUNK_SIZE - 1) /* apply to LBA get offset */ + + +#include +#include +#include +#define MMIO_WRITE_64(addr, val) write_64((addr), (val)) +#define MMIO_READ_64(addr, p_val) *(p_val) = read_64((addr)) + + + +/* + Stand alone program to test LBA translation. Does not require + master context/daemon. 
+ */ + +#define B_DONE 0x01 +#define B_ERROR 0x02 +#define NUM_RRQ_ENTRY 64 +#define NUM_CMDS 64 /* max is NUM_RRQ_ENTRY */ +#define LUN_INDEX 1 /* lun index to use, should be something other than 0 + used by mserv */ + +#define CL_SIZE 128 /* Processor cache line size */ +#define CL_SIZE_MASK 0x7F /* Cache line size mask */ +#define DATA_SEED 0xdead000000000000ull +#define CHUNK_BASE 0x100 /* chunk# 0x100, i.e. RLBA=0x1000000 */ + +struct ctx { + /* Stuff requiring alignment go first. */ + + /* Command & data for AFU commands issued by test. */ + char rbuf[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + char wbuf[NUM_CMDS][0x1000]; // 4K write data buffer (page aligned) + char rbufm[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + __u64 rrq_entry[NUM_RRQ_ENTRY]; // 128B RRQ (page aligned) + + struct afu_cmd { + sisl_ioarcb_t rcb; // IOARCB (cache line aligned) + sisl_ioasa_t sa; // IOASA follows RCB + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t)) & CL_SIZE_MASK)]; + } cmd[NUM_CMDS]; + + int cur_cmd_index; + // AFU interface + int afu_fd; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; /* Linux cxl event buffer (interrupts) */ + volatile struct sisl_host_map *p_host_map; + volatile struct sisl_ctrl_map *p_ctrl_map; + volatile struct surelock_afu_map *p_afu_map; + ctx_hndl_t ctx_hndl; + + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + + // LBA xlate + sisl_rht_entry_t rht; + sisl_lxt_entry_t lxt[NUM_CMDS]; // each cmd targets 1 chunk + +} __attribute__ ((aligned (0x1000))); + +int ctx_init(struct ctx *p_ctx, char *dev_path); +int async_read(struct ctx *p_ctx, __u64 start_lba); +int async_poll(struct ctx *p_ctx,int *status); + + + + +char *afu_path; /* points to argv[] string */ +pid_t pid; +__u64 lun_id = 0x0; +__u64 read_lba = 0x0; +__u64 chunk_base = CHUNK_BASE; +int ctx_fd; +char ctx_file[32]; +struct ctx *p_ctx; + + +void usage(char *prog) +{ + 
printf("Usage: %s [-l lun_id] [-a lba ] master_dev_path\n", prog); + printf("e. g.: %s -l 0x1000000000000 /dev/cxl/afu0.0m\n", prog); +} + +void +get_parameters(int argc, char** argv) +{ + extern int optind; /* for getopt function */ + extern char *optarg; /* for getopt function */ + int ch; + + while ((ch = getopt(argc,argv,"pl:c:h")) != EOF) { + switch (ch) { + case 'a' : /* lba to use */ + sscanf(optarg, "%lx", &read_lba); + break; + + case 'l' : /* LUN_ID to use */ + sscanf(optarg, "%lx", &lun_id); + break; + + case 'h': + usage(argv[0]); + exit(0); + + default: + usage(argv[0]); + exit(-1); + } + } + + if ((argc - optind) != 1) { /* number of afus specified in cmd line */ + usage(argv[0]); + exit(-11); + } + + afu_path = argv[optind]; +} + + + + +// dev_path must be master device +// dev_path is not used when running w/libafu - see afu.c +int ctx_init(struct ctx *p_ctx, char *dev_path) +{ + + + void *map; + __u32 proc_elem; + + int i; + __u64 reg; + + // general init, no resources allocated + memset(p_ctx, 0, sizeof(*p_ctx)); + + // open master device + p_ctx->afu_fd = open(dev_path, O_RDWR); + if (p_ctx->afu_fd < 0) { + fprintf(stderr, "open failed: device %s, errno %d", dev_path, errno); + return -1; + } + + // enable the AFU. This must be done before mmap. 
+ p_ctx->work.num_interrupts = 4; + p_ctx->work.flags = CXL_START_WORK_NUM_IRQS; + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_START_WORK, &p_ctx->work) != 0) { + fprintf(stderr, "start command failed on AFU, errno %d\n", errno); + return -1; + } + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_GET_PROCESS_ELEMENT, + &proc_elem) != 0) { + fprintf(stderr, "get_process_element failed, errno %d\n", errno); + return -1; + } + + // mmap entire MMIO space of this AFU + map = mmap(NULL, sizeof(struct surelock_afu_map), + PROT_READ|PROT_WRITE, MAP_SHARED, p_ctx->afu_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "mmap failed, errno %d\n", errno); + return -1; + } + p_ctx->p_afu_map = (volatile struct surelock_afu_map *) map; + p_ctx->ctx_hndl = proc_elem; // ctx_hndl is 16 bits in CAIA + + + // copy frequently used fields into p_ctx + p_ctx->p_host_map = &p_ctx->p_afu_map->hosts[p_ctx->ctx_hndl].host; + p_ctx->p_ctrl_map = &p_ctx->p_afu_map->ctrls[p_ctx->ctx_hndl].ctrl; + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle = 1; + + printf("p_host_map %p, ctx_hndl %d, rrq_start %p\n", + p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) { + //p_ctx->cmd[i].rcb.msi = 0x2; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } + + p_ctx->cur_cmd_index = 0; + + // set up RRQ in AFU + MMIO_WRITE_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + MMIO_WRITE_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + + // program FC_PORT LUN Tbl + MMIO_WRITE_64(&p_ctx->p_afu_map->global.fc_port[0][LUN_INDEX], lun_id); + MMIO_WRITE_64(&p_ctx->p_afu_map->global.fc_port[1][LUN_INDEX], lun_id); + + // AFU configuration + MMIO_READ_64(&p_ctx->p_afu_map->global.regs.afu_config, ®); + reg |= 0x7F00; // enable auto retry + 
MMIO_WRITE_64(&p_ctx->p_afu_map->global.regs.afu_config, reg); + + // turn off PSL page-mode translation, use in-order translation + // MMIO_WRITE_64((__u64*)&p_ctx->p_afu_map->global.page1[0x28], 0); + + // set up my own CTX_CAP to allow real mode, host translation + // tbls, allow read/write cmds + MMIO_READ_64(&p_ctx->p_ctrl_map->mbox_r, ®); + asm volatile ( "eieio" : : ); + MMIO_WRITE_64(&p_ctx->p_ctrl_map->ctx_cap, + SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_WRITE_CMD | SISL_CTX_CAP_READ_CMD); + + // set up LBA xlate + // + for (i = 0; i < NUM_CMDS; i++) { + // LUN_INDEX & select both ports, use r/w perms from RHT + p_ctx->lxt[i].rlba_base + = (((chunk_base + i) << MC_CHUNK_SHIFT) | (LUN_INDEX << 8) | 0x33); + } + p_ctx->rht.lxt_start = &p_ctx->lxt[0]; + p_ctx->rht.lxt_cnt = NUM_CMDS; + p_ctx->rht.nmask = MC_RHT_NMASK; + p_ctx->rht.fp = SISL_RHT_FP(0u, 0x3); /* format 0 & RW perms */ + + // make tables visible to AFU before MMIO + asm volatile ( "lwsync" : : ); + + // make MMIO registers for this context point to the single entry + // RHT. The RHT is under this context. 
+ MMIO_WRITE_64(&p_ctx->p_ctrl_map->rht_start, + (__u64)&p_ctx->rht); + MMIO_WRITE_64(&p_ctx->p_ctrl_map->rht_cnt_id, + SISL_RHT_CNT_ID((__u64)1, + (__u64)(p_ctx->ctx_hndl))); + return 0; +} + +void ctx_close(struct ctx *p_ctx) +{ + + munmap((void*)p_ctx->p_afu_map, sizeof(struct surelock_afu_map)); + close(p_ctx->afu_fd); + +} + +// read into rbuf using virtual LBA +inline int async_read(struct ctx *p_ctx, __u64 start_lba) { + __u64 *p_u64; + __u64 room; + + + + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.res_hndl = 0; // only 1 resource open at RHT[p_ctx->cur_cmd_index] + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.data_len = sizeof(p_ctx->rbuf[p_ctx->cur_cmd_index]); + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.req_flags = (SISL_REQ_FLAGS_RES_HNDL | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.data_ea = (__u64) &p_ctx->rbuf[p_ctx->cur_cmd_index][0]; + + /* + * + * + * READ(16) Command + * +=====-======-======-======-======-======-======-======-======+ + * | Bit| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * |Byte | | | | | | | | | + * |=====+=======================================================| + * | 0 | Operation Code (88h) | + * |-----+-------------------------------------------------------| + * | 1 | | DPO | FUA | Reserved |RelAdr| + * |-----+-------------------------------------------------------| + * | 2 | (MSB) | + * |-----+--- ---| + * | 3 | | + * |-----+--- ---| + * | 4 | | + * |-----+--- ---| + * | 5 | Logical Block Address | + * |-----+--- ---| + * | 6 | | + * |-----+--- ---| + * | 7 | | + * |-----+--- ---| + * | 8 | | + * |-----+--- ---| + * | 9 | (LSB) | + * |-----+-------------------------------------------------------| + * | 10 | (MSB) | + * |-----+--- ---| + * | 11 | | + * |-----+--- Transfer Length ---| + * | 12 | | + * |-----+--- ---| + * | 13 | (LSB) | + * |-----+-------------------------------------------------------| + * | 14 | Reserved | + * |-----+-------------------------------------------------------| + * | 15 | Control | + * 
+=============================================================+ + * + */ + memset(&p_ctx->cmd[p_ctx->cur_cmd_index].rcb.cdb[p_ctx->cur_cmd_index], 0, sizeof(p_ctx->cmd[p_ctx->cur_cmd_index].rcb.cdb)); + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.cdb[p_ctx->cur_cmd_index] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[p_ctx->cur_cmd_index].rcb.cdb[2]; + + write_64(p_u64, start_lba); // virtual LBA# + + p_ctx->cmd[p_ctx->cur_cmd_index].rcb.cdb[13] = 0x8; + + p_ctx->cmd[p_ctx->cur_cmd_index].sa.host_use[p_ctx->cur_cmd_index] = 0; // 0 means active + p_ctx->cmd[p_ctx->cur_cmd_index].sa.ioasc = 0; + + + asm volatile ( "lwsync" : : ); /* make memory updates visible to AFU */ + + + MMIO_READ_64(&p_ctx->p_host_map->cmd_room, &room); + + if (room) { + + /* + * AFU can accept this command + */ + + + // write IOARRIN + MMIO_WRITE_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[p_ctx->cur_cmd_index].rcb); + + + + if (p_ctx->cur_cmd_index < NUM_CMDS) { + p_ctx->cur_cmd_index++; /* advance to next RRQ entry */ + } + else { /* wrap */ + p_ctx->cur_cmd_index = 0;; + } + + return 0; + } + + return EBUSY; + +} + + +// check for completion of read +inline int async_poll(struct ctx *p_ctx,int *status) { + struct afu_cmd *p_cmd; + + + + if ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + + p_cmd = (struct afu_cmd*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else { /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + + *status = p_cmd->sa.ioasc; + + return TRUE; + + } + + + return FALSE; +} + +// when runing w/libafu, the master device is hard coded in afu.c to +// /dev/cxl/afu0.0m. The cmd line path is not used. 
+// +int +main(int argc, char *argv[]) +{ + int rc; + void *map; + int status; + __u64 start_build_ic = 0; + __u64 finish_build_ic = 0; + __u64 start_status_ic = 0; + __u64 finish_status_ic = 0; + + + struct sched_param sched; + int sched_policy; + + + get_parameters(argc, argv); + + pid = getpid(); // pid used to create unique data patterns + // or ctx file for mmap + + sched_policy = sched_getscheduler(0); + + if (sched_policy < 0) { + + fprintf(stderr,"getscheduler failed with errno = %d\n",errno); + exit(-1); + } + + bzero((void *)&sched,sizeof(sched)); + + + sprintf(ctx_file, "ctx.%d", pid); + unlink(ctx_file); + ctx_fd = open(ctx_file, O_RDWR|O_CREAT); + if (ctx_fd < 0) { + fprintf(stderr, "open failed: file %s, errno %d", ctx_file, errno); + exit(-1); + } + + // mmap a struct ctx + ftruncate(ctx_fd, sizeof(struct ctx)); + map = mmap(NULL, sizeof(struct ctx), + PROT_READ|PROT_WRITE, MAP_SHARED, ctx_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "mmap failed, errno %d\n", errno); + exit(-1); + } + p_ctx = (struct ctx *) map; + + printf("instantiating ctx on %s...\n", afu_path); + rc = ctx_init(p_ctx, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + exit(-1); + } + + + memset(&p_ctx->rbuf[p_ctx->cur_cmd_index][0], 0xB, sizeof(p_ctx->rbuf[p_ctx->cur_cmd_index])); + + + /* + * Get instruction count (using PMC5 register at offset 775) + * before building and queuing a command + */ + + asm volatile ( "mfspr %0, 775" : "=r"(start_build_ic) : ); + + rc = async_read(p_ctx, read_lba); + + if (rc) { + + printf("failed to issue command rc = %d\n",rc); + } + + /* + * Get instruction count (using PMC5 register at offset 775) + * after building and queuing a command + */ + asm volatile ( "mfspr %0, 775" : "=r"(finish_build_ic) : ); + + + printf("start_build_ic = 0x%lx,finish_build_ic = 0x%lx, diff = 0x%lx\n", + start_build_ic, + finish_build_ic, (finish_build_ic-start_build_ic)); + + + usleep(250); + + /* + * Get instruction 
count (using PMC5 register at offset 775) + * before processing command completion + */ + + asm volatile ( "mfspr %0, 775" : "=r"(start_status_ic) : ); + + rc = async_poll(p_ctx, &status); + + + + /* + * Get instruction count (using PMC5 register at offset 775) + * after processing command completion + */ + asm volatile ( "mfspr %0, 775" : "=r"(finish_status_ic) : ); + + + printf("start_status_ic = 0x%lx,finish_status_ic = 0x%lx, diff = 0x%lx\n", + start_status_ic, + finish_status_ic, (finish_status_ic-start_status_ic)); + + if (rc) { + + // Command completed + + + printf("command completed with status = 0x%x\n",status); + } + + + return 0; +} + diff --git a/src/test/pblkread.c b/src/test/pblkread.c new file mode 100644 index 00000000..f8a38ab1 --- /dev/null +++ b/src/test/pblkread.c @@ -0,0 +1,197 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/pblkread.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +// some things i do occasionally +//for c in $(seq 0 9 191); do sudo cpufreq-set -c $c -r -g performance; done +//echo 0 > /proc/sys/kernel/numa_balancing +//echo 1 > /sys/class/cxl/afu0.0m/device/reset +//sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img ../../obj/tests/pblkread + + + + +/* // simple i adapter */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 8 ../../obj/tests/pblkread -n 20000000 -s /dev/cxl/afu0.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 16 ../../obj/tests/pblkread -n 20000000 -s /dev/cxl/afu0.0s & */ + +/* // add the second adapter */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 88 ../../obj/tests/pblkread -n 20000000 -d /dev/cxl/afu1.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 96 ../../obj/tests/pblkread -n 20000000 -d /dev/cxl/afu1.0s & */ + +/* //next six run both adapaters */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 8 ../../obj/tests/pblkread -n 20000000 -s /dev/cxl/afu0.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 16 ../../obj/tests/pblkread -n 20000000 -s /dev/cxl/afu0.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 24 ../../obj/tests/pblkread -n 20000000 -s /dev/cxl/afu0.0s & */ + +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 88 ../../obj/tests/pblkread -n 20000000 -d /dev/cxl/afu1.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 96 ../../obj/tests/pblkread -n 20000000 -d /dev/cxl/afu1.0s & */ +/* sudo LD_LIBRARY_PATH=/home/capideveloper/capiflash/img taskset -c 104 ../../obj/tests/pblkread -n 20000000 -d /dev/cxl/afu1.0s & */ + + + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define PRI(wh, args...) fprintf(wh, ##args) +#define PRI(wh, args...) 
+ +char *dev_name = "/dev/cxl/afu0.0m"; /* name of device including dev */ +int num_ops = 1024; /* no of operations per thread */ +int num_asyncs = 128; /* no of outstanding any given at time */ +int num_threads = 16; /* no of threads */ +int num_blocks = 1000000; /* 0..num_blocks-1 lbas to be reads */ +int virt_blks = 1; +int history = 0; + +int main(int argc, char **argv) { + + int a; + int rc; + + chunk_ext_arg_t ext = 0; + + for(a=1; a 0) { + reads_retired++; + reads_inflight--; + use_buf = rtag_buf[rtag]; + buf_stack[--next_buf] = use_buf; + PRI(stdout,"result tag %d, rc %d\n",rtag, rc); + } else if (rc==0) { + result_none++; + PRI(stdout,"ELSE %d aresult rc = %d, tag = %d\n", result_none,rc, rtag); + } else { + result_err++; + PRI(stdout,"ELSE %d aresult rc = %d, tag = %d\n", result_err,rc, rtag); + } + //} + } + + printf("retries = %d, result_none = %d, result_err = %d\n", read_retry, result_none, result_err); + cblk_close(id,0); + cblk_term(NULL,0); + exit(0); +} diff --git a/src/test/pvtestauto.c b/src/test/pvtestauto.c new file mode 100644 index 00000000..8b808a0c --- /dev/null +++ b/src/test/pvtestauto.c @@ -0,0 +1,1132 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/pvtestauto.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. 
See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + + +/* + * AUTOMATED PVTEST + * + * This test program is executed from the command line with: + * + * "pvtestauto special_file,p|v,low_lba,high_lba,num_loops,s|r,d|n|r, + * num_blocks, block_size, V" + * + * The program performs a read/write/read test on the range of blocks + * passed in the paramater list. The paramaters and their meanings are: + * + * 1) spec_file: the special file name + * 2) p: physical lun + * v: virtual lun + * 3) low_lba: the lowest block number to be tested (in hex) + * 4) high_lba: the highest block number to be tested (in hex) + * 5) num_loops: the number of times to perform the sequential test, + * if 0, test will run forever till kiled. + * 6) s: perform the read/write/read's sequentially on + * the given range of blocks. + * r: perform the read/write/read's on random blocks + * within the given range of blocks. + * 7) d: (destructive) the program writes , read and compares + * data to the disk. + * r: (non-destructive) the data on the disk will read only + * + * w: (destructive) Write only test... + * + * 8) num_blocks the number of blocks to be read/written per + * read/write (in decimal). (current default 1) + * + * 9) block_size This is only used when using blocks sizes other + * Specify the block size in bytes.(current default 4096 + * + * 10) num_threads This is the number of threads to run. + * + * 11) V Verbose + * S print I/O statistics. + * + * 12) F Failover tests (for AIX only). 
+ * + * + */ + + +#include "pvtestauto.h" + +extern int errno; +int version_no = 1; +pthread_t pv_thread[MAX_NUM_THREADS]; +pv_thread_data_t pv_thread_data; +int verbose = 0; +int statflg = 0; +int flags = 0; +int failover = 0; +pv_thread_data_t pv_data; +#define BUFSIZE 4096 +pthread_mutex_t completion_lock; + +void dump_iostats(chunk_stats_t *stats, int *ii, int th); + + + +/*-------------------------------------------------------------------------- + * + * NAME: usage error + * + * FUNCTION: Displays usage + * + * + * (NOTES:) + * + * + * (DATA STRUCTURES:) + * + * + * INPUTS: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * (RECOVERY OPERATION:) + * + * RETURNS: + *---------------------------------------------------------------------------- + */ + +void usage(void) +{ + printf( "\n pvtestauto Version - %d \n\n",version_no); + printf( "\nUsage:\n"); + printf( "\nFormat of command:\npvtestauto spec_file p|v low_lba high_lba #loops s|r d|r|w #blocks block_size threads [V|S] "); +#ifdef _AIX + printf("[F]\n"); +#else + printf("\n"); +#endif + printf( "\n1 spec_file: the special file name"); + printf( "\n2 p: Physical lun"); + printf( "\n v: Virtual lun"); + printf( "\n3 low_lba: the lowest block number to be tested (in hex)"); + printf( "\n4 high_lba: the highest block number to be tested (in hex)"); + printf( "\n5 num_loops: the number of times to perform the tests,"); + printf( "\n if 0, test will run forever till killed."); + printf( "\n6 s: perform the read/write/read's sequentially on"); + printf( "\n the given range of blocks."); + printf( "\n r: perform the read/write/read's on random blocks"); + printf( "\n within the given range of blocks."); + printf( "\n7 d: (destructive) the program changes the data read"); + printf( "\n before it is written back to the disk."); + printf( "\n r: (non-destructive) the data on the disk will not"); + printf( "\n be changed.Read Only test."); + printf( "\n w: (destructive) the data 
on the disk will be"); + printf( "\n changed. Write Only test."); + printf( "\n8 num_blocks the number of blocks to be read/written per"); + printf( "\n read/write (in decimal).\n"); + printf( "\n9 block_size Specify the block size for read/write in bytes(in decimal).\n"); + printf( "\n for Surelock default is 4096.\n"); + printf( "\n10 num_threads number of threads\n"); + + printf( "\n11 V: Verbose \n"); + printf( "\n S: print io statistics\n"); + printf( "\n"); +#ifdef _AIX + printf( "\n12 F: Failover \n"); + printf( "\n"); +#endif + printf( "\nFor Example:\n"); + printf( "\n To stress I/O on /dev/cxl/afu0.0m"); + printf( "\n virtual lun, lba 0x10 thru 0x1000, sequential lba, destructive I/O"); + printf( "\n num_blocks 1, blk size 4096, threads 10 and loop 100 times.\n"); + printf( "\n Execute following command:"); + printf( "\n./pvtestauto /dev/cxl/afu0.0m v 10 1000 100 s d 1 4096 10\n"); + return; +} +/*-------------------------------------------------------------------------- + * + * NAME: gethexnum + * + * FUNCTION: + * + * + * (NOTES:) + * + * + * (DATA STRUCTURES:) + * + * + * INPUTS: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * (RECOVERY OPERATION:) + * + * RETURNS: + *---------------------------------------------------------------------------- + */ +long long gethexnum(char *strg) +{ + long long r = 0; + char ic; + + ic = *strg++; + while (ic != '\0') { + if ((ic < '0' || ic > '9') && (ic < 'a' || ic > 'f')) + return(-1); + else { + r *= 16; + r += (ic >= '0' && ic <= '9') ? 
(ic - '0') : (ic - 'a' + 10); + } + ic = *(strg++); + } + return(r); +} + +/*-------------------------------------------------------------------------- + * + * NAME: getdecnum + * + * FUNCTION: + * + * + * (NOTES:) + * + * + * (DATA STRUCTURES:) + * + * + * INPUTS: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * (RECOVERY OPERATION:) + * + * RETURNS: + *---------------------------------------------------------------------------- + */ +int getdecnum(char *strg) +{ + int r = 0; + char ic; + + ic = *strg++; + while (ic != '\0') { + if (ic < '0' || ic > '9') + return(-1); + else { + r *= 10; + r += ic - '0'; + } + ic = *(strg++); + } + return(r); +} + + + + + +/*-------------------------------------------------------------------------- + * + * NAME: parse_inputs + * + * FUNCTION: Initializes the dev + * + * + * (NOTES:) + * + * + * (DATA STRUCTURES:) + * + * + * INPUTS: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * (RECOVERY OPERATION:) + * + * RETURNS: + *---------------------------------------------------------------------------- + */ + +int parse_inputs(int argc, char **argv) +{ + int error = FALSE; + show_errors = TRUE; + bzero (&pv_data, sizeof(pv_thread_data_t)); + + if (strlen(argv[1]) > (PATH_MAX -1) ) { + fprintf(stderr, "Path too long %s\n",argv[1]); + return (TRUE); + } + strcpy(dev_name,argv[1]); + + if (strcmp("v",argv[2]) == 0) + virtual_flag = TRUE; + else if (strcmp("p",argv[2]) == 0) + virtual_flag = FALSE; + else { + fprintf(stderr,"lun must be v|p, It is %s\n",argv[2]); + return (TRUE); + } + + low_lba = gethexnum(argv[3]); + pv_data.low_lba = low_lba; + if (low_lba == -1){ + fprintf(stderr,"Invalid low lba 0x%llx\n",low_lba); + return (TRUE); + } + high_lba = gethexnum(argv[4]); + pv_data.high_lba = high_lba; + if (high_lba == -1){ + fprintf(stderr,"Invalid high lba 0x%llx\n",high_lba); + return (TRUE); + } + if (high_lba < low_lba) { + fprintf(stderr,"Invalid 
high lba 0x%llx, must be greater than low\n",high_lba); + return (TRUE); + } + + num_loops = getdecnum(argv[5]); + if (num_loops == -1){ + fprintf(stderr,"Invalid num_loops %d\n",num_loops); + return (TRUE); + } else if (!num_loops) { + loop_forever = 1; + } + + if (strcmp("s",argv[6]) == 0) + seq_or_rand = 1; + else if (strcmp("r",argv[6]) == 0) + seq_or_rand = 0; + else { + fprintf(stderr,"Invalid seq_or_rand arg must be s | r \n"); + return (TRUE); + } + + if (strcmp("d",argv[7]) == 0) + destr = 1; + else if (strcmp("w",argv[7]) == 0) + destr = 2; + else if (strcmp("r",argv[7]) == 0) + destr = 3; + else { + fprintf(stderr,"Invalid destr, read or write arg must be d | r | w \n"); + return (TRUE); + } + /* Number of blocks per read/write is default 1 for Surelock */ + num_blocks = getdecnum(argv[8]); + if (virtual_flag && (num_blocks != 1)) { + fprintf(stderr,"\nInvalid Number of blocks = %d, should be 1\n", + num_blocks); + fprintf(stderr,"Setting to Default num_blocks = 1\n"); + num_blocks = 1; + } else { + /* physical lun can support upto 16M (4096*4096) */ + if (num_blocks > 4096) { + fprintf(stderr,"\nInvalid Number of blocks = %d, should _not_ be greater than 512\n",num_blocks); + fprintf(stderr,"Setting to xfersize num_blocks = 512 blocks\n"); + num_blocks = 4096; + } + } + + blk_size = getdecnum(argv[9]); + /* TODO for now donot accept blocksize other than 4096 */ + if (blk_size != BLK_SIZE) { + printf("\nInvalid block size = %d, should be 4096\n",blk_size); + fprintf(stderr,"Setting to Default blocks size to 4096\n"); + blk_size = BLK_SIZE; + } + num_threads = getdecnum(argv[10]); + if (num_threads < 0) { + fprintf(stderr, "Thread count must be specify, Setting to default 0\n"); + thread_flag = 0; + } + + if (num_threads > 0) { + if ((pv_data.high_lba - pv_data.low_lba) < num_threads) { + fprintf(stderr, "Invalid lba range, It needs to be larger than number of threads\n"); + fprintf(stderr, "Either increase the lba range or decrease the number of 
threads\n"); + return(TRUE); + } + + if (num_threads > MAX_NUM_THREADS) { + num_threads = MAX_NUM_THREADS; + printf("Number of threads exceed maximum allowed, setting to default max %d\n",num_threads); + } + thread_flag = 1; + } + + verbose = 0; + if (argc > 11) { + if (strcmp("V",argv[11]) == 0) + verbose = 1; + else if (strcmp("S",argv[11]) == 0) + statflg = 1; + } +#ifdef _AIX + if (argc > 12) { + if ((strcmp("F",argv[12]) == 0) && + !virtual_flag) { + failover = 1; + } else { + failover = 0; + printf("\nfailover disabled , supported only in physical mode\n"); + } + } +#endif + + return (error); + + +} + +/*----------------------------------------------------------------------- + * + * NAME: Main + * + * FUNCTION: + * + * + * (NOTES:) + * + * + * (DATA STRUCTURES:) + * + * + * INPUTS: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * (RECOVERY OPERATION:) + * + * RETURNS: + *------------------------------------------------------------ + */ + +int main (int argc, char* argv[]) +{ + int error = FALSE; + int rc = 0; + loop_forever = 0; + + rc = cblk_init(NULL,0); + + if (rc) { + + fprintf(stderr,"cblk_init failed with rc = %d, and errno = %d\n", + rc,errno); + return (1); + } + + if (verbose) + printf( "\nWelcome to pvtestauto \n"); + if ((argc < 11) || (argc > 13)) { + fprintf(stderr,"num args less than req\n"); + error = TRUE; + } else { + rc = parse_inputs(argc,argv); + if (!rc) { + if (verbose) + fprintf(stderr,"Initializing %s, lba 0x%llx - 0x%llx, threads = %d, loops %d\n", dev_name, low_lba, high_lba, num_threads, num_loops); + if (dev_init() == 0) { + rc = run_pv_test(&rc,&error); + if (rc) { + fprintf(stderr, "\n\nTest Failed on Loop = %d\n\n", + num_loops); + } else { + if (verbose) + fprintf(stderr, "\n\nTest Completed. 
Loop Count = %d\n\n",num_loops); + } + if (verbose) + fprintf(stderr,"Calling cblk_close ...\n"); + rc = cblk_close(pv_data.chunk_id,0); + if (rc) + fprintf(stderr,"Close of %s failed with errno = %d\n",dev_name,errno); + } + } else { + error = TRUE; + } + } + if (error ) { + usage(); + cblk_term(NULL,0); + return (1); + } + + cblk_term(NULL,0); + return (rc); +} + + +int dev_init() +{ + int rc = 0; + int ret_code = 0; + size_t lba_range = 0; + size_t chunk_sz = 0; + int flags = 0; + int open_mode = O_RDWR; + chunk_ext_arg_t ext = 0; + size_t lun_sz = 0; + + lba_range = high_lba+1; + if (virtual_flag) + flags = CBLK_OPN_VIRT_LUN; + else + flags =0; +#ifdef _AIX + if (failover) + flags |= CBLK_OPN_MPIO_FO ; +#endif + + if ((pv_data.chunk_id = cblk_open(dev_name,0,open_mode,ext,flags)) + == NULL_CHUNK_ID) { + fprintf(stderr,"Open of %s failed with errno = %d\n",dev_name,errno); + errno_process (errno); + /* + * Save off error code + */ + ret_code = pv_data.chunk_id; + pv_data.chunk_id = 0; + } else { + /* get size of the lun associated with this chunk */ + rc = cblk_get_lun_size(pv_data.chunk_id, &lun_sz, 0); + if (!rc) { + if (lun_sz < lba_range) { + rc = -1; + fprintf(stderr,"requested block size is more than lun size %zu\n",lun_sz); + return(-1); + } + } + if (flags & CBLK_OPN_VIRT_LUN) { + rc =cblk_set_size(pv_data.chunk_id,(size_t)lba_range, 0); + if (verbose) + fprintf(stderr,"\nSet chunk size rc = %d, errno= %d \n",rc, errno); + if (!rc) { + /* Verify it was set correctly */ + rc = cblk_get_size (pv_data.chunk_id,&chunk_sz, 0); + if (verbose) + fprintf(stderr,"\nGet chunk size rc = %d, errno = %d, size (blocks) = %zu\n",rc, errno, chunk_sz); + if (!rc) { + if (chunk_sz != lba_range){ + fprintf(stderr," failed to set correct size expected %zu, received %zu\n", lba_range,chunk_sz); + } + } + } + } + } + if (ret_code || rc) { + fprintf(stderr,"Open %s... 
Failed\n",dev_name); + errno_process(); + + /* + * Save off error code + */ + ret_code = pv_data.chunk_id; + + } else { + /* + * If open was successful then + * increment open count. + */ + open_cnt++; + if (verbose) + fprintf(stderr,"Open of %s successful\n",dev_name); + } + return(ret_code || rc); +} + +/* + * ---------------------------------------------------------------------------- + * NAME: errno_process + * + * FUNCTION: Parse error and print out its define from errno.h + * + * + * + * CALLED BY: + * + * NOTES: This routine outputs to stdout instead of stderr, because + * when using stderr it causes a crash (memory leak) + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: + * + * ---------------------------------------------------------------------------- + */ +void errno_process(int err) +{ + switch (err) { + case EACCES: + fprintf(stderr, "EACCES"); + break; + case EBUSY: + fprintf(stderr, "EBUSY"); + break; + case EFAULT: + fprintf(stderr, "EFAULT"); + break; + case EINVAL: + fprintf(stderr, "EINVAL"); + break; + case EIO: + fprintf(stderr, "EIO"); + break; + case ENOMEM: + fprintf(stderr, "ENOMEM"); + break; + case ENODEV: + fprintf(stderr, "ENODEV"); + break; + case ENXIO: + fprintf(stderr, "ENXIO"); + break; + case EPERM: + fprintf(stderr, "EPERM"); + break; + case ETIMEDOUT: + fprintf(stderr, "ETIMEDOUT"); + break; + case EBADF: + fprintf(stderr, "EBADF"); + break; + case EAGAIN: + fprintf(stderr, "EAGAIN"); + break; + case EDEADLK: + fprintf(stderr, "EDEADLK"); + break; + case EINTR: + fprintf(stderr, "EINTR"); + break; + case EMFILE: + fprintf(stderr, "EMFILE"); + break; + case ECHILD: + fprintf(stderr, "ECHILD"); + break; + case ESTALE: + fprintf(stderr, "ESTALE"); + break; + case EINPROGRESS: + fprintf(stderr, "EINPROGRESS"); + break; + default: + fprintf(stderr, "Unknown 0x%x",err); + } + fprintf(stderr, "\n"); + + return; +} + + + +void *run_pvthread_loop(void *data) +{ + int rc = 0; + 
int rtag,wtag; + int i; + pv_thread_data_t *pvt_data = data; + uint32_t t = 0; + void *data_buf = NULL; + void *comp_data_buf = NULL; + uint64_t status; + int cmd_type; + int fd; + int arflag = 0; + uint64_t lba; + uint64_t t_low_lba; + uint64_t t_high_lba; + uint64_t t_lbasz; + int t_loops = 0; + void *ret_code = NULL; + chunk_stats_t stats; + int stat_title=0; + + errno = 0; + + pthread_mutex_lock(&completion_lock); + + t = thread_count++; + + /* + * Each thread is using a different + * block number range. + */ + + t_low_lba = pvt_data->t_data[t].t_low_lba; + t_lbasz = pvt_data->t_lbasz; + t_high_lba = t_low_lba + t_lbasz; + lba = t_low_lba; + t_loops = num_loops; + + + pthread_mutex_unlock(&completion_lock); + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&data_buf,4096,BUFSIZE*num_blocks)) { + + perror("posix_memalign failed for data buffer"); + + pvt_data->t_data[t].ret = 0; + pvt_data->t_data[t].errcode = errno; + return(ret_code); + } + + if ( posix_memalign((void *)&comp_data_buf,4096,BUFSIZE*num_blocks)) { + + perror("posix_memalign failed for data buffer"); + pvt_data->t_data[t].ret= 0; + pvt_data->t_data[t].errcode = errno; + free(data_buf); + return(ret_code); + + } + + for (i =0; (loop_forever || (ichunk_id,data_buf,lba,num_blocks,&rtag,NULL,0); + + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + printf("Async write Failed, errno = %d\n", errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + + arflag = 0; + while (TRUE) { + + rc = cblk_aresult(pvt_data->chunk_id,&rtag, &status,arflag); + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + fprintf(stderr,"Async result write Failed, errno = %d\n",errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + if (rc > 0) { + if (verbose) + fprintf(stderr,"Async result write completed tag =%d\n",rtag); + break; + } + + } /* while */ + + break; + + case PV_AREAD_ONLY: + 
+ rc = cblk_aread(pvt_data->chunk_id,data_buf,lba,num_blocks,&rtag,NULL,0); + + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + printf("Async read Failed, errno = %d\n", errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + + arflag = 0; + while (TRUE) { + + rc = cblk_aresult(pvt_data->chunk_id,&rtag, &status,arflag); + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + fprintf(stderr,"Async result read Failed, errno = %d\n",errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + if (rc > 0) { + if (verbose) + fprintf(stderr,"Async result read completed tag =%d\n",rtag); + break; + } + + } /* while */ + + + break; + + case PV_RW_COMP: + + /* + * Perform write then read comparision test + */ + + fd = open ("/dev/urandom", O_RDONLY); + read (fd, comp_data_buf, BUFSIZE); + close (fd); + + rc = cblk_write(pvt_data->chunk_id,comp_data_buf,lba,num_blocks,0); + + if (rc != num_blocks) { + pvt_data->t_data[t].ret = rc; + pvt_data->t_data[t].errcode = errno; + free(comp_data_buf); + free(data_buf); + fprintf(stderr,"Write failed rc = %d, errno = %d\n",rc, errno); + return(ret_code); + } + if (verbose) { + if (thread_flag) + fprintf(stderr,"Thread %d ",t); + fprintf(stderr,"Write complete at lba 0x%lx\n",lba); + } + rc = cblk_read(pvt_data->chunk_id,data_buf,lba,num_blocks,0); + + if (rc != num_blocks) { + + pvt_data->t_data[t].ret = rc; + pvt_data->t_data[t].errcode = errno; + free(comp_data_buf); + free(data_buf); + fprintf(stderr,"Read failed rc = %d, errno = %d\n",rc,errno); + return(ret_code); + } + if (verbose) { + if (thread_flag) + fprintf(stderr,"Thread %d ",t); + fprintf(stderr,"Read complete at lba 0x%lx\n",lba); + } + + rc = memcmp(data_buf,comp_data_buf,BUFSIZE*num_blocks); + + if (rc) { + pvt_data->t_data[t].ret = rc; + pvt_data->t_data[t].errcode = errno; + pthread_mutex_lock(&completion_lock); + if (thread_flag) + fprintf(stderr,"Thread %d ",t); + 
fprintf(stderr,"Miscompare at lba 0x%lx\n",lba); + fprintf(stderr,"Written data:\n"); + dumppage(data_buf,BUFSIZE); + fprintf(stderr,"**************************************************\n\n"); + fprintf(stderr,"read data:\n"); + dumppage(comp_data_buf,BUFSIZE); + fprintf(stderr,"**************************************************\n\n"); + pthread_mutex_unlock(&completion_lock); + + rc = cblk_read(pvt_data->chunk_id,data_buf,lba,1,0); + if (rc == num_blocks) { + pthread_mutex_lock(&completion_lock); + fprintf(stderr,"Dump of re-read\n"); + dumppage(data_buf,BUFSIZE); + pthread_mutex_unlock(&completion_lock); + } + + } else { + if (verbose) { + if(thread_flag) + fprintf(stderr,"Thread %d",t); + fprintf(stderr,"Compare ok at lba 0x%lx\n",lba); + } + } + break; + case PV_RW_AWAR: + + /* + * Perform write then read comparision test + */ + + fd = open ("/dev/urandom", O_RDONLY); + read (fd, comp_data_buf, BUFSIZE); + close (fd); + + rc = cblk_awrite(pvt_data->chunk_id,comp_data_buf,lba,num_blocks,&wtag,NULL,0); + + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + fprintf(stderr,"Async write Failed, errno = %d\n",errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + arflag = 0; + while (TRUE) { + rc = cblk_aresult(pvt_data->chunk_id,&wtag, &status,arflag); + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + fprintf(stderr,"Async result write Failed, errno = %d\n",errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + if (rc > 0) { + if (verbose) + fprintf(stderr,"Async result write completed tag=%d \n",wtag); + break; + } + + } /* while */ + + + rc = cblk_aread(pvt_data->chunk_id,data_buf,lba,num_blocks,&rtag,NULL,0); + + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + printf("Async read Failed, errno = %d\n", errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + + arflag = 0; + + while (TRUE) { + + rc = 
cblk_aresult(pvt_data->chunk_id,&rtag, &status,arflag); + if (rc < 0) { + pvt_data->t_data[t].ret= rc; + pvt_data->t_data[t].errcode = errno; + fprintf(stderr,"Async result read Failed, errno = %d\n",errno); + free(comp_data_buf); + free(data_buf); + return(ret_code); + } + if (rc > 0) { + if (verbose) + fprintf(stderr,"Async result read completed tag =%d\n",rtag); + break; + } + + } /* while */ + + + + rc = memcmp(data_buf,comp_data_buf,BUFSIZE*num_blocks); + + if (rc) { + pvt_data->t_data[t].ret = rc; + pvt_data->t_data[t].errcode = errno; + pthread_mutex_lock(&completion_lock); + if (thread_flag) + fprintf(stderr,"Thread %d ",t); + fprintf(stderr,"Miscompare at lba 0x%lx\n",lba); + fprintf(stderr,"**************************************************\n\n"); + fprintf(stderr,"async write data:\n"); + dumppage(data_buf,BUFSIZE); + fprintf(stderr,"**************************************************\n\n"); + fprintf(stderr,"async read data:\n"); + dumppage(comp_data_buf,BUFSIZE); + fprintf(stderr,"**************************************************\n\n"); + pthread_mutex_unlock(&completion_lock); + + rc = cblk_read(pvt_data->chunk_id,data_buf,lba,num_blocks,0); + + if (rc == num_blocks) { + fprintf(stderr,"Dump of re-read\n"); + dumppage(data_buf,BUFSIZE); + } + + } else { + if(verbose) { + if (thread_flag) + fprintf(stderr,"Thread %d ",t); + fprintf(stderr,"Async compare ok at lba 0x%lx\n",lba); + } + } + break; + default: + fprintf(stderr,"Invalid cmd_type = %d\n",cmd_type); + i = t_loops; + } /* switch */ + + if (seq_or_rand) { + lba += 1; + } else { + lba = rand() % (t_high_lba - t_low_lba + 1) + t_low_lba; + } + if (statflg) { + pthread_mutex_lock(&completion_lock); + rc = cblk_get_stats (pvt_data->chunk_id, &stats, 0); + dump_iostats( &stats,&stat_title, t); + pthread_mutex_unlock(&completion_lock); + } + + if(verbose) + fprintf(stderr,"\nTesting block no 0x%lx\n",lba); + + } + if(thread_flag && verbose) + fprintf(stderr,"Thread %d ",t); + if (verbose) + 
fprintf(stderr,"Loop Completed %d\n",i); + + } + + + free(data_buf); + free(comp_data_buf); + return(ret_code); +} + +void dump_iostats(chunk_stats_t *stats, int *stat_title, int th) +{ + if ((*stat_title == 0) && (th == 0)){ + fprintf(stderr,"\nchunk_statistics:\n"); + fprintf(stderr,"*****************\n"); + fprintf(stderr,"num_reads num_writes num_areads num_awrites num_threads\n"); + *stat_title = 1; + } + fprintf(stderr,"%8lx %8lx %8lx %8lx %8x\r", + stats->num_reads,stats->num_writes,stats->num_areads, + stats->num_awrites, th); +} + +int run_pv_test(int *ret, int *err) +{ + + + int rc = 0; /* Return code */ + int i= 0; + void *status; + + pv_data.size = 64; + + if (virtual_flag) + flags = CBLK_OPN_VIRT_LUN; + + num_opens = 1; // FIXME JAY + + + pv_data.t_lbasz = (pv_data.high_lba - pv_data.low_lba)/num_threads; + + if (num_threads >= 1) { + + /* + * Create all threads here + */ + + for (i=0; i< num_threads; i++) { + + pv_data.t_data[i].t_low_lba = pv_data.low_lba + (pv_data.t_lbasz)*i ; + + if (verbose) + fprintf(stderr,"Setting low_lba for thread %d, 0x%llx\n",i,pv_data.t_data[i].t_low_lba); + rc = pthread_create(&pv_thread[i],NULL,run_pvthread_loop,(void *)&pv_data); + if (rc) { + + fprintf(stderr, "pthread_create failed for %d rc 0x%x, errno = 0x%x\n", + i, rc,errno); + *ret = -1; + *err = errno; + return (rc); + } + if (verbose) + fprintf(stderr, "pthread %d, started\n", i); + } + + + /* + * Wait for all threads to complete + */ + + + errno = 0; + + for (i=0; i< num_threads; i++) { + + rc = pthread_join(pv_thread[i],&status); + + if (rc) { + fprintf(stderr,"Thread %d returned fail ret %d, errno = %d\n",i, + rc,errno); + } + if (verbose) + fprintf(stderr, "pthread %d, exited\n", i); + } + fprintf(stderr,"\n\n"); + return (rc); + } else { + + if(verbose) + fprintf (stderr,"starting w/o thread \n"); + pv_data.t_data[i].t_low_lba = pv_data.low_lba ; + pv_data.t_lbasz = (pv_data.high_lba - pv_data.low_lba); + run_pvthread_loop((void *)&pv_data); + if 
(pv_data.t_data[i].ret) + fprintf(stderr,"Test failed ret %d, errno = %d\n",pv_data.t_data[i].ret, pv_data.t_data[i].errcode); + + fprintf(stderr,"\n\n"); + return (pv_data.t_data[i].ret); + } +} diff --git a/src/test/pvtestauto.h b/src/test/pvtestauto.h new file mode 100644 index 00000000..a41edbf7 --- /dev/null +++ b/src/test/pvtestauto.h @@ -0,0 +1,212 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/pvtestauto.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef _H_PVTESTAUTO_OBJ +#define _H_PVTESTAUTO_OBJ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#ifdef _OS_INTERNAL +#include +#else +#include +#endif +#include +#include +#include + + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + + + +#define SEQUENTIAL 0x1 +#define RANDOM 0x2 +#define MAX_NUM_BLOCKS 64 +#define NORM_READ 1 /* normal reads */ +#define EXT_READ 2 /* extended reads */ +#define NORM_WRITE 1 /* normal writes */ +#define EXT_WRITE 2 /* extended writes */ +#define BLK_SIZE 4096 /* Number of bytes in one block */ +#define DESTRUCTIVE 0x01 /* Data on device can be modified */ +#define NON_DESTRUCTIVE 0x02 /* Data can't be modified on device. */ +#define READ_ONLY 0x03 /* Data can only be read. */ +#define NUM_IDS 30 /* Number of opens allowed */ +#define MAX_NUM_THREADS 4096 +#define PV_RW_AWAR 1 +#define PV_RW_COMP 2 +#define PV_AREAD_ONLY 3 +#define PV_AWRITE_ONLY 4 + + + + + +long long low_lba; /* lowest lba to use */ +long long high_lba; /* highest lba to use */ +long long lba; /* current lba */ +int num_blocks; /* number of blocks to use in a operation */ +int dev_block_size; /* device block size in bytes */ +int num_loops; /* number of loops to make */ +int loop_forever; /* if 1 test will run for ever till killed. 
*/ +uint8_t destr; /* Destructive is 1 , Write only 2, Read only 3 */ +uint8_t seq_or_rand; /* Sequential is 1, random is 2 */ +int blk_size; /* Devices block size */ +int verbose; /* if 1 print I/O progress */ +int virtual_flag; /* if set open virtual luns, otherwise open phys */ +int thread_flag; /* multi-thread test */ +int num_threads; /* no of threads */ +uint32_t thread_count; /* thread count */ +uint64_t block_number; /* lba no */ +int num_opens; /* open counts */ + + +/* + * Basic fields need for devices + */ +int open_cnt; /* # of times opened w o close*/ +char dev_name [PATH_MAX]; /* name of device including dev */ + +/* + * Buffers for I/O interface to an application + */ +char *wbuf; /* buffer for data to write */ +char *rbuf; /* buffer for data to read */ +char *tbuf; /* buffer for temporary data */ + +/* + * Buffers for I/O interface to an application + */ +int show_errors; /* if true error are displayed */ + +typedef struct th_data { + long long t_low_lba; /* lowest lba to use */ + long long t_high_lba; /* highest lba to use */ + int ret; + int errcode; +} th_data_t; + + +typedef struct pv_thread_data { + chunk_id_t chunk_id; + int flags; + size_t size; + int num_loops; + long long low_lba; /* lowest lba to use */ + long long high_lba; /* highest lba to use */ + uint64_t block_number; /* lba no */ + long long t_lbasz; /* range size to test */ + th_data_t t_data[MAX_NUM_THREADS]; +} pv_thread_data_t; + + +extern char *pv_verbosity; +extern char *dev_path; +extern chunk_id_t chunks[]; +extern void *pv_data_buf; +extern void *pv_comp_data_buf; +extern void *pv_temp_buf; + + + +#define DEBUG_0(A) \ + do \ + { \ + if (verbose) \ + { \ + fprintf(stderr,A); \ + fflush(stderr); \ + } \ + } while (0) + +#define DEBUG_1(A,B) \ + do \ + { \ + if (verbose) \ + {fprintf(stderr,A,B);fflush(stderr);} \ + } while (0) + +#define DEBUG_2(A,B,C) \ + do \ + { \ + if (verbose) \ + {fprintf(stderr,A,B,C);fflush(stderr);} \ + } while (0) + +#define DEBUG_3(A,B,C,D) \ + do 
\ + { \ + if (verbose) \ + {fprintf(stderr,A,B,C,D);fflush(stderr);} \ + } while (0) + +#define DEBUG_4(A,B,C,D,E) \ + do \ + { \ + if (verbose) \ + {fprintf(stderr,A,B,C,D,E);fflush(stderr);} \ + } while (0) + +/* ------------------- +* Function prototypes +* ------------------- +*/ + +void errno_process(); +void errno_value(); +void errno_parse(int err); +int detect_mismatch_r_t(int); +int open_dev(int,int); +int close_dev(); +int dev_init(); +int dev_open(); +int run(); +int read_dev(int); +int write_dev(int); + +int rd_wrt_rd(uint64_t, uint64_t,uint64_t, int,uint8_t ); +int seqtest(uint64_t, int, uint64_t, uint64_t, int, uint8_t ); +int rndtest(uint64_t, int, uint64_t, uint64_t, int, uint8_t ); +int set_sz(chunk_id_t chunk_id, size_t size, int flags); +int get_phys_lun_sz(chunk_id_t chunk_id, size_t *size, int flags); +int get_sz(chunk_id_t chunk_id, size_t *size, int flags); +int run_pv_test(int *ret, int *err); + + + +#endif diff --git a/src/test/run_FM_tests.sh b/src/test/run_FM_tests.sh new file mode 100755 index 00000000..5be44b19 --- /dev/null +++ b/src/test/run_FM_tests.sh @@ -0,0 +1,29 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/test/run_FM_tests.sh $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# +# IBM_PROLOG_END_TAG +# Following command allows running block api tests without +# CAPI hardware installed. +# Run block API fvt tests in filemode + +run_block_fvt --gtest_filter=*_FM_* diff --git a/src/test/run_KMC_tests.sh b/src/test/run_KMC_tests.sh new file mode 100755 index 00000000..9fa79d86 --- /dev/null +++ b/src/test/run_KMC_tests.sh @@ -0,0 +1,28 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/test/run_KMC_tests.sh $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +# Runs block api tests on GA2 hardware that supports +# Kernel MC mode. + +run_block_fvt --gtest_filter=* diff --git a/src/test/run_UMC_tests.sh b/src/test/run_UMC_tests.sh new file mode 100755 index 00000000..a8040882 --- /dev/null +++ b/src/test/run_UMC_tests.sh @@ -0,0 +1,28 @@ +# IBM_PROLOG_BEGIN_TAG +# This is an automatically generated prolog. +# +# $Source: src/test/run_UMC_tests.sh $ +# +# IBM Data Engine for NoSQL - Power Systems Edition User Library Project +# +# Contributors Listed Below - COPYRIGHT 2015 +# [+] International Business Machines Corp. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# IBM_PROLOG_END_TAG +# Run block fvt tests that are supported in User MC mode +# Tests are run on GA1 hardware with User MC enabled. + +run_block_fvt --gtest_filter=*_UMC_* diff --git a/src/test/run_fvt.C b/src/test/run_fvt.C new file mode 100644 index 00000000..d3bbd7b7 --- /dev/null +++ b/src/test/run_fvt.C @@ -0,0 +1,27 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/run_fvt.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. 
*/ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +char *env_FVT_DEV = getenv("FVT_DEV"); diff --git a/src/test/transport_test.c b/src/test/transport_test.c new file mode 100644 index 00000000..dce6dc41 --- /dev/null +++ b/src/test/transport_test.c @@ -0,0 +1,1265 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/transport_test.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +#include +#include +#include +#include +#include +#include +#ifndef _MACOSX +#include +#endif /* !_MACOS */ +#include +#include +#include +#include +#include +#if !defined(_AIX) && !defined(_MACOSX) +#include +#endif +#include +#include +#ifdef _USE_LIB_AFU +#include +#endif /* _USE_LIB_AFU */ +#ifdef SIM +#include "sim_pthread.h" +#else +#include +#endif + +#include "transport_test.h" + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + + +#define MIN(a,b) ((a)<(b) ? (a) : (b) + +#ifdef _USE_LIB_AFU +struct afu *p_afu = NULL; +#endif /* _USE_LIB_AFU */ + +#define DATA_BUF_SIZE 4096 + +/* + * Global variables declared in this module... 
+ */ + +static int bflag; /* indicates the b flag was passed on command + * line which indicates a block number was + * specified. + */ +static int cflag; /* indicates the c flag was passed on command + * line which indicates a cmd_type was + * specified. + */ +static int fflag; /* indicates the f flag was passed on command + * line which indicates a filename is + * specified + */ + +static int hflag; /* help flag */ +static int iflag; /* indicates the i flag was passed on command + * line which indicates the lun id + * is specified + */ +static int lflag; /* indicates the l flag was passed on command + * line which indicates adapter/device name + * is specified + */ +static int nflag; /* indicates the n flag was passed on command + * line which indicates the number of loops + * or times to issue requests + */ + +static int verbose_flag; /* verbose mode flag */ + +static char *device_name = NULL; /* point to the arg for -l flag */ +static char *filename = NULL; /* point to the arg for -f flag */ +static int num_loops = 1; +static int port_num = 0x3; +static uint64_t lun_id = 0x0; +static uint64_t block_number = 0; +FILE *file = NULL; + + + +transp_scsi_cmd_t cmd_type = TRANSP_READ_CMD; // Default to SCSI Read 16 + +uint64_t *rrq = NULL; + +uint64_t *rrq_current = NULL; + +int rrq_len = 64; +int contxt_handle = 0; +uint64_t toggle = 1; /* Toggle bit for RRQ */ + +inline void INC_RRQ() +{ + + rrq_current++; + + + + if (rrq_current > &rrq[rrq_len -1]) + { + + rrq_current = rrq; + + toggle ^= SISL_RESP_HANDLE_T_BIT; + + } + + + + return; +} + +/* ---------------------------------------------------------------------------- + * + * NAME: valid_endianess + * + * FUNCTION: Determines the Endianess of the host that + * the binary is running on. 
+ * + * + * + * CALLED BY: + * + * + * INTERNAL PROCEDURES CALLED: + * + * + * + * EXTERNAL PROCEDURES CALLED: + * + * + * + * RETURNS: 1 Host endianess matches compile flags + * 0 Host endianess is invalid based on compile flags + * + * ---------------------------------------------------------------------------- + */ +int valid_endianess(void) +{ + int rc = FALSE; + short test_endian = 0x0102; + char *ptr; + char byte; + + ptr = (char *) &test_endian; + + byte = ptr[0]; + + if (byte == 0x02) { + + /* + * In a Little Endian host, the first indexed + * byte will be 0x2 + */ +#ifdef CFLASH_LITTLE_ENDIAN_HOST + rc = TRUE; +#else + rc = FALSE; +#endif /* !CFLASH_LITTLE_ENDIAN_HOST */ + + + } else { + + /* + * In a Big Endian host, the first indexed + * byte will be 0x1 + */ + +#ifdef CFLASH_LITTLE_ENDIAN_HOST + rc = FALSE; +#else + rc = TRUE; +#endif /* !CFLASH_LITTLE_ENDIAN_HOST */ + + + } + + + + return rc; +} + +/* + * NAME: open_and_setup + * + * FUNCTION: Opens a CAPI adapter, attaches to the AFU + * and mmaps the AFU MMIO space for it use. + * + * + * INPUTS: + * name - adapter name. + * + * RETURNS: + * 0 for good completion, ERRNO on error + * + */ + +int open_and_setup(const char *name, int *fd, void **mmio) +{ + int rc = 0; /* Return code */ + char adap_path[PATH_MAX]; /* Adapter special filename */ +#ifndef _USE_LIB_AFU + struct cxl_ioctl_start_work start_work; +#endif + + + /* + * Align RRQ on cacheline boundary. 
+ */ + + if ( posix_memalign((void *)&rrq,128,(sizeof(*rrq) * rrq_len))) { + + perror("posix_memalign failed for rrq"); + + return (errno); + + } + + bzero(adap_path,PATH_MAX); + + bzero((void *) rrq,(sizeof(*rrq) * rrq_len)); + + rrq_current = rrq; + + sprintf(adap_path,"/dev/%s",name); + +#ifdef _USE_LIB_AFU + + if (verbose_flag) { + fprintf(stderr,"Calling afu_map ...\n"); + } + if ((p_afu = afu_map()) == NULL) { + fprintf(stderr,"Cannot open AFU\n"); + exit(1); + } + + + if (verbose_flag) { + fprintf(stderr,"Calling afu_start ...\n"); + } + afu_start(p_afu); + + // set up RRQ + // these funcs have syncs in them. offset is in 4-byte words. + // assume problem space starts with the "Host transport MMIO regs" + // in SISLite p 7. + afu_mmio_write_dw(p_afu, 10, (uint64_t)rrq); // START_EA + afu_mmio_write_dw(p_afu, 12, (sizeof(*rrq) * rrq_len)); // RRQ LEN + +#else + + /* + * Don't use libafu + */ + + if (verbose_flag) { + fprintf(stderr,"Opening adapter ...\n"); + } + + *fd = open(adap_path,O_RDWR); /* ??TODO Try without O_CLOEXEC */ + + if (*fd < 0) { + + perror("open_and_setup: Unable to open device"); + + return (errno); + } + + if (verbose_flag) { + fprintf(stderr,"*fd = 0x%x\n",*fd); + } + + bzero(&start_work,sizeof(start_work)); + + start_work.flags = CXL_START_WORK_NUM_IRQS; + start_work.num_interrupts = 4; + + if (verbose_flag) { + fprintf(stderr,"Issuing CXL_IOCTL_START_WORK ioctl ...\n"); + } + + rc = ioctl(*fd,CXL_IOCTL_START_WORK,&start_work); + + + + if (rc) { + + + perror("open_and_setup: Unable to attach"); + + close(*fd); + + return (errno); + + } + + rc = ioctl(*fd,CXL_IOCTL_GET_PROCESS_ELEMENT,&contxt_handle); + + + + if (rc) { + + + perror("open_and_setup: Unable to get process element"); + + close(*fd); + + return (errno); + + } + + if (verbose_flag) { + + fprintf(stderr,"context_handle = 0x%x\n",contxt_handle); + fprintf(stderr,"mmap MMIO space ...\n"); + } + + *mmio = mmap(NULL,CAPI_FLASH_REG_SIZE,PROT_READ|PROT_WRITE, MAP_SHARED,*fd,0); + + 
if (*mmio == MAP_FAILED) { + perror ("mmap of mmio space failed"); + + close(*fd); + return (errno); + } + + + if (verbose_flag) { + + fprintf(stderr,"mmio = 0x%lx\n",(uint64_t)(*mmio)); + + } + /* + * Set up response queue + */ + + + out_mmio64 (*mmio + CAPI_RRQ0_START_EA_OFFSET, (uint64_t)rrq); + + out_mmio64 (*mmio + CAPI_RRQ0_END_EA_OFFSET, (uint64_t)&rrq[(rrq_len-1)]); + + +#endif /* !_USE_LIB_AFU */ + + + return (rc); +} + + +/* + * NAME: close_and_cleanup + * + * FUNCTION: Cleans up an adapter and closes + * + * + * INPUTS: + * name - adapter name. + * + * RETURNS: + * NONE + * + */ + +void close_and_cleanup(int fd, void *mmio) +{ + + + + if (verbose_flag) { + fprintf(stderr,"munmap MMIO space ...\n"); + } +#ifdef _USE_LIB_AFU + + afu_unmap(p_afu); +#else + if (munmap(mmio,CAPI_FLASH_REG_SIZE)) { + + + perror ("munmap of MMIO space failed"); + + /* + * Don't return here on error. Continue + * to close + */ + } + + + close (fd); + +#endif /* !_USE_LIB_AFU */ + + free(rrq); + + rrq = NULL; + return; +} + + +/* + * NAME: build_ioarcb + * + * FUNCTION: Build and queue one IOARCB + * + * + * INPUTS: + * name - adapter name. + * + * RETURNS: + * NONE + * + */ + +int build_ioarcb(capi_ioarcb_t *ioarcb, int block_number, void *data_buf) +{ + int rc = 0; + int bytes_read = 0; + + bzero(ioarcb,sizeof(*ioarcb)); + + + ioarcb->timeout = CAPI_SCSI_IO_TIME_OUT; + + ioarcb->lun_id = lun_id; + /* + * Allow either FC port to be used. + */ + ioarcb->port_sel = port_num; + + + ioarcb->ctx_id = contxt_handle & 0xffff; + + + + ioarcb->data_ea = (uint64_t)data_buf; + + ioarcb->msi = 2; + + + // TODO ?? Need to add data buffers + + switch (cmd_type) { + case TRANSP_READ_CMD: + /* + * Fill in CDB. Probably this should be + * a separate routine that just builds + * CDBs. For now it is inline. 
+ */ + + if (verbose_flag) { + + fprintf(stderr,"Building Read 16\n"); + } +#ifdef _OLD_CODE + ioarcb->cdb.scsi_op_code = SCSI_READ_16; +#else + ioarcb->cdb[0] = SCSI_READ_16; +#endif + + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + ioarcb->data_len = DATA_BUF_SIZE; + + CFLASH_BUILD_RW_16((scsi_cdb_t *)&(ioarcb->cdb), + block_number,8); + break; + + case TRANSP_WRITE_CMD: + /* + * Fill in CDB. Probably this should be + * a separate routine that just builds + * CDBs. For now it is inline. + */ + + if (verbose_flag) { + + fprintf(stderr,"Building Write 16\n"); + } + +#ifdef _OLD_CODE + ioarcb->cdb.scsi_op_code = SCSI_WRITE_16; +#else + ioarcb->cdb[0] = SCSI_WRITE_16; +#endif + + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_WRITE; + ioarcb->data_len = DATA_BUF_SIZE; + + if (file) { + /* + * If an input file was specified, + * then read the first DATA_BUF_SIZE bytes + * in to write out to the device. + */ + + bytes_read = fread(data_buf, 1, DATA_BUF_SIZE, file); + + if (bytes_read != DATA_BUF_SIZE) { + + fprintf(stderr,"Unable able to read full size of %d, read instead %d\n",DATA_BUF_SIZE,bytes_read); + + /* + * Do not fail, just continue with questionable buffer contents + */ + } + + + } else { + /* + * If no input file is specified then + * put a pattern in the buffer to + * be written + */ + memset((uint8_t *)(data_buf), ((getpid())%256), + ioarcb->data_len); + } + + CFLASH_BUILD_RW_16((scsi_cdb_t *)&(ioarcb->cdb), + block_number,8); + break; + + case TRANSP_STD_INQ_CMD: + if (verbose_flag) { + fprintf(stderr,"Building Standard Inquiry\n"); + } + ioarcb->data_len = 255; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_inquiry((scsi_cdb_t *)&(ioarcb->cdb),-1,255); + break; + + case TRANSP_PG83_INQ_CMD: + + if (verbose_flag) { + fprintf(stderr,"Building Inquiry for page 0x83\n"); + } + ioarcb->data_len = 255; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | 
SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_inquiry((scsi_cdb_t *)&(ioarcb->cdb),0x83,255); + break; + + case TRANSP_TUR_CMD: + if (verbose_flag) { + fprintf(stderr,"Building Test Unit Ready\n"); + } + rc = cflash_build_scsi_tur((scsi_cdb_t *)&(ioarcb->cdb)); + break; + case TRANSP_RD_CAP_CMD: + + if (verbose_flag) { + fprintf(stderr,"Building Read Capicity 16\n"); + } + ioarcb->data_len = sizeof(struct readcap16_data); + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_read_cap16((scsi_cdb_t *)&(ioarcb->cdb),sizeof(struct readcap16_data)); + break; + case TRANSP_RPT_LUNS_CMD: + + if (verbose_flag) { + fprintf(stderr,"Building Report luns\n"); + } + + ioarcb->data_len = 256; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_report_luns((scsi_cdb_t *)&(ioarcb->cdb),256); + break; + case TRANSP_MSENSE_CMD: + + if (verbose_flag) { + fprintf(stderr,"Building Mode Sense 10\n"); + } + + ioarcb->data_len = 256; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_mode_sense_10((scsi_cdb_t *)&(ioarcb->cdb),256,0); + break; + + case TRANSP_MSELECT_CMD: + + if (verbose_flag) { + fprintf(stderr,"Building Mode Select 10\n"); + } + + ioarcb->data_len = 256; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_WRITE; + rc = cflash_build_scsi_mode_select_10((scsi_cdb_t *)&(ioarcb->cdb),256,0); + break; + + case TRANSP_REQSNS_CMD: + if (verbose_flag) { + fprintf(stderr,"Building Request Sense\n"); + } + ioarcb->data_len = 255; + ioarcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_HOST_READ; + rc = cflash_build_scsi_request_sense((scsi_cdb_t *)&(ioarcb->cdb),255); + break; + + default: + + fprintf(stderr,"Invalid cmd_type specified cmd_type = %d\n",cmd_type); + rc = EINVAL; + + } + + + if (verbose_flag) { + fprintf(stderr,"Hex dump of ioarcb\n"); + hexdump(ioarcb,sizeof(*ioarcb),NULL); + } + 
return rc; +} + +/* + * NAME: issue_ioarcb + * + * FUNCTION: Issues one IOARCB and waits for completion + * + * + * INPUTS: + * name - adapter name. + * + * RETURNS: + * NONE + * + */ + +int issue_ioarcb(int fd, void *mmio,int block_number) +{ +#define LUN_LIST_SIZE 512 + int rc = 0; + int poll_ret; + void *data_buf = NULL; + capi_ioarcb_t *ioarcb = NULL; + struct pollfd poll_list = { fd, POLLIN, 0}; + char vendor_id[8]; + char product_id[8]; + char wwid[256]; + int read_rc = 0; + struct cxl_event cxl_event; + int num_actual_luns = 0; + uint64_t *actual_lun_list; + int i; + uint32_t block_size; + uint64_t last_lba; + struct readcap16_data *readcap16_data = NULL; + +#ifndef _OLD_CODE + sisl_ioasa_t *ioasa; + struct request_sense_data *sense_data; + sisl_iocmd_t *cmd; +#endif + + /* + * Align IOARCB on cacheline boundary. + */ + +#ifdef _OLD_CODE + if ( posix_memalign((void *)&ioarcb,128,sizeof(*ioarcb))) { +#else + if ( posix_memalign((void *)&cmd,128,sizeof(sisl_iocmd_t))) { +#endif + + perror("posix_memalign failed for ioarcb"); + + return (errno); + + } +#ifndef _OLD_CODE + ioarcb = &(cmd->rcb); +#endif /* !_OLD_CODE */ + + if (verbose_flag) { + fprintf(stderr,"sizeof *ioarcb = 0x%x\n",(int)sizeof(*ioarcb)); + fprintf(stderr,"ioarcb = 0x%p\n", ioarcb); +#ifndef _MACOSX + + fprintf(stderr,"offset of ctx_id = 0x%x\n",(int)offsetof(capi_ioarcb_t,ctx_id)); + fprintf(stderr,"offset of req_flags = 0x%x\n",(int)offsetof(capi_ioarcb_t,req_flags)); + fprintf(stderr,"offset of lun_id = 0x%x\n",(int)offsetof(capi_ioarcb_t,lun_id)); + fprintf(stderr,"offset of data_len = 0x%x\n",(int)offsetof(capi_ioarcb_t,data_len)); + fprintf(stderr,"offset of data_ea = 0x%x\n",(int)offsetof(capi_ioarcb_t,data_ea)); + fprintf(stderr,"offset of msi = 0x%x\n",(int)offsetof(capi_ioarcb_t,msi)); +#ifdef _OLD_CODE + fprintf(stderr,"offset of rrq_num = 0x%x\n",(int)offsetof(capi_ioarcb_t,rrq_num)); + fprintf(stderr,"offset of cdb = 0x%x\n",(int)offsetof(capi_ioarcb_t,cdb)); + 
fprintf(stderr,"offset of cdb[15] = 0x%x\n",(int)offsetof(capi_ioarcb_t,cdb.scsi_bytes[14])); +#else + fprintf(stderr,"offset of rrq_num = 0x%x\n",(int)offsetof(capi_ioarcb_t,rrq)); + fprintf(stderr,"offset of cdb = 0x%x\n",(int)offsetof(capi_ioarcb_t,cdb)); + fprintf(stderr,"offset of cdb[15] = 0x%x\n",(int)offsetof(capi_ioarcb_t,cdb[15])); +#endif + fprintf(stderr,"offset of timeout = 0x%x\n",(int)offsetof(capi_ioarcb_t,timeout)); +#endif /* !_MACOSX */ + } + + + /* + * Align data buffer on page boundary. + */ + if ( posix_memalign((void *)&data_buf,4096,DATA_BUF_SIZE)) { + + perror("posix_memalign failed for data buffer"); + + free(ioarcb); + return (errno); + + } + + rc = build_ioarcb(ioarcb,block_number,data_buf); + + if (rc) { + + fprintf(stderr,"Failed to build IOARCB with rc = %d\n",rc); + free(ioarcb); + free(data_buf); + return (rc); + } + + + // Issue MMIO to IOARRIN + + if (verbose_flag) { + fprintf(stderr,"MMIO to IOARRIN\n"); + } +#ifdef _USE_LIB_AFU + afu_mmio_write_dw(p_afu, 8, (uint64_t)ioarcb); +#else + out_mmio64 (mmio + CAPI_IOARRIN_OFFSET, (uint64_t)ioarcb); + + fprintf(stderr,"mmio = 0x%lx\n", (uint64_t)(mmio)); + fprintf(stderr,"mmio + CAPI_IOARRIN_OFFSET = 0x%lx\n", (uint64_t)(mmio + CAPI_IOARRIN_OFFSET)); + +#endif /* !_USE_LIB_AFU */ + + + /* + * Wait for completion. + * + * NOTE: The last argument to poll is the time-out value in milliseconds. 
+ * So convert our time-out value to milliseconds and then double it + */ + + if (verbose_flag) { + fprintf(stderr,"polling on fd = 0x%x...\n",fd); + } + + poll_ret = poll(&poll_list,1, (2 *(CAPI_SCSI_IO_TIME_OUT * 1000))); + + if (poll_ret == 0) { + + /* + * We timed-out + */ + fprintf(stderr,"Poll timed out waiting on interrupt.\n"); + + rc = -1; + +#ifdef _REMOVE + fprintf(stderr,"Reading anyway ..\n"); + read_rc = read(fd,&cxl_event,sizeof(struct cxl_event)); + + if (read_rc != sizeof(struct cxl_event)) { + + fprintf(stderr,"read event failed, with rc = %d errno = %d\n",rc, errno); + free(ioarcb); + free(data_buf); + return (read_rc); + } +#endif + + } else if (poll_ret < 0) { + + + /* + * Poll failed, Give up + */ + fprintf(stderr,"Poll failed.\n"); + + rc = -1; + + + + } else { + + /* + * We received interrupt + */ + + + fprintf(stderr,"Poll received interrupt.\n"); + + read_rc = read(fd,&cxl_event,sizeof(struct cxl_event)); + + if (read_rc != sizeof(struct cxl_event)) { + + fprintf(stderr,"read event failed, with rc = %d errno = %d\n",rc, errno); + free(ioarcb); + free(data_buf); + return (read_rc); + } + + fprintf(stderr,"capi event type = %d\n",cxl_event.header.type); + + if (cxl_event.header.type != CXL_EVENT_AFU_INTERRUPT) { + + + fprintf(stderr,"capi event != CXL_EVENT_AFU_INTERRUPT type = %d, proess_element = 0x%x\n",cxl_event.header.type,cxl_event.header.process_element); + + + if (cxl_event.header.type == CXL_EVENT_DATA_STORAGE) { + struct cxl_event_data_storage *capi_ds = + (struct cxl_event_data_storage*)&cxl_event; + + fprintf(stderr,"CXL_EVENT_DATA_STORAGE: addr = 0x%"PRIx64"\n", + capi_ds->addr); + fprintf(stderr,"ioarcb = 0x%"PRIx64", data_buf = 0x%"PRIx64"\n", + (uint64_t) ioarcb, (uint64_t)data_buf); + + + } + free(ioarcb); + free(data_buf); + return (read_rc); + + } + +#ifndef _OLD_CODE + cmd = (sisl_iocmd_t *) ioarcb; + ioasa = &(cmd->sa); + sense_data = (struct request_sense_data*)ioasa->sense_data; +#endif + + if (((*rrq_current) & 
(SISL_RESP_HANDLE_T_BIT)) == toggle) { + + +#ifdef _OLD_CODE + fprintf(stderr,"\n IOARCB completed with iosa = 0x%x, flags = 0x%x\n",ioarcb->ioasc,ioarcb->ioasa_flags); +#else + fprintf(stderr,"\n IOARCB completed with iosa = 0x%x, resid = 0x%x\n",ioasa->ioasc,ioasa->resid); + fprintf(stderr,"\n sense data: sense_key = 0x%x, asc = 0x%x, ascq = 0x%x", + sense_data->sense_key,sense_data->add_sense_key, sense_data->add_sense_qualifier); +#endif + + + + } else { + + fprintf(stderr,"\n toggle bit does not match on rrq\n"); + + } + + + + + if (verbose_flag) { + fprintf(stderr,"Returned data hex dump\n"); + hexdump(data_buf,DATA_BUF_SIZE,NULL); + } + + switch (cmd_type) { + case TRANSP_STD_INQ_CMD: + + rc = cflash_process_scsi_inquiry(data_buf,255,vendor_id,product_id); + + printf("inq_data: vendor_id = %s, product_id = %s\n",vendor_id,product_id); + break; + + case TRANSP_PG83_INQ_CMD: + cflash_process_scsi_inquiry_dev_id_page(data_buf,255,wwid); + printf("wwid = %s\n",wwid); + break; + + case TRANSP_RD_CAP_CMD: + + readcap16_data = (struct readcap16_data *)data_buf; + if (cflash_process_scsi_read_cap16(readcap16_data,&block_size,&last_lba) == 0) { + printf("last lba = 0x%" PRIx64 ", block length = 0x%x\n", + last_lba, block_size); + } + break; + + case TRANSP_RPT_LUNS_CMD: + + rc = cflash_process_scsi_report_luns(data_buf,DATA_BUF_SIZE,(uint64_t **)&actual_lun_list,&num_actual_luns); + + + printf("Report Luns data: number of luns returned = %d\n",num_actual_luns); + + if (num_actual_luns > LUN_LIST_SIZE) { + num_actual_luns = LUN_LIST_SIZE; + } + + for (i=0; i < num_actual_luns; i++) { + printf("%d: lun_id = %lx\n",i,actual_lun_list[i]); + + } + + break; + default: + break; + } /* switch */ + INC_RRQ(); + } + + free(ioarcb); + free(data_buf); + return rc; +} + + +/* + * NAME: Usage + * + * FUNCTION: print usage message and returns + * + * + * INPUTS: + * argc -- INPUT; the argc parm passed into main(). + * argv -- INPUT; the argv parm passed into main(). 
+ *
+ * RETURNS:
+ *          0: Success
+ *         -1: Error
+ *
+ */
+static void
+usage(void)
+{
+
+    fprintf(stderr,"\n");
+
+    fprintf(stderr,"Usage: transport_test -l device [-n num_loops] [-c cmd_type ] [-f filename ] [-i lun_id] [-b block_number] [-p port_num] [-h]\n\n");
+    fprintf(stderr," where:\n");
+    fprintf(stderr,"   -b block number (default is 0 if b flag is not specified) \n");
+    fprintf(stderr,"   -c cmd_type which is a number\n");
+    fprintf(stderr,"      defined as following:\n");
+    fprintf(stderr,"      1 - SCSI Read 16 (default if c flag not specified)\n");
+    fprintf(stderr,"      2 - SCSI Write 16 \n");
+    fprintf(stderr,"      3 - SCSI Inquiry (std) \n");
+    fprintf(stderr,"      4 - SCSI Inquiry (0x83)\n");
+    fprintf(stderr,"      5 - SCSI Read Capacity 16 \n");
+    fprintf(stderr,"      6 - SCSI Report Luns\n");
+    fprintf(stderr,"      7 - SCSI Test Unit Ready \n");
+    fprintf(stderr,"      8 - SCSI Mode Sense 10 \n");
+    fprintf(stderr,"      9 - SCSI Mode Select 10 \n");
+    fprintf(stderr,"      10 - SCSI Request Sense\n");
+    fprintf(stderr,"   -f filename\n");
+    fprintf(stderr,"   -h help (this usage)\n");
+    fprintf(stderr,"   -i lun_id in hex (default is 0 if i flag is not specified) \n");
+    fprintf(stderr,"   -l logical device name\n");
+    fprintf(stderr,"   -n Number of loops to run ioctl\n");
+    fprintf(stderr,"   -p Port number mask (0x3 both ports)\n");
+    fprintf(stderr,"   -v verbose mode\n");
+
+
+    return;
+}
+
+/*
+ * NAME:        parse_args
+ *
+ * FUNCTION:    The parse_args() routine parses the command line arguments.
+ *              The arguments are read, validated, and stored in global
+ *              variables.
+ *
+ *
+ * INPUTS:
+ *              argc -- INPUT; the argc parm passed into main().
+ *              argv -- INPUT; the argv parm passed into main().
+ *
+ * RETURNS:
+ *          0: Success
+ *         -1: Error
+ *
+ */
+
+static int
+parse_args(int argc, char **argv)
+{
+    extern int optind;
+    extern char *optarg;
+    int rc,c;
+    int len,number;
+
+
+    rc = len = c = number =0;
+
+    /* Init flags...
*/
+    bflag = FALSE;
+    cflag = FALSE;
+    fflag = FALSE;
+    hflag = FALSE;
+    iflag = FALSE;
+    lflag = FALSE;
+    nflag = FALSE;
+    verbose_flag = FALSE;
+    num_loops = 1;
+    port_num = 0x3;
+
+
+
+    /*
+     * Get parameters
+     */
+    while ((c = getopt(argc,argv,"b:c:f:i:l:n:p:hv")) != EOF)
+    {
+        switch (c)
+        {
+        case 'b' :
+            if (optarg) {
+
+                block_number = strtoul(optarg,NULL,16);
+                /* bug fix: was setting iflag (the -i flag) here */
+                bflag = TRUE;
+            } else {
+
+
+                fprintf(stderr,"-b flag requires a block number be supplied\n");
+
+            }
+            break;
+        case 'c' :
+            if (optarg) {
+
+                cmd_type = atoi(optarg);
+
+                if ((cmd_type < TRANSP_READ_CMD) ||
+                    (cmd_type > TRANSP_LAST_CMD)) {
+                    /* fixed typo: "cmd_tyupe" -> "cmd_type" */
+                    fprintf(stderr,"Invalid cmd_type for -c flag\n");
+
+                    usage();
+                    rc = -1;
+                } else {
+                    cflag = TRUE;
+                }
+            } else {
+
+
+                fprintf(stderr,"-c flag requires a value to be supplied\n");
+
+            }
+            break;
+        case 'f' :
+            filename = optarg;
+            if (filename) {
+                fflag = TRUE;
+            } else {
+
+                fprintf(stderr,"-f flag requires a filename \n");
+
+            }
+            break;
+
+        case 'h' :
+            hflag = TRUE;
+            break;
+        case 'i' :
+            if (optarg) {
+
+                lun_id = strtoul(optarg,NULL,16);
+                iflag = TRUE;
+            } else {
+
+
+                fprintf(stderr,"-i flag requires a lun id be supplied\n");
+
+            }
+            break;
+        case 'l' :
+            device_name = optarg;
+            if (device_name) {
+                lflag = TRUE;
+            } else {
+
+                fprintf(stderr,"-l flag requires a logical name \n");
+
+            }
+            break;
+        case 'n' :
+            if (optarg) {
+
+                num_loops = atoi(optarg);
+                nflag = TRUE;
+            } else {
+
+
+                fprintf(stderr,"-n flag requires a number of loops value to be supplied\n");
+
+            }
+            break;
+        case 'p' :
+            if (optarg) {
+
+                port_num = atoi(optarg);
+                nflag = TRUE;
+            } else {
+
+
+                fprintf(stderr,"-p flag requires port mask value to be supplied\n");
+
+            }
+            break;
+        case 'v' :
+            verbose_flag = TRUE;
+            break;
+        default:
+            usage();
+            break;
+        }/*switch*/
+    }/*while*/
+
+
+    if (!lflag) {
+        fprintf(stderr,"The -l flag is required to specify a device name\n");
+        usage();
+        rc = EINVAL;
+    }
+
+
+    return (rc);
+
+
+}/*parse_args*/
+
+
+/*
+ * NAME:        main
+ *
+ *
FUNCTION: Simple test program to validate transport
+ *           interfaces to a CAPI Flash AFU.
+ *
+ *
+ * INPUTS:
+ *          TBD
+ *
+ * RETURNS:
+ *          0 for good completion,  ERRNO on error
+ *
+ */
+
+int main (int argc, char **argv)
+{
+    int rc;             /* Return code          */
+    int fd;             /* File descriptor for adapter */
+    void *mmio;         /* MMIO space that is mmapped */
+    int i;
+    uint64_t file_len = 0;
+
+
+    if (!valid_endianess()) {
+        /* fixed garbled wording ("endianess then the host is is running") */
+        fprintf(stderr,"This program is compiled for a different endianness than the host is running\n");
+
+    }
+
+    /* parse the input args & handle syntax request quickly */
+    rc = parse_args( argc, argv );
+    if (rc)
+    {
+        /* bug fix: propagate the parse failure instead of returning 0,
+         * matching the documented contract "0 for good completion, ERRNO on error" */
+        return (rc);
+    }
+
+    if ( hflag)
+    {
+        usage();
+        return (0);
+    }
+
+    if (fflag) {
+
+        if (cmd_type == TRANSP_WRITE_CMD) {
+            /*
+             * If we are doing a write and
+             * filename is specified then this
+             * file will be input file for data
+             * we will write to the device.
+             */
+
+            /* Open file */
+            file = fopen(filename, "rb");
+
+            if (!file) {
+                fprintf(stderr,"Failed to open filename = %s\n",filename);
+
+                return errno;
+            }
+
+            /* Get file length */
+
+            fseek(file, 0, SEEK_END);
+            file_len=ftell(file);
+            fseek(file, 0, SEEK_SET);
+
+            if (file_len < DATA_BUF_SIZE) {
+
+                fprintf(stderr,"Input file must be at least 4096 bytes in size\n");
+
+                fclose(file);
+                return 0;
+            }
+
+
+
+        } else {
+
+            fflag = FALSE;
+        }
+    }
+
+    rc = open_and_setup(device_name,&fd,&mmio);
+
+    if (rc) {
+
+        fprintf(stderr,"Open and setup failed rc = %d\n",rc);
+        if (fflag) {
+            fclose(file);
+        }
+        return rc;
+    }
+
+
+
+    for (i=0;i< num_loops;i++) {
+        rc = issue_ioarcb(fd,mmio,block_number);
+
+        if (rc) {
+            break;
+        }
+    }
+
+    close_and_cleanup(fd,mmio);
+    if (fflag) {
+        fclose(file);
+    }
+    /* bug fix: report the last issue_ioarcb failure instead of always 0 */
+    return rc;
+}
diff --git a/src/test/transport_test.h b/src/test/transport_test.h
new file mode 100644
index 00000000..7865faa6
--- /dev/null
+++ b/src/test/transport_test.h
@@ -0,0 +1,108 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog.
*/ +/* */ +/* $Source: src/test/transport_test.h $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +//#define _OLD_CODE 1 +#include +#include +#include +#include +#ifndef _OLD_CODE +#include +#endif /* _OLD_CODE */ + +#define CAPI_FLASH_REG_SIZE 0x2000000 + +#define CAPI_SCSI_IO_TIME_OUT 5 + + +typedef +enum { + TRANSP_READ_CMD = 0x1, /* SCSI Read 16 Command */ + TRANSP_WRITE_CMD = 0x2, /* SCSI Write 16 Command */ + TRANSP_STD_INQ_CMD = 0x3, /* SCSI Inquiry standard data */ + TRANSP_PG83_INQ_CMD = 0x4, /* SCSI Inquiry page 0x83 data */ + TRANSP_RD_CAP_CMD = 0x5, /* SCSI Read Capacity 16 command */ + TRANSP_RPT_LUNS_CMD = 0x6, /* SCSI Report Luns command */ + TRANSP_TUR_CMD = 0x7, /* SCSI Test Unit Ready command */ + TRANSP_MSENSE_CMD = 0x8, /* SCSI Mode Sense 10 command */ + TRANSP_MSELECT_CMD = 0x9, /* SCSI Mode Select 10 command */ + TRANSP_REQSNS_CMD = 0xa, /* SCSI Request Sense command */ + TRANSP_LAST_CMD = 0xb, /* Not valid command */ +} transp_scsi_cmd_t; + +/*----------------------- SIS lite header stuff ----------------------------*/ + + +#ifdef _OLD_CODE +/************************************************************************/ +/* CAPI Flash SIS LITE Register offsets */ 
+/************************************************************************/ + +#define CAPI_IOARRIN_OFFSET 0x20 /* Offset of IOARRIN register */ +#define CAPI_RRQ0_START_EA_OFFSET 0x28 /* Offset of RRQ # 0 start EA register */ +#define CAPI_RRQ0_END_EA_OFFSET 0x30 /* Offset of RRQ # 0 Last EA register */ + +#define SISL_RESP_HANDLE_T_BIT 0x1ull /* Toggle bit */ + +#ifdef _MACOSX +typedef unsigned short ushort; +#endif /* MACOSX */ + +typedef struct capi_ioarcb_s { + ushort ctx_id; /* Context ID from context handle. */ + ushort req_flags; /* Request flags */ +#define SISL_REQ_FLAGS_RES_HNDL 0x8000u +#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000u +#define SISL_REQ_FLAGS_SUPRESS_ULEN 0x0002u +#define SISL_REQ_FLAGS_HOST_WRITE 0x0001u +#define SISL_REQ_FLAGS_HOST_READ 0x0000u + + uint32_t hndl_fc; /* Resouorce handle for fc_port */ + uint64_t lun_id; /* Destination Lun ID if valid. */ + uint32_t data_len; /* Data length */ + uint32_t ioadl_len; /* IODL Length for scatter/gather */ + /* list */ + uint64_t data_ea; /* Effective address of data buffer */ + /* or IOADL */ + uint8_t msi; /* MSI number to interrupt when this*/ + /* IOARCB completes */ + + uint8_t rrq_num; /* response queue number */ + ushort reserved1; /* Reserved for future use */ + uint32_t timeout; /* Time-out in seconds */ + struct scsi_cdb cdb; /* SCSI Command Descriptor Block */ + uint64_t reserved2; /* Reserved for future use */ + uint32_t ioasc; /* SIS IOA Status Code */ + uint32_t residual_len; /* Residual length */ + ushort ioasa_flags; /* Status flags */ +#define CAPI_IOASA_FLG_SNS_VAL 1 /* Sense Data Valid */ + ushort fc_port;; /* FC Port on which the request was */ + /* issued. 
*/ + char sense_data[20]; /* First 20 bytes of sense data */ +} capi_ioarcb_t; + +/*----------------------- SIS lite header stuff ----------------------------*/ +#else +typedef sisl_ioarcb_t capi_ioarcb_t; +#endif /* _OLD_CODE */ diff --git a/src/test/unit_test-kv.C b/src/test/unit_test-kv.C new file mode 100644 index 00000000..6956611c --- /dev/null +++ b/src/test/unit_test-kv.C @@ -0,0 +1,94 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/unit_test-kv.C $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#include + +extern "C" +{ +#include +} + +//it is the convention to write the test (case) names without quotes. 
+//Compilation error occurs if quotes are used around the strings +//ie: TEST("System_Test_Kv", kv_tst_bv) or TEST(System_Test_Kv, "kv_tst_bv") //INVALID +TEST(System_Test_Kv, kv_tst_bl) +{ +/* +Function prototype for tst_bl: +tst_bl(int iter, int bcnt,int ccnt,int cmax, int grow, int hold, int resz, int seed, int prnt, int dprt, int w); +*/ +//Array used to store numerous lists of parameters + int array[2][11]={{0,8,16,6,8,0,0,1234,0,0,34}, + {0,8,16,6,8,0,0,100,0,0,25}}; +//Usage of matrix to process all the information + for(int i=0;i<2;i++) + { + EXPECT_EQ(0, tst_bl(array[i][0],array[i][1],array[i][2],array[i][3],array[i][4],array[i][5],array[i][6],array[i][7],array[i][8],array[i][9],array[i][10])); + //If the return value of tst_bl is non valid for **ANY** of the rows of the matrice then the test case fails. + } + +} +TEST(System_Test_Kv, kv_tst_bv) +{ + + EXPECT_EQ(0, system("../../obj/tests/_tst_bv")); + +} +TEST(System_Test_Kv, kv_tst_ht) +{ + + EXPECT_EQ(0, system("../../obj/tests/_tst_ht")); + +} +TEST(System_Test_Kv, kv_tst_vi) +{ + + EXPECT_EQ(0, system("../../obj/tests/_tst_vi")); + +} +TEST(System_Test_Kv, kv_tst_ark) +{ + + EXPECT_EQ(0,system("../../obj/tests/_tst_ark")); + +} +TEST(System_Test_Kv, kv_tst_bt) +{ + + EXPECT_EQ(0, system("../../obj/tests/_tst_bt")); + +} +TEST(System_Test_Kv, transport_test) +{ + + EXPECT_EQ(0, system("../../obj/tests/transport_test")); + +} +TEST(System_Test_Kv, kv_tst_iv) +{ + + EXPECT_EQ(0, system("../../obj/tests/_tst_iv")); + +} + diff --git a/src/test/xlate.c b/src/test/xlate.c new file mode 100644 index 00000000..85e66a06 --- /dev/null +++ b/src/test/xlate.c @@ -0,0 +1,793 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/test/xlate.c $ */ +/* */ +/* IBM Data Engine for NoSQL - Power Systems Edition User Library Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2014,2015 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +// set this to 1 to run w/libafu or in sim +#define LIBAFU 0 + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define REMAP_FREQ 0xF /* must be all ones, + higher value is fewer remaps */ +#define MC_RHT_NMASK 16 /* in bits */ +#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs, see mclient.h */ +#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */ +#define MC_CHUNK_OFF_MASK (MC_CHUNK_SIZE - 1) /* apply to LBA get offset */ + +#if LIBAFU +#include +/* offset in afu_mmio_write_dw is in words and from the base of the entire + map */ +#define MMIO_WRITE_64(addr, val) \ + afu_mmio_write_dw(p_afu, ((__u64)(addr) - (__u64)p_afu->ps_addr)/4, (val)) + +#define MMIO_READ_64(addr, p_val) \ + afu_mmio_read_dw(p_afu, ((__u64)(addr) - (__u64)p_afu->ps_addr)/4, (p_val)) + + +/* fuctions to byte reverse SCSI CDB on LE host */ +static inline void write_64(volatile __u64 *addr, __u64 val) +{ + __u64 zero = 0; + asm volatile ( "stdbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +} + +static inline void write_32(volatile __u32 *addr, __u32 val) +{ + __u32 zero = 0; + asm volatile ( "stwbrx %0, %1, %2" : : "r"(val), "r"(zero), "r"(addr) ); +} +#else +#include +#include +#include +#define MMIO_WRITE_64(addr, val) write_64((addr), (val)) +#define MMIO_READ_64(addr, 
p_val) *(p_val) = read_64((addr)) +#endif + + +/* + Stand alone program to test LBA translation. Does not require + master context/daemon. + */ + +#define B_DONE 0x01 +#define B_ERROR 0x02 +#define NUM_RRQ_ENTRY 64 +#define NUM_CMDS 64 /* max is NUM_RRQ_ENTRY */ +#define LUN_INDEX 1 /* lun index to use, should be something other than 0 + used by mserv */ + +#define CL_SIZE 128 /* Processor cache line size */ +#define CL_SIZE_MASK 0x7F /* Cache line size mask */ +#define DATA_SEED 0xdead000000000000ull +#define CHUNK_BASE 0x100 /* chunk# 0x100, i.e. RLBA=0x1000000 */ + +struct ctx { + /* Stuff requiring alignment go first. */ + + /* Command & data for AFU commands issued by test. */ + char rbuf[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + char wbuf[NUM_CMDS][0x1000]; // 4K write data buffer (page aligned) + char rbufm[NUM_CMDS][0x1000]; // 4K read data buffer (page aligned) + __u64 rrq_entry[NUM_RRQ_ENTRY]; // 128B RRQ (page aligned) + + struct afu_cmd { + sisl_ioarcb_t rcb; // IOARCB (cache line aligned) + sisl_ioasa_t sa; // IOASA follows RCB + pthread_mutex_t mutex; + pthread_cond_t cv; + + __u8 cl_pad[CL_SIZE - + ((sizeof(sisl_ioarcb_t) + + sizeof(sisl_ioasa_t) + + sizeof(pthread_mutex_t) + + sizeof(pthread_cond_t)) & CL_SIZE_MASK)]; + } cmd[NUM_CMDS]; + + // AFU interface + int afu_fd; + struct cxl_ioctl_start_work work; + char event_buf[0x1000]; /* Linux cxl event buffer (interrupts) */ + volatile struct sisl_host_map *p_host_map; + volatile struct sisl_ctrl_map *p_ctrl_map; + volatile struct surelock_afu_map *p_afu_map; + ctx_hndl_t ctx_hndl; + + __u64 *p_hrrq_start; + __u64 *p_hrrq_end; + volatile __u64 *p_hrrq_curr; + unsigned int toggle; + + // LBA xlate + sisl_rht_entry_t rht; + sisl_lxt_entry_t lxt[NUM_CMDS]; // each cmd targets 1 chunk + +} __attribute__ ((aligned (0x1000))); + +typedef void (*read_fcn_t)(struct ctx *p_ctx, __u64 start_lba, __u64 stride); + +void *ctx_rrq_rx(void *arg); +int ctx_init(struct ctx *p_ctx, char *dev_path); 
+void send_writep(struct ctx *p_ctx, __u64 start_lba, __u64 stride); +void send_readp(struct ctx *p_ctx, __u64 start_lba, __u64 stride); +void send_readv(struct ctx *p_ctx, __u64 start_lba, __u64 stride); +void send_readm(struct ctx *p_ctx, __u64 start_lba, __u64 stride); +void rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba, __u64 stride); +void send_cmd(struct ctx *p_ctx); +void wait_resp(struct ctx *p_ctx); +void fill_buf(__u64* p_buf, unsigned int len, __u64 vlba, __u64 plba); +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len); +__u64 gen_rand(); +struct ctx* remap(); + + +char *afu_path; /* points to argv[] string */ +pid_t pid; +read_fcn_t read_fcn = send_readv; // default is read virtual +__u64 lun_id = 0x0; +__u64 chunk_base = CHUNK_BASE; +int rand_fd; +int ctx_fd; +char ctx_file[32]; +struct ctx *p_ctx; + +#if LIBAFU +struct afu *p_afu; +#endif + +// run multiple instances separated by 0x100 e.g. +// xlate -c 0x100 ... +// xlate -c 0x200 ... +// etc +void +usage(char *prog) +{ + printf("Usage: %s [-c chunk_base] [-l lun_id] [-p] master_dev_path\n", prog); + printf("e. 
g.: %s -c 0x100 -l 0x1000000000000 /dev/cxl/afu0.0m\n", prog); + printf(" -p runs reads & writes in physical LBAs\n"); + printf(" default is write physical, read virtual\n"); +} + +void +get_parameters(int argc, char** argv) +{ + extern int optind; /* for getopt function */ + extern char *optarg; /* for getopt function */ + int ch; + + while ((ch = getopt(argc,argv,"pl:c:h")) != EOF) { + switch (ch) { + case 'p' : /* use physical read (default is virtual) */ + read_fcn = send_readp; + break; + + case 'l' : /* LUN_ID to use */ + sscanf(optarg, "%lx", &lun_id); + break; + + case 'c' : /* chunk_base to use */ + sscanf(optarg, "%lx", &chunk_base); + break; + + case 'h': + usage(argv[0]); + exit(0); + + default: + usage(argv[0]); + exit(-1); + } + } + + if ((argc - optind) != 1) { /* number of afus specified in cmd line */ + usage(argv[0]); + exit(-11); + } + + afu_path = argv[optind]; +} + + + +void *ctx_rrq_rx(void *arg) { + struct ctx *p_ctx = (struct ctx*) arg; + struct afu_cmd *p_cmd; + int len; + + while (1) { + // + // read afu fd block on any interrupt + len = read(p_ctx->afu_fd, &p_ctx->event_buf[0], + sizeof(p_ctx->event_buf)); + + if (len == -EIO) { + fprintf(stderr, "afu has been reset, exiting...\n"); + exit(-1); + } + + // process however many RRQ entries that are ready + // not checking the event type + while ((*p_ctx->p_hrrq_curr & SISL_RESP_HANDLE_T_BIT) == + p_ctx->toggle) { + p_cmd = (struct afu_cmd*)((*p_ctx->p_hrrq_curr) & (~SISL_RESP_HANDLE_T_BIT)); + + pthread_mutex_lock(&p_cmd->mutex); + p_cmd->sa.host_use[0] |= B_DONE; + pthread_cond_signal(&p_cmd->cv); + pthread_mutex_unlock(&p_cmd->mutex); + + if (p_ctx->p_hrrq_curr < p_ctx->p_hrrq_end) { + p_ctx->p_hrrq_curr++; /* advance to next RRQ entry */ + } + else { /* wrap HRRQ & flip toggle */ + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle ^= SISL_RESP_HANDLE_T_BIT; + } + } + } + + return NULL; +} + +// dev_path must be master device +// dev_path is not used when running w/libafu - see 
afu.c +int ctx_init(struct ctx *p_ctx, char *dev_path) +{ + +#if !(LIBAFU) + void *map; + __u32 proc_elem; +#endif + pthread_mutexattr_t mattr; + pthread_condattr_t cattr; + int i; + __u64 reg; + + // general init, no resources allocated + memset(p_ctx, 0, sizeof(*p_ctx)); + pthread_mutexattr_init(&mattr); // check rc on there ? + pthread_condattr_init(&cattr); + + for (i = 0; i < NUM_CMDS; i++) { + pthread_mutex_init(&p_ctx->cmd[i].mutex, &mattr); + pthread_cond_init(&p_ctx->cmd[i].cv, &cattr); + } +#if LIBAFU + if ((p_afu = afu_map()) == NULL) { + printf("Cannot open AFU using libafu\n"); + return -1; + } + // copy stuff into p_ctx + p_ctx->afu_fd = p_afu->fd; + p_ctx->work = p_afu->work; + p_ctx->p_afu_map = (volatile struct surelock_afu_map *) p_afu->ps_addr; + p_ctx->ctx_hndl = p_afu->process_element; +#else + // open master device + p_ctx->afu_fd = open(dev_path, O_RDWR); + if (p_ctx->afu_fd < 0) { + fprintf(stderr, "open failed: device %s, errno %d", dev_path, errno); + return -1; + } + + // enable the AFU. This must be done before mmap. 
+ p_ctx->work.num_interrupts = 4; + p_ctx->work.flags = CXL_START_WORK_NUM_IRQS; + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_START_WORK, &p_ctx->work) != 0) { + fprintf(stderr, "start command failed on AFU, errno %d\n", errno); + return -1; + } + if (ioctl(p_ctx->afu_fd, CXL_IOCTL_GET_PROCESS_ELEMENT, + &proc_elem) != 0) { + fprintf(stderr, "get_process_element failed, errno %d\n", errno); + return -1; + } + + // mmap entire MMIO space of this AFU + map = mmap(NULL, sizeof(struct surelock_afu_map), + PROT_READ|PROT_WRITE, MAP_SHARED, p_ctx->afu_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "mmap failed, errno %d\n", errno); + return -1; + } + p_ctx->p_afu_map = (volatile struct surelock_afu_map *) map; + p_ctx->ctx_hndl = proc_elem; // ctx_hndl is 16 bits in CAIA +#endif + + // copy frequently used fields into p_ctx + p_ctx->p_host_map = &p_ctx->p_afu_map->hosts[p_ctx->ctx_hndl].host; + p_ctx->p_ctrl_map = &p_ctx->p_afu_map->ctrls[p_ctx->ctx_hndl].ctrl; + + // initialize RRQ pointers + p_ctx->p_hrrq_start = &p_ctx->rrq_entry[0]; + p_ctx->p_hrrq_end = &p_ctx->rrq_entry[NUM_RRQ_ENTRY - 1]; + p_ctx->p_hrrq_curr = p_ctx->p_hrrq_start; + p_ctx->toggle = 1; + + printf("p_host_map %p, ctx_hndl %d, rrq_start %p\n", + p_ctx->p_host_map, p_ctx->ctx_hndl, p_ctx->p_hrrq_start); + + // initialize cmd fields that never change + for (i = 0; i < NUM_CMDS; i++) { + p_ctx->cmd[i].rcb.msi = 0x2; + p_ctx->cmd[i].rcb.rrq = 0x0; + p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl; + } + + // set up RRQ in AFU + MMIO_WRITE_64(&p_ctx->p_host_map->rrq_start, (__u64) p_ctx->p_hrrq_start); + MMIO_WRITE_64(&p_ctx->p_host_map->rrq_end, (__u64) p_ctx->p_hrrq_end); + + // program FC_PORT LUN Tbl + MMIO_WRITE_64(&p_ctx->p_afu_map->global.fc_port[0][LUN_INDEX], lun_id); + MMIO_WRITE_64(&p_ctx->p_afu_map->global.fc_port[1][LUN_INDEX], lun_id); + + // AFU configuration + MMIO_READ_64(&p_ctx->p_afu_map->global.regs.afu_config, ®); + reg |= 0x7F00; // enable auto retry + 
MMIO_WRITE_64(&p_ctx->p_afu_map->global.regs.afu_config, reg); + + // turn off PSL page-mode translation, use in-order translation + // MMIO_WRITE_64((__u64*)&p_ctx->p_afu_map->global.page1[0x28], 0); + + // set up my own CTX_CAP to allow real mode, host translation + // tbls, allow read/write cmds + MMIO_READ_64(&p_ctx->p_ctrl_map->mbox_r, ®); + asm volatile ( "eieio" : : ); + MMIO_WRITE_64(&p_ctx->p_ctrl_map->ctx_cap, + SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_WRITE_CMD | SISL_CTX_CAP_READ_CMD); + + // set up LBA xlate + // + for (i = 0; i < NUM_CMDS; i++) { + // LUN_INDEX & select both ports, use r/w perms from RHT + p_ctx->lxt[i].rlba_base + = (((chunk_base + i) << MC_CHUNK_SHIFT) | (LUN_INDEX << 8) | 0x33); + } + p_ctx->rht.lxt_start = &p_ctx->lxt[0]; + p_ctx->rht.lxt_cnt = NUM_CMDS; + p_ctx->rht.nmask = MC_RHT_NMASK; + p_ctx->rht.fp = SISL_RHT_FP(0u, 0x3); /* format 0 & RW perms */ + + // make tables visible to AFU before MMIO + asm volatile ( "lwsync" : : ); + + // make MMIO registers for this context point to the single entry + // RHT. The RHT is under this context. 
+ MMIO_WRITE_64(&p_ctx->p_ctrl_map->rht_start, + (__u64)&p_ctx->rht); + MMIO_WRITE_64(&p_ctx->p_ctrl_map->rht_cnt_id, + SISL_RHT_CNT_ID((__u64)1, + (__u64)(p_ctx->ctx_hndl))); + return 0; +} + +void ctx_close(struct ctx *p_ctx) +{ +#if LIBAFU + afu_unmap(p_afu); +#else + munmap((void*)p_ctx->p_afu_map, sizeof(struct surelock_afu_map)); + close(p_ctx->afu_fd); +#endif +} + +__u64 xlate_lba(__u64 vlba) { + __u64 chunk_id = (vlba >> MC_CHUNK_SHIFT); + __u64 chunk_off = (vlba & MC_CHUNK_OFF_MASK); + __u64 rlba_base; + + if (chunk_id < NUM_CMDS) { + rlba_base = ((chunk_base + chunk_id) << MC_CHUNK_SHIFT); + return (rlba_base | chunk_off); + } + else { + return -1; // error + } +} + +// writes wbuf using physical LBA +void send_writep(struct ctx *p_ctx, __u64 start_lba, __u64 stride) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba, plba; + + for (i = 0; i < NUM_CMDS; i++) { + vlba = start_lba + i*stride; + plba = xlate_lba(vlba); + + fill_buf((__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->wbuf[i])/sizeof(__u64), vlba, plba); + + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = 0x3; // either FC port + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[i]); + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_WRITE); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->wbuf[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x8A; // write(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + write_64(p_u64, plba); // physical LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + + if ((gen_rand() & REMAP_FREQ) == 0) { + p_ctx = remap(); + } + + send_cmd(p_ctx); + wait_resp(p_ctx); +} + +// read into rbuf using virtual LBA +void send_readv(struct ctx *p_ctx, __u64 start_lba, __u64 stride) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba; + + 
for (i = 0; i < NUM_CMDS; i++) { + memset(&p_ctx->rbuf[i][0], 0xB, sizeof(p_ctx->rbuf[i])); + + p_ctx->cmd[i].rcb.res_hndl = 0; // only 1 resource open at RHT[0] + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbuf[i]); + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_RES_HNDL | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbuf[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + vlba = start_lba + i*stride; + + write_64(p_u64, vlba); // virtual LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + + if ((gen_rand() & REMAP_FREQ) == 0) { + p_ctx = remap(); + } + + send_cmd(p_ctx); + wait_resp(p_ctx); +} + +// read into rbuf using physical LBA +void send_readp(struct ctx *p_ctx, __u64 start_lba, __u64 stride) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba, plba; + + for (i = 0; i < NUM_CMDS; i++) { + memset(&p_ctx->rbuf[i][0], 0xB, sizeof(p_ctx->rbuf[i])); + + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = 0x3; // either FC port + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbuf[i]); + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbuf[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + vlba = start_lba + i*stride; + plba = xlate_lba(vlba); + + write_64(p_u64, plba); // physical LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + + if ((gen_rand() & REMAP_FREQ) == 0) { + p_ctx = remap(); + } + + send_cmd(p_ctx); + wait_resp(p_ctx); 
+} + + +// read into rbufm using physical LBA - used in miscompare debug only +void send_readm(struct ctx *p_ctx, __u64 start_lba, __u64 stride) { + int i; + __u64 *p_u64; + __u32 *p_u32; + __u64 vlba, plba; + + for (i = 0; i < NUM_CMDS; i++) { + memset(&p_ctx->rbufm[i][0], 0xB, sizeof(p_ctx->rbufm[i])); + + p_ctx->cmd[i].rcb.lun_id = lun_id; + p_ctx->cmd[i].rcb.port_sel = 0x3; // either FC port + p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->rbufm[i]); + p_ctx->cmd[i].rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_HOST_READ); + p_ctx->cmd[i].rcb.data_ea = (__u64) &p_ctx->rbufm[i][0]; + + memset(&p_ctx->cmd[i].rcb.cdb[0], 0, sizeof(p_ctx->cmd[i].rcb.cdb)); + p_ctx->cmd[i].rcb.cdb[0] = 0x88; // read(16) + p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2]; + + vlba = start_lba + i*stride; + plba = xlate_lba(vlba); + + write_64(p_u64, plba); // physical LBA# + p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10]; + write_32(p_u32, 8); // 8 LBAs for 4K + + p_ctx->cmd[i].sa.host_use[0] = 0; // 0 means active + p_ctx->cmd[i].sa.ioasc = 0; + } + + send_cmd(p_ctx); + wait_resp(p_ctx); +} + +// compare wbuf & rbuf +void rw_cmp_buf(struct ctx *p_ctx, __u64 start_lba, __u64 stride) { + int i; + char buf[32]; + int read_fd, write_fd, readm_fd; + + for (i = 0; i < NUM_CMDS; i++) { + if (cmp_buf((__u64*)&p_ctx->rbuf[i][0], (__u64*)&p_ctx->wbuf[i][0], + sizeof(p_ctx->rbuf[i])/sizeof(__u64))) { + printf("%d: miscompare at start_vlba 0x%lx, chunk# %d\n", + pid, start_lba, i); + fflush(stdout); + + send_readm(p_ctx, start_lba, stride); // sends NUM_CMDS reads + + sprintf(buf, "read.%d", pid); + read_fd = open(buf, O_RDWR|O_CREAT); + sprintf(buf, "write.%d", pid); + write_fd = open(buf, O_RDWR|O_CREAT); + sprintf(buf, "readm.%d", pid); + readm_fd = open(buf, O_RDWR|O_CREAT); + + write(read_fd, &p_ctx->rbuf[i][0], sizeof(p_ctx->rbuf[i])); + write(write_fd, &p_ctx->wbuf[i][0], sizeof(p_ctx->wbuf[i])); + write(readm_fd, &p_ctx->rbufm[i][0], sizeof(p_ctx->rbufm[i])); + + close(read_fd); + 
close(write_fd); + close(readm_fd); + + while(1); // stop IOs and stay quiet + } + } +} + +// do not touch memory to make remap effective +void send_cmd(struct ctx *p_ctx) { + int cnt = NUM_CMDS; + int i; + __u64 room; + + asm volatile ( "lwsync" : : ); /* make memory updates visible to AFU */ + + while (cnt) { + asm volatile ( "eieio" : : ); // let IOARRIN writes complete + MMIO_READ_64(&p_ctx->p_host_map->cmd_room, &room); + for (i = 0; i < room; i++) { + // write IOARRIN + MMIO_WRITE_64(&p_ctx->p_host_map->ioarrin, + (__u64)&p_ctx->cmd[cnt - 1].rcb); + if (cnt-- == 1) break; + } + } +} + +void wait_resp(struct ctx *p_ctx) { + int i; + + for (i = 0; i < NUM_CMDS; i++) { + pthread_mutex_lock(&p_ctx->cmd[i].mutex); + while (!(p_ctx->cmd[i].sa.host_use[0] & B_DONE)) { + pthread_cond_wait(&p_ctx->cmd[i].cv, &p_ctx->cmd[i].mutex); + } + pthread_mutex_unlock(&p_ctx->cmd[i].mutex); + + if (p_ctx->cmd[i].sa.ioasc) { + printf("%d:IOASC = flags 0x%x, afu_rc 0x%x, scsi_rc 0x%x, fc_rc 0x%x\n", + pid, + p_ctx->cmd[i].sa.rc.flags, + p_ctx->cmd[i].sa.rc.afu_rc, + p_ctx->cmd[i].sa.rc.scsi_rc, + p_ctx->cmd[i].sa.rc.fc_rc); + fflush(stdout); + } + } + +} + +__u64 gen_rand() { + __u64 rand; + + if (read(rand_fd, &rand, sizeof(rand)) != sizeof(rand)) { + fprintf(stderr, "cannot read random device, errno %d\n", errno); + exit(-1); + } + return rand; +} + +struct ctx* remap() { +#if LIBAFU + return p_ctx; // NOP +#else + void *map; + munmap((void*)p_ctx, sizeof(struct ctx)); + + // ask for the mapping at the same address since there are pointers + // in struct ctx or in the AFU's RHT/LXT that we do not want to + // reinitialize + // + map = mmap(p_ctx, sizeof(struct ctx), + PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, ctx_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "remap failed, errno %d\n", errno); + exit(-1); + } + return (struct ctx *) map; +#endif +} + + +// when runing w/libafu, the master device is hard coded in afu.c to +// /dev/cxl/afu0.0m. 
The cmd line path is not used. +// +int +main(int argc, char *argv[]) +{ + int rc; +#if LIBAFU + struct ctx myctx; +#else + void *map; +#endif + pthread_t thread; + + __u64 start_lba; + __u64 npass = 0; + __u64 stride = MC_CHUNK_SIZE; // or use 8 to test all LBAs + unsigned long nchunk = NUM_CMDS; + __u64 nlba = nchunk*MC_CHUNK_SIZE; + + get_parameters(argc, argv); + + pid = getpid(); // pid used to create unique data patterns + // or ctx file for mmap + + rand_fd = open("/dev/urandom", O_RDONLY); + if (rand_fd < 0) { + fprintf(stderr, "cannot open random device, errno %d\n", errno); + exit(-1); + } + +#if LIBAFU + p_ctx = &myctx; +#else + sprintf(ctx_file, "ctx.%d", pid); + unlink(ctx_file); + ctx_fd = open(ctx_file, O_RDWR|O_CREAT); + if (ctx_fd < 0) { + fprintf(stderr, "open failed: file %s, errno %d", ctx_file, errno); + exit(-1); + } + + // mmap a struct ctx + ftruncate(ctx_fd, sizeof(struct ctx)); + map = mmap(NULL, sizeof(struct ctx), + PROT_READ|PROT_WRITE, MAP_SHARED, ctx_fd, 0); + if (map == MAP_FAILED) { + fprintf(stderr, "mmap failed, errno %d\n", errno); + exit(-1); + } + p_ctx = (struct ctx *) map; +#endif + + printf("instantiating ctx on %s...\n", afu_path); + rc = ctx_init(p_ctx, afu_path); + if (rc != 0) { + fprintf(stderr, "error instantiating ctx, rc %d\n", rc); + exit(-1); + } + pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); + + + + // when stride == 1 chunk, it sends NUM_CMDS writes followed by + // same numner of reads. each command targets a different chunk. 
+ // + while (1) { + for (start_lba = 0; start_lba < nlba; start_lba += (NUM_CMDS*stride)) { + send_writep(p_ctx, start_lba, stride); + (*read_fcn)(p_ctx, start_lba, stride); + rw_cmp_buf(p_ctx, start_lba, stride); + } + + if ((npass++ & 0xFF) == 0) { + printf("%d:completed pass %ld\n", pid, npass>>8); fflush(stdout); + } + } + + pthread_join(thread, NULL); + + return 0; +} + +// len in __u64 +void fill_buf(__u64* p_buf, unsigned int len, __u64 vlba, __u64 plba) +{ + static __u64 data = DATA_SEED; + int i; + + // the vlba & plba helps to see if right data is going to the right + // place. + for (i = 0; i < len; i += 4) { + p_buf[i] = pid; + p_buf[i + 1] = data++; + p_buf[i + 2] = vlba; + p_buf[i + 3] = plba; + } +} + + +// len in __u64 +int cmp_buf(__u64* p_buf1, __u64 *p_buf2, unsigned int len) +{ + return memcmp(p_buf1, p_buf2, len*sizeof(__u64)); +} +