#
#  Copyright Red Hat Inc., 2002
#  Copyright Mission Critical Linux, 2000
#
#  This program is free software; you can redistribute it and/or modify it
#  under the terms of the GNU General Public License as published by the
#  Free Software Foundation; either version 2, or (at your option) any
#  later version.
#
#  This program is distributed in the hope that it will be useful, but
#  WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; see the file COPYING.  If not, write to the
#  Free Software Foundation, Inc.,  675 Mass Ave, Cambridge, 
#  MA 02139, USA.
#

# $Revision: 1.8 $
#
# Author: Tim Burke <tburke at redhat.com>

#
# Shell library for NFS functions.
#

#
# Called to start, stop, or check the status of an NFS service.
#
startStopNFS()
{
	# Start, stop, or status-check the NFS exports of a service.
	#   $1 - command: "start", "stop", or "status"
	#   $2 - service ID
	# Returns $SUCCESS or $FAIL.
	# cloned from startFilesystem
	if [ $# -ne 2 ]; then
		logAndPrint $LOG_ERR "Usage: startStopNFS command svcID"
		# Bug fix: previously fell through and ran with unset args.
		return $FAIL
	fi

	typeset command=$1
	typeset svcID=$2

	typeset svc_name=$(getSvcName $DB $svcID) > /dev/null

	# This routine is called on the start of ALL services.
	# First verify if this service has any NFS exports; if not,
	# there's nothing to do so simply return success.
	is_nfs_service $svcID
	if [ $? -ne $YES ]; then
		logAndPrint $LOG_DEBUG "$svc_name is not an NFS service"
		return $SUCCESS
	fi

	# Verify that the appropriate collection of daemons are running.
	nfs_daemons_running
	if [ $? -ne $SUCCESS ]; then
		#
		# Don't restart daemons during status check.
		#
		if [ "$command" = "status" ]; then
			return $FAIL
		fi

		logAndPrint $LOG_ERR "startStopNFS: restarting NFS daemons"
		# Note restart does less than stop/start.
		# Bug fix: the old backtick form `...` captured the init
		# script's output and tried to execute it as a command.
		/etc/rc.d/init.d/nfs stop
		/etc/rc.d/init.d/nfs start
		sleep 2
		nfs_daemons_running
		if [ $? -ne $SUCCESS ]; then
			logAndPrint $LOG_ERR "startStopNFS: Failed restarting NFS daemons"
			return $FAIL
		fi
		logAndPrint $LOG_NOTICE "startStopNFS: Successfully restarted NFS daemons"
	fi

	# Call export_service to do the real work.
	export_service $command $svcID
	if [ $? -ne $SUCCESS ]; then
		return $FAIL
	fi

	return $SUCCESS
}

#
# Status check for NFS services.
# Currently not very robust at all.
#
statusNFS()
{
	# Status check for an NFS service.
	#   $1 - service ID
	# Returns $SUCCESS when the service is not NFS or its daemons
	# are up, $FAIL otherwise.
	typeset svcID=$1

	# Services with no NFS exports are trivially healthy; this is
	# invoked for ALL services, so bail out early for non-NFS ones.
	is_nfs_service $svcID
	[ $? -eq $YES ] || return $SUCCESS

	# An NFS service is healthy only if the daemon set is running.
	nfs_daemons_running
	[ $? -eq $SUCCESS ] || return $FAIL

	return $SUCCESS
}

#
# nfs action serviceID
#
nfs()
{
	# Entry point: dispatch an NFS service action.
	#   $1 - action: start, stop, or status
	#   $2 - service ID
	if [ $# -ne 2 ]; then
	  logAndPrint $LOG_ERR "Usage: nfs [start, stop, status] serviceID"
	  return $FAIL
	fi

	typeset action=$1
	typeset svcID=$2

	# Bug fix: this previously switched on the global $SVC_ACTION
	# instead of the $action argument parsed above (which was never
	# used), so the dispatch depended on caller environment.
	case "$action" in
	'start')
	  startStopNFS "start" $svcID
	  return $?
	  ;;

	'stop')
	  startStopNFS "stop" $svcID
	  return $?
	  ;;

	'status')
	  startStopNFS "status" $svcID
	  return $?
	  ;;

	*)
	  # Bug fix: unknown actions previously fell out of the case
	  # and returned 0 implicitly.
	  logAndPrint $LOG_ERR "nfs: unknown action '$action'"
	  return $FAIL
	  ;;
	esac
}

#
# Check if the NFS daemons are running.
#
nfs_daemons_running()
{
    # Check that every required NFS daemon has a live process.
    #   $1 - optional space-separated daemon list; defaults to the
    #        standard NFS daemon set (backward compatible).
    # Returns $SUCCESS if all are running, $FAIL on the first miss.
    NFS_DAEMONS="nfsd rpc.mountd rpc.statd"
    typeset daemons="${1:-$NFS_DAEMONS}"
    typeset daemon

    for daemon in $daemons; do
        # -F matches the name literally; a plain grep would treat the
        # '.' in "rpc.mountd" as a regex wildcard.  'grep -v grep'
        # excludes this pipeline's own grep process.
        ps -ef | grep -F "$daemon" | grep -v grep >/dev/null 2>&1
        if [ $? -ne 0 ]; then
	    logAndPrint $LOG_ERR \
            "NFS daemon $daemon is not running."
	    logAndPrint $LOG_ERR \
            "Verify that the NFS service run level script is enabled."
            return $FAIL
        fi
    done

    logAndPrint $LOG_DEBUG "All required NFS daemons are running."
    return $SUCCESS
}

#
# Check whether the service has any NFS exports configured.
#
is_nfs_service()
{
	# Determine whether the given service has NFS exports configured.
	#   $1 - service ID
	# Returns $YES if any export-name tokens exist, $NO otherwise.
	typeset svcID=$1
	typeset exports

	# Query the configuration database for export-name entries under
	# any export directory of any device of this service.
	exports=$(getTokenList $DB \
		"$SVC_SERVICES_LIST_STR$SEP$SVC_SERVICE_STR$svcID$SEP$SVC_DEVICE_STR[0-9]+$SEP$SVC_EXPORT_DIR_STR[0-9]+" \
		"$SVC_EXPORT_NAME_STR")
	if [ -z "$exports" ]; then
		return $NO
	fi
	return $YES
}

#
# Iterate through each device within a service and call export_device
# to do the real work.  Unwind in the event of failure.
#
export_service()
{
	# Apply $command to every device of the service by delegating to
	# export_device for each one.
	#   $1 - command (start/stop/status)
	#   $2 - service ID
	# Returns $FAIL on the first device failure, else $SUCCESS.
	typeset command=$1
	typeset svcID=$2
	typeset dev_token dev_tokens

	typeset svc_name=$(getSvcName $DB $svcID) > /dev/null

	# Walk the device list; each device exports its own directories.
	dev_tokens=$(getSvcDeviceTokenList $DB $svcID)
	for dev_token in $dev_tokens; do
		export_device $command $svcID $dev_token
		if [ $? -eq $FAIL ]; then
			# Stop at the first failure.
			# XXX - unwinding here (unexport_service) is believed
			# unnecessary: if service start fails, svcmgr should
			# call service stop anyway.
			return $FAIL
		fi
	done

	return $SUCCESS
}

# Call into the NFS kernel additions to tell it whether
# or not it should respond to NFS client requests for a
# device.  This is necessary to facilitate transparent
# service relocation.  If this isn't done, the client will
# receive "stale file handle" errors during the relocation
# interval.
export_nfs_device()
{
	# Tell the NFS kernel additions whether to respond to client
	# requests for a device (start/stop), or check the rmtab daemon
	# for the device's mount point (status).
	#   $1 - command (start/stop/status)
	#   $2 - service ID (for error reporting)
	#   $3 - device token
	typeset command=$1
	typeset svcID=$2
	typeset token=$3
	typeset device_name mountpoint rmtabpid nfsop_arg ret_val

	# Bug fix: 'typeset var=$(cmd)' returns the status of typeset,
	# not of cmd, so the misconfiguration checks below never fired.
	# Declaration and assignment are now separate.
	device_name=$(getSvcDevice $DB $token)
	if [ $? -ne $SUCCESS ]; then
		# Returning an error here as it indicates that the service
		# is misconfigured.
		logAndPrint $LOG_ERR \
	   "export_nfs_device: can't get device name for service '$svcID'"
		return $FAIL
	fi

	mountpoint=$(getSvcMountPoint $DB $token)
	if [ $? -ne $SUCCESS ]; then
		# Returning an error here as it indicates that the service
		# is misconfigured.
		logAndPrint $LOG_ERR \
	   "export_nfs_device: can't get mount point service '$svcID'"
		return $FAIL
	fi

	case "$command" in
	start)
		rm -f "$mountpoint/.clumanager/pid"
		$RMTABD $mountpoint
		nfsop_arg="-s"
		;;
	status)
		# A missing pidfile just means rmtabd isn't running;
		# suppress cat's noise.
		rmtabpid=$(cat "$mountpoint/.clumanager/pid" 2>/dev/null)
		if [ -n "$rmtabpid" ]; then
			# Signal 0 only probes for process existence.
			if kill -s 0 $rmtabpid 2>/dev/null; then
				# TODO: validate pid?
				return $SUCCESS
			fi
		fi
		#
		# rmtabd not running or nonexistent pidfile
		#
		return $FAIL
		;;
	stop)
		rmtabpid=$(cat "$mountpoint/.clumanager/pid" 2>/dev/null)
		if [ -n "$rmtabpid" ]; then
			kill $rmtabpid
		fi
		rm -f "$mountpoint/.clumanager/pid"
		nfsop_arg="-e"
		;;
	esac

	$NFSOPS $nfsop_arg -d $device_name
	ret_val=$?
	if [ $ret_val -ne 0 ]; then
		logAndPrint $LOG_WARNING "export_nfs_device: returning success anyways."
		# XXX - currently letting these errors slide in case
		# the kernel patches are not included.
		# return $FAIL
	fi

	return $SUCCESS
}

#
# Iterate through each directory within a device and individually
# initiate an export of each directory.
# This routine name is somewhat of a misnomer because the device
# itself isn't being exported, rather the directories within it.
#
export_device()
{
	# Export every directory configured on one device.  The name is a
	# slight misnomer: the directories within the device are exported,
	# not the device itself.
	#   $1 - command (start/stop/status)
	#   $2 - service ID
	#   $3 - device token
	typeset command=$1
	typeset svcID=$2
	typeset devtoken=$3
	typeset dir_token dir_tokens

	typeset svc_name=$(getSvcName $DB $svcID) > /dev/null

	# Notify the NFS kernel extensions first; this is what prevents
	# clients from seeing "stale file handle" errors on relocation.
	export_nfs_device $command $svcID $devtoken
	if [ $? -ne $SUCCESS ]; then
		logAndPrint $LOG_ERR "export_device: failed NFS kernel export for service '$svcID'"
		return $FAIL
	fi

	# We don't status-check individual exports.
	# XXX Maybe someday
	if [ "$command" = "status" ]; then
		return $SUCCESS
	fi

	# Export each directory under this device in turn.
	dir_tokens=$(getSvcExportDirTokenList $DB $devtoken)
	for dir_token in $dir_tokens; do
		export_directory $command $svcID $dir_token
		if [ $? -eq $FAIL ]; then
			# One failure unexports everything done so far.
			unexport_service $svcID
			return $FAIL
		fi
	done

	return $SUCCESS
}

#
# Iterate through each export client within a directory and call exportfs
# to do the real work.  Unwind in the event of failure.
#
export_directory()
{
	# Export one directory to each of its configured clients by
	# calling export_client per client.
	#   $1 - command (start/stop)
	#   $2 - service ID
	#   $3 - directory token
	typeset command=$1
	typeset svcID=$2
	typeset dirtoken=$3

	typeset svc_name=$(getSvcName $DB $svcID) > /dev/null
	typeset export_client
	typeset export_options
	typeset rv

	typeset token_list
	typeset token

	# XXX - perform a sanity check to verify that the directory is mounted!

	#
	# Loop through all of the clients for this directory.
	#
	token_list=$(getSvcExportClientTokenList $DB $dirtoken)
	for token in $token_list; do
	  #
	  # First retrieve the client name.
	  #
	  export_client=$(getSvcExportClient $DB $token) > /dev/null
	  rv=$?		# capture before any other command clobbers $?
	  case $rv in
	  0)
	    ;;
	  2)			# No client found.
	    # Bug fix: this branch previously cleared export_options,
	    # a copy/paste from the options lookup below.
	    export_client=""
	    ;;
	  *) logAndPrint $LOG_ERR \
"export_directory: Cannot get export client for service $svc_name, err=$rv"
	    return $FAIL ;;
	  esac

	  #
	  # Get the client options.
	  #
	  export_options=$(getSvcExportOptions $DB $token) > /dev/null
	  rv=$?
	  case $rv in
	  0)
	    ;;
	  2)			# No options found.
	    export_options=""
	    ;;
	  *) logAndPrint $LOG_ERR \
"export_directory: Cannot get export client options for service $svc_name, err=$rv"
	    return $FAIL ;;
	  esac

	  # Note: need to quote client as it may include '*'
	  export_client $command $svcID $dirtoken "$export_client" \
		"$export_options"
	  rv=$?
	  if [ $rv -ne $SUCCESS ]; then
	    # XXX - probably needs better error handling to unwind.
	    # Bug fix: the old message logged $? of the [ ] test above
	    # (always 0), not export_client's status.
	    logAndPrint $LOG_ERR \
"export_directory: export_client failed with err=$rv"
	    return $FAIL
	  fi
	done

	return $SUCCESS
}

#
# Finally! Do the real work of an export. 
#
export_client()
{
	# Do the real work of an export: build the exportfs argument list
	# and invoke exportfs.
	#   $1 - command (start/stop)
	#   $2 - service ID
	#   $3 - directory token
	#   $4 - client spec (may contain '*')
	#   $5 - export options ("None" or empty means no options)
	typeset command=$1
	typeset svcID=$2
	typeset dirtoken=$3
	typeset export_client=$4
	typeset export_options=$5
	typeset export_args export_opt ret_val

	typeset svc_name=$(getSvcName $DB $svcID) > /dev/null
	typeset export_directory=$(getSvcExportDir $DB $dirtoken) > /dev/null

	case "$command" in
	start)
		# Validate that the directory is a fully qualified path.
		# Bug fix: the original logged this error but fell through
		# and attempted to export the bogus path anyway.
		case "$export_directory" in
		/*)	;;	# Passed validation
		*)	logAndPrint $LOG_ERR "Export directory $export_directory must start with /"
			return $FAIL
			;;
		esac
		export_opt="-o"
		if [ "$export_options" = "None" -o -z "$export_options" ]; then
			# No explicit options; omit the -o flag entirely.
			export_options=""
			export_opt=""
		fi
		export_args="$export_opt $export_options $export_client:$export_directory"
		;;
	stop)
		export_args="-u $export_client:$export_directory"
		;;
	*)
		logAndPrint $LOG_ERR "export_client: Invalid command $command"
		return $FAIL
		;;
	esac

	logAndPrint $LOG_DEBUG "export_client: exportfs '$export_args'"
	# $export_args is intentionally unquoted so it word-splits into
	# separate exportfs arguments.
	exportfs $export_args
	ret_val=$?
	if [ $ret_val -ne 0 ]; then
		# Bug fix: the old error log duplicated the debug message
		# verbatim and omitted the failure status.
		logAndPrint $LOG_ERR "export_client: exportfs '$export_args' failed, err=$ret_val"
		return $FAIL
	fi
	return $SUCCESS
}

#
# Iterate through each export within a service and unexport it.  Called
# as part of error handling in case a start fails.
#
# XXX - hopefully this isn't even needed as service stop being called in 
# the event of error in service start is assumed to be called to mop up.
#
unexport_service()
{
	# Intentionally a no-op that reports success: the unwind is
	# believed unnecessary because svcmgr is expected to invoke
	# service stop after a failed start, which performs the cleanup.
	# NOTE(review): callers (export_device) still invoke this on
	# failure — confirm the svcmgr assumption before removing.
	return $SUCCESS
}
