fix run.sh, support setting up a test filesystem with a single node (#161)
Some checks failed: Build / build (push) has been cancelled

Authored by Yiyuan Liu on 2025-03-13 11:49:43 +08:00; committed by GitHub
parent a188ed39ec
commit 3c100b90ce
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 163 additions and 473 deletions

View File

@@ -4,7 +4,7 @@ cluster_id = '${FS_CLUSTER}'
 default_pkey_index = ${PKEY_INDEX}
 [fdb]
-clusterFile = '${FDB_UNITTEST_CLUSTER}'
+clusterFile = '${FDB_TEST_CLUSTER}'
 [mgmtd_client]
 mgmtd_server_addresses = [ 'RDMA://${ADDRESS}:${MGMTD_PORT}' ]
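The `${...}` placeholders above are not read by the services themselves; run.sh pipes every template in its config directory through envsubst after exporting FS_CLUSTER, PKEY_INDEX, ADDRESS, MGMTD_PORT and FDB_TEST_CLUSTER. A minimal sketch of that rendering step, with hypothetical values standing in for the ones the script derives from <test-dir> (the template name below is illustrative only):

# Hypothetical values; run.sh derives DATA from <test-dir> and uses fixed ports.
export FS_CLUSTER="ci_test" PKEY_INDEX=0 ADDRESS=10.0.0.1 MGMTD_PORT=12501
export FDB_TEST_CLUSTER=/tmp/3fs-test/data/foundationdb/fdb.cluster
# Same rendering step as the script's config loop:
envsubst < some_template.toml > /tmp/3fs-test/config/some_template.toml
# The rendered file would then read: clusterFile = '/tmp/3fs-test/data/foundationdb/fdb.cluster'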

View File

@@ -1,8 +1,7 @@
 attr_timeout = 2
 entry_timeout = 2
 fsync_length_hint = true
-enable_read_cache = {{ env.READCACHE }}
-chunk_size_limit = '64KB'
+enable_read_cache = true
 [[common.log.categories]]
 categories = [ '.' ]
@@ -13,19 +12,15 @@ async = true
 name = 'normal'
 start_level = 'NONE'
 writer_type = 'FILE'
-file_path = '{{ env.LOG_FILE }}'
+file_path = '${FUSE_LOG}'
 [mgmtd]
 mgmtd_server_addresses = [ 'RDMA://${ADDRESS}:${MGMTD_PORT}' ]
-[meta]
-dynamic_stripe = {{ env.DYNAMIC_STRIPE }}
 [periodic_sync]
 enable = true
 interval = '500ms'
 limit = 16
 [io_bufs]
-write_buf_size = '{{ "1MB" if env.ENABLE_WRITE_BUFFER else "0MB" }}'
 max_buf_size = '1MB'

View File

@@ -7,11 +7,11 @@ async = true
 name = 'normal'
 start_level = 'NONE'
 writer_type = 'FILE'
-file_path = '{{ env.LOG_FILE }}'
+file_path = '${META_LOG}'
 [[server.base.groups]]
 [server.base.groups.listener]
-listen_port = {{ env.PORT }}
+listen_port = ${META_PORT}
 [[server.base.groups]]
 [server.base.groups.listener]
@@ -40,5 +40,5 @@ gc_file_delay = '5s'
 [server.fdb]
 casual_read_risky = true
-clusterFile = '${FDB_UNITTEST_CLUSTER}'
+clusterFile = '${FDB_TEST_CLUSTER}'
 externalClientPath = '/lib/libfdb_c.so'

View File

@@ -7,11 +7,11 @@ async = true
 name = 'normal'
 start_level = 'NONE'
 writer_type = 'FILE'
-file_path = '{{ env.LOG_FILE }}'
+file_path = '${MGMTD_LOG}'
 [[server.base.groups]]
 [server.base.groups.listener]
-listen_port = {{ env.PORT }}
+listen_port = ${MGMTD_PORT}
 [[server.base.groups]]
 [server.base.groups.listener]

View File

@@ -4,4 +4,4 @@ cluster_id = '${FS_CLUSTER}'
 default_pkey_index = ${PKEY_INDEX}
 [kv_engine.fdb]
-clusterFile = '${FDB_UNITTEST_CLUSTER}'
+clusterFile = '${FDB_TEST_CLUSTER}'

View File

@@ -7,11 +7,11 @@ async = true
 name = 'normal'
 start_level = 'NONE'
 writer_type = 'FILE'
-file_path = '{{ env.LOG_FILE }}'
+file_path = '${STORAGE_LOG}'
 [[server.base.groups]]
 [server.base.groups.listener]
-listen_port = {{ env.PORT }}
+listen_port = ${STORAGE_PORT}
 [[server.base.groups]]
 [server.base.groups.listener]
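All four server templates now take their listen port and log path from the environment instead of {{ env.* }} template variables, and the reworked run.sh exports one fixed value per service (FDB 12500, MGMTD 12501, META 12502, STORAGE 12503, plus per-service *_LOG files under <log-dir>). A quick sanity check of the rendered config, assuming this hunk belongs to the storage_main template and a hypothetical /tmp/3fs-test test directory with logs in /tmp/3fs-logs:

# Paths assumed from the CONFIG/LOG layout run.sh creates under <test-dir> and <log-dir>.
grep -E 'listen_port|file_path' /tmp/3fs-test/config/storage_main.toml
# Expect lines like the following (values come from STORAGE_LOG and STORAGE_PORT=12503):
#   file_path = '/tmp/3fs-logs/storage.log'
#   listen_port = 12503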

View File

@@ -1,533 +1,228 @@
 #!/bin/bash
-RED='\033[0;31m'
-NC='\033[0m' # No Color
+# ======== Helper Functions ========
 function check_exists {
-  if ! [ -e $1 ]; then
-    echo $1 not exists
+  if ! [ -e "$1" ]; then
+    echo "$1 not exists"
     exit 1
   fi
 }
-function random_port {
-  local min_port=8000
-  local max_port=65535
-  while true; do
-    random_port=$((min_port + RANDOM % (max_port - min_port + 1)))
-    if ! lsof -i ":$random_port" >/dev/null; then
-      echo "$random_port"
-      break
-    fi
-  done
-}
 function start_fdb {
-  local PORT=$1
-  local DATA=$2
-  local LOG=$3
-  local CLUSTER=${DATA}/fdb.cluster
-  echo fdbserver: $FDB
-  echo fdbclient: $FDBCLI
-  echo start fdb @ port ${PORT}
-  echo test${FDB_PORT}:testdb${FDB_PORT}@127.0.0.1:${FDB_PORT} >${CLUSTER}
-  ${FDB} -p auto:${PORT} -d ${DATA} -L ${LOG} -C ${CLUSTER} &> ${LOG}/fdbserver.log &
-  echo "sleep 5s then create database"
-  sleep 5 && $FDBCLI -C ${CLUSTER} --exec "configure new memory single"
-  echo "sleep 5s then check foundationdb status"
-  sleep 5 && $FDBCLI -C ${CLUSTER} --exec "status minimal"
+  local PORT="$1"
+  local DATA="$2"
+  local LOG="$3"
+  local CLUSTER="${DATA}/fdb.cluster"
+  echo "fdbserver: $FDB, fdbclient: $FDBCLI, start fdb @ port ${PORT}"
+  echo "test${FDB_PORT}:testdb${FDB_PORT}@127.0.0.1:${FDB_PORT}" > "${CLUSTER}"
+  "${FDB}" -p auto:"${PORT}" -d "${DATA}" -L "${LOG}" -C "${CLUSTER}" &> "${LOG}/fdbserver.log" &
+  sleep 5 && "$FDBCLI" -C "${CLUSTER}" --exec "configure new memory single"
+  sleep 5 && "$FDBCLI" -C "${CLUSTER}" --exec "status minimal"
 }
-function create_app_config {
-  local NODE=$1
-  local FILE=$2
-  echo "allow_empty_node_id = false" >$FILE
-  echo "node_id = $NODE" >>$FILE
+function cleanup {
+  rv=$?
+  echo "Exit with ${rv}"
+  fusermount3 -u "${MOUNT}" || true
+  sleep 1
+  jobs -p | xargs -r kill || true
+  sleep 1
+  jobs -p | xargs -r kill -9 &>/dev/null || true
+  fusermount3 -u "${MOUNT}" &>/dev/null || true
+  exit $rv
 }
-function wait_path {
-  local file_path="$1"
-  local timeout=$2
-  local interval=1 # Check interval in seconds
-  local elapsed_time=0
-  # Wait until the file exists or timeout is reached
-  while [ ! -d "$file_path" ] && [ $elapsed_time -lt $timeout ]; do
-    sleep $interval
-    elapsed_time=$((elapsed_time + interval))
-  done
-  if ! [ -d "$file_path" ]; then
-    echo "Path '$file_path' not found within $timeout seconds."
-    exit 1
-  fi
-}
-function random_64 {
-  local random_number=$(od -An -N8 -tu8 /dev/urandom | awk '{print $1}')
-  echo "$random_number"
-}
+# ======== Main Script ========
+# Parameter validation
 if (($# < 2)); then
   echo "${0} <binary> <test-dir> <log-dir>"
   exit 1
 fi
+# Set variables
 BINARY=${1%/}
 TEST_DIR=${2%/}
 LOG_DIR=${3:-}
-mkdir -p ${TEST_DIR}
-function cleanup {
-  rv=$?
-  echo exit with ${rv}
-  fusermount3 -u ${MOUNT1} || true
-  fusermount3 -u ${MOUNT2} || true
-  fusermount3 -u ${MOUNT3} || true
-  fusermount3 -u ${MOUNT4} || true
-  sleep 1
-  jobs -p | xargs -r kill || true
-  sleep 1
-  jobs -p | xargs -r kill -9 &> /dev/null || true
-  fusermount3 -u ${MOUNT1} &> /dev/null || true
-  fusermount3 -u ${MOUNT2} &> /dev/null || true
-  fusermount3 -u ${MOUNT3} &> /dev/null || true
-  fusermount3 -u ${MOUNT4} &> /dev/null || true
-  exit $rv
-}
+# Set exit trap and error handling
 trap "cleanup" EXIT
 set -euo pipefail
-DATA=${TEST_DIR}/data
-CONFIG=${TEST_DIR}/config
-DEFAULT_LOG=${TEST_DIR}/log
+# Define paths
+DATA="${TEST_DIR}/data"
+CONFIG="${TEST_DIR}/config"
+DEFAULT_LOG="${TEST_DIR}/log"
 LOG=${LOG_DIR:=${DEFAULT_LOG}}
-MOUNT1=`realpath ${TEST_DIR}/mnt1`
-MOUNT2=`realpath ${TEST_DIR}/mnt2`
-MOUNT3=`realpath ${TEST_DIR}/mnt3`
-MOUNT4=`realpath ${TEST_DIR}/mnt4`
+MOUNT=$(realpath "${TEST_DIR}/mnt")
+export MGMTD_LOG=${LOG}/mgmtd.log
+export META_LOG=${LOG}/meta.log
+export STORAGE_LOG=${LOG}/storage.log
+export FUSE_LOG=${LOG}/fuse.log
+export FUSE_TEST_TIMEOUT="${FUSE_TEST_TIMEOUT:-3600}"
+# Script related tools
 SCRIPT_DIR=$(dirname "$0")
 UPDATE_CONFIG="python3 ${SCRIPT_DIR}/update-config.py"
-# NOTE: need regenerate chain table if change storage number or disk number
-NUM_META=2
-NUM_STORAGE=3
-NUM_DISK=4
+# Set environment variables
 export TOKEN="AADHHSOs8QA92iRe2wB1fmuL"
 export TOKEN_FILE="${CONFIG}/token"
-export PKEY_INDEX=${PKEY_INDEX:-0}
-export FS_CLUSTER="ci_test"
-export FDB_UNITTEST_CLUSTER=${DATA}/foundationdb/fdb.cluster
-export TARGETS="$(seq -f \'{{\ env.STORAGE_DIR\ }}/data%g\', 1 ${NUM_DISK})"
+export PKEY_INDEX=0
+export FS_CLUSTER="ci_test"
+export FDB_TEST_CLUSTER="${DATA}/foundationdb/fdb.cluster"
+export TARGETS="'${DATA}/storage/data1', '${DATA}/storage/data2', '${DATA}/storage/data3', '${DATA}/storage/data4'"
+# Set FoundationDB paths
 if [ -z "${FDB_PATH+x}" ]; then
   FDB=$(which fdbserver)
   FDBCLI=$(which fdbcli)
 else
-  FDB=$FDB_PATH/fdbserver
-  FDBCLI=$FDB_PATH/fdbcli
+  FDB="$FDB_PATH/fdbserver"
+  FDBCLI="$FDB_PATH/fdbcli"
 fi
-# check network
-echo "### check network"
+# ======== Network Configuration ========
+echo "### Check network"
 export ADDRESS=$(ip -o -4 addr show | grep -E '(en|eth|ib)' | awk '{print $4}' | head -n 1 | awk -F'[/ ]' '{print $1}')
-export FDB_PORT=$(random_port)
-export MGMTD_PORT=$(random_port)
-if [ -n "${FUSE_TEST_FDB_PORT+x}" ]; then
-  echo use FUSE_TEST_FDB_PORT ${FUSE_TEST_FDB_PORT}
-  lsof -i ":${FUSE_TEST_FDB_PORT}" || echo port available
-  export FDB_PORT=${FUSE_TEST_FDB_PORT}
+export FDB_PORT=12500
+export MGMTD_PORT=12501
+export META_PORT=12502
+export STORAGE_PORT=12503
+# ======== Check Directories and Files ========
+echo "### Check directory and file"
+check_exists "${TEST_DIR}"
+if [ -z "$(ls -A ${TEST_DIR})" ]; then
+  echo ${TEST_DIR} is empty
+else
+  echo ${TEST_DIR} is not empty
+  exit 1
 fi
-if [ -n "${FUSE_TEST_MGMTD_PORT+x}" ]; then
-  echo use FUSE_TEST_MGMTD_PORT ${FUSE_TEST_MGMTD_PORT}
-  lsof -i ":${FUSE_TEST_MGMTD_PORT}" || echo port available
-  export MGMTD_PORT=${FUSE_TEST_MGMTD_PORT}
-fi
-echo "NIC address ${ADDRESS}"
-echo "FDB port ${FDB_PORT}"
-echo "MGMTD port ${MGMTD_PORT}"
-# check directory
-echo "### check directory and file"
-check_exists ${TEST_DIR}
-rm -rf ${TEST_DIR}/*
-check_exists ${BINARY}
-check_exists ${BINARY}/mgmtd_main
-check_exists ${BINARY}/meta_main
-check_exists ${BINARY}/storage_main
-check_exists ${BINARY}/hf3fs_fuse_main
-check_exists ${BINARY}/admin_cli
-check_exists ${FDB}
-check_exists ${FDBCLI}
-# create subdirectory
-echo "### create subdirectory"
-mkdir -p ${CONFIG}
-mkdir -p ${LOG}
-mkdir -p ${DATA}
-mkdir -p ${MOUNT1}
-mkdir -p ${MOUNT2}
-mkdir -p ${MOUNT3}
-mkdir -p ${MOUNT4}
-mkdir -p ${DATA}/foundationdb
-for i in $(seq 1 ${NUM_STORAGE}); do
-  mkdir -p ${DATA}/storage-${i}
-  for j in $(seq 1 ${NUM_DISK}); do
-    mkdir -p ${DATA}/storage-${i}/data${j}
-  done
-done
-# generate app config
-echo "generate app config"
-create_app_config 1 ${CONFIG}/mgmtd_main_app.toml
-for i in $(seq 1 ${NUM_META}); do
-  node=$(expr 50 + ${i})
-  file=${CONFIG}/meta_main_app_${node}.toml
-  create_app_config ${node} ${file}
-done
-create_app_config 51 ${CONFIG}/meta_main_app.toml
-for i in $(seq 1 ${NUM_STORAGE}); do
-  node=$(expr 10000 + ${i})
-  file=${CONFIG}/storage_main_app_${node}.toml
-  create_app_config ${node} ${file}
-done
-# generate config
-echo "generate config"
+mkdir -p ${DATA}/storage/data{1..4}
+check_exists "${BINARY}"
+check_exists "${BINARY}/mgmtd_main"
+check_exists "${BINARY}/meta_main"
+check_exists "${BINARY}/storage_main"
+check_exists "${BINARY}/hf3fs_fuse_main"
+check_exists "${BINARY}/admin_cli"
+check_exists "${FDB}"
+check_exists "${FDBCLI}"
+# ======== Create Directories ========
+echo "### Create subdirectories"
+mkdir -p "${CONFIG}" "${LOG}" "${DATA}" "${MOUNT}" "${DATA}/foundationdb" "${DATA}/storage"
+# ======== Generate Configuration Files ========
+echo "Generate config"
+# mgmtd app configuration
+echo "allow_empty_node_id = false" > "${CONFIG}/mgmtd_main_app.toml"
+echo "node_id = 1" >> "${CONFIG}/mgmtd_main_app.toml"
+# meta app configuration
+echo "allow_empty_node_id = false" > "${CONFIG}/meta_main_app.toml"
+echo "node_id = 50" >> "${CONFIG}/meta_main_app.toml"
+# storage app configuration
+echo "allow_empty_node_id = false" > "${CONFIG}/storage_main_app.toml"
+echo "node_id = 10000" >> "${CONFIG}/storage_main_app.toml"
 echo ${TOKEN} > ${TOKEN_FILE}
-for file in ${SCRIPT_DIR}/config/*.toml; do
-  echo "- generate $(basename $file)"
-  cat ${file} | envsubst > ${CONFIG}/$(basename $file)
+# Copy and process template configurations
+for file in "${SCRIPT_DIR}"/config/*.toml; do
+  echo "- Generate $(basename "$file")"
+  cat "${file}" | envsubst > "${CONFIG}/$(basename "$file")"
 done
 ADMIN_CLI="${BINARY}/admin_cli --cfg ${CONFIG}/admin_cli.toml --"
-# start foundationdb
-echo "### create foundationdb"
-start_fdb $FDB_PORT ${DATA}/foundationdb ${LOG}/
-# init cluster
-echo "### init cluster"
-echo "admin_cli: ${ADMIN_CLI}"
-${ADMIN_CLI} user-add --root --admin --token ${TOKEN} 0 root
+# ======== Create FoundationDB ========
+echo "### Create foundationdb"
+start_fdb $FDB_PORT "${DATA}/foundationdb" "${LOG}/"
+# ======== Initialize Cluster ========
+echo "### Init cluster"
+${ADMIN_CLI} user-add --root --admin --token "${TOKEN}" 0 root
 ${ADMIN_CLI} user-set-token --new 0
-echo ""
 ${ADMIN_CLI} user-list
-echo ""
 ${ADMIN_CLI} init-cluster \
-  --mgmtd ${CONFIG}/mgmtd_main.toml \
-  --meta ${CONFIG}/meta_main.toml \
-  --storage ${CONFIG}/storage_main.toml \
-  --fuse ${CONFIG}/hf3fs_fuse_main.toml \
+  --mgmtd "${CONFIG}/mgmtd_main.toml" \
+  --meta "${CONFIG}/meta_main.toml" \
+  --storage "${CONFIG}/storage_main.toml" \
+  --fuse "${CONFIG}/hf3fs_fuse_main.toml" \
   --skip-config-check 1 524288 8
-# start mgmtd
-echo "### start mgmtd"
-LOG_FILE=${LOG}/mgmtd.log \
-PORT=${MGMTD_PORT} \
-${BINARY}/mgmtd_main \
-  --app_cfg ${CONFIG}/mgmtd_main_app.toml \
-  --launcher_cfg ${CONFIG}/mgmtd_main_launcher.toml \
-  --cfg ${CONFIG}/mgmtd_main.toml \
-  >${LOG}/mgmtd_main.stdout 2>${LOG}/mgmtd_main.stderr &
-sleep 5 && echo ""
-# start meta
-echo "### start meta & storage"
-for i in $(seq 1 ${NUM_STORAGE}); do
-  LOG_FILE=${LOG}/storage-${i}.log \
-  PORT=0 \
-  STORAGE_DIR=${DATA}/storage-${i}/ \
-  ${BINARY}/storage_main \
-    --app_cfg ${CONFIG}/storage_main_app_$(expr 10000 + ${i}).toml \
-    --launcher_cfg ${CONFIG}/storage_main_launcher.toml \
-    --cfg ${CONFIG}/storage_main.toml \
-    >${LOG}/storage_main-${i}.stdout 2>${LOG}/storage_main-${i}.stderr &
-done
-for i in $(seq 1 ${NUM_META}); do
-  LOG_FILE=${LOG}/meta-${i}.log \
-  PORT=0 \
-  ${BINARY}/meta_main \
-    --app_cfg ${CONFIG}/meta_main_app_$(expr 50 + ${i}).toml \
-    --launcher_cfg ${CONFIG}/meta_main_launcher.toml \
-    --cfg ${CONFIG}/meta_main.toml \
-    >${LOG}/meta_main-${i}.stdout 2>${LOG}/meta_main-${i}.stderr &
-done
-sleep 15 && echo ""
-# list nodes
-echo "### list nodes"
+# ======== Start Services ========
+echo "### Start mgmtd"
+"${BINARY}/mgmtd_main" \
+  --app_cfg "${CONFIG}/mgmtd_main_app.toml" \
+  --launcher_cfg "${CONFIG}/mgmtd_main_launcher.toml" \
+  --cfg "${CONFIG}/mgmtd_main.toml" \
+  > "${LOG}/mgmtd_main.stdout" 2> "${LOG}/mgmtd_main.stderr" &
+sleep 5
+echo "### Start meta & storage"
+"${BINARY}/storage_main" \
+  --app_cfg "${CONFIG}/storage_main_app.toml" \
+  --launcher_cfg "${CONFIG}/storage_main_launcher.toml" \
+  --cfg "${CONFIG}/storage_main.toml" \
+  > "${LOG}/storage_main.stdout" 2> "${LOG}/storage_main.stderr" &
+"${BINARY}/meta_main" \
+  --app_cfg "${CONFIG}/meta_main_app.toml" \
+  --launcher_cfg "${CONFIG}/meta_main_launcher.toml" \
+  --cfg "${CONFIG}/meta_main.toml" \
+  > "${LOG}/meta_main.stdout" 2> "${LOG}/meta_main.stderr" &
+sleep 15
+# ======== Configure Nodes and Chains ========
+echo "### List nodes"
 ${ADMIN_CLI} list-nodes
-# create chains
-echo "### create targets and chain table"
-echo "create targets"
-echo "ChainId,TargetId" >${CONFIG}/chains.csv
-echo "ChainId" >${CONFIG}/chain-table.csv
-for storage in $(seq 1 ${NUM_STORAGE}); do
-  node=$(expr 10000 + ${storage})
-  for disk in $(seq 1 ${NUM_DISK}); do
-    target=$(printf "%d%02d001" $node $disk)
-    args="create-target --node-id ${node} --disk-index $((disk - 1)) --target-id ${target} --chain-id ${target}"
-    echo $args
-    echo "${target},${target}" >>${CONFIG}/chains.csv
-    echo "${target}" >>${CONFIG}/chain-table.csv
-    ${ADMIN_CLI} ${args}
-  done
+echo "### Create targets and chain table"
+echo "ChainId,TargetId" > "${CONFIG}/chains.csv"
+echo "ChainId" > "${CONFIG}/chain-table.csv"
+for disk in $(seq 1 4); do
+  target=$(printf "%d%02d001" 10000 $disk)
+  ${ADMIN_CLI} create-target --node-id 10000 --disk-index $((disk - 1)) --target-id "${target}" --chain-id "${target}"
+  echo "${target},${target}" >> "${CONFIG}/chains.csv"
+  echo "${target}" >> "${CONFIG}/chain-table.csv"
 done
-echo "update chains and chain table"
-${ADMIN_CLI} upload-chains ${CONFIG}/chains.csv
-${ADMIN_CLI} upload-chain-table 1 ${CONFIG}/chain-table.csv --desc replica-1
+${ADMIN_CLI} upload-chains "${CONFIG}/chains.csv"
+${ADMIN_CLI} upload-chain-table 1 "${CONFIG}/chain-table.csv" --desc replica-1
 sleep 10
 ${ADMIN_CLI} list-chains
 ${ADMIN_CLI} list-chain-tables
-echo "create test user"
-if [ $UID -ne 0 ]; then
-  ${ADMIN_CLI} user-add ${UID} test
-fi
+if [ $UID -ne 0 ]; then ${ADMIN_CLI} user-add ${UID} test; fi
 ${ADMIN_CLI} user-list
 ${ADMIN_CLI} mkdir --perm 0755 test
 ${ADMIN_CLI} set-perm --uid ${UID} --gid ${UID} test
-echo "### start fuse"
-echo "redirect stdout to ${LOG}/fuse.1.stdout, stderr to ${LOG}/fuse.1.stderr"
-LOG_FILE=${LOG}/fuse.1.log DYNAMIC_STRIPE=true READCACHE=false \
-timeout -k 10 ${FUSE_TEST_TIMEOUT} ${BINARY}/hf3fs_fuse_main \
-  --launcher_cfg ${CONFIG}/hf3fs_fuse_main_launcher.toml \
-  --launcher_config.mountpoint=${MOUNT1} \
-  >${LOG}/fuse.1.stdout 2>${LOG}/fuse.1.stderr &
-echo "redirect stdout to ${LOG}/fuse.2.stdout, stderr to ${LOG}/fuse.2.stderr"
-LOG_FILE=${LOG}/fuse.2.log ENABLE_WRITE_BUFFER=1 DYNAMIC_STRIPE=false READCACHE=false \
-timeout -k 10 ${FUSE_TEST_TIMEOUT} ${BINARY}/hf3fs_fuse_main \
-  --launcher_cfg ${CONFIG}/hf3fs_fuse_main_launcher.toml \
-  --launcher_config.mountpoint=${MOUNT2} \
-  >${LOG}/fuse.2.stdout 2>${LOG}/fuse.2.stderr &
-echo "redirect stdout to ${LOG}/fuse.3.stdout, stderr to ${LOG}/fuse.3.stderr"
-LOG_FILE=${LOG}/fuse.3.log DYNAMIC_STRIPE=false READCACHE=true \
-timeout -k 10 ${FUSE_TEST_TIMEOUT} ${BINARY}/hf3fs_fuse_main \
-  --launcher_cfg ${CONFIG}/hf3fs_fuse_main_launcher.toml \
-  --launcher_config.mountpoint=${MOUNT3} \
-  >${LOG}/fuse.3.stdout 2>${LOG}/fuse.3.stderr &
-echo "redirect stdout to ${LOG}/fuse.4.stdout, stderr to ${LOG}/fuse.4.stderr"
-LOG_FILE=${LOG}/fuse.4.log ENABLE_WRITE_BUFFER=1 DYNAMIC_STRIPE=true READCACHE=true \
-timeout -k 10 ${FUSE_TEST_TIMEOUT} ${BINARY}/hf3fs_fuse_main \
-  --launcher_cfg ${CONFIG}/hf3fs_fuse_main_launcher.toml \
-  --launcher_config.mountpoint=${MOUNT4} \
-  >${LOG}/fuse.4.stdout 2>${LOG}/fuse.4.stderr &
-wait_path ${MOUNT1}/test 30
-wait_path ${MOUNT2}/test 30
-wait_path ${MOUNT3}/test 30
-wait_path ${MOUNT4}/test 30
-if [ -z "${FIO_BINARY+x}" ]; then
-  if [ -f ${BINARY}/fio ]; then
-    FIO_BINARY=${BINARY}/fio
-  else
-    FIO_BINARY=""
-  fi
+# ======== Start FUSE ========
+echo "### Start fuse"
+"${BINARY}/hf3fs_fuse_main" \
+  --launcher_cfg "${CONFIG}/hf3fs_fuse_main_launcher.toml" \
+  --launcher_config.mountpoint="${MOUNT}" \
+  > "${LOG}/fuse.stdout" 2> "${LOG}/fuse.stderr" &
+# Wait for mount point to be ready
+SECONDS=0
+while [ ! -d "${MOUNT}/test" ] && [ ${SECONDS} -lt 30 ]; do
+  sleep 1
+done
+if [ ! -d "${MOUNT}/test" ]; then
+  echo "Mount point not ready within 30 seconds"
+  exit 1
 fi
-echo "Fuse mounted"
-python3 ${SCRIPT_DIR}/concurrent_rmrf.py --mnt_path ${MOUNT1}
-echo "### test gc permission check"
-mkdir -p ${MOUNT1}/test/data/subdir{1..1024}
-sudo mkdir ${MOUNT1}/test/data/subdir999/root
-sudo touch ${MOUNT1}/test/data/subdir999/root/file-{1..5}
-mv ${MOUNT1}/test/data ${MOUNT1}/3fs-virt/rm-rf
-sleep 5
-stat ${MOUNT1}/trash/gc-orphans
-ls -lR ${MOUNT1}/trash/gc-orphans
-echo "### test trash"
-bash ${SCRIPT_DIR}/test_trash.sh ${BINARY} ${TEST_DIR}
-echo "### test ioctl"
-sudo rm -rf ${MOUNT1}/trash/`id -un`/*
-python3 ${SCRIPT_DIR}/test_ioctl.py ${MOUNT1} ${MOUNT1}/test/ioctl
-for MOUNT in ${MOUNT1} ${MOUNT2} ${MOUNT3} ${MOUNT4}; do
-  new_chunk_engine=$((( RANDOM % 2 == 0 )) && echo "true" || echo "false")
-  echo "### set new_chunk_engine to ${new_chunk_engine}"
-  ${ADMIN_CLI} hot-update-config --node-type META --string "server.meta.enable_new_chunk_engine=${new_chunk_engine}"
-  echo "### run test under ${MOUNT}"
-  SUBDIR=${MOUNT}/test/$(random_64)
-  mkdir -p ${SUBDIR}
-  echo "use tmp dir ${SUBDIR}"
-  echo "#### run ci test"
-  mkdir ${SUBDIR}/fuse_test_ci
-  python3 ${SCRIPT_DIR}/fuse_test_ci.py ${SUBDIR}/fuse_test_ci
-  echo ""
-  echo "#### test read after write"
-  mkdir ${SUBDIR}/read_after_write
-  python3 ${SCRIPT_DIR}/read_after_write.py ${SUBDIR}/read_after_write
-  echo ""
-  echo "### test client io during update chain table"
-  # start a read/write task
-  mkdir ${SUBDIR}/rw_during_update_chain_table
-  python3 ${SCRIPT_DIR}/read_after_write.py ${SUBDIR}/rw_during_update_chain_table --seconds 60 &
-  PID=$!
-  # then update chain table in background
-  sleep 10
-  start_time=$(date +%s)
-  while true; do
-    current_time=$(date +%s)
-    elapsed=$(( current_time - start_time ))
-    if [ $elapsed -lt 30 ]; then
-      echo "update chain table"
-      (head -n 1 ${CONFIG}/chain-table.csv && tail -n +2 ${CONFIG}/chain-table.csv | shuf) > ${CONFIG}/chain-table-shuffle.csv
-      ${ADMIN_CLI} upload-chain-table 1 ${CONFIG}/chain-table-shuffle.csv --desc replica-1-shuflle
-      ${ADMIN_CLI} list-chain-tables
-    else
-      break
-    fi
-    sleep 10
-  done
-  wait $PID
-  echo ""
-  echo "#### test random write and read"
-  mkdir ${SUBDIR}/random_rw
-  python3 ${SCRIPT_DIR}/random_rw.py ${SUBDIR}/random_rw
-  echo ""
-  echo "#### test read before close"
-  mkdir ${SUBDIR}/read_before_close
-  python3 ${SCRIPT_DIR}/random_rw.py --read_before_close ${SUBDIR}/read_before_close
-  echo ""
-  set +e
-  python3 -c 'import hf3fs_fuse.io as h3io'
-  ret=$?
-  set -e
-  if [ ${ret} -eq 0 ]; then
-    # cat /proc/self/mountinfo
-    # echo
-    # grep hf3fs /proc/self/mountinfo | awk '{print $5, $(NF-2)}'
-    # ls $(grep hf3fs /proc/self/mountinfo | awk '{print $5}')/
-    # ls ${MOUNT}
-    # echo ${SUBDIR}/usrbio
-    echo '#### test usrbio'
-    mkdir ${SUBDIR}/usrbio
-    python3 ${SCRIPT_DIR}/usrbio.py ${SUBDIR}/usrbio ${MOUNT}
-  else
-    echo -e "${RED}#### can't inmport hf3fs_fuse.io, skip usrbio test!!!${NC}"
-  fi
-  if [ -f "${FIO_BINARY}" ]; then
-    echo "#### test fio with usrbio"
-    mkdir ${SUBDIR}/fio
-    for j in {0..3}; do
-      for f in {0..2}; do
-        truncate -s 4G ${SUBDIR}/fio/j${j}-f${f}
-      done
-    done
-    date
-    LD_PRELOAD="${BINARY}/libhf3fs_api_shared.so" ${FIO_BINARY} -name=test_write -rw=write -direct=1 -ioengine=hf3fs_ring -group_reporting -numjobs=4 -bs=4m -size=4G -directory=${SUBDIR}/fio -filename_format='j$jobnum-f$filenum' -thread=1 -mountpoint=${MOUNT} -verify=crc32c -nrfiles=2
-    date
-    ls -lh ${SUBDIR}/fio
-    date
-    LD_PRELOAD="${BINARY}/libhf3fs_api_shared.so" ${FIO_BINARY} -name=test_read -rw=randread -direct=1 -ioengine=hf3fs_ring -group_reporting -numjobs=4 -bs=4k -iodepth=100 -time_based -runtime=30 -directory=${SUBDIR}/fio -filename_format='j$jobnum-f$filenum' -thread=1 -mountpoint=${MOUNT} -iodepth_batch_submit=100 -iodepth_batch_complete_min=100 -iodepth_batch_complete_max=100 -size=4G -nrfiles=2
-    date
-  else
-    echo "${RED}#### fio not found, skip fio test!!!${NC}"
-  fi
-  if [ -f ${BINARY}/pjdfstest ]; then
-    echo "#### run pjdfstest"
-    ${BINARY}/pjdfstest -c ${CONFIG}/pjdfstest.toml -p ${SUBDIR}
-  else
-    echo "${RED}#### pjdfstest not found, skip pjdfstest!!!${NC}"
-  fi
-  touch ${SUBDIR}/test_iflags
-  python3 ${SCRIPT_DIR}/test_iflags.py ${SUBDIR}/test_iflags
-done
-echo "### test concurrent rw"
-mkdir -p ${MOUNT1}/test/concurrent_rw1 ${MOUNT2}/test/concurrent_rw1
-python3 ${SCRIPT_DIR}/concurrent_rw.py ${MOUNT1}/test/concurrent_rw1 ${MOUNT2}/test/concurrent_rw1
-mkdir -p ${MOUNT1}/test/concurrent_rw2 ${MOUNT2}/test/concurrent_rw2
-python3 ${SCRIPT_DIR}/concurrent_rw.py ${MOUNT2}/test/concurrent_rw2 ${MOUNT1}/test/concurrent_rw2
-echo "### test lock directory"
-mkdir -p ${MOUNT1}/test/test_dir_lock
-python3 ${SCRIPT_DIR}/lock_directory.py ${MOUNT1}/test/test_dir_lock ${MOUNT2}/test/test_dir_lock
-echo "### test client io during update chain table"
-(head -n 1 ${CONFIG}/chain-table.csv && tail -n +2 ${CONFIG}/chain-table.csv | shuf) > ${CONFIG}/chain-table-2.csv
-${ADMIN_CLI} upload-chain-table 1 ${CONFIG}/chain-table-2.csv --desc replica-1-version-2
-${ADMIN_CLI} list-chain-tables
-echo "### test chattr +i"
-sudo chattr +i ${MOUNT1}/test
-lsattr ${MOUNT1}
-sudo chattr -i ${MOUNT1}/test
-echo "unmount fuse and stop all servers"
-fusermount3 -u ${MOUNT1}
-fusermount3 -u ${MOUNT2}
-fusermount3 -u ${MOUNT3}
-fusermount3 -u ${MOUNT4}
-# Define function kill_and_wait
-kill_and_wait() {
-  local pid="$1"
-  local cmd_name="$2"
-  # Check whether the process exists
-  if ps -p "$pid" > /dev/null; then
-    # Kill the process
-    kill "$pid"
-    echo "Sent kill signal to process $cmd_name with PID $pid"
-    # Wait and check whether the process has exited
-    for i in {1..5}; do
-      sleep 1
-      if ! ps -p "$pid" > /dev/null; then
-        echo "Process $cmd_name with PID $pid has exited."
-        return 0
-      fi
-    done
-    # If the process still exists after 5 seconds
-    echo -e "\e[31mError: Process $cmd_name with PID $pid did not exit after 5 seconds.\e[0m"
-    kill -9 "$pid"
-  else
-    echo "No process with PID $pid found."
-    return 0
-  fi
-}
-# Get the PIDs and command names of all background jobs
-jobs_array=()
-while read -r pid cmd; do
-  jobs_array+=("$pid $cmd")
-done < <(jobs -l | awk '{print $2, $3, $4, $5, $6, $7, $8, $9, $10}')
-# Kill in the reverse order of startup
-for (( idx=${#jobs_array[@]}-1 ; idx>=0 ; idx-- )); do
-  job_info=(${jobs_array[idx]})
-  pid=${job_info[0]}
-  cmd_name=${job_info[@]:1}
-  kill_and_wait "$pid" "$cmd_name"
-done
+-echo "Fuse mounted"
+-python3 ${SCRIPT_DIR}/concurrent_rmrf.py --mnt_path ${MOUNT1}
+-echo "### test gc permission check"
+-mkdir -p ${MOUNT1}/test/data/subdir{1..1024}
+-sudo mkdir ${MOUNT1}/test/data/subdir999/root
+-sudo touch ${MOUNT1}/test/data/subdir999/root/file-{1..5}
+-mv ${MOUNT1}/test/data ${MOUNT1}/3fs-virt/rm-rf
+-sleep 5
+-stat ${MOUNT1}/trash/gc-orphans
+-ls -lR ${MOUNT1}/trash/gc-orphans
+-echo "### test trash"
+-bash ${SCRIPT_DIR}/test_trash.sh ${BINARY} ${TEST_DIR}
+-echo "### test ioctl"
+-sudo rm -rf ${MOUNT1}/trash/`id -un`/*
+-python3 ${SCRIPT_DIR}/test_ioctl.py ${MOUNT1} ${MOUNT1}/test/ioctl
+-for MOUNT in ${MOUNT1} ${MOUNT2} ${MOUNT3} ${MOUNT4}; do
+-  new_chunk_engine=$((( RANDOM % 2 == 0 )) && echo "true" || echo "false")
+-  echo "### set new_chunk_engine to ${new_chunk_engine}"
+-  ${ADMIN_CLI} hot-update-config --node-type META --string "server.meta.enable_new_chunk_engine=${new_chunk_engine}"
+-  echo "### run test under ${MOUNT}"
+-  SUBDIR=${MOUNT}/test/$(random_64)
+-  mkdir -p ${SUBDIR}
+-  echo "use tmp dir ${SUBDIR}"
+-  echo "#### run ci test"
+-  mkdir ${SUBDIR}/fuse_test_ci
+-  python3 ${SCRIPT_DIR}/fuse_test_ci.py ${SUBDIR}/fuse_test_ci
+-  echo ""
+-  echo "#### test read after write"
+-  mkdir ${SUBDIR}/read_after_write
+-  python3 ${SCRIPT_DIR}/read_after_write.py ${SUBDIR}/read_after_write
+-  echo ""
+-  echo "### test client io during update chain table"
+-  # start a read/write task
+-  mkdir ${SUBDIR}/rw_during_update_chain_table
+-  python3 ${SCRIPT_DIR}/read_after_write.py ${SUBDIR}/rw_during_update_chain_table --seconds 60 &
+-  PID=$!
+-  # then update chain table in background
+-  sleep 10
+-  start_time=$(date +%s)
+-  while true; do
+-    current_time=$(date +%s)
+-    elapsed=$(( current_time - start_time ))
+-    if [ $elapsed -lt 30 ]; then
+-      echo "update chain table"
+-      (head -n 1 ${CONFIG}/chain-table.csv && tail -n +2 ${CONFIG}/chain-table.csv | shuf) > ${CONFIG}/chain-table-shuffle.csv
+-      ${ADMIN_CLI} upload-chain-table 1 ${CONFIG}/chain-table-shuffle.csv --desc replica-1-shuflle
+-      ${ADMIN_CLI} list-chain-tables
+-    else
+-      break
+-    fi
+-    sleep 10
+-  done
+-  wait $PID
+-  echo ""
+-  echo "#### test random write and read"
+-  mkdir ${SUBDIR}/random_rw
+-  python3 ${SCRIPT_DIR}/random_rw.py ${SUBDIR}/random_rw
+-  echo ""
+-  echo "#### test read before close"
+-  mkdir ${SUBDIR}/read_before_close
+-  python3 ${SCRIPT_DIR}/random_rw.py --read_before_close ${SUBDIR}/read_before_close
+-  echo ""
+-  set +e
+-  python3 -c 'import hf3fs_fuse.io as h3io'
+-  ret=$?
+-  set -e
+-  if [ ${ret} -eq 0 ]; then
+-    # cat /proc/self/mountinfo
+-    # echo
+-    # grep hf3fs /proc/self/mountinfo | awk '{print $5, $(NF-2)}'
+-    # ls $(grep hf3fs /proc/self/mountinfo | awk '{print $5}')/
+-    # ls ${MOUNT}
+-    # echo ${SUBDIR}/usrbio
+-    echo '#### test usrbio'
+-    mkdir ${SUBDIR}/usrbio
+-    python3 ${SCRIPT_DIR}/usrbio.py ${SUBDIR}/usrbio ${MOUNT}
+-  else
+-    echo -e "${RED}#### can't inmport hf3fs_fuse.io, skip usrbio test!!!${NC}"
+-  fi
+-  if [ -f "${FIO_BINARY}" ]; then
+-    echo "#### test fio with usrbio"
+-    mkdir ${SUBDIR}/fio
+-    for j in {0..3}; do
+-      for f in {0..2}; do
+-        truncate -s 4G ${SUBDIR}/fio/j${j}-f${f}
+-      done
+-    done
+-    date
+-    LD_PRELOAD="${BINARY}/libhf3fs_api_shared.so" ${FIO_BINARY} -name=test_write -rw=write -direct=1 -ioengine=hf3fs_ring -group_reporting -numjobs=4 -bs=4m -size=4G -directory=${SUBDIR}/fio -filename_format='j$jobnum-f$filenum' -thread=1 -mountpoint=${MOUNT} -verify=crc32c -nrfiles=2
+-    date
+-    ls -lh ${SUBDIR}/fio
+-    date
+-    LD_PRELOAD="${BINARY}/libhf3fs_api_shared.so" ${FIO_BINARY} -name=test_read -rw=randread -direct=1 -ioengine=hf3fs_ring -group_reporting -numjobs=4 -bs=4k -iodepth=100 -time_based -runtime=30 -directory=${SUBDIR}/fio -filename_format='j$jobnum-f$filenum' -thread=1 -mountpoint=${MOUNT} -iodepth_batch_submit=100 -iodepth_batch_complete_min=100 -iodepth_batch_complete_max=100 -size=4G -nrfiles=2
+-    date
+-  else
+-    echo "${RED}#### fio not found, skip fio test!!!${NC}"
+-  fi
+-  if [ -f ${BINARY}/pjdfstest ]; then
+-    echo "#### run pjdfstest"
+-    ${BINARY}/pjdfstest -c ${CONFIG}/pjdfstest.toml -p ${SUBDIR}
+-  else
+-    echo "${RED}#### pjdfstest not found, skip pjdfstest!!!${NC}"
+-  fi
+-  touch ${SUBDIR}/test_iflags
+-  python3 ${SCRIPT_DIR}/test_iflags.py ${SUBDIR}/test_iflags
+-done
+-echo "### test concurrent rw"
+-mkdir -p ${MOUNT1}/test/concurrent_rw1 ${MOUNT2}/test/concurrent_rw1
+-python3 ${SCRIPT_DIR}/concurrent_rw.py ${MOUNT1}/test/concurrent_rw1 ${MOUNT2}/test/concurrent_rw1
+-mkdir -p ${MOUNT1}/test/concurrent_rw2 ${MOUNT2}/test/concurrent_rw2
+-python3 ${SCRIPT_DIR}/concurrent_rw.py ${MOUNT2}/test/concurrent_rw2 ${MOUNT1}/test/concurrent_rw2
+-echo "### test lock directory"
+-mkdir -p ${MOUNT1}/test/test_dir_lock
+-python3 ${SCRIPT_DIR}/lock_directory.py ${MOUNT1}/test/test_dir_lock ${MOUNT2}/test/test_dir_lock
+-echo "### test client io during update chain table"
+-(head -n 1 ${CONFIG}/chain-table.csv && tail -n +2 ${CONFIG}/chain-table.csv | shuf) > ${CONFIG}/chain-table-2.csv
+-${ADMIN_CLI} upload-chain-table 1 ${CONFIG}/chain-table-2.csv --desc replica-1-version-2
+-${ADMIN_CLI} list-chain-tables
+-echo "### test chattr +i"
+-sudo chattr +i ${MOUNT1}/test
+-lsattr ${MOUNT1}
+-sudo chattr -i ${MOUNT1}/test
+-echo "unmount fuse and stop all servers"
+-fusermount3 -u ${MOUNT1}
+-fusermount3 -u ${MOUNT2}
+-fusermount3 -u ${MOUNT3}
+-fusermount3 -u ${MOUNT4}
+-# Define function kill_and_wait
+-kill_and_wait() {
+-  local pid="$1"
+-  local cmd_name="$2"
+-  # Check whether the process exists
+-  if ps -p "$pid" > /dev/null; then
+-    # Kill the process
+-    kill "$pid"
+-    echo "Sent kill signal to process $cmd_name with PID $pid"
+-    # Wait and check whether the process has exited
+-    for i in {1..5}; do
+-      sleep 1
+-      if ! ps -p "$pid" > /dev/null; then
+-        echo "Process $cmd_name with PID $pid has exited."
+-        return 0
+-      fi
+-    done
+-    # If the process still exists after 5 seconds
+-    echo -e "\e[31mError: Process $cmd_name with PID $pid did not exit after 5 seconds.\e[0m"
+-    kill -9 "$pid"
+-  else
+-    echo "No process with PID $pid found."
+-    return 0
+-  fi
+-}
+-# Get the PIDs and command names of all background jobs
+-jobs_array=()
+-while read -r pid cmd; do
+-  jobs_array+=("$pid $cmd")
+-done < <(jobs -l | awk '{print $2, $3, $4, $5, $6, $7, $8, $9, $10}')
+-# Kill in the reverse order of startup
+-for (( idx=${#jobs_array[@]}-1 ; idx>=0 ; idx-- )); do
+-  job_info=(${jobs_array[idx]})
+-  pid=${job_info[0]}
+-  cmd_name=${job_info[@]:1}
+-  kill_and_wait "$pid" "$cmd_name"
+-done
+echo "A temporary 3FS has been mounted to ${MOUNT}"
+echo "This file system is for testing purposes only."
+echo "Do not store any important data or you will lose them."
+sleep infinity || true
+echo "Unmount fuse and stop all servers"
+fusermount3 -u "${MOUNT}"
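For reference, a minimal invocation sketch of the reworked script; the binary directory and test/log paths below are placeholders, and the only requirements stated by the script itself are its usage string and the checks above (the test directory must exist and be empty, and fdbserver/fdbcli must be on PATH or under FDB_PATH):

export FDB_PATH=/opt/foundationdb/bin   # optional; otherwise fdbserver/fdbcli are taken from PATH
mkdir -p /tmp/3fs-test /tmp/3fs-logs    # <test-dir> must exist and be empty
./run.sh ./build/bin /tmp/3fs-test /tmp/3fs-logs
# A single-node test filesystem stays mounted at /tmp/3fs-test/mnt until the script is
# interrupted; the EXIT trap then unmounts it and kills the background servers.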