
Commit

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dataset-pil
LielinJiang committed Oct 30, 2020
2 parents 108a788 + 8cd1c10 commit 908ad8c
Showing 33 changed files with 806 additions and 163 deletions.
6 changes: 3 additions & 3 deletions paddle/.set_port.sh
@@ -13,6 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-DIRNAME=`dirname $0`
-source $DIRNAME/.common_test_util.sh
-set_port $@
+DIRNAME="$(dirname "$0")"
+sh "$DIRNAME"/.common_test_util.sh
+set_port "$@"
8 changes: 5 additions & 3 deletions paddle/.set_python_path.sh
@@ -24,12 +24,14 @@
 PYPATH=""
 set -x
 while getopts "d:" opt; do
-  case $opt in
+  case "$opt" in
     d)
       PYPATH=$OPTARG
       ;;
+    *)
+      ;;
   esac
 done
-shift $(($OPTIND - 1))
+shift $(("$OPTIND" - 1))
 export PYTHONPATH=$PYPATH:$PYTHONPATH
-$@
+"$@"
3 changes: 2 additions & 1 deletion paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -2101,7 +2101,8 @@ PDNode *patterns::QuantizePlacement::operator()(
 PDNode *patterns::Bfloat16Placement::operator()(
     const std::unordered_set<std::string> &bfloat16_enabled_op_types) {
   std::unordered_set<std::string> supported_op_types =
-      std::unordered_set<std::string>({"conv2d", "fusion_gru"});
+      std::unordered_set<std::string>(
+          {"concat", "conv2d", "fusion_gru", "reshape2", "transpose2"});
   if (!bfloat16_enabled_op_types.empty()) {
     supported_op_types = bfloat16_enabled_op_types;
   }
@@ -40,6 +40,10 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     op->SetInput("X", {inputs[0], inputs[1]});
   } else if (type == "pool2d") {
     op->SetInput("X", {inputs[0]});
+  } else if (type == "transpose2") {
+    op->SetInput("X", {inputs[0]});
+  } else if (type == "reshape2") {
+    op->SetInput("X", {inputs[0]});
   } else {
     FAIL() << "Unexpected operator type.";
   }
@@ -57,8 +61,8 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
 ProgramDesc BuildProgramDesc() {
   ProgramDesc prog;
 
-  for (auto& v :
-       std::vector<std::string>({"a", "b", "c", "f", "g", "h", "k", "l"})) {
+  for (auto& v : std::vector<std::string>(
+           {"a", "b", "c", "f", "g", "h", "k", "l", "m", "n", "o", "p"})) {
     prog.MutableBlock(0)->Var(v);
   }

@@ -68,6 +72,9 @@ ProgramDesc BuildProgramDesc() {
SetOp(&prog, "pool2d", "pool1", {"g"}, {"h"});
SetOp(&prog, "conv2d", "conv2", {"h"}, {"k"});
SetOp(&prog, "pool2d", "pool2", {"k"}, {"l"});
SetOp(&prog, "concat", "concat2", {"l", "m"}, {"n"});
SetOp(&prog, "transpose2", "transpose", {"n"}, {"o"});
SetOp(&prog, "reshape2", "reshape", {"o"}, {"p"});

return prog;
}
@@ -115,15 +122,15 @@ void DefaultAttrTest(unsigned expected_bfloat16_data_type_count) {
 }
 
 TEST(Bfloat16PlacementPass, enable_all) {
-  MainTest({"conv2d", "pool2d", "relu", "concat"}, 6);
+  MainTest({"conv2d", "pool2d", "relu", "concat"}, 7);
 }
 
 TEST(Bfloat16PlacementPass, enabled_conv_and_pool) {
   // 2 conv2d + 2 pool2 - 1 orphaned conv2d
   MainTest({"conv2d", "pool2d"}, 3);
 }
 
-TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(0); }
+TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(5); }
 
 }  // namespace ir
 }  // namespace framework
3 changes: 2 additions & 1 deletion paddle/fluid/inference/api/demo_ci/clean.sh
@@ -1,4 +1,5 @@
+#!/bin/bash
 set -x
-cd `dirname $0`
+cd "$(dirname "$0")" || exit
 rm -rf build/ data/
 set +x
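
The `|| exit` guard added here is the standard fix for a failing `cd`: without it, the following `rm -rf` would run in whatever directory the shell happened to be in. A minimal sketch with a hypothetical path:

#!/bin/bash
cd /no/such/dir || exit 1   # stops here if the directory is missing...
rm -rf ./*                  # ...so the delete can never run elsewhere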
175 changes: 90 additions & 85 deletions paddle/fluid/inference/api/demo_ci/run.sh
@@ -1,29 +1,29 @@
 #!/bin/bash
 set -x
-PADDLE_ROOT=$1
-TURN_ON_MKL=$2 # use MKL or Openblas
-TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
-DATA_DIR=$4 # dataset
-TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
-TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
-MSVC_STATIC_CRT=$7
-inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
+PADDLE_ROOT="$1"
+TURN_ON_MKL="$2" # use MKL or Openblas
+TEST_GPU_CPU="$3" # test both GPU/CPU mode or only CPU mode
+DATA_DIR="$4" # dataset
+TENSORRT_INCLUDE_DIR="$5" # TensorRT header file dir, default to /usr/local/TensorRT/include
+TENSORRT_LIB_DIR="$6" # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+MSVC_STATIC_CRT="$7"
+inference_install_dir="${PADDLE_ROOT}"/build/paddle_inference_install_dir
 
-cd `dirname $0`
-current_dir=`pwd`
-if [ $2 == ON ]; then
+cd "$(dirname "$0")" || exit
+current_dir=$(pwd)
+if [ "$2" == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
-  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
+  MKL_LIB="${inference_install_dir}"/third_party/install/mklml/lib
+  export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${MKL_LIB}"
 fi
-if [ $3 == ON ]; then
+if [ "$3" == ON ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
 fi
 
 USE_TENSORRT=OFF
-if [ -d "$TENSORRT_INCLUDE_DIR" -a -d "$TENSORRT_LIB_DIR" ]; then
+if [ -d "$TENSORRT_INCLUDE_DIR" ] && [ -d "$TENSORRT_LIB_DIR" ]; then
   USE_TENSORRT=ON
 fi

@@ -32,141 +32,146 @@ URL_ROOT=http://paddlemodels.bj.bcebos.com/${PREFIX}

 # download vis_demo data
 function download() {
-  dir_name=$1
-  mkdir -p $dir_name
-  cd $dir_name
+  dir_name="$1"
+  mkdir -p "$dir_name"
+  cd "$dir_name" || exit
   if [[ -e "${PREFIX}${dir_name}.tar.gz" ]]; then
     echo "${PREFIX}${dir_name}.tar.gz has been downloaded."
   else
-    wget -q ${URL_ROOT}$dir_name.tar.gz
-    tar xzf *.tar.gz
+    wget -q "${URL_ROOT}""$dir_name".tar.gz
+    tar xzf ./*.tar.gz
   fi
-  cd ..
+  cd .. || exit
 }
-mkdir -p $DATA_DIR
-cd $DATA_DIR
+mkdir -p "$DATA_DIR"
+cd "$DATA_DIR" || exit
 vis_demo_list='se_resnext50 ocr mobilenet'
 for vis_demo_name in $vis_demo_list; do
-  download $vis_demo_name
+  download "$vis_demo_name"
 done
 
 # download word2vec data
 mkdir -p word2vec
-cd word2vec
+cd word2vec || exit
 if [[ -e "word2vec.inference.model.tar.gz" ]]; then
   echo "word2vec.inference.model.tar.gz has been downloaded."
 else
   wget -q http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz
-  tar xzf *.tar.gz
+  tar xzf ./*.tar.gz
 fi
 
 # compile and test the demo
-cd $current_dir
+cd "$current_dir" || exit
 mkdir -p build
-cd build
-rm -rf *
+cd build || exit
+rm -rf ./*
 
 for WITH_STATIC_LIB in ON OFF; do
-  if [ $(echo `uname` | grep "Win") != "" ]; then
+  if [ "$(uname | grep Win)" != "" ]; then
     # -----simple_on_word2vec on windows-----
-    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=${inference_install_dir} \
-      -DWITH_MKL=$TURN_ON_MKL \
+    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB="${inference_install_dir}" \
+      -DWITH_MKL="$TURN_ON_MKL" \
       -DDEMO_NAME=simple_on_word2vec \
-      -DWITH_GPU=$TEST_GPU_CPU \
-      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
-      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT
+      -DWITH_GPU="$TEST_GPU_CPU" \
+      -DWITH_STATIC_LIB="$WITH_STATIC_LIB" \
+      -DMSVC_STATIC_CRT="$MSVC_STATIC_CRT"
     msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
    for use_gpu in $use_gpu_list; do
      Release/simple_on_word2vec.exe \
-        --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
-        --use_gpu=$use_gpu
-      if [ $? -ne 0 ]; then
+        --dirname="$DATA_DIR"/word2vec/word2vec.inference.model \
+        --use_gpu="$use_gpu"
+      EXCODE="$?"
+      if [ "$EXCODE" -ne 0 ]; then
        echo "simple_on_word2vec demo runs fail."
        exit 1
      fi
    done
 
    # -----vis_demo on windows-----
-    rm -rf *
-    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=${inference_install_dir} \
-      -DWITH_MKL=$TURN_ON_MKL \
+    rm -rf ./*
+    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB="${inference_install_dir}" \
+      -DWITH_MKL="$TURN_ON_MKL" \
      -DDEMO_NAME=vis_demo \
-      -DWITH_GPU=$TEST_GPU_CPU \
-      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
-      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT
+      -DWITH_GPU="$TEST_GPU_CPU" \
+      -DWITH_STATIC_LIB="$WITH_STATIC_LIB" \
+      -DMSVC_STATIC_CRT="$MSVC_STATIC_CRT"
    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
    for use_gpu in $use_gpu_list; do
      for vis_demo_name in $vis_demo_list; do
        Release/vis_demo.exe \
-          --modeldir=$DATA_DIR/$vis_demo_name/model \
-          --data=$DATA_DIR/$vis_demo_name/data.txt \
-          --refer=$DATA_DIR/$vis_demo_name/result.txt \
-          --use_gpu=$use_gpu
-        if [ $? -ne 0 ]; then
+          --modeldir="$DATA_DIR"/"$vis_demo_name"/model \
+          --data="$DATA_DIR"/"$vis_demo_name"/data.txt \
+          --refer="$DATA_DIR"/"$vis_demo_name"/result.txt \
+          --use_gpu="$use_gpu"
+        EXCODE="$?"
+        if [ "$EXCODE" -ne 0 ]; then
          echo "vis demo $vis_demo_name runs fail."
          exit 1
        fi
      done
    done
  else
    # -----simple_on_word2vec on linux/mac-----
-    rm -rf *
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-      -DWITH_MKL=$TURN_ON_MKL \
+    rm -rf ./*
+    cmake .. -DPADDLE_LIB="${inference_install_dir}" \
+      -DWITH_MKL="$TURN_ON_MKL" \
      -DDEMO_NAME=simple_on_word2vec \
-      -DWITH_GPU=$TEST_GPU_CPU \
-      -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-    make -j$(nproc)
-    word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model'
-    if [ -d $word2vec_model ]; then
+      -DWITH_GPU="$TEST_GPU_CPU" \
+      -DWITH_STATIC_LIB="$WITH_STATIC_LIB"
+    make -j"$(nproc)"
+    word2vec_model="$DATA_DIR"'/word2vec/word2vec.inference.model'
+    if [ -d "$word2vec_model" ]; then
      for use_gpu in $use_gpu_list; do
        ./simple_on_word2vec \
-          --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
-          --use_gpu=$use_gpu
-        if [ $? -ne 0 ]; then
+          --dirname="$DATA_DIR"/word2vec/word2vec.inference.model \
+          --use_gpu="$use_gpu"
+        EXCODE="$?"
+        if [ "$EXCODE" -ne 0 ]; then
          echo "simple_on_word2vec demo runs fail."
          exit 1
        fi
      done
    fi
    # ---------vis_demo on linux/mac---------
-    rm -rf *
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-      -DWITH_MKL=$TURN_ON_MKL \
+    rm -rf ./*
+    cmake .. -DPADDLE_LIB="${inference_install_dir}" \
+      -DWITH_MKL="$TURN_ON_MKL" \
      -DDEMO_NAME=vis_demo \
-      -DWITH_GPU=$TEST_GPU_CPU \
-      -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-    make -j$(nproc)
+      -DWITH_GPU="$TEST_GPU_CPU" \
+      -DWITH_STATIC_LIB="$WITH_STATIC_LIB"
+    make -j"$(nproc)"
    for use_gpu in $use_gpu_list; do
      for vis_demo_name in $vis_demo_list; do
        ./vis_demo \
-          --modeldir=$DATA_DIR/$vis_demo_name/model \
-          --data=$DATA_DIR/$vis_demo_name/data.txt \
-          --refer=$DATA_DIR/$vis_demo_name/result.txt \
-          --use_gpu=$use_gpu
-        if [ $? -ne 0 ]; then
+          --modeldir="$DATA_DIR"/"$vis_demo_name"/model \
+          --data="$DATA_DIR"/"$vis_demo_name"/data.txt \
+          --refer="$DATA_DIR"/"$vis_demo_name"/result.txt \
+          --use_gpu="$use_gpu"
+        EXCODE="$?"
+        if [ "$EXCODE" -ne 0 ]; then
          echo "vis demo $vis_demo_name runs fail."
          exit 1
        fi
      done
    done
    # --------tensorrt mobilenet on linux/mac------
-    if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
-      rm -rf *
-      cmake .. -DPADDLE_LIB=${inference_install_dir} \
-        -DWITH_MKL=$TURN_ON_MKL \
+    if [ "$USE_TENSORRT" == ON ] && [ "$TEST_GPU_CPU" == ON ]; then
+      rm -rf ./*
+      cmake .. -DPADDLE_LIB="${inference_install_dir}" \
+        -DWITH_MKL="$TURN_ON_MKL" \
        -DDEMO_NAME=trt_mobilenet_demo \
-        -DWITH_GPU=$TEST_GPU_CPU \
-        -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
-        -DUSE_TENSORRT=$USE_TENSORRT \
-        -DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
-        -DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
-      make -j$(nproc)
+        -DWITH_GPU="$TEST_GPU_CPU" \
+        -DWITH_STATIC_LIB="$WITH_STATIC_LIB" \
+        -DUSE_TENSORRT="$USE_TENSORRT" \
+        -DTENSORRT_INCLUDE_DIR="$TENSORRT_INCLUDE_DIR" \
+        -DTENSORRT_LIB_DIR="$TENSORRT_LIB_DIR"
+      make -j"$(nproc)"
      ./trt_mobilenet_demo \
-        --modeldir=$DATA_DIR/mobilenet/model \
-        --data=$DATA_DIR/mobilenet/data.txt \
-        --refer=$DATA_DIR/mobilenet/result.txt
-      if [ $? -ne 0 ]; then
+        --modeldir="$DATA_DIR"/mobilenet/model \
+        --data="$DATA_DIR"/mobilenet/data.txt \
+        --refer="$DATA_DIR"/mobilenet/result.txt
+      EXCODE="$?"
+      if [ "$EXCODE" != 0 ]; then
        echo "trt demo trt_mobilenet_demo runs fail."
        exit 1
      fi
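
A recurring change in this file is snapshotting the exit status: `$?` is overwritten by every subsequent command, so the script now copies it into `EXCODE` immediately after the binary under test returns. A short sketch using a hypothetical failing command:

#!/bin/bash
false                        # hypothetical demo command that exits non-zero
EXCODE="$?"                  # capture the status before anything else runs
if [ "$EXCODE" -ne 0 ]; then
  echo "demo runs fail (exit code $EXCODE)."
  exit 1
fi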
12 changes: 6 additions & 6 deletions paddle/fluid/inference/check_symbol.sh
@@ -1,12 +1,12 @@
 #!/bin/sh
 
-lib=$1
-if [ $# -ne 1 ]; then echo "No input library"; exit -1 ; fi
+lib="$1"
+if [ "$#" -ne 1 ]; then echo "No input library"; exit 1 ; fi
 
-num_paddle_syms=$(nm -D ${lib} | grep paddle | wc -l)
-num_google_syms=$(nm -D ${lib} | grep google | grep -v paddle | grep "T " | wc -l)
+num_paddle_syms=$(nm -D "${lib}" | grep -c paddle )
+num_google_syms=$(nm -D "${lib}" | grep google | grep -v paddle | grep -c "T " )
 
-if [ $num_paddle_syms -le 0 ]; then echo "Have no paddle symbols"; exit -1 ; fi
-if [ $num_google_syms -ge 1 ]; then echo "Have some google symbols"; exit -1 ; fi
+if [ "$num_paddle_syms" -le 0 ]; then echo "Have no paddle symbols"; exit 1 ; fi
+if [ "$num_google_syms" -ge 1 ]; then echo "Have some google symbols"; exit 1 ; fi
 
 exit 0
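
Two idioms from this hunk, shown standalone: `grep -c` prints the count of matching lines directly (replacing a `grep | wc -l` pipe), and since exit codes are unsigned 8-bit values, `exit -1` is reported by bash as 255 — `exit 1` is the portable failure code. The library path below is hypothetical:

#!/bin/sh
lib="${1:-/usr/lib/libexample.so}"             # hypothetical default for the demo
# grep -c counts matching lines, same result as `grep ... | wc -l`
num_syms=$(nm -D "$lib" 2>/dev/null | grep -c " T ")
echo "exported text symbols: $num_syms"
if [ "$num_syms" -le 0 ]; then echo "no symbols found"; exit 1; fi
exit 0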
2 changes: 0 additions & 2 deletions paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
@@ -90,8 +90,6 @@ TEST(PD_AnalysisConfig, profile_mkldnn) {
   bool quantizer_enable = PD_MkldnnQuantizerEnabled(config);
   EXPECT_TRUE(quantizer_enable);
   PD_EnableMkldnnBfloat16(config);
-  bool bfloat16_enable = PD_MkldnnBfloat16Enabled(config);
-  EXPECT_TRUE(bfloat16_enable);
   PD_SetMkldnnCacheCapacity(config, 0);
   PD_SetModel(config, prog_file.c_str(), params_file.c_str());
   PD_DeleteAnalysisConfig(config);
@@ -190,7 +190,7 @@ std::vector<double> Lexical_Test(
     // return acc_res;
   } else {
     EXPECT_GT(outputs->size(), 0UL);
-    EXPECT_EQ(outputs[0].size(), 1UL);
+    EXPECT_GT(outputs[0].size(), 0UL);
     LOG(INFO) << "No accuracy result. To get accuracy result provide a model "
                  "with accuracy layers in it and use --with_accuracy_layer "
                  "option.";
(Diff truncated — the remaining changed files are not shown.)

1 comment on commit 908ad8c

@paddle-bot-old

Congratulations! Your pull request has passed all required CI checks. You can ask the reviewer(s) to approve and merge. 🎉
