docker常用命令

docker常用命令

# Common docker commands for working with TF Serving images.
# Fix: the original used C-style `//` comments, which shell would pass to
# docker as extra arguments; use `#` instead.
docker pull tensorflow/serving                  # pull the CPU image from the registry
docker pull tensorflow/serving:latest-gpu       # pull the latest GPU image
docker pull tensorflow/serving:2.8.3-gpu        # pull a pinned GPU image
docker run -it tensorflow/serving               # start a container with an interactive tty
exit                                            # leave the interactive container
docker run tensorflow/serving                   # run a container from the image
docker ps                                       # list running containers
docker stop containerId                         # stop a container; containerId is the container's ID

docker ps -a                                    # list all containers, including stopped ones
docker ps -a -q                                 # list all container IDs only

docker stop $(docker ps -a -q)                  # stop every container
docker rm $(docker ps -a -q)                    # remove every container
# docker rm <container> / docker rmi <image>    # remove a container / remove an image
docker cp local_files containerId:docker_files  # copy local files into a container

tf-serving部署

-p: 指定主机到docker容器的端口映射
--mount: 表示要进行挂载,其中
type=bind: 是选择挂载模式,
source: 要部署模型的存储路径,也就是挂载的源(必须是绝对路径),
target: 要挂载的目标位置,模型挂载到docker容器中的位置,也就是docker容器中的目录(放在集装箱的哪里)
-t: 为容器分配一个伪终端(tty)；其后紧跟要运行的镜像名
-e: 环境变量
MODEL_NAME: 必须与target指定路径的最后一个文件夹名称相同
--per_process_gpu_memory_fraction: 运行时所需的GPU显存资源最大比率的值设定

-v:
path1:path2 分别指模型在机器种储存的路径(必须是绝对路径),模型在容器中储存的路径(放在集装箱的哪里)
# Start TF Serving 1.15.0 with the saved model bind-mounted into the container
# (gRPC exposed on host port 8500), running in the background.
# Fix: docker options (-e, -t, -p, --mount) must come BEFORE the image name;
# in the original, `-e MODEL_NAME=...` came after the image and was therefore
# handed to tensorflow_model_server instead of setting the env var.
# Anything after the image name IS a model-server flag (e.g. --model_base_path).
docker run -p 8500:8500 \
  --mount type=bind,source=/Users/coreyzhong/workspace/tensorflow/saved_model/,target=/models/test-model \
  -e MODEL_NAME=test-model \
  -t tensorflow/serving:1.15.0 \
  --model_base_path=/models/test-model/ &
# Serve the DIN saved model over gRPC (8500) and REST (8501) in the
# background; the container is removed on exit (--rm).
saved_model_root="/Users/havorld/jupyter/model_save/"
docker run -t --rm \
  -p 8500:8500 \
  -p 8501:8501 \
  -v "${saved_model_root}/din:/models/tf_saved_models" \
  -e MODEL_NAME=tf_saved_models \
  tensorflow/serving &
查看TensorFlow-Serving状态: curl http://localhost:8501/v1/models/${model_name}
查看TensorFlow-Serving模型信息: curl http://localhost:8501/v1/models/${model_name}/metadata
查看模型信息: saved_model_cli show --dir="./${model_path}/20220422104620" --all  （注意：需用双引号，单引号会阻止 ${model_path} 展开）
使用Http请求进行模型预测:
curl -d '{"instances": [1,2,3,4,5]}' -X POST http://localhost:8501/v1/models/${model_name}:predict
其中instances的value为模型输入Tensor的字符串形式,矩阵维度需要和Tensor对应。

# REST-only (host 8501) deployment, model dir on the Personas host;
# container removed on exit (--rm).
docker run -t --rm -p 8501:8501 \
-v "/home/Personas/havorld/tfserving/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving

# Same deployment against a local macOS model directory.
docker run -t --rm -p 8501:8501 \
-v "/Users/haopeng.meng/jupyter/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving

# Find log lines for uid 55 that mention item id 34174686.
# Fix: drop the useless `cat | grep` — grep can read the file directly.
grep "feed recommend-> uid:55" /opt/logs/rec-feed-api/access.log | grep "id=34174686"


# REST (8501) + gRPC (8500) deployment; sudo is needed when the invoking
# user is not in the docker group. Container removed on exit (--rm).
sudo docker run -t --rm -p 8501:8501 -p 8500:8500 \
-v "/home/meng.haopeng/tfserving/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving

# Same deployment using --mount (bind) instead of -v; note that all docker
# options (-p, --mount, -e, -t) come before the image name.
docker run -p 8501:8501 -p 8500:8500 \
--mount type=bind,source=/Users/haopeng.meng/jupyter/tf_saved_models,target=/models/tf_saved_models \
-e MODEL_NAME=tf_saved_models \
-t tensorflow/serving

# Serve the intent model over gRPC (8500) with TF Serving 1.10.0.
# Fix: `--mount type=bind` requires an ABSOLUTE source path; the original
# relative `source=./intent/` makes docker reject the mount — resolve the
# current directory with $(pwd) instead.
docker run -p 8500:8500 \
--mount type=bind,source="$(pwd)/intent/",target=/models/intent_score \
-e MODEL_NAME=intent_score -t tensorflow/serving:1.10.0

# One-liner: serve model "m" (REST 8501, gRPC 8500) from /my/model/path/m with TF Serving 2.1.0.
docker run -p 8501:8501 -p 8500:8500 --mount type=bind,source=/my/model/path/m,target=/models/m -e MODEL_NAME=m -t tensorflow/serving:2.1.0

# Duplicate of the sudo deployment above — kept as a quick-copy snippet.
sudo docker run -t --rm -p 8501:8501 -p 8500:8500 \
-v "/home/meng.haopeng/tfserving/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving


# Disabled variant: multi-model serving driven by a models.config file,
# re-polled every 300 s. NOTE(review): the port pairs (443→8500, 8500→8501)
# look swapped relative to the usual gRPC/REST convention — confirm.
#docker run -t -p 443:8500 -p 8500:8501 -v "/data/lsj/dmp/SavedModel/:/models/" tensorflow/serving --model_config_file=/models/models.config --model_config_file_poll_wait_seconds=300

# Run a named container serving multiple model versions, with request
# batching configured via /models/batch.config.
# NOTE(review): host 8501 is mapped to container port 8500 (gRPC), not
# 8501 (REST) — confirm this is intended.
docker run -t -p 8501:8500 --name=tf_serving_multi_version_01 -v "/data/tf-model/models/:/models/" tensorflow/serving --model_config_file=/models/models.config --model_config_file_poll_wait_seconds=300 --allow_version_labels_for_unavailable_models=true --enable_batching=true --batching_parameters_file=/models/batch.config




# tf-serving deployment — collection of alternative launch commands.

# REST-only (8501), model bound via -v.
docker run -t --rm -p 8501:8501 \
-v "/Users/haopeng.meng/jupyter/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving

# REST (8501) + gRPC (8500), model bound via --mount.
docker run -p 8501:8501 -p 8500:8500 \
--mount type=bind,source=/Users/haopeng.meng/jupyter/tf_saved_models,target=/models/tf_saved_models \
-e MODEL_NAME=tf_saved_models \
-t tensorflow/serving

# REST + gRPC with sudo (user not in the docker group).
sudo docker run -t --rm -p 8501:8501 -p 8500:8500 \
-v "/home/meng.haopeng/tfserving/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving

# Named container "feed" with remapped host ports (8700→gRPC, 8701→REST).
sudo docker run --name feed -t --rm -p 8700:8500 -p 8701:8501 \
-v "/home/meng.haopeng/tfserving/tf_saved_models:/models/tf_saved_models" \
-e MODEL_NAME=tf_saved_models \
tensorflow/serving



# Named container "feed" on the GPU image, host ports remapped
# (8701→REST, 8700→gRPC).
# NOTE(review): the GPU image normally also needs --gpus all (or
# --runtime=nvidia on older docker) — confirm. The second -t below is
# redundant (already given on the first line).
docker run --name feed -t --rm -p 8701:8501 -p 8700:8500 \
--mount type=bind,source=/home/Personas/havorld/tfserving/model_save,target=/models/model_save \
-e MODEL_NAME=model_save \
-t tensorflow/serving:latest-gpu


# Same container name serving the DIN export from a local macOS path.
docker run --name feed -t --rm -p 8700:8500 -p 8701:8501 \
--mount type=bind,source=/Users/haopeng.meng/Desktop/recommend/rec-alg-feed/model_save/din/serving/,target=/models/serving \
-e MODEL_NAME=serving \
-t tensorflow/serving


# Variant pointing at a different local serving directory.
docker run --name feed -t --rm -p 8700:8500 -p 8701:8501 \
--mount type=bind,source=/Users/haopeng.meng/Desktop/recommend/serving/din/,target=/models/serving \
-e MODEL_NAME=serving \
-t tensorflow/serving