
add pptsm (#2708)

* add pptsm for videoclas

* rename yaml and support eval

* repair np.array

* repair video decord
liuhongen1234567 11 months ago
parent
commit
a00c6fdea5
52 changed files with 5713 additions and 7 deletions
  1. 74 0
      docs/data_annotations/video_modules/video_classification.en.md
  2. 78 0
      docs/data_annotations/video_modules/video_classification.md
  3. 256 0
      docs/module_usage/tutorials/video_modules/video_classification.en.md
  4. 263 0
      docs/module_usage/tutorials/video_modules/video_classification.md
  5. 781 0
      docs/pipeline_usage/tutorials/video_pipelines/video_classification.en.md
  6. 801 0
      docs/pipeline_usage/tutorials/video_pipelines/video_classification.md
  7. 1 0
      paddlex/__init__.py
  8. 42 0
      paddlex/configs/modules/video_classification/PP-TSM-R50_8frames_uniform.yaml
  9. 42 0
      paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_16frames_uniform.yaml
  10. 42 0
      paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_8frames_uniform.yaml
  11. 1 0
      paddlex/inference/common/batch_sampler/__init__.py
  12. 94 0
      paddlex/inference/common/batch_sampler/video_batch_sampler.py
  13. 1 0
      paddlex/inference/common/reader/__init__.py
  14. 42 0
      paddlex/inference/common/reader/video_reader.py
  15. 2 0
      paddlex/inference/common/result/__init__.py
  16. 41 0
      paddlex/inference/common/result/base_video_result.py
  17. 24 0
      paddlex/inference/common/result/mixin.py
  18. 1 0
      paddlex/inference/models_new/__init__.py
  19. 15 0
      paddlex/inference/models_new/video_classification/__init__.py
  20. 141 0
      paddlex/inference/models_new/video_classification/predictor.py
  21. 394 0
      paddlex/inference/models_new/video_classification/processors.py
  22. 91 0
      paddlex/inference/models_new/video_classification/result.py
  23. 1 0
      paddlex/inference/utils/io/__init__.py
  24. 83 1
      paddlex/inference/utils/io/readers.py
  25. 48 0
      paddlex/inference/utils/io/writers.py
  26. 3 0
      paddlex/inference/utils/official_models.py
  27. 7 0
      paddlex/modules/__init__.py
  28. 1 0
      paddlex/modules/base/trainer.py
  29. 18 0
      paddlex/modules/video_classification/__init__.py
  30. 93 0
      paddlex/modules/video_classification/dataset_checker/__init__.py
  31. 18 0
      paddlex/modules/video_classification/dataset_checker/dataset_src/__init__.py
  32. 93 0
      paddlex/modules/video_classification/dataset_checker/dataset_src/analyse_dataset.py
  33. 121 0
      paddlex/modules/video_classification/dataset_checker/dataset_src/check_dataset.py
  34. 82 0
      paddlex/modules/video_classification/dataset_checker/dataset_src/split_dataset.py
  35. 44 0
      paddlex/modules/video_classification/evaluator.py
  36. 22 0
      paddlex/modules/video_classification/exportor.py
  37. 19 0
      paddlex/modules/video_classification/model_list.py
  38. 88 0
      paddlex/modules/video_classification/trainer.py
  39. 16 0
      paddlex/repo_apis/PaddleVideo_api/__init__.py
  40. 51 0
      paddlex/repo_apis/PaddleVideo_api/config_utils.py
  41. 156 0
      paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml
  42. 151 0
      paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml
  43. 148 0
      paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml
  44. 19 0
      paddlex/repo_apis/PaddleVideo_api/video_cls/__init__.py
  45. 547 0
      paddlex/repo_apis/PaddleVideo_api/video_cls/config.py
  46. 346 0
      paddlex/repo_apis/PaddleVideo_api/video_cls/model.py
  47. 71 0
      paddlex/repo_apis/PaddleVideo_api/video_cls/register.py
  48. 205 0
      paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py
  49. 0 1
      paddlex/repo_apis/base/runner.py
  50. 14 0
      paddlex/repo_manager/meta.py
  51. 20 5
      paddlex/repo_manager/repo.py
  52. 1 0
      requirements.txt

+ 74 - 0
docs/data_annotations/video_modules/video_classification.en.md

@@ -0,0 +1,74 @@
+---
+comments: true
+---
+
+# PaddleX Video Classification Task Module Data Annotation Tutorial
+
+This document will introduce how to use the [BILS](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/annotation_tools.md) annotation tool to complete data annotation for video classification-related single models.
+Click the above link to install the data annotation tool and view the detailed usage process by referring to the homepage documentation.
+
+## 1. BILS Annotation
+### 1.1 Introduction to BILS Annotation Tool
+`BILS` (Baidu Intelligent Labeling System) is a video annotation software that supports tagging on the timeline and can be used for annotation tasks such as video event localization and short video classification. The user interface is simple, and the operation is easy and intuitive.
+### 1.2 BILS Installation
+Click the download link to download the installation package locally, and then follow the prompts to install it step-by-step.
+
+macOS: [dmg package download](https://videotag.bj.bcebos.com/Annotation-tools/4.11-EIVideo-0.0.0.dmg)
+
+Windows: [exe file download](https://videotag.bj.bcebos.com/Annotation-tools/EIVideo-Setup-0.0.0.exe)
+
+Instructional video: [video download](https://videotag.bj.bcebos.com/Annotation-tools/4.11-%E4%BA%A7%E5%93%81%E8%AF%B4%E6%98%8E.mp4)
+### 1.3 BILS Annotation Process
+#### 1.3.1 Prepare Data to Be Annotated
+* Create a root directory for the dataset, such as `video_cls`.
+* Create a `videos` directory within `video_cls` and store the videos to be annotated in the `videos` directory, as shown below:
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/01.png">
+
+#### 1.3.2 BILS
+Click the BILS software icon to launch the `BILS` annotation tool.
+
+#### 1.3.3 Start Video Annotation
+* After launching `BILS`, it will look like this:
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/02.png">
+
+* Click the `Settings` button in the right navigation bar, fill in the project name, and set both the project directory and dataset directory to the storage directory of the videos to be annotated, i.e., `video_cls/videos`. Click the `Update Files` button to read the videos to be annotated. After updating the files, the first video in the video folder will play automatically, and you can click the arrow icons to pause, play, or replay.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/03.png">
+
+* Click the `Annotation` button in the right navigation bar. The default labels are `Goal, Three-pointer, Two-pointer`. If you need to create a new label, click the `Edit` icon, create a new label, and rename it. Here, `Drumming` is used as an example.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/04.png">
+
+* Hold down the `Option` key and click somewhere in the video clip to set the start and end times of the video. Click the trash can icon to delete previously annotated labels. After determining the start and end times of the video clip, check `Drumming`, and the action category of the current time segment will be marked as `Drumming`. Finally, click the `OK` icon to save the current label to the `BILS` format annotation file.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/05.png">
+
+* After completing the annotation of the current video, click the `Folder Mode` icon to switch to the video file mode. Then, click on the next video that needs annotation and repeat the above annotation process.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/06.png">
+
+* After annotating all videos, click the `Export` icon to export the annotated label files.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/07.png">
+
+* After obtaining the exported JSON file (the default name is `ai.json`), use the [convert_to_videocls.py](https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/applications/video_classification_dataset_prepare/convert_to_videocls.py) script to convert the exported dataset into the `Video Classification` dataset format. Generate `train.txt`, `val.txt`, and `label.txt`.
+
+```bash
+python convert_to_videocls.py --dataset_path /path/to/dataset
+```
+`dataset_path` is the annotated `BILS` format classification dataset.
+
+## 2. Data Format
+* The dataset defined by PaddleX for the video classification task is named **VideoClsDataset**, with the following organization structure and annotation format:
+
+```bash
+dataset_dir    # Root directory of the dataset, the directory name can be changed
+├── videos     # Directory for storing videos, the directory name can be changed, but note the correspondence with the content of train.txt and val.txt
+├── label.txt  # Correspondence between annotation IDs and category names, the file name cannot be changed. Each line gives the category ID and category name, for example: 0 abseiling
+├── train.txt  # Training set annotation file, the file name cannot be changed. Each line gives the video path and video category ID, separated by a space, for example: videos/Qbo_tnzfjOY.mp4 2
+└── val.txt    # Validation set annotation file, the file name cannot be changed. Each line gives the video path and video category ID, separated by a space, for example: videos/3caPS4FHFF8.mp4 0
+```
+
+The samples referenced by the annotation files are video files. Please prepare your data according to the specifications above; you can also refer to the [example dataset](https://paddle-model-ecology.bj.bcebos.com/paddlex/data/k400_examples.tar).
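+
+Before running PaddleX's own dataset check, a quick sanity pass over this layout can catch obvious problems (missing videos, unknown class IDs). The following is a minimal illustrative sketch based only on the file formats described above; it is not part of PaddleX, and the dataset path is a placeholder:
+
+```python
+from pathlib import Path
+
+def check_videocls_layout(dataset_dir: str) -> None:
+    """Illustrative check of the VideoClsDataset layout described above."""
+    root = Path(dataset_dir)
+
+    # label.txt: "<class_id> <class_name>" per line, e.g. "0 abseiling"
+    labels = {}
+    for line in (root / "label.txt").read_text().splitlines():
+        class_id, class_name = line.strip().split(maxsplit=1)
+        labels[int(class_id)] = class_name
+    print(f"{len(labels)} classes found")
+
+    # train.txt / val.txt: "<video_path> <class_id>" per line
+    for split in ("train.txt", "val.txt"):
+        lines = (root / split).read_text().splitlines()
+        missing = 0
+        for line in lines:
+            video_path, class_id = line.strip().rsplit(maxsplit=1)
+            assert int(class_id) in labels, f"unknown class id in {split}: {class_id}"
+            if not (root / video_path).exists():
+                missing += 1
+        print(f"{split}: {len(lines)} samples, {missing} missing video files")
+
+check_videocls_layout("./dataset/k400_examples")
+```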

+ 78 - 0
docs/data_annotations/video_modules/video_classification.md

@@ -0,0 +1,78 @@
+---
+comments: true
+---
+
+# PaddleX Video Classification Task Module Data Annotation Tutorial
+
+This document introduces how to use the [BILS](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/annotation_tools.md) annotation tool to complete data annotation for video classification single models.
+Click the link above to install the data annotation tool and view the detailed usage process by referring to the homepage documentation.
+
+## 1. BILS Annotation
+### 1.1 Introduction to the BILS Annotation Tool
+`BILS` (Baidu Intelligent Labeling System) is a video annotation tool that supports tagging on the timeline and can be used for annotation tasks such as video event localization and short video classification. The user interface is simple, and the operation is easy and intuitive.
+### 1.2 BILS Installation
+Click the download link to download the installation package locally, and then follow the prompts to install it step by step.
+
+macOS: [dmg package download](https://videotag.bj.bcebos.com/Annotation-tools/4.11-EIVideo-0.0.0.dmg)
+
+Windows: [exe file download](https://videotag.bj.bcebos.com/Annotation-tools/EIVideo-Setup-0.0.0.exe)
+
+Instructional video: [video download](https://videotag.bj.bcebos.com/Annotation-tools/4.11-%E4%BA%A7%E5%93%81%E8%AF%B4%E6%98%8E.mp4)
+### 1.3 BILS Annotation Process
+#### 1.3.1 Prepare Data to Be Annotated
+* Create a root directory for the dataset, such as `video_cls`.
+* Create a `videos` directory within `video_cls` and store the videos to be annotated in the `videos` directory, as shown below:
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/01.png">
+
+#### 1.3.2 BILS
+Click the BILS software icon to launch the `BILS` annotation tool.
+
+#### 1.3.3 Start Video Annotation
+* After launching `BILS`, it will look like this:
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/02.png">
+
+* Click the `Settings` button in the right navigation bar, fill in the project name, and set both the project directory and the dataset directory to the storage directory of the videos to be annotated, i.e., `video_cls/videos`. Click the `Update Files` button to read the videos to be annotated. After updating the files, the first video in the video folder will play automatically, and you can click the arrow icons to pause, play, or replay.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/03.png">
+
+* Click the `Annotation` button in the right navigation bar. The default labels are `Goal, Three-pointer, Two-pointer`. If you need to create a new label, click the `Edit` icon, create a new label, and rename it. Here, `Drumming` is used as an example.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/04.png">
+
+* Hold down the `Option` key and click somewhere in the video clip to set the start and end times of the clip. Click the trash can icon to delete previously annotated labels. After determining the start and end times of the clip, check `Drumming`, and the action category of the current time segment will be marked as `Drumming`. Finally, click the `OK` icon to save the current label to the `BILS` format annotation file.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/05.png">
+
+
+* After completing the annotation of the current video, click the `Folder Mode` icon to switch to the video file mode. Then click the next video that needs annotation and repeat the above annotation process.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/06.png">
+
+
+* After annotating all videos, click the `Export` icon to export the annotated label files.
+
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/data_prepare/video_classification/07.png">
+
+
+* After obtaining the exported JSON file (the default name is `ai.json`), use the [convert_to_videocls.py](https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/applications/video_classification_dataset_prepare/convert_to_videocls.py) script to convert the exported dataset into the `Video Classification` dataset format, generating `train.txt`, `val.txt`, and `label.txt`.
+
+```bash
+python convert_to_videocls.py --dataset_path /path/to/dataset
+```
+`dataset_path` is the annotated `BILS` format classification dataset.
+
+
+## 2. Data Format
+* The dataset defined by PaddleX for the video classification task is named <b>VideoClsDataset</b>, with the following organization structure and annotation format:
+
+```bash
+dataset_dir    # Root directory of the dataset; the directory name can be changed
+├── videos     # Directory for storing videos; the directory name can be changed, but note the correspondence with the contents of train.txt and val.txt
+├── label.txt  # Mapping between annotation IDs and category names; the file name cannot be changed. Each line gives a category ID and category name, for example: 0 abseiling
+├── train.txt  # Training set annotation file; the file name cannot be changed. Each line gives a video path and video category ID, separated by a space, for example: videos/Qbo_tnzfjOY.mp4 2
+└── val.txt    # Validation set annotation file; the file name cannot be changed. Each line gives a video path and video category ID, separated by a space, for example: videos/3caPS4FHFF8.mp4 0
+```
+The samples referenced by the annotation files are video files. Please prepare your data according to the specifications above; you can also refer to the [example dataset](https://paddle-model-ecology.bj.bcebos.com/paddlex/data/k400_examples.tar).

+ 256 - 0
docs/module_usage/tutorials/video_modules/video_classification.en.md

@@ -0,0 +1,256 @@
+---
+comments: true
+---
+
+# Video Classification Module Development Tutorial
+
+## I. Overview
+The Video Classification Module is a crucial component in a computer vision system, responsible for categorizing input videos. The performance of this module directly impacts the accuracy and efficiency of the entire computer vision system. The Video Classification Module typically receives videos as input and then, through deep learning or other machine learning algorithms, classifies them into predefined categories based on their characteristics and content. For example, in an action recognition system, the Video Classification Module may need to classify input videos into categories such as "Abseiling," "Air Drumming," "Answering Questions," etc. The classification results of the Video Classification Module are output for use by other modules or systems.
+
+## II. List of Supported Models
+
+
+<table>
+<tr>
+<th>Model</th><th>Model Download Link</th>
+<th>Top1 Acc(%)</th>
+<th>Model Storage Size (M)</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>PPTSM_ResNet50_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSM_ResNet50_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSM_ResNet50_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>74.36</td>
+<td>93.4 M</td>
+<td rowspan="1">
+PP-TSM is a video classification model developed by Baidu PaddlePaddle's Vision Team. This model is optimized based on the ResNet-50 backbone network and undergoes model tuning in six aspects: data augmentation, network structure fine-tuning, training strategies, Batch Normalization (BN) layer optimization, pre-trained model selection, and model distillation. Under the center crop evaluation method, its accuracy on Kinetics-400 is improved by 3.95 points compared to the original paper's implementation.
+</td>
+</tr>
+
+<tr>
+<td>PPTSMv2_LCNet_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>71.71</td>
+<td>22.5 M</td>
+<td rowspan="2">PP-TSMv2 is a lightweight video classification model optimized based on the CPU-oriented model PP-LCNetV2. It undergoes model tuning in seven aspects: backbone network and pre-trained model selection, data augmentation, TSM module tuning, input frame number optimization, decoding speed optimization, DML distillation, and LTA module. Under the center crop evaluation method, it achieves an accuracy of 75.16%, with an inference speed of only 456ms on the CPU for a 10-second video input.</td>
+</tr>
+<tr>
+<td>PPTSMv2_LCNet_k400_16frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_16frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_16frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>73.11</td>
+<td>22.5 M</td>
+</tr>
+
+</table>
+
+<p><b>Note: The above accuracy metrics refer to Top-1 Accuracy on the <a href="https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/dataset/k400.md">K400</a> validation set. </b><b>All model GPU inference times are based on NVIDIA Tesla T4 machines, with precision type FP32. CPU inference speeds are based on Intel® Xeon® Gold 5117 CPU @ 2.00GHz, with 8 threads and precision type FP32.</b></p>
+
+## <span id="lable">III. Quick Integration</span>
+> ❗ Before quick integration, please install the PaddleX wheel package. For detailed instructions, refer to the [PaddleX Local Installation Guide](../../../installation/installation.en.md).
+
+After installing the wheel package, you can complete video classification module inference with just a few lines of code. You can switch between models in this module freely, and you can also integrate the model inference of the video classification module into your project. Before running the following code, please download the [demo video](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4) to your local machine.
+
+```python
+from paddlex import create_model
+model = create_model("PPTSMv2_LCNet_k400_8frames_uniform")
+output = model.predict("general_video_classification_001.mp4", batch_size=1)
+for res in output:
+    res.print(json_format=False)
+    res.save_to_video("./output/")
+    res.save_to_json("./output/res.json")
+```
+For more information on using PaddleX's single-model inference APIs, please refer to the [PaddleX Single-Model Python Script Usage Instructions](../../instructions/model_python_API.en.md).
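+
+If you need to classify more than one clip, the same calls shown above can simply be looped over a folder. A minimal sketch (the input directory and glob pattern are placeholders, not PaddleX conventions):
+
+```python
+from pathlib import Path
+from paddlex import create_model
+
+model = create_model("PPTSMv2_LCNet_k400_8frames_uniform")
+
+# Run the module on every .mp4 under a local folder and save one JSON result per clip.
+for video_path in sorted(Path("./videos_to_classify").glob("*.mp4")):
+    for res in model.predict(str(video_path), batch_size=1):
+        res.save_to_json(f"./output/{video_path.stem}.json")
+```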
+
+## IV. Custom Development
+If you are seeking higher accuracy from existing models, you can use PaddleX's custom development capabilities to develop better video classification models. Before using PaddleX to develop video classification models, please ensure that you have installed the relevant model training plugins for video classification in PaddleX. The installation process can be found in the custom development section of the [PaddleX Local Installation Guide](../../../installation/installation.en.md).
+
+### 4.1 Data Preparation
+Before model training, you need to prepare the dataset for the corresponding task module. PaddleX provides data validation functionality for each module, and <b>only data that passes data validation can be used for model training</b>. Additionally, PaddleX provides demo datasets for each module, which you can use to complete subsequent development. If you wish to use your own private dataset for subsequent model training, please refer to the [PaddleX Video Classification Task Module Data Annotation Guide](../../../data_annotations/video_modules/video_classification.en.md).
+
+#### 4.1.1 Demo Data Download
+You can use the following command to download the demo dataset to a specified folder:
+```bash
+cd /path/to/paddlex
+wget https://paddle-model-ecology.bj.bcebos.com/paddlex/data/k400_examples.tar -P ./dataset
+tar -xf ./dataset/k400_examples.tar -C ./dataset/
+```
+#### 4.1.2 Data Validation
+One command is all you need to complete data validation:
+
+```bash
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+After executing the above command, PaddleX will validate the dataset and summarize its basic information. If the command runs successfully, it will print `Check dataset passed !` in the log. The validation results file is saved in `./output/check_dataset_result.json`, and related outputs are saved in the `./output/check_dataset` directory in the current directory, including visual examples of sample images and sample distribution histograms.
+
+<details><summary>👉 <b>Validation Results Details (Click to Expand)</b></summary>
+
+<pre><code class="language-bash">{ "done_flag": true,
+  "check_pass": true,
+  "attributes": {
+    "label_file": "../../dataset/k400_examples/label.txt",
+    "num_classes": 5,
+    "train_samples": 250,
+    "train_sample_paths": [
+      "check_dataset/../../dataset/k400_examples/videos/Wary2ON3aSo_000079_000089.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/_LHpfh0rXjk_000012_000022.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/dyoiNbn80q0_000039_000049.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/brBw6cFwock_000049_000059.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/-o4X5Z_Isyc_000085_000095.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/e24p-4W3TiU_000011_000021.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/2Grg_zwmYZE_000004_000014.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/aZY_0UqRNgA_000098_000108.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/WZlsi4nQHOo_000025_000035.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/rRh-lkFj4Tw_000001_000011.mp4"
+    ],
+    "val_samples": 50,
+    "val_sample_paths": [
+      "check_dataset/../../dataset/k400_examples/videos/7Mga5kywfU4.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/w5UCdQ2NmfY.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/Qbo_tnzfjOY.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/LgW8pMDtylE.mkv",
+      "check_dataset/../../dataset/k400_examples/videos/BY0883Dvt1c.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/PHQkMPu-KNo.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/7LSJ2Ryv1a8.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/oBYZWvlI8Uk.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/dpn2eg9O3Rs.mkv",
+      "check_dataset/../../dataset/k400_examples/videos/hXtsZAaZ3yc.mkv"
+    ]
+  },
+  "analysis": {
+    "histogram": "check_dataset/histogram.png"
+  },
+  "dataset_path": "./dataset/k400_examples",
+  "show_type": "video",
+  "dataset_type": "VideoClsDataset"
+}
+</code></pre>
+<p>The above validation results, with check_pass being True, indicate that the dataset format meets the requirements. Explanations for other indicators are as follows:</p>
+<ul>
+<li><code>attributes.num_classes</code>: The number of classes in this dataset is 5;</li>
+<li><code>attributes.train_samples</code>: The number of training set samples in this dataset is 250;</li>
+<li><code>attributes.val_samples</code>: The number of validation set samples in this dataset is 50;</li>
+<li><code>attributes.train_sample_paths</code>: A list of relative paths to the visual samples in the training set of this dataset;</li>
+<li><code>attributes.val_sample_paths</code>: A list of relative paths to the visual samples in the validation set of this dataset;</li>
+</ul>
+<p>Additionally, the dataset validation analyzes the sample number distribution across all classes in the dataset and generates a distribution histogram (histogram.png):</p>
+<p><img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/modules/video_classification/01.png"></p></details>
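+
+Because the validation summary is plain JSON, it can also be read back programmatically, for example to gate a training job in a script. A small sketch whose keys follow the example `check_dataset_result.json` shown above:
+
+```python
+import json
+
+# Keys below follow the example check_dataset_result.json printed above.
+with open("./output/check_dataset_result.json") as f:
+    result = json.load(f)
+
+print("check passed:", result["check_pass"])
+print("classes:", result["attributes"]["num_classes"])
+print("train / val samples:",
+      result["attributes"]["train_samples"], "/",
+      result["attributes"]["val_samples"])
+```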
+
+#### 4.1.3 Dataset Format Conversion/Dataset Splitting (Optional)
+After completing data validation, you can convert the dataset format or re-split the training/validation ratio of the dataset by <b>modifying the configuration file</b> or <b>appending hyperparameters</b>.
+
+<details><summary>👉 <b>Dataset Format Conversion/Dataset Splitting Details (Click to Expand)</b></summary>
+
+<p><b>(1) Dataset Format Conversion</b></p>
+<p>Video classification does not currently support data conversion.</p>
+<p><b>(2) Dataset Splitting</b></p>
+<p>The parameters for dataset splitting can be set by modifying the fields under <code>CheckDataset</code> in the configuration file. The following are example explanations for some of the parameters in the configuration file:</p>
+<ul>
+<li><code>CheckDataset</code>:</li>
+<li><code>split</code>:</li>
+<li><code>enable</code>: Whether to re-split the dataset. When set to <code>True</code>, the dataset will be re-split. The default is <code>False</code>;</li>
+<li><code>train_percent</code>: If re-splitting the dataset, you need to set the percentage of the training set, which should be an integer between 0-100, ensuring that the sum with <code>val_percent</code> equals 100;</li>
+</ul>
+<p>For example, if you want to re-split the dataset with a 90% training set and a 10% validation set, you need to modify the configuration file as follows:</p>
+<pre><code class="language-bash">......
+CheckDataset:
+  ......
+  split:
+    enable: True
+    train_percent: 90
+    val_percent: 10
+  ......
+</code></pre>
+<p>Then execute the command:</p>
+<pre><code class="language-bash">python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples
+</code></pre>
+<p>After the data splitting is executed, the original annotation files will be renamed to <code>xxx.bak</code> in the original path.</p>
+<p>These parameters also support being set through appending command line arguments:</p>
+<pre><code class="language-bash">python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples \
+    -o CheckDataset.split.enable=True \
+    -o CheckDataset.split.train_percent=90 \
+    -o CheckDataset.split.val_percent=10
+</code></pre></details>
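+
+Conceptually, the re-split configured above amounts to pooling all annotated samples and rewriting `train.txt`/`val.txt` at the requested ratio. The sketch below illustrates that idea only; it is not PaddleX's internal implementation (which, as noted above, also backs up the original files as `xxx.bak`):
+
+```python
+import random
+
+def resplit(dataset_dir: str, train_percent: int = 90, seed: int = 0) -> None:
+    """Illustrative re-split of a VideoClsDataset's annotation lists."""
+    samples = []
+    for name in ("train.txt", "val.txt"):
+        with open(f"{dataset_dir}/{name}") as f:
+            samples += [line.strip() for line in f if line.strip()]
+    random.Random(seed).shuffle(samples)
+    cut = len(samples) * train_percent // 100
+    with open(f"{dataset_dir}/train.txt", "w") as f:
+        f.write("\n".join(samples[:cut]) + "\n")
+    with open(f"{dataset_dir}/val.txt", "w") as f:
+        f.write("\n".join(samples[cut:]) + "\n")
+
+resplit("./dataset/k400_examples", train_percent=90)
+```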
+
+### 4.2 Model Training
+A single command can complete the model training. Taking the training of the video classification model PPTSMv2_LCNet_k400_8frames_uniform as an example:
+```
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=train \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+
+The following steps are required:
+
+* Specify the path of the model's `.yaml` configuration file (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`. When training other models, you need to specify the corresponding configuration files. The relationship between the model and configuration files can be found in the [PaddleX Model List (CPU/GPU)](../../../support_list/models_list.en.md))
+* Specify the mode as model training: `-o Global.mode=train`
+* Specify the path of the training dataset: `-o Global.dataset_dir`. Other related parameters can be set by modifying the fields under `Global` and `Train` in the `.yaml` configuration file, or adjusted by appending parameters in the command line. For example, to specify training on the first 2 GPUs: `-o Global.device=gpu:0,1`; to set the number of training epochs to 10: `-o Train.epochs_iters=10`. For more modifiable parameters and their detailed explanations, refer to the configuration file parameter instructions for the corresponding task module of the model [PaddleX Common Model Configuration File Parameters](../../instructions/config_parameters_common.en.md).
+
+
+<details><summary>👉 <b>More Details (Click to Expand)</b></summary>
+
+<ul>
+<li>During model training, PaddleX automatically saves the model weight files, with the default being <code>output</code>. If you need to specify a save path, you can set it through the <code>-o Global.output</code> field in the configuration file.</li>
+<li>PaddleX shields you from the concepts of dynamic graph weights and static graph weights. During model training, both dynamic and static graph weights are produced, and static graph weights are selected by default for model inference.</li>
+<li>
+<p>After completing the model training, all outputs are saved in the specified output directory (default is <code>./output/</code>), typically including:</p>
+</li>
+<li>
+<p><code>train_result.json</code>: Training result record file, recording whether the training task was completed normally, as well as the output weight metrics, related file paths, etc.;</p>
+</li>
+<li><code>train.log</code>: Training log file, recording changes in model metrics and loss during training;</li>
+<li><code>config.yaml</code>: Training configuration file, recording the hyperparameter configuration for this training session;</li>
+<li><code>.pdparams</code>, <code>.pdema</code>, <code>.pdopt.pdstate</code>, <code>.pdiparams</code>, <code>.pdmodel</code>: Model weight-related files, including network parameters, optimizer, EMA, static graph network parameters, static graph network structure, etc.;</li>
+</ul></details>
+
+### <b>4.3 Model Evaluation</b>
+After completing model training, you can evaluate the specified model weight file on the validation set to verify the model accuracy. Using PaddleX for model evaluation, a single command can complete the model evaluation:
+```bash
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=evaluate \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+Similar to model training, the following steps are required:
+
+* Specify the path of the model's `.yaml` configuration file (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`)
+* Specify the mode as model evaluation: `-o Global.mode=evaluate`
+* Specify the path of the validation dataset: `-o Global.dataset_dir`. Other related parameters can be set by modifying the fields under `Global` and `Evaluate` in the `.yaml` configuration file. For details, please refer to [PaddleX Common Model Configuration File Parameter Description](../../instructions/config_parameters_common.en.md).
+
+<details><summary>👉 <b>More Details (Click to Expand)</b></summary>
+
+<p>When evaluating the model, you need to specify the model weight file path. Each configuration file has a default weight save path built-in. If you need to change it, simply set it by appending a command line parameter, such as <code>-o Evaluate.weight_path=./output/best_model/best_model.pdparams</code>.</p>
+<p>After completing the model evaluation, an <code>evaluate_result.json</code> file will be generated, which records the evaluation results. Specifically, it records whether the evaluation task was completed successfully and the model's evaluation metrics, including val.top1, val.top5;</p></details>
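+
+Since `evaluate_result.json` is an ordinary JSON file, the metrics can be read back in a script. The sketch below only assumes that `val.top1` and `val.top5` appear somewhere in the file, as stated above; the exact nesting may vary between PaddleX versions, so it searches for the keys rather than hard-coding a path:
+
+```python
+import json
+
+with open("./output/evaluate_result.json") as f:
+    result = json.load(f)
+
+def find_metric(obj, key):
+    """Recursively look up a metric key anywhere in the evaluation JSON."""
+    if isinstance(obj, dict):
+        if key in obj:
+            return obj[key]
+        for value in obj.values():
+            found = find_metric(value, key)
+            if found is not None:
+                return found
+    return None
+
+for metric in ("val.top1", "val.top5"):
+    print(metric, "=", find_metric(result, metric))
+```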
+
+### <b>4.4 Model Inference and Model Integration</b>
+After completing model training and evaluation, you can use the trained model weights for inference predictions or Python integration.
+
+#### 4.4.1 Model Inference
+To perform inference prediction through the command line, simply use the following command. Before running the following code, please download the [demo video](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4) to your local machine.
+
+```bash
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=predict \
+    -o Predict.model_dir="./output/best_model/inference" \
+    -o Predict.input="general_video_classification_001.mp4"
+```
+Similar to model training and evaluation, the following steps are required:
+
+* Specify the `.yaml` configuration file path for the model (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`)
+* Specify the mode as model inference prediction: `-o Global.mode=predict`
+* Specify the model weight path: `-o Predict.model_dir="./output/best_model/inference"`
+* Specify the input data path: `-o Predict.input="..."`
+Other related parameters can be set by modifying the fields under `Global` and `Predict` in the `.yaml` configuration file. For details, please refer to [PaddleX Common Model Configuration File Parameter Description](../../instructions/config_parameters_common.en.md).
+
+#### 4.4.2 Model Integration
+The model can be directly integrated into the PaddleX pipelines or directly into your own project.
+
+1.<b>Pipeline Integration</b>
+
+
+The video classification module can be integrated into the [General Video Classification Pipeline](../../../pipeline_usage/tutorials/video_pipelines/video_classification.en.md) of PaddleX. Simply replace the model path to update the video classification module of the relevant pipeline. In pipeline integration, you can use high-performance inference and service-oriented deployment to deploy your obtained model.
+
+2.<b>Module Integration</b>
+
+The weights you produce can be directly integrated into the video classification module. You can refer to the Python example code in <a href="#lable">Quick Integration</a>  and simply replace the model with the path to your trained model.
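+
+Concretely, making that replacement in the Quick Integration snippet might look like the following sketch, where the weight path is assumed to be the default export location used earlier in this tutorial:
+
+```python
+from paddlex import create_model
+
+# Point create_model at your own exported weights instead of the built-in model name.
+model = create_model("./output/best_model/inference")
+output = model.predict("general_video_classification_001.mp4", batch_size=1)
+for res in output:
+    res.print(json_format=False)
+    res.save_to_json("./output/res.json")
+```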

+ 263 - 0
docs/module_usage/tutorials/video_modules/video_classification.md

@@ -0,0 +1,263 @@
+---
+comments: true
+---
+
+# Video Classification Module Development Tutorial
+
+## I. Overview
+The video classification module is a key component of a computer vision system, responsible for categorizing input videos. The performance of this module directly affects the accuracy and efficiency of the whole system. The module typically receives videos as input and then, using deep learning or other machine learning algorithms, classifies them into predefined categories based on their characteristics and content. For example, in an action recognition system, the video classification module may need to classify input videos into categories such as "abseiling", "air drumming", and "answering questions". The classification results are output for use by other modules or systems.
+
+## II. List of Supported Models
+
+
+<table>
+<tr>
+<th>Model</th><th>Model Download Link</th>
+<th>Top1 Acc(%)</th>
+<th>Model Storage Size (M)</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>PPTSM_ResNet50_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSM_ResNet50_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSM_ResNet50_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>74.36</td>
+<td>93.4 M</td>
+<td rowspan="1">
+PP-TSM is a video classification model developed by Baidu PaddlePaddle's vision team. The model is optimized based on the ResNet-50 backbone and tuned in six aspects: data augmentation, network structure fine-tuning, training strategies, BN layer optimization, pre-trained model selection, and model distillation. Under the center-crop evaluation method, its accuracy on Kinetics-400 is 3.95 points higher than the original paper's implementation.
+</td>
+</tr>
+
+<tr>
+<td>PPTSMv2_LCNet_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>71.71</td>
+<td>22.5 M</td>
+<td rowspan="2">PP-TSMv2 is a lightweight video classification model optimized based on the CPU-oriented model PP-LCNetV2. It is tuned in seven aspects: backbone and pre-trained model selection, data augmentation, TSM module tuning, input frame number optimization, decoding speed optimization, DML distillation, and the LTA module. Under the center-crop evaluation method, it achieves an accuracy of 75.16%, with an inference time of only 456 ms on the CPU for a 10-second input video.</td>
+</tr>
+<tr>
+<td>PPTSMv2_LCNet_k400_16frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_16frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_16frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>73.11</td>
+<td>22.5 M</td>
+</tr>
+
+</table>
+
+
+
+<p><b>Note: The above accuracy metrics refer to Top-1 Accuracy on the <a href="https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/dataset/k400.md">K400</a> validation set. All model GPU inference times are based on NVIDIA Tesla T4 machines, with precision type FP32. CPU inference speeds are based on Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz, with 8 threads and precision type FP32.</b></p>
+
+## III. Quick Integration
+> ❗ Before quick integration, please install the PaddleX wheel package first. For details, refer to the [PaddleX Local Installation Guide](../../../installation/installation.md).
+
+After installing the wheel package, a few lines of code are enough to run inference with the video classification module. You can switch freely between the models under this module, and you can also integrate the model inference of the video classification module into your own project. Before running the following code, please download the [demo video](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4) to your local machine.
+
+```python
+from paddlex import create_model
+model = create_model("PPTSMv2_LCNet_k400_8frames_uniform")
+output = model.predict("general_video_classification_001.mp4", batch_size=1)
+for res in output:
+    res.print(json_format=False)
+    res.save_to_video("./output/")
+    res.save_to_json("./output/res.json")
+```
+
+For more information on using PaddleX's single-model inference APIs, please refer to the [PaddleX Single-Model Python Script Usage Instructions](../../instructions/model_python_API.md).
+
+## IV. Custom Development
+If you are seeking higher accuracy than the existing models provide, you can use PaddleX's custom development capabilities to develop a better video classification model. Before using PaddleX to develop video classification models, please make sure that you have installed the video classification training plugin for PaddleX; the installation process can be found in the custom development section of the [PaddleX Local Installation Guide](../../../installation/installation.md).
+
+### 4.1 Data Preparation
+Before model training, you need to prepare the dataset for the corresponding task module. PaddleX provides data validation functionality for each module, and <b>only data that passes validation can be used for model training</b>. Additionally, PaddleX provides a demo dataset for each module, which you can use to complete subsequent development. If you wish to use your own private dataset for subsequent model training, please refer to the [PaddleX Video Classification Task Module Data Annotation Tutorial](../../../data_annotations/video_modules/video_classification.md).
+
+#### 4.1.1 Demo Data Download
+You can use the following command to download the demo dataset to a specified folder:
+
+```bash
+cd /path/to/paddlex
+wget https://paddle-model-ecology.bj.bcebos.com/paddlex/data/k400_examples.tar -P ./dataset
+tar -xf ./dataset/k400_examples.tar -C ./dataset/
+```
+#### 4.1.2 Data Validation
+A single command is all you need to complete data validation:
+
+```bash
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+After executing the above command, PaddleX will validate the dataset and summarize its basic information. If the command runs successfully, `Check dataset passed !` will be printed in the log. The validation results file is saved in `./output/check_dataset_result.json`, and related outputs are saved in the `./output/check_dataset` directory under the current directory, including visualized sample images and a sample distribution histogram.
+
+<details><summary>👉 <b>Validation Results Details (Click to Expand)</b></summary>
+<p>The specific content of the validation results file is:</p>
+<pre><code class="language-bash">
+{ "done_flag": true,
+  "check_pass": true,
+  "attributes": {
+    "label_file": "../../dataset/k400_examples/label.txt",
+    "num_classes": 5,
+    "train_samples": 250,
+    "train_sample_paths": [
+      "check_dataset/../../dataset/k400_examples/videos/Wary2ON3aSo_000079_000089.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/_LHpfh0rXjk_000012_000022.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/dyoiNbn80q0_000039_000049.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/brBw6cFwock_000049_000059.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/-o4X5Z_Isyc_000085_000095.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/e24p-4W3TiU_000011_000021.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/2Grg_zwmYZE_000004_000014.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/aZY_0UqRNgA_000098_000108.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/WZlsi4nQHOo_000025_000035.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/rRh-lkFj4Tw_000001_000011.mp4"
+    ],
+    "val_samples": 50,
+    "val_sample_paths": [
+      "check_dataset/../../dataset/k400_examples/videos/7Mga5kywfU4.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/w5UCdQ2NmfY.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/Qbo_tnzfjOY.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/LgW8pMDtylE.mkv",
+      "check_dataset/../../dataset/k400_examples/videos/BY0883Dvt1c.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/PHQkMPu-KNo.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/7LSJ2Ryv1a8.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/oBYZWvlI8Uk.mp4",
+      "check_dataset/../../dataset/k400_examples/videos/dpn2eg9O3Rs.mkv",
+      "check_dataset/../../dataset/k400_examples/videos/hXtsZAaZ3yc.mkv"
+    ]
+  },
+  "analysis": {
+    "histogram": "check_dataset/histogram.png"
+  },
+  "dataset_path": "./dataset/k400_examples",
+  "show_type": "video",
+  "dataset_type": "VideoClsDataset"
+}
+</code></pre>
+<p>In the above validation results, check_pass being True indicates that the dataset format meets the requirements. Explanations of the other indicators are as follows:</p>
+<ul>
+<li><code>attributes.num_classes</code>: the number of classes in this dataset is 5;</li>
+<li><code>attributes.train_samples</code>: the number of training samples in this dataset is 250;</li>
+<li><code>attributes.val_samples</code>: the number of validation samples in this dataset is 50;</li>
+<li><code>attributes.train_sample_paths</code>: a list of relative paths to the visualized training videos in this dataset;</li>
+<li><code>attributes.val_sample_paths</code>: a list of relative paths to the visualized validation videos in this dataset;</li>
+</ul>
+<p>In addition, dataset validation analyzes the sample number distribution across all classes in the dataset and draws a distribution histogram (histogram.png):</p>
+<p><img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/modules/video_classification/01.png"></p></details>
+
+#### 4.1.3 Dataset Format Conversion / Dataset Splitting (Optional)
+After completing data validation, you can convert the dataset format or re-split the training/validation ratio of the dataset by <b>modifying the configuration file</b> or <b>appending hyperparameters</b>.
+
+<details><summary>👉 <b>Format Conversion / Dataset Splitting Details (Click to Expand)</b></summary>
+
+<p><b>(1) Dataset Format Conversion</b></p>
+<p>Video classification does not currently support data conversion.</p>
+<p><b>(2) Dataset Splitting</b></p>
+<p>The parameters for dataset splitting can be set by modifying the fields under <code>CheckDataset</code> in the configuration file. Example explanations of some parameters in the configuration file are as follows:</p>
+<ul>
+<li><code>CheckDataset</code>:</li>
+<li><code>split</code>:</li>
+<li><code>enable</code>: whether to re-split the dataset. When set to <code>True</code>, the dataset will be re-split. The default is <code>False</code>;</li>
+<li><code>train_percent</code>: if re-splitting the dataset, you need to set the percentage of the training set, which should be an integer between 0 and 100, and the sum with <code>val_percent</code> must equal 100;</li>
+</ul>
+<p>For example, if you want to re-split the dataset with a 90% training set and a 10% validation set, you need to modify the configuration file as follows:</p>
+<pre><code class="language-bash">......
+CheckDataset:
+  ......
+  split:
+    enable: True
+    train_percent: 90
+    val_percent: 10
+  ......
+</code></pre>
+<p>Then execute the command:</p>
+<pre><code class="language-bash">python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples
+</code></pre>
+<p>After the data split is executed, the original annotation files will be renamed to <code>xxx.bak</code> in their original locations.</p>
+<p>The above parameters can also be set by appending command-line arguments:</p>
+<pre><code class="language-bash">python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=check_dataset \
+    -o Global.dataset_dir=./dataset/k400_examples \
+    -o CheckDataset.split.enable=True \
+    -o CheckDataset.split.train_percent=90 \
+    -o CheckDataset.split.val_percent=10
+</code></pre></details>
+
+### 4.2 Model Training
+A single command is enough to complete model training. Taking the training of the video classification model PPTSMv2_LCNet_k400_8frames_uniform as an example:
+
+```
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml  \
+    -o Global.mode=train \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+The following steps are required:
+
+* Specify the path of the model's `.yaml` configuration file (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`; when training other models, you need to specify the corresponding configuration file. The correspondence between models and configuration files can be found in the [PaddleX Model List (CPU/GPU)](../../../support_list/models_list.md))
+* Specify the mode as model training: `-o Global.mode=train`
+* Specify the path of the training dataset: `-o Global.dataset_dir`
+Other related parameters can be set by modifying the fields under `Global` and `Train` in the `.yaml` configuration file, or adjusted by appending parameters on the command line. For example, to train on the first two GPUs: `-o Global.device=gpu:0,1`; to set the number of training epochs to 10: `-o Train.epochs_iters=10`. For more modifiable parameters and their detailed explanations, refer to the configuration file description for the corresponding task module in the [PaddleX Common Model Configuration File Parameters](../../instructions/config_parameters_common.md).
+
+<details><summary>👉 <b>More Details (Click to Expand)</b></summary>
+
+<ul>
+<li>During model training, PaddleX automatically saves the model weight files, with the default directory being <code>output</code>. If you need to specify a save path, you can set it through the <code>-o Global.output</code> field in the configuration file.</li>
+<li>PaddleX shields you from the concepts of dynamic graph weights and static graph weights. During model training, both dynamic and static graph weights are produced, and static graph weights are selected by default for model inference.</li>
+<li>
+<p>After model training is completed, all outputs are saved in the specified output directory (default is <code>./output/</code>), typically including:</p>
+</li>
+<li>
+<p><code>train_result.json</code>: training result record file, recording whether the training task was completed normally, as well as the produced weight metrics, related file paths, etc.;</p>
+</li>
+<li><code>train.log</code>: training log file, recording changes in model metrics and loss during training;</li>
+<li><code>config.yaml</code>: training configuration file, recording the hyperparameter configuration of this training session;</li>
+<li><code>.pdparams</code>, <code>.pdema</code>, <code>.pdopt.pdstate</code>, <code>.pdiparams</code>, <code>.pdmodel</code>: model weight-related files, including network parameters, optimizer, EMA, static graph network parameters, static graph network structure, etc.;</li>
+</ul></details>
+
+### <b>4.3 Model Evaluation</b>
+After completing model training, you can evaluate the specified model weight file on the validation set to verify the model's accuracy. With PaddleX, a single command is enough to complete model evaluation:
+
+```bash
+python main.py -c  paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml  \
+    -o Global.mode=evaluate \
+    -o Global.dataset_dir=./dataset/k400_examples
+```
+Similar to model training, the following steps are required:
+
+* Specify the path of the model's `.yaml` configuration file (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`)
+* Specify the mode as model evaluation: `-o Global.mode=evaluate`
+* Specify the path of the validation dataset: `-o Global.dataset_dir`
+Other related parameters can be set by modifying the fields under `Global` and `Evaluate` in the `.yaml` configuration file. For details, please refer to the [PaddleX Common Model Configuration File Parameter Description](../../instructions/config_parameters_common.md).
+
+<details><summary>👉 <b>More Details (Click to Expand)</b></summary>
+
+<p>When evaluating the model, you need to specify the model weight file path. Each configuration file has a default weight save path built in. If you need to change it, simply set it by appending a command-line argument, such as <code>-o Evaluate.weight_path=./output/best_model/best_model.pdparams</code>.</p>
+<p>After the model evaluation is completed, an <code>evaluate_result.json</code> file will be produced, which records the evaluation results; specifically, whether the evaluation task was completed normally and the model's evaluation metrics, including val.top1 and val.top5;</p></details>
+
+### <b>4.4 Model Inference and Model Integration</b>
+
+After completing model training and evaluation, you can use the trained model weights for inference prediction or Python integration.
+
+#### 4.4.1 Model Inference
+Inference prediction can be performed from the command line with just the following command. Before running the following code, please download the [demo video](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4) to your local machine.
+
+```bash
+python main.py -c paddlex/configs/modules/video_classification/PPTSMv2_LCNet_k400_8frames_uniform.yaml \
+    -o Global.mode=predict \
+    -o Predict.model_dir="./output/best_model/inference" \
+    -o Predict.input="general_video_classification_001.mp4"
+```
+Similar to model training and evaluation, the following steps are required:
+
+* Specify the path of the model's `.yaml` configuration file (here it is `PPTSMv2_LCNet_k400_8frames_uniform.yaml`)
+* Specify the mode as model inference prediction: `-o Global.mode=predict`
+* Specify the model weight path: `-o Predict.model_dir="./output/best_model/inference"`
+* Specify the input data path: `-o Predict.input="..."`
+Other related parameters can be set by modifying the fields under `Global` and `Predict` in the `.yaml` configuration file. For details, please refer to the [PaddleX Common Model Configuration File Parameter Description](../../instructions/config_parameters_common.md).
+
+#### 4.4.2 Model Integration
+The model can be directly integrated into PaddleX pipelines or directly into your own project.
+
+1.<b>Pipeline Integration</b>
+
+The video classification module can be integrated into the [General Video Classification Pipeline](../../../pipeline_usage/tutorials/video_pipelines/video_classification.md) of PaddleX. Simply replace the model path to update the video classification module of the relevant pipeline. In pipeline integration, you can use high-performance inference and service-oriented deployment to deploy your model.
+
+2.<b>Module Integration</b>
+
+The weights you produce can be directly integrated into the video classification module. You can refer to the Python example code in the Quick Integration section above and simply replace the model with the path to your trained model.

+ 781 - 0
docs/pipeline_usage/tutorials/video_pipelines/video_classification.en.md

@@ -0,0 +1,781 @@
+---
+comments: true
+---
+
+# General Video Classification Pipeline Tutorial
+
+## 1. Introduction to the General Video Classification Pipeline
+Video Classification is a technique that assigns video clips to predefined categories. It is widely applied in fields such as action recognition, event detection, and content recommendation. Video classification can recognize various dynamic events and scenes, such as sports activities, natural phenomena, traffic conditions, and categorize them based on their characteristics. By utilizing deep learning models, especially the combination of Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN), video classification can automatically extract spatio-temporal features from videos and perform accurate classification. This technology holds significant applications in video surveillance, media retrieval, and personalized recommendation systems.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/pipelines/video_classification/01.jpg">
+<b>The General Video Classification Pipeline includes a video classification module. If you prioritize model accuracy, choose a model with higher accuracy. If you prioritize inference speed, select a model with faster inference. If you prioritize model storage size, choose a model with a smaller storage size.</b>
+
+<details><summary> 👉Details of Model List</summary>
+
+<table>
+<tr>
+<th>Model</th><th>Model Download Link</th>
+<th>Top1 Acc(%)</th>
+<th>Model Storage Size (M)</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>PPTSM_ResNet50_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSM_ResNet50_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSM_ResNet50_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>74.36</td>
+<td>93.4 M</td>
+<td rowspan="1">
+PP-TSM is a video classification model developed by Baidu PaddlePaddle's Vision Team. This model is optimized based on the ResNet-50 backbone network and undergoes model tuning in six aspects: data augmentation, network structure fine-tuning, training strategies, Batch Normalization (BN) layer optimization, pre-trained model selection, and model distillation. Under the center crop evaluation method, its accuracy on Kinetics-400 is improved by 3.95 points compared to the original paper's implementation.
+</td>
+</tr>
+
+<tr>
+<td>PPTSMv2_LCNet_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_8frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_8frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>71.71</td>
+<td>22.5 M</td>
+<td rowspan="2">PP-TSMv2 is a lightweight video classification model optimized based on the CPU-oriented model PP-LCNetV2. It undergoes model tuning in seven aspects: backbone network and pre-trained model selection, data augmentation, TSM module tuning, input frame number optimization, decoding speed optimization, DML distillation, and LTA module. Under the center crop evaluation method, it achieves an accuracy of 75.16%, with an inference speed of only 456ms on the CPU for a 10-second video input.</td>
+</tr>
+<tr>
+<td>PPTSMv2_LCNet_k400_16frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_16frames_uniform_infer.tar">Inference Model</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_16frames_uniform_pretrained.pdparams">Trained Model</a></td>
+<td>73.11</td>
+<td>22.5 M</td>
+</tr>
+
+</table>
+
+<p><b>Note: The above accuracy metrics refer to Top-1 Accuracy on the <a href="https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/dataset/k400.md">K400</a> validation set. </b><b>All model GPU inference times are based on NVIDIA Tesla T4 machines, with precision type FP32. CPU inference speeds are based on Intel® Xeon® Gold 5117 CPU @ 2.00GHz, with 8 threads and precision type FP32.</b></p></details>
+
+## 2. Quick Start
+
+PaddleX supports experiencing the effects of pipelines locally using the command line or Python.
+
+Before using the general video classification pipeline locally, please ensure that you have completed the installation of the PaddleX wheel package according to the PaddleX local installation tutorial.
+
+#### 2.1 Command Line Experience
+A single command is all you need to quickly experience the video classification pipeline. Use the [test file](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4), replacing `--input` with your local path, to perform prediction.
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4 --device gpu:0
+```
+Parameter Explanation:
+
+```
+--pipeline: The name of the pipeline, here it is the video classification pipeline.
+--input: The local path or URL of the input video to be processed.
+--device: The GPU index to use (e.g., gpu:0 for the first GPU, gpu:1,2 for the second and third GPUs). You can also choose to use CPU (--device cpu).
+```
+
+When executing the above command, the default video classification pipeline configuration file is loaded. If you need to customize the configuration file, you can execute the following command to obtain it:
+
+<details><summary> 👉Click to expand</summary>
+
+<pre><code class="language-bash">paddlex --get_pipeline_config video_classification
+</code></pre>
+<p>After execution, the video classification pipeline configuration file will be saved in the current path. If you wish to customize the save location, you can execute the following command (assuming the custom save location is <code>./my_path</code>):</p>
+<pre><code class="language-bash">paddlex --get_pipeline_config video_classification --save_path ./my_path
+</code></pre>
+<p>After obtaining the pipeline configuration file, replace <code>--pipeline</code> with the configuration file's save path to make the configuration file take effect. For example, if the configuration file's save path is <code>./video_classification.yaml</code>, simply execute:</p>
+<pre><code class="language-bash">paddlex --pipeline ./video_classification.yaml --input general_video_classification_001.mp4  --device gpu:0
+</code></pre>
+<p>Here, parameters such as <code>--model</code> and <code>--device</code> do not need to be specified, as they will use the parameters in the configuration file. If you still specify parameters, the specified parameters will take precedence.</p></details>
+
+After running, the result will be:
+
+```
+{'input_path': 'general_video_classification_001.mp4', 'class_ids': [0], 'scores': array([0.91996]), 'label_names': ['abseiling']}
+```
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/pipelines/video_classification/02.jpg">
+
+
+The visualized video is not saved by default. You can customize the save path through `--save_path`, and then all results will be saved in the specified path.
+
+#### 2.2  Python Script Integration
+A few lines of code can complete the quick inference of the pipeline. Taking the general video classification pipeline as an example:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="video_classification")
+
+output = pipeline.predict("general_video_classification_001.mp4")
+for res in output:
+    res.print() # Print the structured output of the prediction
+    res.save_to_video("./output/")  # Save the result visualization video
+    res.save_to_json("./output/") # Save the structured output of the prediction
+```
+The results obtained are the same as those obtained through the command line method.
+
+In the above Python script, the following steps are executed:
+
+(1) Instantiate the `create_pipeline` to create a pipeline object: The specific parameter descriptions are as follows:
+
+<table>
+<thead>
+<tr>
+<th>Parameter</th>
+<th>Description</th>
+<th>Type</th>
+<th>Default</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>pipeline</code></td>
+<td>The name of the pipeline or the path to the pipeline configuration file. If it is the name of the pipeline, it must be a pipeline supported by PaddleX.</td>
+<td><code>str</code></td>
+<td>None</td>
+</tr>
+<tr>
+<td><code>device</code></td>
+<td>The device for pipeline model inference. Supports: "gpu", "cpu".</td>
+<td><code>str</code></td>
+<td>"gpu"</td>
+</tr>
+<tr>
+<td><code>use_hpip</code></td>
+<td>Whether to enable high-performance inference, which is only available when the pipeline supports it.</td>
+<td><code>bool</code></td>
+<td><code>False</code></td>
+</tr>
+</tbody>
+</table>
+(2) Call the `predict` method of the video classification pipeline object for inference prediction: The `predict` method parameter is `x`, which is used to input data to be predicted, supporting multiple input methods, as shown in the following examples:
+
+<table>
+<thead>
+<tr>
+<th>Parameter Type</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Python Var</td>
+<td>Supports directly passing Python variables, such as numpy.ndarray representing video data.</td>
+</tr>
+<tr>
+<td><code>str</code></td>
+<td>Supports passing the path of the file to be predicted, such as the local path of a video file: <code>/root/data/video.mp4</code>.</td>
+</tr>
+<tr>
+<td><code>str</code></td>
+<td>Supports passing the URL of the file to be predicted, such as the network URL of a video file: <a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4">Example</a>.</td>
+</tr>
+<tr>
+<td><code>str</code></td>
+<td>Supports passing a local directory, which should contain files to be predicted, such as the local path: <code>/root/data/</code>.</td>
+</tr>
+<tr>
+<td><code>dict</code></td>
+<td>Supports passing a dictionary type, where the key needs to correspond to the specific task, such as "video" for the video classification task, and the value of the dictionary supports the above data types, e.g., <code>{"video": "/root/data1"}</code>.</td>
+</tr>
+<tr>
+<td><code>list</code></td>
+<td>Supports passing a list, where the list elements need to be the above data types, such as <code>[numpy.ndarray, numpy.ndarray]</code>, <code>["/root/data/video1.mp4", "/root/data/video2.mp4"]</code>, <code>["/root/data1", "/root/data2"]</code>, <code>[{"video": "/root/data1"}, {"video": "/root/data2/video.mp4"}]</code>.</td>
+</tr>
+</tbody>
+</table>
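+
+For reference, the sketch below exercises a few of the input forms listed above with the pipeline created earlier; the concrete paths are placeholders:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="video_classification")
+
+# A single local file, a list of files, and a directory are all accepted inputs.
+inputs = [
+    "general_video_classification_001.mp4",
+    ["/root/data/video1.mp4", "/root/data/video2.mp4"],
+    "/root/data/",
+]
+for item in inputs:
+    for res in pipeline.predict(item):
+        res.print()
+```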
+(3) Obtain prediction results by calling the `predict` method: The `predict` method is a `generator`, so prediction results need to be obtained through iteration. The `predict` method predicts data in batches, so the prediction results are in the form of a list.
+
+(4) Process the prediction results: The prediction result for each sample is of `dict` type and supports printing or saving to files, with the supported file types depending on the specific pipeline. The available methods are listed below, followed by a combined usage sketch:
+
+<table>
+<thead>
+<tr>
+<th>Method</th>
+<th>Description</th>
+<th>Method Parameters</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>print</td>
+<td>Prints results to the terminal</td>
+<td><code>- format_json</code>: bool, whether to format the output content with json indentation, default is True;<br/><code>- indent</code>: int, json formatting setting, only valid when format_json is True, default is 4;<br/><code>- ensure_ascii</code>: bool, json formatting setting, only valid when format_json is True, default is False;</td>
+</tr>
+<tr>
+<td>save_to_json</td>
+<td>Saves results as a json file</td>
+<td><code>- save_path</code>: str, the path to save the file, when it's a directory, the saved file name is consistent with the input file type;<br/><code>- indent</code>: int, json formatting setting, default is 4;<br/><code>- ensure_ascii</code>: bool, json formatting setting, default is False;</td>
+</tr>
+<tr>
+<td>save_to_video</td>
+<td>Saves results as a video file</td>
+<td><code>- save_path</code>: str, the path to save the file, when it's a directory, the saved file name is consistent with the input file type;</td>
+</tr>
+</tbody>
+</table>
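+
+For instance, the methods above can be combined as follows (a minimal sketch; the output directory is a placeholder):
+
+```python
+for res in output:
+    # Print with explicit JSON formatting options
+    res.print(format_json=True, indent=4, ensure_ascii=False)
+    # Save the structured result as a JSON file named after the input file
+    res.save_to_json("./output/", indent=4, ensure_ascii=False)
+    # Save the result visualization video
+    res.save_to_video("./output/")
+```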
+If you have a configuration file, you can customize the configurations of the video classification pipeline by simply setting the `pipeline` parameter in the `create_pipeline` method to the path of the pipeline configuration file.
+
+For example, if your configuration file is saved at `./my_path/video_classification.yaml`, you only need to execute:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/video_classification.yaml")
+output = pipeline.predict("general_video_classification_001.mp4")
+for res in output:
+    res.print()  # Print the structured output of prediction
+    res.save_to_video("./output/")  # Save the visualization video of the result
+    res.save_to_json("./output/")  # Save the structured output of prediction
+```
+
+## 3. Development Integration/Deployment
+If the pipeline meets your requirements for inference speed and accuracy, you can proceed directly with development integration/deployment.
+
+If you need to apply the pipeline directly in your Python project, refer to the example code in [2.2 Python Script Integration](#22-python-script-integration).
+
+Additionally, PaddleX provides three other deployment methods, detailed as follows:
+
+🚀 <b>High-Performance Inference</b>: In actual production environments, many applications have stringent standards for the performance metrics of deployment strategies (especially response speed) to ensure efficient system operation and smooth user experience. To this end, PaddleX provides high-performance inference plugins aimed at deeply optimizing model inference and pre/post-processing for significant end-to-end speedups. For detailed high-performance inference procedures, refer to the [PaddleX High-Performance Inference Guide](../../../pipeline_deploy/high_performance_inference.en.md).
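+
+Assuming the high-performance inference plugin described in the guide above is installed, enabling it for this pipeline is expected to only require the `use_hpip` flag documented in Section 2.2 (a sketch, not a verified configuration):
+
+```python
+from paddlex import create_pipeline
+
+# use_hpip=True requests the high-performance inference plugin, if available
+pipeline = create_pipeline(pipeline="video_classification", use_hpip=True)
+```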
+
+☁️ <b>Service-Oriented Deployment</b>: Service-oriented deployment is a common deployment form in actual production environments. By encapsulating inference functions as services, clients can access these services through network requests to obtain inference results. PaddleX supports users in achieving low-cost service-oriented deployment of pipelines. For detailed service-oriented deployment procedures, refer to the [PaddleX Service-Oriented Deployment Guide](../../../pipeline_deploy/service_deploy.en.md).
+
+Below are the API references and multi-language service invocation examples:
+
+<details><summary>API Reference</summary>
+
+<p>For the main operations provided by the service:</p>
+<ul>
+<li>The HTTP request method is POST.</li>
+<li>The request body and the response body are both JSON data (JSON objects).</li>
+<li>When the request is processed successfully, the response status code is <code>200</code>, and the response body properties are as follows:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>errorCode</code></td>
+<td><code>integer</code></td>
+<td>Error code. Fixed as <code>0</code>.</td>
+</tr>
+<tr>
+<td><code>errorMsg</code></td>
+<td><code>string</code></td>
+<td>Error message. Fixed as <code>"Success"</code>.</td>
+</tr>
+</tbody>
+</table>
+<p>The response body may also have a <code>result</code> property of type <code>object</code>, which stores the operation result information.</p>
+<ul>
+<li>When the request is not processed successfully, the response body properties are as follows:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>errorCode</code></td>
+<td><code>integer</code></td>
+<td>Error code. Same as the response status code.</td>
+</tr>
+<tr>
+<td><code>errorMsg</code></td>
+<td><code>string</code></td>
+<td>Error message.</td>
+</tr>
+</tbody>
+</table>
+<p>Main operations provided by the service are as follows:</p>
+<ul>
+<li><b><code>infer</code></b></li>
+</ul>
+<p>Classify videos.</p>
+<p><code>POST /video-classification</code></p>
+<ul>
+<li>The request body properties are as follows:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+<th>Required</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>video</code></td>
+<td><code>string</code></td>
+<td>The URL of a video file accessible by the service or the Base64-encoded result of the video file content.</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td><code>inferenceParams</code></td>
+<td><code>object</code></td>
+<td>Inference parameters.</td>
+<td>No</td>
+</tr>
+</tbody>
+</table>
+<p>The properties of <code>inferenceParams</code> are as follows:</p>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+<th>Required</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>topK</code></td>
+<td><code>integer</code></td>
+<td>Only the top <code>topK</code> categories with the highest scores will be retained in the results.</td>
+<td>No</td>
+</tr>
+</tbody>
+</table>
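+<p>For example, a request body that keeps only the top 3 categories could look like the following (a sketch; the <code>video</code> value is a placeholder for the Base64-encoded file content or a URL):</p>
+<pre><code class="language-json">{
+&quot;video&quot;: &quot;xxxxxx&quot;,
+&quot;inferenceParams&quot;: {
+&quot;topK&quot;: 3
+}
+}
+</code></pre>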
+<ul>
+<li>When the request is processed successfully, the <code>result</code> of the response body has the following properties:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>categories</code></td>
+<td><code>array</code></td>
+<td>Video category information.</td>
+</tr>
+<tr>
+<td><code>video</code></td>
+<td><code>string</code></td>
+<td>The video classification result video, encoded using Base64.</td>
+</tr>
+</tbody>
+</table>
+<p>Each element in <code>categories</code> is an <code>object</code> with the following properties:</p>
+<table>
+<thead>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>id</code></td>
+<td><code>integer</code></td>
+<td>Category ID.</td>
+</tr>
+<tr>
+<td><code>name</code></td>
+<td><code>string</code></td>
+<td>Category name.</td>
+</tr>
+<tr>
+<td><code>score</code></td>
+<td><code>number</code></td>
+<td>Category score.</td>
+</tr>
+</tbody>
+</table>
+<p>An example of <code>result</code> is as follows:</p>
+<pre><code class="language-json">{
+&quot;categories&quot;: [
+{
+&quot;id&quot;: 5,
+&quot;name&quot;: &quot;Rabbit&quot;,
+&quot;score&quot;: 0.93
+}
+],
+&quot;video&quot;: &quot;xxxxxx&quot;
+}
+</code></pre></details>
+
+<details><summary>Multi-Language Service Invocation Examples</summary>
+
+<details>
+<summary>Python</summary>
+
+
+<pre><code class="language-python">import base64
+import requests
+
+API_URL = &quot;http://localhost:8080/video-classification&quot;
+video_path = &quot;./demo.mp4&quot;
+output_video_path = &quot;./out.mp4&quot;
+
+with open(video_path, &quot;rb&quot;) as file:
+    video_bytes = file.read()
+    video_data = base64.b64encode(video_bytes).decode(&quot;ascii&quot;)
+
+payload = {&quot;video&quot;: video_data}
+
+response = requests.post(API_URL, json=payload)
+
+assert response.status_code == 200
+result = response.json()[&quot;result&quot;]
+with open(output_video_path, &quot;wb&quot;) as file:
+    file.write(base64.b64decode(result[&quot;video&quot;]))
+print(f&quot;Output video saved at {output_video_path}&quot;)
+print(&quot;\nCategories:&quot;)
+print(result[&quot;categories&quot;])
+</code></pre></details>
+<details><summary>C++</summary>
+
+<pre><code class="language-cpp">#include &lt;iostream&gt;
+#include &quot;cpp-httplib/httplib.h&quot; // https://github.com/Huiyicc/cpp-httplib
+#include &quot;nlohmann/json.hpp&quot; // https://github.com/nlohmann/json
+#include &quot;base64.hpp&quot; // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client(&quot;localhost:8080&quot;);
+    const std::string videoPath = &quot;./demo.mp4&quot;;
+    const std::string outputvideoPath = &quot;./out.mp4&quot;;
+
+    httplib::Headers headers = {
+        {&quot;Content-Type&quot;, &quot;application/json&quot;}
+    };
+
+    std::ifstream file(videoPath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector&lt;char&gt; buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr &lt;&lt; &quot;Error reading file.&quot; &lt;&lt; std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast&lt;const char*&gt;(buffer.data()), buffer.size());
+    std::string encodedVideo = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj[&quot;video&quot;] = encodedVideo;
+    std::string body = jsonObj.dump();
+
+    auto response = client.Post(&quot;/video-classification&quot;, headers, body, &quot;application/json&quot;);
+    if (response &amp;&amp; response-&gt;status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response-&gt;body);
+        auto result = jsonResponse[&quot;result&quot;];
+
+        encodedVideo = result[&quot;video&quot;];
+        std::string decodedString = base64::from_base64(encodedVideo);
+        std::vector&lt;unsigned char&gt; decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputvideoPath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast&lt;char*&gt;(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout &lt;&lt; &quot;Output video saved at &quot; &lt;&lt; outputvideoPath &lt;&lt; std::endl;
+        } else {
+            std::cerr &lt;&lt; &quot;Unable to open file for writing: &quot; &lt;&lt; outputvideoPath &lt;&lt; std::endl;
+        }
+
+        auto categories = result[&quot;categories&quot;];
+        std::cout &lt;&lt; &quot;\nCategories:&quot; &lt;&lt; std::endl;
+        for (const auto&amp; category : categories) {
+            std::cout &lt;&lt; category &lt;&lt; std::endl;
+        }
+    } else {
+        std::cout &lt;&lt; &quot;Failed to send HTTP request.&quot; &lt;&lt; std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+</code></pre></details>
+
+<details><summary>Java</summary>
+
+<pre><code class="language-java">import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = &quot;http://localhost:8080/video-classification&quot;;
+        String videoPath = &quot;./demo.mp4&quot;;
+        String outputvideoPath = &quot;./out.mp4&quot;;
+
+        File file = new File(videoPath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String videoData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put(&quot;video&quot;, videoData);
+
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.Companion.get(&quot;application/json; charset=utf-8&quot;);
+        RequestBody body = RequestBody.Companion.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get(&quot;result&quot;);
+                String base64Image = result.get(&quot;video&quot;).asText();
+                JsonNode categories = result.get(&quot;categories&quot;);
+
+                byte[] videoBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputvideoPath)) {
+                    fos.write(videoBytes);
+                }
+                System.out.println(&quot;Output video saved at &quot; + outputvideoPath);
+                System.out.println(&quot;\nCategories: &quot; + categories.toString());
+            } else {
+                System.err.println(&quot;Request failed with code: &quot; + response.code());
+            }
+        }
+    }
+}
+</code></pre></details>
+
+<details><summary>Go</summary>
+
+<pre><code class="language-go">package main
+
+import (
+    &quot;bytes&quot;
+    &quot;encoding/base64&quot;
+    &quot;encoding/json&quot;
+    &quot;fmt&quot;
+    &quot;io/ioutil&quot;
+    &quot;net/http&quot;
+)
+
+func main() {
+    API_URL := &quot;http://localhost:8080/video-classification&quot;
+    videoPath := &quot;./demo.mp4&quot;
+    outputvideoPath := &quot;./out.mp4&quot;
+
+    videoBytes, err := ioutil.ReadFile(videoPath)
+    if err != nil {
+        fmt.Println(&quot;Error reading video file:&quot;, err)
+        return
+    }
+    videoData := base64.StdEncoding.EncodeToString(videoBytes)
+
+    payload := map[string]string{&quot;video&quot;: videoData}
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println(&quot;Error marshaling payload:&quot;, err)
+        return
+    }
+
+    client := &amp;http.Client{}
+    req, err := http.NewRequest(&quot;POST&quot;, API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println(&quot;Error creating request:&quot;, err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println(&quot;Error sending request:&quot;, err)
+        return
+    }
+    defer res.Body.Close()
+
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println(&quot;Error reading response body:&quot;, err)
+        return
+    }
+    type Response struct {
+        Result struct {
+            Image      string   `json:&quot;video&quot;`
+            Categories []map[string]interface{} `json:&quot;categories&quot;`
+        } `json:&quot;result&quot;`
+    }
+    var respData Response
+    err = json.Unmarshal([]byte(string(body)), &amp;respData)
+    if err != nil {
+        fmt.Println(&quot;Error unmarshaling response body:&quot;, err)
+        return
+    }
+
+    outputImageData, err := base64.StdEncoding.DecodeString(respData.Result.Image)
+    if err != nil {
+        fmt.Println(&quot;Error decoding base64 video data:&quot;, err)
+        return
+    }
+    err = ioutil.WriteFile(outputvideoPath, outputImageData, 0644)
+    if err != nil {
+        fmt.Println(&quot;Error writing video to file:&quot;, err)
+        return
+    }
+    fmt.Printf(&quot;Output video saved at %s\n&quot;, outputvideoPath)
+    fmt.Println(&quot;\nCategories:&quot;)
+    for _, category := range respData.Result.Categories {
+        fmt.Println(category)
+    }
+}
+</code></pre></details>
+
+<details><summary>C#</summary>
+
+<pre><code class="language-csharp">using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = &quot;http://localhost:8080/video-classification&quot;;
+    static readonly string videoPath = &quot;./demo.mp4&quot;;
+    static readonly string outputvideoPath = &quot;./out.mp4&quot;;
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        byte[] videoBytes = File.ReadAllBytes(videoPath);
+        string video_data = Convert.ToBase64String(videoBytes);
+
+        var payload = new JObject{ { &quot;video&quot;, video_data } };
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, &quot;application/json&quot;);
+
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse[&quot;result&quot;][&quot;video&quot;].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputvideoPath, outputImageBytes);
+        Console.WriteLine($&quot;Output video saved at {outputvideoPath}&quot;);
+        Console.WriteLine(&quot;\nCategories:&quot;);
+        Console.WriteLine(jsonResponse[&quot;result&quot;][&quot;categories&quot;].ToString());
+    }
+}
+</code></pre></details>
+
+<details><summary>Node.js</summary>
+
+<pre><code class="language-js">const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/video-classification'
+const videoPath = './demo.mp4'
+const outputvideoPath = &quot;./out.mp4&quot;;
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'video': encodeImageToBase64(videoPath)
+  })
+};
+
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+axios.request(config)
+.then((response) =&gt; {
+    const result = response.data[&quot;result&quot;];
+    const videoBuffer = Buffer.from(result[&quot;video&quot;], 'base64');
+    fs.writeFile(outputvideoPath, videoBuffer, (err) =&gt; {
+      if (err) throw err;
+      console.log(`Output video saved at ${outputvideoPath}`);
+    });
+    console.log(&quot;\nCategories:&quot;);
+    console.log(result[&quot;categories&quot;]);
+})
+.catch((error) =&gt; {
+  console.log(error);
+});
+</code></pre></details>
+<details><summary>PHP</summary>
+
+<pre><code class="language-php">&lt;?php
+
+$API_URL = &quot;http://localhost:8080/video-classification&quot;;
+$video_path = &quot;./demo.mp4&quot;;
+$output_video_path = &quot;./out.mp4&quot;;
+
+$video_data = base64_encode(file_get_contents($video_path));
+$payload = array(&quot;video&quot; =&gt; $video_data);
+
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+$result = json_decode($response, true)[&quot;result&quot;];
+file_put_contents($output_video_path, base64_decode($result[&quot;video&quot;]));
+echo &quot;Output video saved at &quot; . $output_video_path . &quot;\n&quot;;
+echo &quot;\nCategories:\n&quot;;
+print_r($result[&quot;categories&quot;]);
+?&gt;
+</code></pre></details>
+
+</details>
+<br/>
+
+📱 <b>Edge Deployment</b>: Edge deployment is a method that places computing and data processing functions on user devices themselves, allowing devices to process data directly without relying on remote servers. PaddleX supports deploying models on edge devices such as Android. For detailed edge deployment procedures, refer to the [PaddleX Edge Deployment Guide](../../../pipeline_deploy/edge_deploy.en.md).
+You can choose the appropriate deployment method for your model pipeline based on your needs and proceed with subsequent AI application integration.
+
+## 4. Custom Development
+If the default model weights provided by the general video classification pipeline do not meet your requirements for accuracy or speed in your specific scenario, you can try to further fine-tune the existing model using <b>data from your specific domain or application scenario</b> to improve the recognition performance of the general video classification pipeline in your scenario.
+
+### 4.1 Model Fine-tuning
+Since the general video classification pipeline includes a video classification module, if the performance of the pipeline does not meet expectations, you need to refer to the [Customization](../../../module_usage/tutorials/video_modules/video_classification.en.md#iv-custom-development) section in the [Video Classification Module Development Tutorial](../../../module_usage/tutorials/video_modules/video_classification.en.md) and fine-tune the video classification model using your private dataset.
+
+### 4.2 Model Application
+After you have completed fine-tuning training using your private dataset, you will obtain local model weight files.
+
+If you need to use the fine-tuned model weights, simply modify the pipeline configuration file by replacing the model path at the corresponding location with the local path of the fine-tuned model weights:
+
+```yaml
+......
+Pipeline:
+  model: PPTSMv2_LCNet_k400_8frames_uniform  # Can be modified to the local path of the fine-tuned model
+  device: "gpu"
+  batch_size: 1
+......
+```
+Then, refer to the command line method or Python script method in the local experience section to load the modified pipeline configuration file.
+
+## 5. Multi-hardware Support
+PaddleX supports various mainstream hardware devices such as NVIDIA GPUs, Kunlun XPU, Ascend NPU, and Cambricon MLU. <b>Simply modify the `--device` parameter</b> to seamlessly switch between different hardware.
+
+For example, if you use an NVIDIA GPU for inference with the video classification pipeline, the command is:
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4  --device gpu:0
+```
+At this point, if you wish to switch the hardware to an Ascend NPU, simply change `--device` in the command to `npu:0`:
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4  --device npu:0
+```
+If you want to use the General Video Classification Pipeline on more types of hardware, please refer to the [PaddleX Multi-Device Usage Guide](../../../other_devices_support/multi_devices_use_guide.en.md).

+ 801 - 0
docs/pipeline_usage/tutorials/video_pipelines/video_classification.md

@@ -0,0 +1,801 @@
+---
+comments: true
+---
+
+# 通用视频分类产线使用教程
+
+## 1. 通用视频分类产线介绍
+视频分类是一种将视频片段分配到预定义类别的技术。它广泛应用于动作识别、事件检测和内容推荐等领域。视频分类可以识别各种动态事件和场景,如体育活动、自然现象、交通状况等,并根据其特征将其归类。通过使用深度学习模型,尤其是卷积神经网络(CNN)和循环神经网络(RNN)的结合,视频分类能够自动提取视频中的时空特征并进行准确分类。这种技术在视频监控、媒体检索和个性化推荐系统中具有重要应用.
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/pipelines/video_classification/01.jpg">
+<b>通用视频分类产线中包含了视频分类模块,如您更考虑模型精度,请选择精度较高的模型,如您更考虑模型推理速度,请选择推理速度较快的模型,如您更考虑模型存储大小,请选择存储大小较小的模型</b>。
+
+<details><summary> 👉模型列表详情</summary>
+
+
+<table>
+<tr>
+<th>模型</th><th>模型下载链接</th>
+<th>Top1 Acc(%)</th>
+<th>模型存储大小 (M)</th>
+<th>介绍</th>
+</tr>
+<tr>
+<td>PPTSM_ResNet50_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSM_ResNet50_k400_8frames_uniform_infer.tar">推理模型</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSM_ResNet50_k400_8frames_uniform_pretrained.pdparams">训练模型</a></td>
+<td>74.36</td>
+<td>93.4 M</td>
+<td rowspan="1">
+PP-TSM是一种百度飞桨视觉团队自研的视频分类模型。该模型基于ResNet-50骨干网络进行优化,从数据增强、网络结构微调、训练策略、BN层优化、预训练模型选择、模型蒸馏等6个方面进行模型调优,在中心采样评估方式下,Kinetics-400上精度较原论文实现提升3.95个点
+</td>
+</tr>
+
+<tr>
+<td>PPTSMv2_LCNet_k400_8frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_8frames_uniform_infer.tar">推理模型</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_8frames_uniform_pretrained.pdparams">训练模型</a></td>
+<td>71.71</td>
+<td>22.5 M</td>
+<td rowspan="2">PP-TSMv2是轻量化的视频分类模型,基于CPU端模型PP-LCNetV2进行优化,从骨干网络与预训练模型选择、数据增强、tsm模块调优、输入帧数优化、解码速度优化、DML蒸馏、LTA模块等7个方面进行模型调优,在中心采样评估方式下,精度达到75.16%,输入10s视频在CPU端的推理速度仅需456ms。</td>
+</tr>
+<tr>
+<td>PPTSMv2_LCNet_k400_16frames_uniform</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PPTSMv2_LCNet_k400_16frames_uniform_infer.tar">推理模型</a>/<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPTSMv2_LCNet_k400_16frames_uniform_pretrained.pdparams">训练模型</a></td>
+<td>73.11</td>
+<td>22.5 M</td>
+</tr>
+
+</table>
+
+<p><b>注:以上精度指标为 <a href="https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/dataset/k400.md">K400</a> 验证集 Top1 Acc。所有模型 GPU 推理耗时基于 NVIDIA Tesla T4 机器,精度类型为 FP32, CPU 推理速度基于 Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz,线程数为8,精度类型为 FP32。</b></p></details>
+
+## 2. 快速开始
+
+PaddleX 支持在本地使用命令行或 Python 体验产线的效果。
+
+在本地使用通用视频分类产线前,请确保您已经按照PaddleX本地安装教程完成了PaddleX的wheel包安装。
+
+#### 2.1 命令行方式体验
+一行命令即可快速体验视频分类产线效果,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4),并将 `--input` 替换为本地路径,进行预测
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4 --device gpu:0
+```
+参数说明:
+
+```
+--pipeline:产线名称,此处为视频分类产线
+--input:待处理的输入视频的本地路径或URL
+--device 使用的GPU序号(例如gpu:0表示使用第0块GPU,gpu:1,2表示使用第1、2块GPU),也可选择使用CPU(--device cpu)
+```
+
+在执行上述命令时,加载的是默认的视频分类产线配置文件,若您需要自定义配置文件,可执行如下命令获取:
+
+<details><summary> 👉点击展开</summary>
+
+<pre><code>paddlex --get_pipeline_config video_classification
+</code></pre>
+<p>执行后,视频分类产线配置文件将被保存在当前路径。若您希望自定义保存位置,可执行如下命令(假设自定义保存位置为 <code>./my_path</code> ):</p>
+<pre><code>paddlex --get_pipeline_config video_classification --save_path ./my_path
+</code></pre>
+<p>获取产线配置文件后,可将 <code>--pipeline</code> 替换为配置文件保存路径,即可使配置文件生效。例如,若配置文件保存路径为 <code>./video_classification.yaml</code>,只需执行:</p>
+<pre><code class="language-bash">paddlex --pipeline ./video_classification.yaml --input general_video_classification_001.mp4 --device gpu:0
+</code></pre>
+<p>其中,<code>--model</code>、<code>--device</code> 等参数无需指定,将使用配置文件中的参数。若依然指定了参数,将以指定的参数为准。</p></details>
+
+运行后,得到的结果为:
+
+```
+{'input_path': 'general_video_classification_001.mp4', 'class_ids': [0], 'scores': array([0.91996]), 'label_names': ['abseiling']}
+```
+
+<img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/pipelines/video_classification/02.jpg">
+
+可视化视频默认不进行保存,您可以通过 `--save_path` 自定义保存路径,随后所有结果将被保存在指定路径下。
+
+#### 2.2 Python脚本方式集成
+几行代码即可完成产线的快速推理,以通用视频分类产线为例:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="video_classification")
+
+output = pipeline.predict("general_video_classification_001.mp4")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_video("./output/") ## 保存结果可视化视频
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+得到的结果与命令行方式相同。
+
+在上述 Python 脚本中,执行了如下几个步骤:
+
+(1)调用 `create_pipeline` 实例化产线对象:具体参数说明如下:
+
+<table>
+<thead>
+<tr>
+<th>参数</th>
+<th>参数说明</th>
+<th>参数类型</th>
+<th>默认值</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>pipeline</code></td>
+<td>产线名称或是产线配置文件路径。如为产线名称,则必须为 PaddleX 所支持的产线。</td>
+<td><code>str</code></td>
+<td>无</td>
+</tr>
+<tr>
+<td><code>device</code></td>
+<td>产线模型推理设备。支持:“gpu”,“cpu”。</td>
+<td><code>str</code></td>
+<td><code>gpu</code></td>
+</tr>
+<tr>
+<td><code>use_hpip</code></td>
+<td>是否启用高性能推理,仅当该产线支持高性能推理时可用。</td>
+<td><code>bool</code></td>
+<td><code>False</code></td>
+</tr>
+</tbody>
+</table>
+(2)调用视频分类产线对象的 `predict` 方法进行推理预测:`predict` 方法参数为`x`,用于输入待预测数据,支持多种输入方式,具体示例如下:
+
+<table>
+<thead>
+<tr>
+<th>参数类型</th>
+<th>参数说明</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Python Var</td>
+<td>支持直接传入Python变量,如numpy.ndarray表示的视频数据。</td>
+</tr>
+<tr>
+<td>str</td>
+<td>支持传入待预测数据文件路径,如视频文件的本地路径:<code>/root/data/video.mp4</code>。</td>
+</tr>
+<tr>
+<td>str</td>
+<td>支持传入待预测数据文件URL,如视频文件的网络URL:<a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4">示例</a>。</td>
+</tr>
+<tr>
+<td>str</td>
+<td>支持传入本地目录,该目录下需包含待预测数据文件,如本地路径:<code>/root/data/</code>。</td>
+</tr>
+<tr>
+<td>dict</td>
+<td>支持传入字典类型,字典的key需与具体任务对应,如视频分类任务对应"video",字典的val支持上述类型数据,例如:<code>{"video": "/root/data1"}</code>。</td>
+</tr>
+<tr>
+<td>list</td>
+<td>支持传入列表,列表元素需为上述类型数据,如<code>[numpy.ndarray, numpy.ndarray]</code>,<code>["/root/data/video1.mp4", "/root/data/video2.mp4"]</code>,<code>["/root/data1", "/root/data2"]</code>,<code>[{"video": "/root/data1"}, {"video": "/root/data2/video.mp4"}]</code>。</td>
+</tr>
+</tbody>
+</table>
+(3)调用`predict`方法获取预测结果:`predict` 方法为`generator`,因此需要通过迭代获取预测结果,`predict`方法以batch为单位对数据进行预测,因此预测结果为list形式表示的一组预测结果。
+
+(4)对预测结果进行处理:每个样本的预测结果均为`dict`类型,且支持打印,或保存为文件,支持保存的类型与具体产线相关,如:
+
+<table>
+<thead>
+<tr>
+<th>方法</th>
+<th>说明</th>
+<th>方法参数</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>print</td>
+<td>打印结果到终端</td>
+<td><code>- format_json</code>:bool类型,是否对输出内容进行使用json缩进格式化,默认为True;<br/><code>- indent</code>:int类型,json格式化设置,仅当format_json为True时有效,默认为4;<br/><code>- ensure_ascii</code>:bool类型,json格式化设置,仅当format_json为True时有效,默认为False;</td>
+</tr>
+<tr>
+<td>save_to_json</td>
+<td>将结果保存为json格式的文件</td>
+<td><code>- save_path</code>:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;<br/><code>- indent</code>:int类型,json格式化设置,默认为4;<br/><code>- ensure_ascii</code>:bool类型,json格式化设置,默认为False;</td>
+</tr>
+<tr>
+<td>save_to_video</td>
+<td>将结果保存为视频格式的文件</td>
+<td><code>- save_path</code>:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;</td>
+</tr>
+</tbody>
+</table>
+若您获取了配置文件,即可对视频分类产线各项配置进行自定义,只需要修改 `create_pipeline` 方法中的 `pipeline` 参数值为产线配置文件路径即可。
+
+例如,若您的配置文件保存在 `./my_path/video_classification.yaml` ,则只需执行:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/video_classification.yaml")
+output = pipeline.predict("general_video_classification_001.mp4")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_video("./output/") ## 保存结果可视化视频
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+## 3. 开发集成/部署
+如果产线可以达到您对产线推理速度和精度的要求,您可以直接进行开发集成/部署。
+
+若您需要将产线直接应用在您的Python项目中,可以参考 [2.2 Python脚本方式](#22-python脚本方式集成)中的示例代码。
+
+此外,PaddleX 也提供了其他三种部署方式,详细说明如下:
+
+🚀 <b>高性能推理</b>:在实际生产环境中,许多应用对部署策略的性能指标(尤其是响应速度)有着较严苛的标准,以确保系统的高效运行与用户体验的流畅性。为此,PaddleX 提供高性能推理插件,旨在对模型推理及前后处理进行深度性能优化,实现端到端流程的显著提速,详细的高性能推理流程请参考[PaddleX高性能推理指南](../../../pipeline_deploy/high_performance_inference.md)。
+
+☁️ <b>服务化部署</b>:服务化部署是实际生产环境中常见的一种部署形式。通过将推理功能封装为服务,客户端可以通过网络请求来访问这些服务,以获取推理结果。PaddleX 支持用户以低成本实现产线的服务化部署,详细的服务化部署流程请参考[PaddleX服务化部署指南](../../../pipeline_deploy/service_deploy.md)。
+
+下面是API参考和多语言服务调用示例:
+
+<details><summary>API参考</summary>
+
+<p>对于服务提供的主要操作:</p>
+<ul>
+<li>HTTP请求方法为POST。</li>
+<li>请求体和响应体均为JSON数据(JSON对象)。</li>
+<li>当请求处理成功时,响应状态码为<code>200</code>,响应体的属性如下:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>errorCode</code></td>
+<td><code>integer</code></td>
+<td>错误码。固定为<code>0</code>。</td>
+</tr>
+<tr>
+<td><code>errorMsg</code></td>
+<td><code>string</code></td>
+<td>错误说明。固定为<code>"Success"</code>。</td>
+</tr>
+</tbody>
+</table>
+<p>响应体还可能有<code>result</code>属性,类型为<code>object</code>,其中存储操作结果信息。</p>
+<ul>
+<li>当请求处理未成功时,响应体的属性如下:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>errorCode</code></td>
+<td><code>integer</code></td>
+<td>错误码。与响应状态码相同。</td>
+</tr>
+<tr>
+<td><code>errorMsg</code></td>
+<td><code>string</code></td>
+<td>错误说明。</td>
+</tr>
+</tbody>
+</table>
+<p>服务提供的主要操作如下:</p>
+<ul>
+<li><b><code>infer</code></b></li>
+</ul>
+<p>对视频进行分类。</p>
+<p><code>POST /video-classification</code></p>
+<ul>
+<li>请求体的属性如下:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+<th>是否必填</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>video</code></td>
+<td><code>string</code></td>
+<td>服务可访问的视频文件的URL或视频文件内容的Base64编码结果。</td>
+<td>是</td>
+</tr>
+<tr>
+<td><code>inferenceParams</code></td>
+<td><code>object</code></td>
+<td>推理参数。</td>
+<td>否</td>
+</tr>
+</tbody>
+</table>
+<p><code>inferenceParams</code>的属性如下:</p>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+<th>是否必填</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>topK</code></td>
+<td><code>integer</code></td>
+<td>结果中将只保留得分最高的<code>topK</code>个类别。</td>
+<td>否</td>
+</tr>
+</tbody>
+</table>
+<ul>
+<li>请求处理成功时,响应体的<code>result</code>具有如下属性:</li>
+</ul>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>categories</code></td>
+<td><code>array</code></td>
+<td>视频类别信息。</td>
+</tr>
+<tr>
+<td><code>video</code></td>
+<td><code>string</code></td>
+<td>视频分类结果视频,使用Base64编码。</td>
+</tr>
+</tbody>
+</table>
+<p><code>categories</code>中的每个元素为一个<code>object</code>,具有如下属性:</p>
+<table>
+<thead>
+<tr>
+<th>名称</th>
+<th>类型</th>
+<th>含义</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>id</code></td>
+<td><code>integer</code></td>
+<td>类别ID。</td>
+</tr>
+<tr>
+<td><code>name</code></td>
+<td><code>string</code></td>
+<td>类别名称。</td>
+</tr>
+<tr>
+<td><code>score</code></td>
+<td><code>number</code></td>
+<td>类别得分。</td>
+</tr>
+</tbody>
+</table>
+<p><code>result</code>示例如下:</p>
+<pre><code class="language-json">{
+&quot;categories&quot;: [
+{
+&quot;id&quot;: 5,
+&quot;name&quot;: &quot;兔子&quot;,
+&quot;score&quot;: 0.93
+}
+],
+&quot;video&quot;: &quot;xxxxxx&quot;
+}
+</code></pre></details>
+
+<details><summary>多语言调用服务示例</summary>
+
+<details>
+<summary>Python</summary>
+
+
+<pre><code class="language-python">import base64
+import requests
+
+API_URL = &quot;http://localhost:8080/video-classification&quot; # 服务URL
+video_path = &quot;./demo.mp4&quot;
+output_video_path = &quot;./out.mp4&quot;
+
+# 对本地视频进行Base64编码
+with open(video_path, &quot;rb&quot;) as file:
+    video_bytes = file.read()
+    video_data = base64.b64encode(video_bytes).decode(&quot;ascii&quot;)
+
+payload = {&quot;video&quot;: video_data}  # Base64编码的文件内容或者视频URL
+
+# 调用API
+response = requests.post(API_URL, json=payload)
+
+# 处理接口返回数据
+assert response.status_code == 200
+result = response.json()[&quot;result&quot;]
+with open(output_video_path, &quot;wb&quot;) as file:
+    file.write(base64.b64decode(result[&quot;video&quot;]))
+print(f&quot;Output video saved at {output_video_path}&quot;)
+print(&quot;\nCategories:&quot;)
+print(result[&quot;categories&quot;])
+</code></pre></details>
+<details><summary>C++</summary>
+
+<pre><code class="language-cpp">#include &lt;iostream&gt;
+#include &quot;cpp-httplib/httplib.h&quot; // https://github.com/Huiyicc/cpp-httplib
+#include &quot;nlohmann/json.hpp&quot; // https://github.com/nlohmann/json
+#include &quot;base64.hpp&quot; // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client(&quot;localhost:8080&quot;);
+    const std::string videoPath = &quot;./demo.mp4&quot;;
+    const std::string outputImagePath = &quot;./out.mp4&quot;;
+
+    httplib::Headers headers = {
+        {&quot;Content-Type&quot;, &quot;application/json&quot;}
+    };
+
+    // 对本地视频进行Base64编码
+    std::ifstream file(videoPath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector&lt;char&gt; buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr &lt;&lt; &quot;Error reading file.&quot; &lt;&lt; std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast&lt;const char*&gt;(buffer.data()), buffer.size());
+    std::string encodedImage = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj[&quot;video&quot;] = encodedImage;
+    std::string body = jsonObj.dump();
+
+    // 调用API
+    auto response = client.Post(&quot;/video-classification&quot;, headers, body, &quot;application/json&quot;);
+    // 处理接口返回数据
+    if (response &amp;&amp; response-&gt;status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response-&gt;body);
+        auto result = jsonResponse[&quot;result&quot;];
+
+        encodedImage = result[&quot;video&quot;];
+        std::string decodedString = base64::from_base64(encodedImage);
+        std::vector&lt;unsigned char&gt; decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputImagePath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast&lt;char*&gt;(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout &lt;&lt; &quot;Output video saved at &quot; &lt;&lt; outputImagePath &lt;&lt; std::endl;
+        } else {
+            std::cerr &lt;&lt; &quot;Unable to open file for writing: &quot; &lt;&lt; outputImagePath &lt;&lt; std::endl;
+        }
+
+        auto categories = result[&quot;categories&quot;];
+        std::cout &lt;&lt; &quot;\nCategories:&quot; &lt;&lt; std::endl;
+        for (const auto&amp; category : categories) {
+            std::cout &lt;&lt; category &lt;&lt; std::endl;
+        }
+    } else {
+        std::cout &lt;&lt; &quot;Failed to send HTTP request.&quot; &lt;&lt; std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+</code></pre></details>
+
+<details><summary>Java</summary>
+
+<pre><code class="language-java">import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = &quot;http://localhost:8080/video-classification&quot;; // 服务URL
+        String videoPath = &quot;./demo.mp4&quot;; // 本地视频
+        String outputImagePath = &quot;./out.mp4&quot;; // 输出视频
+
+        // 对本地视频进行Base64编码
+        File file = new File(videoPath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String videoData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put(&quot;video&quot;, videoData); // Base64编码的文件内容或者视频URL
+
+        // 创建 OkHttpClient 实例
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.Companion.get(&quot;application/json; charset=utf-8&quot;);
+        RequestBody body = RequestBody.Companion.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        // 调用API并处理接口返回数据
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get(&quot;result&quot;);
+                String base64Image = result.get(&quot;video&quot;).asText();
+                JsonNode categories = result.get(&quot;categories&quot;);
+
+                byte[] videoBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputImagePath)) {
+                    fos.write(videoBytes);
+                }
+                System.out.println(&quot;Output video saved at &quot; + outputImagePath);
+                System.out.println(&quot;\nCategories: &quot; + categories.toString());
+            } else {
+                System.err.println(&quot;Request failed with code: &quot; + response.code());
+            }
+        }
+    }
+}
+</code></pre></details>
+
+<details><summary>Go</summary>
+
+<pre><code class="language-go">package main
+
+import (
+    &quot;bytes&quot;
+    &quot;encoding/base64&quot;
+    &quot;encoding/json&quot;
+    &quot;fmt&quot;
+    &quot;io/ioutil&quot;
+    &quot;net/http&quot;
+)
+
+func main() {
+    API_URL := &quot;http://localhost:8080/video-classification&quot;
+    videoPath := &quot;./demo.mp4&quot;
+    outputImagePath := &quot;./out.mp4&quot;
+
+    // 对本地视频进行Base64编码
+    videoBytes, err := ioutil.ReadFile(videoPath)
+    if err != nil {
+        fmt.Println(&quot;Error reading video file:&quot;, err)
+        return
+    }
+    videoData := base64.StdEncoding.EncodeToString(videoBytes)
+
+    payload := map[string]string{&quot;video&quot;: videoData} // Base64编码的文件内容或者视频URL
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println(&quot;Error marshaling payload:&quot;, err)
+        return
+    }
+
+    // 调用API
+    client := &amp;http.Client{}
+    req, err := http.NewRequest(&quot;POST&quot;, API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println(&quot;Error creating request:&quot;, err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println(&quot;Error sending request:&quot;, err)
+        return
+    }
+    defer res.Body.Close()
+
+    // 处理接口返回数据
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println(&quot;Error reading response body:&quot;, err)
+        return
+    }
+    type Response struct {
+        Result struct {
+            Image      string   `json:&quot;video&quot;`
+            Categories []map[string]interface{} `json:&quot;categories&quot;`
+        } `json:&quot;result&quot;`
+    }
+    var respData Response
+    err = json.Unmarshal([]byte(string(body)), &amp;respData)
+    if err != nil {
+        fmt.Println(&quot;Error unmarshaling response body:&quot;, err)
+        return
+    }
+
+    outputImageData, err := base64.StdEncoding.DecodeString(respData.Result.Image)
+    if err != nil {
+        fmt.Println(&quot;Error decoding base64 video data:&quot;, err)
+        return
+    }
+    err = ioutil.WriteFile(outputImagePath, outputImageData, 0644)
+    if err != nil {
+        fmt.Println(&quot;Error writing video to file:&quot;, err)
+        return
+    }
+    fmt.Printf(&quot;Output video saved at %s\n&quot;, outputImagePath)
+    fmt.Println(&quot;\nCategories:&quot;)
+    for _, category := range respData.Result.Categories {
+        fmt.Println(category)
+    }
+}
+</code></pre></details>
+
+<details><summary>C#</summary>
+
+<pre><code class="language-csharp">using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = &quot;http://localhost:8080/video-classification&quot;;
+    static readonly string videoPath = &quot;./demo.mp4&quot;;
+    static readonly string outputImagePath = &quot;./out.mp4&quot;;
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        // 对本地视频进行Base64编码
+        byte[] videoBytes = File.ReadAllBytes(videoPath);
+        string video_data = Convert.ToBase64String(videoBytes);
+
+        var payload = new JObject{ { &quot;video&quot;, video_data } }; // Base64编码的文件内容或者视频URL
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, &quot;application/json&quot;);
+
+        // 调用API
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        // 处理接口返回数据
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse[&quot;result&quot;][&quot;video&quot;].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputImagePath, outputImageBytes);
+        Console.WriteLine($&quot;Output video saved at {outputImagePath}&quot;);
+        Console.WriteLine(&quot;\nCategories:&quot;);
+        Console.WriteLine(jsonResponse[&quot;result&quot;][&quot;categories&quot;].ToString());
+    }
+}
+</code></pre></details>
+
+<details><summary>Node.js</summary>
+
+<pre><code class="language-js">const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/video-classification'
+const videoPath = './demo.mp4'
+const outputImagePath = &quot;./out.mp4&quot;;
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'video': encodeImageToBase64(videoPath)  // Base64编码的文件内容或者视频URL
+  })
+};
+
+// 对本地视频进行Base64编码
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+// 调用API
+axios.request(config)
+.then((response) =&gt; {
+    // 处理接口返回数据
+    const result = response.data[&quot;result&quot;];
+    const videoBuffer = Buffer.from(result[&quot;video&quot;], 'base64');
+    fs.writeFile(outputImagePath, videoBuffer, (err) =&gt; {
+      if (err) throw err;
+      console.log(`Output video saved at ${outputImagePath}`);
+    });
+    console.log(&quot;\nCategories:&quot;);
+    console.log(result[&quot;categories&quot;]);
+})
+.catch((error) =&gt; {
+  console.log(error);
+});
+</code></pre></details>
+<details><summary>PHP</summary>
+
+<pre><code class="language-php">&lt;?php
+
+$API_URL = &quot;http://localhost:8080/video-classification&quot;; // 服务URL
+$video_path = &quot;./demo.mp4&quot;;
+$output_video_path = &quot;./out.mp4&quot;;
+
+// 对本地视频进行Base64编码
+$video_data = base64_encode(file_get_contents($video_path));
+$payload = array(&quot;video&quot; =&gt; $video_data); // Base64编码的文件内容或者视频URL
+
+// 调用API
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+// 处理接口返回数据
+$result = json_decode($response, true)[&quot;result&quot;];
+file_put_contents($output_video_path, base64_decode($result[&quot;video&quot;]));
+echo &quot;Output video saved at &quot; . $output_video_path . &quot;\n&quot;;
+echo &quot;\nCategories:\n&quot;;
+print_r($result[&quot;categories&quot;]);
+?&gt;
+</code></pre></details>
+</details>
+<br/>
+
+📱 <b>端侧部署</b>:端侧部署是一种将计算和数据处理功能放在用户设备本身上的方式,设备可以直接处理数据,而不需要依赖远程的服务器。PaddleX 支持将模型部署在 Android 等端侧设备上,详细的端侧部署流程请参考[PaddleX端侧部署指南](../../../pipeline_deploy/edge_deploy.md)。
+您可以根据需要选择合适的方式部署模型产线,进而进行后续的 AI 应用集成。
+
+## 4. 二次开发
+如果通用视频分类产线提供的默认模型权重在您的场景中,精度或速度不满意,您可以尝试利用<b>您自己拥有的特定领域或应用场景的数据</b>对现有模型进行进一步的<b>微调</b>,以提升通用视频分类产线的在您的场景中的识别效果。
+
+### 4.1 模型微调
+由于通用视频分类产线包含视频分类模块,如果模型产线的效果不及预期,那么您需要参考[视频分类模块开发教程](../../../module_usage/tutorials/video_modules/video_classification.md)中的[二次开发](../../../module_usage/tutorials/video_modules/video_classification.md#四二次开发)章节,使用您的私有数据集对视频分类模型进行微调。
+
+### 4.2 模型应用
+当您使用私有数据集完成微调训练后,可获得本地模型权重文件。
+
+若您需要使用微调后的模型权重,只需对产线配置文件做修改,将微调后模型权重的本地路径替换至产线配置文件中的对应位置即可:
+
+```yaml
+......
+Pipeline:
+  model: PPTSMv2_LCNet_k400_8frames_uniform #可修改为微调后模型的本地路径
+  device: "gpu"
+  batch_size: 1
+......
+```
+随后, 参考本地体验中的命令行方式或 Python 脚本方式,加载修改后的产线配置文件即可。
+
+##  5. 多硬件支持
+PaddleX 支持英伟达 GPU、昆仑芯 XPU、昇腾 NPU和寒武纪 MLU 等多种主流硬件设备,<b>仅需修改 `--device` 参数</b>即可完成不同硬件之间的无缝切换。
+
+例如,您使用英伟达 GPU 进行视频分类产线的推理,使用的命令为:
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4 --device gpu:0
+```
+此时,若您想将硬件切换为昇腾 NPU,仅需将 `--device` 修改为 `npu:0` 即可:
+
+```bash
+paddlex --pipeline video_classification --input general_video_classification_001.mp4 --device npu:0
+```
+若您想在更多种类的硬件上使用通用视频分类产线,请参考[PaddleX多硬件使用指南](../../../other_devices_support/multi_devices_use_guide.md)。

+ 1 - 0
paddlex/__init__.py

@@ -20,6 +20,7 @@ sys.modules["lazy_paddle"] = paddle
 
 import os
 
+
 from . import version
 from .modules import (
     build_dataset_checker,

+ 42 - 0
paddlex/configs/modules/video_classification/PP-TSM-R50_8frames_uniform.yaml

@@ -0,0 +1,42 @@
+Global:
+  model: PP-TSM-R50_8frames_uniform
+  mode: check_dataset # check_dataset/train/evaluate/predict
+  dataset_dir: "./dataset/k400_examples"
+  device: gpu:0,1,2,3
+  output: "output"
+
+CheckDataset:
+  convert: 
+    enable: False
+    src_dataset_type: null
+  split: 
+    enable: False
+    train_percent: null
+    val_percent: null
+
+Train:
+  num_classes: 5
+  epochs_iters: 20
+  batch_size: 16
+  learning_rate: 0.01
+  pretrain_weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSM-R50_8frames_uniform_pretrained.pdparams
+  warmup_steps: 4
+  resume_path: null
+  log_interval: 10
+  eval_interval: 1
+  save_interval: 1
+
+Evaluate:
+  weight_path: "output/best_model/best_model.pdparams"
+  log_interval: 1
+  batch_size: 2
+
+Export:
+  weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSM-R50_8frames_uniform_pretrained.pdparams
+
+Predict:
+  batch_size: 1
+  model_dir: "output/best_model/inference"
+  input: "https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4"
+  kernel_option:
+    run_mode: paddle

+ 42 - 0
paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_16frames_uniform.yaml

@@ -0,0 +1,42 @@
+Global:
+  model: PP-TSMv2-LCNetV2_16frames_uniform
+  mode: check_dataset # check_dataset/train/evaluate/predict
+  dataset_dir: "./dataset/k400_examples"
+  device: gpu:0,1,2,3
+  output: "output"
+
+CheckDataset:
+  convert: 
+    enable: False
+    src_dataset_type: null
+  split: 
+    enable: False
+    train_percent: null
+    val_percent: null
+
+Train:
+  num_classes: 5
+  epochs_iters: 20
+  batch_size: 16
+  learning_rate: 0.01
+  pretrain_weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_16frames_uniform_pretrained.pdparams
+  warmup_steps: 4
+  resume_path: null
+  log_interval: 10
+  eval_interval: 1
+  save_interval: 1
+
+Evaluate:
+  weight_path: "output/best_model/best_model.pdparams"
+  log_interval: 1
+  batch_size: 2
+
+Export:
+  weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_16frames_uniform_pretrained.pdparams
+
+Predict:
+  batch_size: 1
+  model_dir: "output/best_model/inference"
+  input: "https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4"
+  kernel_option:
+    run_mode: paddle

+ 42 - 0
paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_8frames_uniform.yaml

@@ -0,0 +1,42 @@
+Global:
+  model: PP-TSMv2-LCNetV2_8frames_uniform
+  mode: check_dataset # check_dataset/train/evaluate/predict
+  dataset_dir: "./dataset/k400_examples"
+  device: gpu:0,1,2,3
+  output: "output"
+
+CheckDataset:
+  convert: 
+    enable: False
+    src_dataset_type: null
+  split: 
+    enable: False
+    train_percent: null
+    val_percent: null
+
+Train:
+  num_classes: 5
+  epochs_iters: 20
+  batch_size: 16
+  learning_rate: 0.01
+  pretrain_weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_8frames_uniform_pretrained.pdparams
+  warmup_steps: 4
+  resume_path: null
+  log_interval: 10
+  eval_interval: 1
+  save_interval: 1
+
+Evaluate:
+  weight_path: "output/best_model/best_model.pdparams"
+  log_interval: 1
+  batch_size: 2
+
+Export:
+  weight_path: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_8frames_uniform_pretrained.pdparams
+
+Predict:
+  batch_size: 1
+  model_dir: "output/best_model/inference"
+  input: "https://paddle-model-ecology.bj.bcebos.com/paddlex/videos/demo_video/general_video_classification_001.mp4"
+  kernel_option:
+    run_mode: paddle

+ 1 - 0
paddlex/inference/common/batch_sampler/__init__.py

@@ -14,4 +14,5 @@
 
 from .base_batch_sampler import BaseBatchSampler
 from .image_batch_sampler import ImageBatchSampler
+from .video_batch_sampler import VideoBatchSampler
 from .ts_batch_sampler import TSBatchSampler

+ 94 - 0
paddlex/inference/common/batch_sampler/video_batch_sampler.py

@@ -0,0 +1,94 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import ast
+from pathlib import Path
+import numpy as np
+
+from ....utils import logging
+from ....utils.download import download
+from ....utils.cache import CACHE_DIR
+from .base_batch_sampler import BaseBatchSampler
+
+
+class VideoBatchSampler(BaseBatchSampler):
+
+    SUFFIX = ["mp4", "avi", "mkv"]
+
+    # XXX: auto download for url
+    def _download_from_url(self, in_path):
+        file_name = Path(in_path).name
+        save_path = Path(CACHE_DIR) / "predict_input" / file_name
+        download(in_path, save_path, overwrite=True)
+        return save_path.as_posix()
+
+    def _get_files_list(self, fp):
+        file_list = []
+        if fp is None or not os.path.exists(fp):
+            raise Exception(f"Not found any video file in path: {fp}")
+        if os.path.isfile(fp) and fp.split(".")[-1] in self.SUFFIX:
+            file_list.append(fp)
+        elif os.path.isdir(fp):
+            for root, dirs, files in os.walk(fp):
+                for single_file in files:
+                    if single_file.split(".")[-1] in self.SUFFIX:
+                        file_list.append(os.path.join(root, single_file))
+        if len(file_list) == 0:
+            raise Exception("Not found any file in {}".format(fp))
+        file_list = sorted(file_list)
+        return file_list
+
+    def sample(self, inputs):
+        if not isinstance(inputs, list):
+            inputs = [inputs]
+
+        batch = []
+        for input in inputs:
+            if isinstance(input, str):
+                file_path = (
+                    self._download_from_url(input)
+                    if input.startswith("http")
+                    else input
+                )
+                file_list = self._get_files_list(file_path)
+                for file_path in file_list:
+                    batch.append(file_path)
+                    if len(batch) == self.batch_size:
+                        yield batch
+                        batch = []
+            else:
+                logging.warning(
+                    f"Not supported input data type! Only `str` are supported! So has been ignored: {input}."
+                )
+        if len(batch) > 0:
+            yield batch
+
+    def _rand_batch(self, data_size):
+        def parse_size(s):
+            res = ast.literal_eval(s)
+            if isinstance(res, int):
+                return (res, res)
+            else:
+                assert isinstance(res, (tuple, list))
+                assert len(res) == 2
+                assert all(isinstance(item, int) for item in res)
+                return res
+
+        size = parse_size(data_size)
+        rand_batch = [
+            np.random.randint(0, 256, (*size, 3), dtype=np.uint8)
+            for _ in range(self.batch_size)
+        ]
+        return rand_batch

+ 1 - 0
paddlex/inference/common/reader/__init__.py

@@ -13,4 +13,5 @@
 # limitations under the License.
 
 from .image_reader import ReadImage
+from .video_reader import ReadVideo
 from .ts_reader import ReadTS

+ 42 - 0
paddlex/inference/common/reader/video_reader.py

@@ -0,0 +1,42 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import cv2
+
+from ...utils.io import VideoReader
+
+
+class ReadVideo:
+    """Load video from the file."""
+
+    def __init__(self, backend="opencv", num_seg=8, seg_len=1, sample_type=None):
+
+        super().__init__()
+        self._video_reader = VideoReader(
+            backend=backend, num_seg=num_seg, seg_len=seg_len, sample_type=sample_type
+        )
+
+    def __call__(self, videos):
+        """apply"""
+        return [self._read(video) for video in videos]
+
+    def _read(self, file_path):
+        return self._read_video(file_path)
+
+    def _read_video(self, video_path):
+        blob = list(self._video_reader.read(video_path))
+        if not blob:
+            raise Exception(f"Failed to read video: {video_path}")
+        return blob
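
A short sketch of how `ReadVideo` is consumed by the predictor below (decord backend with uniform sampling; the video path is hypothetical):

    from paddlex.inference.common.reader import ReadVideo

    reader = ReadVideo(backend="decord", num_seg=8, seg_len=1, sample_type="uniform")
    batch_frames = reader(["videos/demo.mp4"])
    # batch_frames[0] is a list of 8 sampled frames, each an HxWx3 uint8 numpy array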

+ 2 - 0
paddlex/inference/common/result/__init__.py

@@ -15,6 +15,7 @@
 from .base_result import BaseResult
 from .base_cv_result import BaseCVResult
 from .base_ts_result import BaseTSResult
+from .base_video_result import BaseVideoResult
 from .mixin import (
     StrMixin,
     JsonMixin,
@@ -23,4 +24,5 @@ from .mixin import (
     CSVMixin,
     HtmlMixin,
     XlsxMixin,
+    VideoMixin,
 )

+ 41 - 0
paddlex/inference/common/result/base_video_result.py

@@ -0,0 +1,41 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_result import BaseResult
+from .mixin import StrMixin, JsonMixin, ImgMixin, VideoMixin
+from ...utils.io import VideoReader, VideoWriter
+
+
+class BaseVideoResult(BaseResult, StrMixin, JsonMixin, VideoMixin):
+    """Base class for computer vision results."""
+
+    INPUT_IMG_KEY = "input_img"
+
+    def __init__(self, data: dict) -> None:
+        """
+        Initialize the BaseVideoResult.
+
+        Args:
+            data (dict): The initial data.
+
+        Raises:
+            AssertionError: If the required key (`BaseVideoResult.INPUT_IMG_KEY`) is not found in the data.
+        """
+        self._video_reader = VideoReader(backend="decord")
+        self._video_writer = VideoWriter(backend="opencv")
+
+        super().__init__(data)
+        StrMixin.__init__(self)
+        JsonMixin.__init__(self)
+        VideoMixin.__init__(self, "opencv")

+ 24 - 0
paddlex/inference/common/result/mixin.py

@@ -31,6 +31,7 @@ from ...utils.io import (
     HtmlWriter,
     XlsxWriter,
     TextWriter,
+    VideoWriter,
 )
 
 
@@ -443,3 +444,26 @@ class XlsxMixin:
         if not str(save_path).endswith(".xlsx"):
             save_path = Path(save_path) / f"{Path(self['input_path']).stem}.xlsx"
         self._xlsx_writer.write(save_path.as_posix(), self.xlsx, *args, **kwargs)
+
+
+class VideoMixin:
+    def __init__(self, backend="opencv", *args, **kwargs):
+        self._video_writer = VideoWriter(backend=backend, *args, **kwargs)
+        self._save_funcs.append(self.save_to_video)
+
+    @abstractmethod
+    def _to_video(self):
+        raise NotImplementedError
+
+    @property
+    def video(self):
+        video = self._to_video()
+        return video
+
+    def save_to_video(self, save_path, *args, **kwargs):
+        if not str(save_path).lower().endswith((".mp4", ".avi", ".mkv")):
+            fp = Path(self["input_path"])
+            save_path = Path(save_path) / f"{fp.stem}{fp.suffix}"
+        _save_list_data(
+            self._video_writer.write, save_path, self.video, *args, **kwargs
+        )

+ 1 - 0
paddlex/inference/models_new/__init__.py

@@ -38,6 +38,7 @@ from .image_multilabel_classification import MLClasPredictor
 # from .general_recognition import ShiTuRecPredictor
 # from .anomaly_detection import UadPredictor
 # from .face_recognition import FaceRecPredictor
+from .video_classification import VideoClasPredictor
 
 
 def _create_hp_predictor(

+ 15 - 0
paddlex/inference/models_new/video_classification/__init__.py

@@ -0,0 +1,15 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .predictor import VideoClasPredictor

+ 141 - 0
paddlex/inference/models_new/video_classification/predictor.py

@@ -0,0 +1,141 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Union, Dict, List, Tuple
+from ....utils.func_register import FuncRegister
+from ....modules.video_classification.model_list import MODELS
+from ...common.batch_sampler import VideoBatchSampler
+from ...common.reader import ReadVideo
+from ..common import (
+    ToBatch,
+    StaticInfer,
+)
+from ..base import BasicPredictor
+from .processors import Scale, CenterCrop, Image2Array, NormalizeVideo, VideoClasTopk
+from .result import TopkVideoResult
+
+
+class VideoClasPredictor(BasicPredictor):
+
+    entities = MODELS
+
+    _FUNC_MAP = {}
+    register = FuncRegister(_FUNC_MAP)
+
+    def __init__(self, topk: Union[int, None] = None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.topk = topk
+        self.pre_tfs, self.infer, self.post_op = self._build()
+
+    def _build_batch_sampler(self):
+        return VideoBatchSampler()
+
+    def _get_result_class(self):
+        return TopkVideoResult
+
+    def _build(self):
+        pre_tfs = {}
+        for cfg in self.config["PreProcess"]["transform_ops"]:
+            tf_key = list(cfg.keys())[0]
+            assert tf_key in self._FUNC_MAP
+            func = self._FUNC_MAP[tf_key]
+            args = cfg.get(tf_key, {})
+            name, op = func(self, **args) if args else func(self)
+            if op:
+                pre_tfs[name] = op
+        pre_tfs["ToBatch"] = ToBatch()
+
+        infer = StaticInfer(
+            model_dir=self.model_dir,
+            model_prefix=self.MODEL_FILE_PREFIX,
+            option=self.pp_option,
+        )
+
+        post_op = {}
+        for key in self.config["PostProcess"]:
+            func = self._FUNC_MAP.get(key)
+            args = self.config["PostProcess"].get(key, {})
+            name, op = func(self, **args) if args else func(self)
+            post_op[name] = op
+
+        return pre_tfs, infer, post_op
+
+    def process(self, batch_data, topk: Union[int, None] = None):
+        batch_raw_videos = self.pre_tfs["ReadVideo"](videos=batch_data)
+        batch_videos = self.pre_tfs["Scale"](videos=batch_raw_videos)
+        batch_videos = self.pre_tfs["CenterCrop"](videos=batch_videos)
+        batch_videos = self.pre_tfs["Image2Array"](videos=batch_videos)
+        x = self.pre_tfs["NormalizeVideo"](videos=batch_videos)
+
+        batch_preds = self.infer(x=x)
+
+        batch_class_ids, batch_scores, batch_label_names = self.post_op["Topk"](
+            batch_preds, topk=topk or self.topk
+        )
+        return {
+            "input_path": batch_data,
+            "class_ids": batch_class_ids,
+            "scores": batch_scores,
+            "label_names": batch_label_names,
+        }
+
+    @register("ReadVideo")
+    def build_readvideo(
+        self,
+        num_seg=8,
+        target_size=224,
+        seg_len=1,
+        sample_type=None,
+    ):
+        return "ReadVideo", ReadVideo(
+            backend="decord",
+            num_seg=num_seg,
+            seg_len=seg_len,
+            sample_type=sample_type,
+        )
+
+    @register("Scale")
+    def build_scale(self, short_size=224):
+        return "Scale", Scale(
+            short_size=short_size,
+            fixed_ratio=True,
+            keep_ratio=None,
+            do_round=False,
+        )
+
+    @register("CenterCrop")
+    def build_center_crop(self, target_size=224):
+        return "CenterCrop", CenterCrop(target_size=target_size)
+
+    @register("Image2Array")
+    def build_image2array(self, data_format="tchw"):
+        return "Image2Array", Image2Array(transpose=True, data_format="tchw")
+
+    @register("NormalizeVideo")
+    def build_normalize(
+        self,
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225],
+    ):
+        return "NormalizeVideo", NormalizeVideo(mean=mean, std=std)
+
+    @register("Topk")
+    def build_topk(self, topk, label_list=None):
+        if not self.topk:
+            self.topk = int(topk)
+        return "Topk", VideoClasTopk(class_ids=label_list)
+
+    @register("KeepKeys")
+    def foo(self, *args, **kwargs):
+        return None, None
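
A hedged end-to-end sketch of driving this predictor through the PaddleX model API (the video path is illustrative; `create_model`/`predict` are assumed to accept the new model names in the same way as the other modules, and the result methods follow the mixins added in this PR):

    from paddlex import create_model

    model = create_model("PP-TSMv2-LCNetV2_8frames_uniform")
    for res in model.predict("videos/demo.mp4", batch_size=1):
        res.print()                     # class_ids / scores / label_names
        res.save_to_video("./output/")  # labeled video via VideoMixin.save_to_video
        res.save_to_json("./output/")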

+ 394 - 0
paddlex/inference/models_new/video_classification/processors.py

@@ -0,0 +1,394 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+from typing import List, Sequence, Union, Optional, Tuple
+
+import re
+import numpy as np
+import cv2
+import math
+import json
+import tempfile
+import lazy_paddle
+
+
+class Scale:
+    """Scale images."""
+
+    def __init__(
+        self,
+        short_size: int,
+        fixed_ratio: bool = True,
+        keep_ratio: Union[bool, None] = None,
+        do_round: bool = False,
+    ) -> None:
+        """
+        Initializes the Scale class.
+
+        Args:
+            short_size (int): The target size for the shorter side of the image.
+            fixed_ratio (bool): Whether to maintain a fixed aspect ratio of 4:3.
+            keep_ratio (Union[bool, None]): Whether to keep the aspect ratio. Cannot be True if fixed_ratio is True.
+            do_round (bool): Whether to round the scaling factor.
+        """
+        super().__init__()
+        self.short_size = short_size
+        assert (fixed_ratio and not keep_ratio) or (
+            not fixed_ratio
+        ), f"fixed_ratio and keep_ratio cannot be true at the same time"
+        self.fixed_ratio = fixed_ratio
+        self.keep_ratio = keep_ratio
+        self.do_round = do_round
+
+    def scale(self, video: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Performs resize operations on a sequence of images.
+
+        Args:
+            video (List[np.ndarray]): List where each item is an image as a numpy array.
+             For example, [np.ndarray0, np.ndarray1, np.ndarray2, ...]
+
+        Returns:
+            List[np.ndarray]: List where each item is a np.ndarray after scaling.
+        """
+
+        imgs = video
+
+        resized_imgs = []
+        for i in range(len(imgs)):
+            img = imgs[i]
+            if isinstance(img, np.ndarray):
+                h, w, _ = img.shape
+            else:
+                raise NotImplementedError
+            if (w <= h and w == self.short_size) or (h <= w and h == self.short_size):
+                resized_imgs.append(img)
+                continue
+
+            if w <= h:
+                ow = self.short_size
+                if self.fixed_ratio:
+                    oh = int(self.short_size * 4.0 / 3.0)
+                elif self.keep_ratio is False:
+                    oh = self.short_size
+                else:
+                    scale_factor = self.short_size / w
+                    oh = (
+                        int(h * float(scale_factor) + 0.5)
+                        if self.do_round
+                        else int(h * self.short_size / w)
+                    )
+                    ow = (
+                        int(w * float(scale_factor) + 0.5)
+                        if self.do_round
+                        else self.short_size
+                    )
+            else:
+                oh = self.short_size
+                if self.fixed_ratio:
+                    ow = int(self.short_size * 4.0 / 3.0)
+                elif self.keep_ratio is False:
+                    ow = self.short_size
+                else:
+                    scale_factor = self.short_size / h
+                    oh = (
+                        int(h * float(scale_factor) + 0.5)
+                        if self.do_round
+                        else self.short_size
+                    )
+                    ow = (
+                        int(w * float(scale_factor) + 0.5)
+                        if self.do_round
+                        else int(w * self.short_size / h)
+                    )
+            resized_imgs.append(
+                cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
+            )
+        imgs = resized_imgs
+        return imgs
+
+    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Apply the scaling operation to a list of videos.
+
+        Args:
+            videos (List[np.ndarray]): A list of videos, where each video is a sequence
+            of images.
+
+        Returns:
+            List[np.ndarray]: A list of videos after scaling, where each video is a list of images.
+        """
+        return [self.scale(video) for video in videos]
+
+
+class CenterCrop:
+    """Center crop images."""
+
+    def __init__(self, target_size: int, do_round: bool = True) -> None:
+        """
+        Initializes the CenterCrop class.
+
+        Args:
+            target_size (int): The size of the cropped area.
+            do_round (bool): Whether to round the crop coordinates.
+        """
+        super().__init__()
+        self.target_size = target_size
+        self.do_round = do_round
+
+    def center_crop(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Performs center crop operations on images.
+
+        Args:
+            imgs (List[np.ndarray]): A sequence of images, each as a numpy array.
+
+        Returns:
+            List[np.ndarray]: A list of images after center cropping or a cropped numpy array.
+        """
+
+        crop_imgs = []
+        th, tw = self.target_size, self.target_size
+        if isinstance(imgs, lazy_paddle.Tensor):
+            h, w = imgs.shape[-2:]
+            x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
+            y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
+            crop_imgs = imgs[:, :, y1 : y1 + th, x1 : x1 + tw]
+        else:
+            for img in imgs:
+                h, w, _ = img.shape
+                assert (w >= self.target_size) and (
+                    h >= self.target_size
+                ), "image width({}) and height({}) should be larger than crop size".format(
+                    w, h, self.target_size
+                )
+                x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
+                y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
+                crop_imgs.append(img[y1 : y1 + th, x1 : x1 + tw])
+        return crop_imgs
+
+    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Apply the center crop operation to a list of videos.
+
+        Args:
+            videos (List[np.ndarray]): A list of videos, where each video is a sequence of images.
+
+        Returns:
+            List[np.ndarray]: A list of videos after center cropping.
+        """
+        return [self.center_crop(video) for video in videos]
+
+
+class Image2Array:
+    """Convert a sequence of images to a numpy array with optional transposition."""
+
+    def __init__(self, transpose: bool = True, data_format: str = "tchw") -> None:
+        """
+        Initializes the Image2Array class.
+
+        Args:
+            transpose (bool): Whether to transpose the resulting numpy array.
+            data_format (str): The format to transpose to, either 'tchw' or 'cthw'.
+
+        Raises:
+            AssertionError: If data_format is not one of the allowed values.
+        """
+        super().__init__()
+        assert data_format in [
+            "tchw",
+            "cthw",
+        ], f"Target format must in ['tchw', 'cthw'], but got {data_format}"
+        self.transpose = transpose
+        self.data_format = data_format
+
+    def img2array(self, imgs: List[np.ndarray]) -> np.ndarray:
+        """
+        Converts a sequence of images to a numpy array and optionally transposes it.
+
+        Args:
+            imgs (List[np.ndarray]): A list of images to be converted to a numpy array.
+
+        Returns:
+            np.ndarray: A numpy array representation of the images.
+        """
+        t_imgs = np.stack(imgs).astype("float32")
+        if self.transpose:
+            if self.data_format == "tchw":
+                t_imgs = t_imgs.transpose([0, 3, 1, 2])  # tchw
+            else:
+                t_imgs = t_imgs.transpose([3, 0, 1, 2])  # cthw
+        return t_imgs
+
+    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Apply the image to array conversion to a list of videos.
+
+        Args:
+            videos (List[Sequence[np.ndarray]]): A list of videos, where each video is a sequence of images.
+
+        Returns:
+            List[np.ndarray]: A list of numpy arrays, one for each video.
+        """
+        return [self.img2array(video) for video in videos]
+
+
+class NormalizeVideo:
+    """
+    Normalize video frames by subtracting the mean and dividing by the standard deviation.
+    """
+
+    def __init__(
+        self,
+        mean: Sequence[float],
+        std: Sequence[float],
+        tensor_shape: Sequence[int] = [3, 1, 1],
+        inplace: bool = False,
+    ) -> None:
+        """
+        Initializes the NormalizeVideo class.
+
+        Args:
+            mean (Sequence[float]): The mean values for each channel.
+            std (Sequence[float]): The standard deviation values for each channel.
+            tensor_shape (Sequence[int]): The shape of the mean and std tensors.
+            inplace (bool): Whether to perform normalization in place.
+        """
+        super().__init__()
+
+        self.inplace = inplace
+        if not inplace:
+            self.mean = np.array(mean).reshape(tensor_shape).astype(np.float32)
+            self.std = np.array(std).reshape(tensor_shape).astype(np.float32)
+        else:
+            self.mean = np.array(mean, dtype=np.float32)
+            self.std = np.array(std, dtype=np.float32)
+
+    def normalize_video(self, imgs: np.ndarray) -> np.ndarray:
+        """
+        Normalizes a sequence of images.
+
+        Args:
+            imgs (np.ndarray): A numpy array of images to be normalized.
+
+        Returns:
+            np.ndarray: The normalized images as a numpy array.
+        """
+
+        if self.inplace:
+            n = len(imgs)
+            h, w, c = imgs[0].shape
+            norm_imgs = np.empty((n, h, w, c), dtype=np.float32)
+            for i, img in enumerate(imgs):
+                norm_imgs[i] = img
+
+            for img in norm_imgs:  # [n,h,w,c]
+                mean = np.float64(self.mean.reshape(1, -1))  # [1, 3]
+                stdinv = 1 / np.float64(self.std.reshape(1, -1))  # [1, 3]
+                cv2.subtract(img, mean, img)
+                cv2.multiply(img, stdinv, img)
+        else:
+            imgs = imgs
+            norm_imgs = imgs / 255.0
+            norm_imgs -= self.mean
+            norm_imgs /= self.std
+
+        imgs = norm_imgs
+        imgs = np.expand_dims(imgs, axis=0).copy()
+        return imgs
+
+    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
+        """
+        Apply normalization to a list of videos.
+
+        Args:
+            videos (List[np.ndarray]): A list of videos, where each video is a numpy array of images.
+
+        Returns:
+            List[np.ndarray]: A list of normalized videos as numpy arrays.
+        """
+        return [self.normalize_video(video) for video in videos]
+
+
+class VideoClasTopk:
+    """Applies a top-k transformation on video classification predictions."""
+
+    def __init__(self, class_ids: Optional[Sequence[Union[str, int]]] = None) -> None:
+        """
+        Initializes the VideoClasTopk class.
+
+        Args:
+            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels corresponding to class indices.
+        """
+        super().__init__()
+        self.class_id_map = self._parse_class_id_map(class_ids)
+
+    def softmax(self, data: np.ndarray) -> np.ndarray:
+        """
+        Applies the softmax function to an array of data.
+
+        Args:
+            data (np.ndarray): An array of data for which to compute softmax.
+
+        Returns:
+            np.ndarray: The softmax-transformed data.
+        """
+        data = np.asarray(data)
+        exp_data = np.exp(data - np.max(data, axis=-1, keepdims=True))
+        softmax_data = exp_data / np.sum(exp_data, axis=-1, keepdims=True)
+        return softmax_data
+
+    def _parse_class_id_map(
+        self, class_ids: Optional[Sequence[Union[str, int]]]
+    ) -> Optional[dict]:
+        """
+        Parses a list of class IDs into a mapping from class index to class label.
+
+        Args:
+            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels.
+
+        Returns:
+            Optional[dict]: A dictionary mapping class indices to labels, or None if no class_ids are provided.
+        """
+        if class_ids is None:
+            return None
+        class_id_map = {id: str(lb) for id, lb in enumerate(class_ids)}
+        return class_id_map
+
+    def __call__(
+        self, preds: np.ndarray, topk: int = 5
+    ) -> Tuple[np.ndarray, List[np.ndarray], List[List[str]]]:
+        """
+        Selects the top-k predictions from the classification output.
+
+        Args:
+            preds (np.ndarray): A 2D array of prediction scores.
+            topk (int): The number of top predictions to return.
+
+        Returns:
+            Tuple[np.ndarray, List[np.ndarray], List[List[str]]]: A tuple containing:
+                - An array of indices of the top-k predictions.
+                - A list of arrays of scores for the top-k predictions.
+                - A list of lists of label names for the top-k predictions.
+        """
+        preds = self.softmax(preds)
+        indexes = preds[0].argsort(axis=1)[:, -topk:][:, ::-1].astype("int32")
+        scores = [
+            np.around(pred[index], decimals=5) for pred, index in zip(preds[0], indexes)
+        ]
+        label_names = [[self.class_id_map[i] for i in index] for index in indexes]
+        return indexes, scores, label_names
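
A quick sanity-check sketch of the top-k post-processing on toy logits (class names and values are hypothetical; the outer list mimics the single-output list returned by `StaticInfer`):

    import numpy as np
    from paddlex.inference.models_new.video_classification.processors import VideoClasTopk

    logits = np.array([[0.1, 2.0, 0.3, 1.5],
                       [1.2, 0.1, 3.0, 0.2]])  # 2 clips x 4 classes
    topk_op = VideoClasTopk(class_ids=["archery", "bowling", "juggling", "surfing"])
    class_ids, scores, names = topk_op([logits], topk=2)
    # class_ids -> [[1, 3], [2, 0]]
    # names     -> [["bowling", "surfing"], ["juggling", "archery"]]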

+ 91 - 0
paddlex/inference/models_new/video_classification/result.py

@@ -0,0 +1,91 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+import PIL
+from PIL import Image, ImageDraw, ImageFont
+
+from ....utils.fonts import PINGFANG_FONT_FILE_PATH
+from ...utils.color_map import get_colormap
+from ...common.result import BaseVideoResult
+
+
+class TopkVideoResult(BaseVideoResult):
+
+    def _to_video(self):
+        """Draw label on image"""
+        labels = self.get("label_names", self["class_ids"])
+        label_str = f"{labels[0]} {self['scores'][0]:.2f}"
+        video_reader = self._video_reader
+        video = video_reader.read(self["input_path"])
+        video = list(video)
+        write_fps = video_reader.get_fps()
+
+        video_list = []
+        for i in range(len(video)):
+            image = Image.fromarray(video[i].asnumpy())
+            image_size = image.size
+            draw = ImageDraw.Draw(image)
+            min_font_size = int(image_size[0] * 0.02)
+            max_font_size = int(image_size[0] * 0.05)
+            for font_size in range(max_font_size, min_font_size - 1, -1):
+                font = ImageFont.truetype(
+                    PINGFANG_FONT_FILE_PATH, font_size, encoding="utf-8"
+                )
+                if tuple(map(int, PIL.__version__.split("."))) < (10, 0, 0):
+                    text_width_tmp, text_height_tmp = draw.textsize(label_str, font)
+                else:
+                    left, top, right, bottom = draw.textbbox((0, 0), label_str, font)
+                    text_width_tmp, text_height_tmp = right - left, bottom - top
+                if text_width_tmp <= image_size[0]:
+                    break
+                else:
+                    font = ImageFont.truetype(PINGFANG_FONT_FILE_PATH, min_font_size)
+            color_list = get_colormap(rgb=True)
+            color = tuple(color_list[0])
+            font_color = tuple(self._get_font_colormap(3))
+            if tuple(map(int, PIL.__version__.split("."))) < (10, 0, 0):
+                text_width, text_height = draw.textsize(label_str, font)
+            else:
+                left, top, right, bottom = draw.textbbox((0, 0), label_str, font)
+                text_width, text_height = right - left, bottom - top
+
+            rect_left = 3
+            rect_top = 3
+            rect_right = rect_left + text_width + 3
+            rect_bottom = rect_top + text_height + 6
+
+            draw.rectangle(
+                [(rect_left, rect_top), (rect_right, rect_bottom)], fill=color
+            )
+
+            text_x = rect_left + 3
+            text_y = rect_top
+            draw.text((text_x, text_y), label_str, fill=font_color, font=font)
+            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+            video_list.append(image)
+        return np.array(video_list), write_fps
+
+    def _get_font_colormap(self, color_index):
+        """
+        Get font colormap
+        """
+        dark = np.array([0x14, 0x0E, 0x35])
+        light = np.array([0xFF, 0xFF, 0xFF])
+        light_indexs = [0, 3, 4, 8, 9, 13, 14, 18, 19]
+        if color_index in light_indexs:
+            return light.astype("int32")
+        else:
+            return dark.astype("int32")
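
The `(frames, fps)` tuple returned by `_to_video` above is exactly what the OpenCV video writer backend (added in `writers.py` below) unpacks when saving; a minimal hand-off sketch with dummy frames (the output directory is assumed to exist):

    import numpy as np
    from paddlex.inference.utils.io import VideoWriter

    frames = np.zeros((8, 240, 320, 3), dtype=np.uint8)  # 8 dummy BGR frames
    writer = VideoWriter(backend="opencv")
    writer.write("output/demo.mp4", (frames, 25.0))  # second element is the FPS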

+ 1 - 0
paddlex/inference/utils/io/__init__.py

@@ -30,4 +30,5 @@ from .writers import (
     HtmlWriter,
     XlsxWriter,
     YAMLWriter,
+    VideoWriter,
 )

+ 83 - 1
paddlex/inference/utils/io/readers.py

@@ -21,6 +21,8 @@ from PIL import Image, ImageOps
 import pandas as pd
 import numpy as np
 import yaml
+import decord
+import random
 
 __all__ = [
     "ReaderType",
@@ -152,21 +154,28 @@ class VideoReader(_GenerativeReader):
         self.st_frame_id = st_frame_id
         self.max_num_frames = max_num_frames
         self.auto_close = auto_close
+        self._fps = 0
 
     def read(self, in_path):
         """read vide file from path"""
         self._backend.set_pos(self.st_frame_id)
         gen = self._backend.read_file(str(in_path))
-        if self.num_frames is not None:
+        if self.max_num_frames is not None:
             gen = itertools.islice(gen, self.num_frames)
         yield from gen
         if self.auto_close:
             self._backend.close()
 
+    def get_fps(self):
+        """get fps"""
+        return self._backend.get_fps()
+
     def _init_backend(self, bk_type, bk_args):
         """init backend"""
         if bk_type == "opencv":
             return OpenCVVideoReaderBackend(**bk_args)
+        elif bk_type == "decord":
+            return DecordVideoReaderBackend(**bk_args)
         else:
             raise ValueError("Unsupported backend type")
 
@@ -264,6 +273,9 @@ class OpenCVVideoReaderBackend(_VideoReaderBackend):
         self._pos = 0
         self._max_num_frames = None
 
+    def get_fps(self):
+        return self._cap.get(cv2.CAP_PROP_FPS)
+
     def read_file(self, in_path):
         """read vidio file from path"""
         if self._cap is not None:
@@ -283,6 +295,9 @@ class OpenCVVideoReaderBackend(_VideoReaderBackend):
         self._cap_release()
 
     def _cap_open(self, video_path):
+        self.cap_init_args.pop("num_seg")
+        self.cap_init_args.pop("seg_len")
+        self.cap_init_args.pop("sample_type")
         self._cap = cv2.VideoCapture(video_path, **self.cap_init_args)
         if not self._cap.isOpened():
             raise RuntimeError(f"Failed to open {video_path}")
@@ -303,6 +318,73 @@ class OpenCVVideoReaderBackend(_VideoReaderBackend):
             self._cap = None
 
 
+class DecordVideoReaderBackend(_VideoReaderBackend):
+    """DecordVideoReaderBackend"""
+
+    def __init__(self, **bk_args):
+        super().__init__()
+        self.cap_init_args = bk_args
+        self._cap = None
+        self._pos = 0
+        self._max_num_frames = None
+        self.num_seg = bk_args.get("num_seg", 8)
+        self.seg_len = bk_args.get("seg_len", 1)
+        self.sample_type = bk_args.get("sample_type", 1)
+        self.valid_mode = True
+        self._fps = 0
+
+    def set_pos(self, pos):
+        self._pos = pos
+
+    def sample(self, frames_len, video_object):
+        frames_idx = []
+        average_dur = int(frames_len / self.num_seg)
+        for i in range(self.num_seg):
+            idx = 0
+            if not self.valid_mode:
+                if average_dur >= self.seg_len:
+                    idx = random.randint(0, average_dur - self.seg_len)
+                    idx += i * average_dur
+                elif average_dur >= 1:
+                    idx += i * average_dur
+                else:
+                    idx = i
+            else:
+                if average_dur >= self.seg_len:
+                    idx = (average_dur - 1) // 2
+                    idx += i * average_dur
+                elif average_dur >= 1:
+                    idx += i * average_dur
+                else:
+                    idx = i
+            for jj in range(idx, idx + self.seg_len):
+                frames_idx.append(int(jj % frames_len))
+        frames_select = video_object.get_batch(frames_idx)
+        # convert the sampled decord NDArray batch to a list of numpy frames
+        np_frames = frames_select.asnumpy()
+        imgs = []
+        for i in range(np_frames.shape[0]):
+            imgbuf = np_frames[i]
+            imgs.append(imgbuf)
+        return imgs
+
+    def get_fps(self):
+        return self._cap.get_avg_fps()
+
+    def read_file(self, in_path):
+        """read vidio file from path"""
+        self._cap = decord.VideoReader(in_path)
+        frame_len = len(self._cap)
+        if self.sample_type == "uniform":
+            sample_video = self.sample(frame_len, self._cap)
+            return sample_video
+        else:
+            return self._cap
+
+    def close(self):
+        pass
+
+
 class CSVReader(_BaseReader):
     """CSVReader"""
 

+ 48 - 0
paddlex/inference/utils/io/writers.py

@@ -19,6 +19,7 @@ import json
 from pathlib import Path
 
 import cv2
+import decord
 import numpy as np
 from PIL import Image
 import pandas as pd
@@ -35,6 +36,7 @@ __all__ = [
     "HtmlWriter",
     "XlsxWriter",
     "YAMLWriter",
+    "VideoWriter",
 ]
 
 
@@ -114,6 +116,28 @@ class ImageWriter(_BaseWriter):
         return WriterType.IMAGE
 
 
+class VideoWriter(_BaseWriter):
+    """VideoWriter"""
+
+    def __init__(self, backend="opencv", **bk_args):
+        super().__init__(backend=backend, **bk_args)
+
+    def write(self, out_path, obj):
+        """write"""
+        return self._backend.write_obj(str(out_path), obj)
+
+    def _init_backend(self, bk_type, bk_args):
+        """init backend"""
+        if bk_type == "opencv":
+            return OpenCVVideoWriterBackend(**bk_args)
+        else:
+            raise ValueError("Unsupported backend type")
+
+    def get_type(self):
+        """get type"""
+        return WriterType.VIDEO
+
+
 class TextWriter(_BaseWriter):
     """TextWriter"""
 
@@ -294,6 +318,30 @@ class PILImageWriterBackend(_ImageWriterBackend):
         return img.save(out_path, format=self.format)
 
 
+class _VideoWriterBackend(_BaseWriterBackend):
+    """_VideoWriterBackend"""
+
+    pass
+
+
+class OpenCVVideoWriterBackend(_VideoWriterBackend):
+    """OpenCVImageWriterBackend"""
+
+    def _write_obj(self, out_path, obj):
+        """write video object by OpenCV"""
+        obj, fps = obj
+        if isinstance(obj, np.ndarray):
+            vr = obj
+            width, height = vr[0].shape[1], vr[0].shape[0]
+            fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # Alternatively, use 'XVID'
+            out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
+            for frame in vr:
+                out.write(frame)
+            out.release()
+        else:
+            raise TypeError("Unsupported object type")
+
+
 class _BaseJsonWriterBackend(object):
     def __init__(self, indent=4, ensure_ascii=False):
         super().__init__()

+ 3 - 0
paddlex/inference/utils/official_models.py

@@ -304,6 +304,9 @@ PP-LCNet_x1_0_vehicle_attribute_infer.tar",
     "MobileFaceNet": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/MobileFaceNet_infer.tar",
     "ResNet50_face": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/ResNet50_face_infer.tar",
     "PP-YOLOE-R_L": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PP-YOLOE-R_L_infer.tar",
+    "PP-TSM-R50_8frames_uniform": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PP-TSM-R50_8frames_uniform_infer.tar",
+    "PP-TSMv2-LCNetV2_8frames_uniform": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PP-TSMv2-LCNetV2_8frames_uniform_infer.tar",
+    "PP-TSMv2-LCNetV2_16frames_uniform": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b2/PP-TSMv2-LCNetV2_16frames_uniform_infer.tar",
 }
 
 

+ 7 - 0
paddlex/modules/__init__.py

@@ -103,3 +103,10 @@ from .face_recognition import (
 )
 
 from .ts_forecast import TSFCDatasetChecker, TSFCTrainer, TSFCEvaluator
+
+from .video_classification import (
+    VideoClsDatasetChecker,
+    VideoClsTrainer,
+    VideoClsEvaluator,
+    VideoClsExportor,
+)

+ 1 - 0
paddlex/modules/base/trainer.py

@@ -56,6 +56,7 @@ class BaseTrainer(ABC, metaclass=AutoRegisterABCMetaClass):
         self.config = config
         self.global_config = config.Global
         self.train_config = config.Train
+        self.eval_config = config.Evaluate
         self.benchmark_config = config.get("Benchmark", None)
         config_path = self.train_config.get("basic_config_path", None)
 

+ 18 - 0
paddlex/modules/video_classification/__init__.py

@@ -0,0 +1,18 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .trainer import VideoClsTrainer
+from .dataset_checker import VideoClsDatasetChecker
+from .evaluator import VideoClsEvaluator
+from .exportor import VideoClsExportor

+ 93 - 0
paddlex/modules/video_classification/dataset_checker/__init__.py

@@ -0,0 +1,93 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+
+from ...base import BaseDatasetChecker
+from .dataset_src import check, split_dataset, deep_analyse
+from ..model_list import MODELS
+
+
+class VideoClsDatasetChecker(BaseDatasetChecker):
+    """Dataset Checker for Image Classification Model"""
+
+    entities = MODELS
+    sample_num = 10
+
+    def get_dataset_root(self, dataset_dir: str) -> str:
+        """find the dataset root dir
+
+        Args:
+            dataset_dir (str): the directory that contain dataset.
+
+        Returns:
+            str: the root directory of dataset.
+        """
+        anno_dirs = list(Path(dataset_dir).glob("**/videos"))
+        assert len(anno_dirs) == 1
+        dataset_dir = anno_dirs[0].parent.as_posix()
+        return dataset_dir
+
+    def split_dataset(self, src_dataset_dir: str) -> str:
+        """repartition the train and validation dataset
+
+        Args:
+            src_dataset_dir (str): the root directory of dataset.
+
+        Returns:
+            str: the root directory of the split dataset.
+        """
+        return split_dataset(
+            src_dataset_dir,
+            self.check_dataset_config.split.train_percent,
+            self.check_dataset_config.split.val_percent,
+        )
+
+    def check_dataset(self, dataset_dir: str, sample_num: int = sample_num) -> dict:
+        """check if the dataset meets the specifications and get dataset summary
+
+        Args:
+            dataset_dir (str): the root directory of dataset.
+            sample_num (int): the number to be sampled.
+        Returns:
+            dict: dataset summary.
+        """
+        return check(dataset_dir, self.output, sample_num)
+
+    def analyse(self, dataset_dir: str) -> dict:
+        """deep analyse dataset
+
+        Args:
+            dataset_dir (str): the root directory of dataset.
+
+        Returns:
+            dict: the deep analysis results.
+        """
+        return deep_analyse(dataset_dir, self.output)
+
+    def get_show_type(self) -> str:
+        """get the show type of dataset
+
+        Returns:
+            str: show type
+        """
+        return "video"
+
+    def get_dataset_type(self) -> str:
+        """return the dataset type
+
+        Returns:
+            str: dataset type
+        """
+        return "VideoClsDataset"

+ 18 - 0
paddlex/modules/video_classification/dataset_checker/dataset_src/__init__.py

@@ -0,0 +1,18 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .check_dataset import check
+from .split_dataset import split_dataset
+from .analyse_dataset import deep_analyse

+ 93 - 0
paddlex/modules/video_classification/dataset_checker/dataset_src/analyse_dataset.py

@@ -0,0 +1,93 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+import math
+import platform
+from pathlib import Path
+
+from collections import defaultdict
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import font_manager
+from matplotlib.backends.backend_agg import FigureCanvasAgg
+
+from .....utils.file_interface import custom_open
+from .....utils.fonts import PINGFANG_FONT_FILE_PATH
+
+
+def deep_analyse(dataset_path, output):
+    """class analysis for dataset"""
+    tags = ["train", "val"]
+    labels_cnt = defaultdict(str)
+    label_path = os.path.join(dataset_path, "label.txt")
+    with custom_open(label_path, "r") as f:
+        lines = f.readlines()
+    for line in lines:
+        line = line.strip().split()
+        labels_cnt[line[0]] = " ".join(line[1:])
+    for tag in tags:
+        anno_path = os.path.join(dataset_path, f"{tag}.txt")
+        classes_num = defaultdict(int)
+        for i in range(len(labels_cnt)):
+            classes_num[labels_cnt[str(i)]] = 0
+        with custom_open(anno_path, "r") as f:
+            lines = f.readlines()
+        for line in lines:
+            line = line.strip().split()
+            classes_num[labels_cnt[line[1]]] += 1
+        if tag == "train":
+            cnts_train = [cat_ids for cat_name, cat_ids in classes_num.items()]
+        elif tag == "val":
+            cnts_val = [cat_ids for cat_name, cat_ids in classes_num.items()]
+
+    classes = [cat_name for cat_name, cat_ids in classes_num.items()]
+    sorted_id = sorted(
+        range(len(cnts_train)), key=lambda k: cnts_train[k], reverse=True
+    )
+    cnts_train_sorted = [cnts_train[index] for index in sorted_id]
+    cnts_val_sorted = [cnts_val[index] for index in sorted_id]
+    classes_sorted = [classes[index] for index in sorted_id]
+    x = np.arange(len(classes))
+    width = 0.5
+
+    # bar
+    os_system = platform.system().lower()
+    if os_system == "windows":
+        plt.rcParams["font.sans-serif"] = "FangSong"
+    else:
+        font = font_manager.FontProperties(fname=PINGFANG_FONT_FILE_PATH, size=10)
+    fig, ax = plt.subplots(figsize=(max(8, int(len(classes) / 5)), 5), dpi=300)
+    ax.bar(x, cnts_train_sorted, width=0.5, label="train")
+    ax.bar(x + width, cnts_val_sorted, width=0.5, label="val")
+    plt.xticks(
+        x + width / 2,
+        classes_sorted,
+        rotation=90,
+        fontproperties=None if os_system == "windows" else font,
+    )
+    ax.set_xlabel(
+        "类别名称", fontproperties=None if os_system == "windows" else font, fontsize=12
+    )
+    ax.set_ylabel(
+        "视频数量", fontproperties=None if os_system == "windows" else font, fontsize=12
+    )
+    plt.legend(loc=1)
+    fig.tight_layout()
+    file_path = os.path.join(output, "histogram.png")
+    fig.savefig(file_path, dpi=300)
+
+    return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 121 - 0
paddlex/modules/video_classification/dataset_checker/dataset_src/check_dataset.py

@@ -0,0 +1,121 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path as osp
+import random
+from PIL import Image, ImageOps
+from collections import defaultdict
+
+from .....utils.errors import DatasetFileNotFoundError, CheckFailedError
+
+
+def check(dataset_dir, output, sample_num=10):
+    """check dataset"""
+    dataset_dir = osp.abspath(dataset_dir)
+    # Custom dataset
+    if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
+        raise DatasetFileNotFoundError(file_path=dataset_dir)
+
+    tags = ["train", "val"]
+    delim = " "
+    valid_num_parts = 2
+
+    sample_cnts = dict()
+    label_map_dict = dict()
+    sample_paths = defaultdict(list)
+    labels = []
+
+    label_file = osp.join(dataset_dir, "label.txt")
+    if not osp.exists(label_file):
+        raise DatasetFileNotFoundError(
+            file_path=label_file,
+            solution=f"Ensure that `label.txt` exist in {dataset_dir}",
+        )
+
+    with open(label_file, "r", encoding="utf-8") as f:
+        all_lines = f.readlines()
+        for line in all_lines:
+            substr = line.strip("\n").split(" ", 1)
+            try:
+                label_idx = int(substr[0])
+                labels.append(label_idx)
+                label_map_dict[label_idx] = str(substr[1])
+            except (ValueError, IndexError) as e:
+                raise CheckFailedError(
+                    f"Ensure that the first field in each line in {label_file} is an integer."
+                ) from e
+    if min(labels) != 0:
+        raise CheckFailedError(
+            f"Ensure that the index starts from 0 in `{label_file}`."
+        )
+
+    for tag in tags:
+        file_list = osp.join(dataset_dir, f"{tag}.txt")
+        if not osp.exists(file_list):
+            if tag in ("train", "val"):
+                # train and val file lists must exist
+                raise DatasetFileNotFoundError(
+                    file_path=file_list,
+                    solution=f"Ensure that both `train.txt` and `val.txt` exist in {dataset_dir}",
+                )
+            else:
+                # tag == 'test'
+                continue
+        else:
+            with open(file_list, "r", encoding="utf-8") as f:
+                all_lines = f.readlines()
+                random.seed(123)
+                random.shuffle(all_lines)
+                sample_cnts[tag] = len(all_lines)
+                for line in all_lines:
+                    substr = line.strip("\n").split(delim)
+                    if len(substr) != valid_num_parts:
+                        raise CheckFailedError(
+                            f"The number of delimiter-separated items in each row in {file_list} \
+                                    should be {valid_num_parts} (current delimiter is '{delim}')."
+                        )
+                    file_name = substr[0]
+                    label = substr[1]
+
+                    video_path = osp.join(dataset_dir, file_name)
+
+                    if not osp.exists(video_path):
+                        raise DatasetFileNotFoundError(file_path=video_path)
+
+                    if len(sample_paths[tag]) < sample_num:
+                        sample_path = osp.join(
+                            "check_dataset", os.path.relpath(video_path, output)
+                        )
+                        sample_paths[tag].append(sample_path)
+
+                    try:
+                        label = int(label)
+                    except (ValueError, TypeError) as e:
+                        raise CheckFailedError(
+                            f"Ensure that the second number in each line in {label_file} should be int."
+                        ) from e
+
+    num_classes = max(labels) + 1
+
+    attrs = {}
+    attrs["label_file"] = osp.relpath(label_file, output)
+    attrs["num_classes"] = num_classes
+    attrs["train_samples"] = sample_cnts["train"]
+    attrs["train_sample_paths"] = sample_paths["train"]
+
+    attrs["val_samples"] = sample_cnts["val"]
+    attrs["val_sample_paths"] = sample_paths["val"]
+
+    return attrs
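
For reference, the dataset layout these checks expect (inferred from the code above; file contents are illustrative):

    video_cls_dataset/
    ├── videos/       # video files referenced by the relative paths below
    ├── label.txt     # one "index<space>class name" pair per line, indices starting at 0
    ├── train.txt     # one "relative/video/path.mp4<space>label_index" pair per line
    └── val.txt       # same format as train.txt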

+ 82 - 0
paddlex/modules/video_classification/dataset_checker/dataset_src/split_dataset.py

@@ -0,0 +1,82 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from random import shuffle
+
+from .....utils.file_interface import custom_open
+
+
+def split_dataset(root_dir, train_rate, val_rate):
+    """
+    Split the video dataset into training and validation sets according to the given ratios,
+    and generate the corresponding .txt files.
+
+    Args:
+        root_dir (str): Path to the root directory of the dataset.
+        train_rate (int): Percentage of the dataset to be used as the training set.
+        val_rate (int): Percentage of the dataset to be used as the validation set.
+
+    Returns:
+        str: Information about the dataset split results.
+    """
+    sum_rate = train_rate + val_rate
+    assert (
+        sum_rate == 100
+    ), f"The sum of train_rate({train_rate}), val_rate({val_rate}) should equal 100!"
+    assert (
+        train_rate > 0 and val_rate > 0
+    ), f"The train_rate({train_rate}) and val_rate({val_rate}) should be greater than 0!"
+    tags = ["train", "val"]
+    valid_path = False
+    video_files = []
+    for tag in tags:
+        split_image_list = os.path.abspath(os.path.join(root_dir, f"{tag}.txt"))
+        rename_image_list = os.path.abspath(os.path.join(root_dir, f"{tag}.txt.bak"))
+        if os.path.exists(split_image_list):
+            with custom_open(split_image_list, "r") as f:
+                lines = f.readlines()
+            video_files = video_files + lines
+            valid_path = True
+            if not os.path.exists(rename_image_list):
+                os.rename(split_image_list, rename_image_list)
+
+    assert (
+        valid_path
+    ), f"The files to be divided{tags[0]}.txt, {tags[1]}.txt, do not exist in the dataset directory."
+
+    shuffle(video_files)
+    start = 0
+    video_num = len(video_files)
+    rate_list = [train_rate, val_rate]
+    for i, tag in enumerate(tags):
+
+        rate = rate_list[i]
+        if rate == 0:
+            continue
+
+        end = start + round(video_num * rate / 100)
+        if sum(rate_list[i + 1 :]) == 0:
+            end = video_num
+
+        txt_file = os.path.abspath(os.path.join(root_dir, tag + ".txt"))
+        with custom_open(txt_file, "w") as f:
+            m = 0
+            for id in range(start, end):
+                m += 1
+                f.write(video_files[id])
+        start = end
+
+    return root_dir

+ 44 - 0
paddlex/modules/video_classification/evaluator.py

@@ -0,0 +1,44 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..base import BaseEvaluator
+from .model_list import MODELS
+
+
+class VideoClsEvaluator(BaseEvaluator):
+    """Image Classification Model Evaluator"""
+
+    entities = MODELS
+
+    def update_config(self):
+        """update evalution config"""
+        if self.eval_config.log_interval:
+            self.pdx_config.update_log_interval(self.eval_config.log_interval)
+        self.pdx_config.update_dataset(
+            self.global_config.dataset_dir, "VideoClsDataset"
+        )
+        if self.eval_config.batch_size is not None:
+            self.pdx_config.update_batch_size(self.eval_config.batch_size, mode="eval")
+        self.pdx_config.update_pretrained_weights(self.eval_config.weight_path)
+
+    def get_eval_kwargs(self) -> dict:
+        """get key-value arguments of model evalution function
+
+        Returns:
+            dict: the arguments of evaluation function.
+        """
+        return {
+            "weight_path": self.eval_config.weight_path,
+            "device": self.get_device(using_device_number=1),
+        }

+ 22 - 0
paddlex/modules/video_classification/exportor.py

@@ -0,0 +1,22 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..base import BaseExportor
+from .model_list import MODELS
+
+
+class VideoClsExportor(BaseExportor):
+    """Image Classification Model Exportor"""
+
+    entities = MODELS

+ 19 - 0
paddlex/modules/video_classification/model_list.py

@@ -0,0 +1,19 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MODELS = [
+    "PP-TSM-R50_8frames_uniform",
+    "PP-TSMv2-LCNetV2_8frames_uniform",
+    "PP-TSMv2-LCNetV2_16frames_uniform",
+]

+ 88 - 0
paddlex/modules/video_classification/trainer.py

@@ -0,0 +1,88 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import shutil
+from pathlib import Path
+
+from ..base import BaseTrainer
+from .model_list import MODELS
+from ...utils.config import AttrDict
+
+
+class VideoClsTrainer(BaseTrainer):
+    """Image Classification Model Trainer"""
+
+    entities = MODELS
+
+    def dump_label_dict(self, src_label_dict_path: str):
+        """dump label dict config
+
+        Args:
+            src_label_dict_path (str): path of the source label dict file to copy.
+        """
+        dst_label_dict_path = Path(self.global_config.output).joinpath("label_dict.txt")
+        shutil.copyfile(src_label_dict_path, dst_label_dict_path)
+
+    def update_config(self):
+        """update training config"""
+        if self.train_config.log_interval:
+            self.pdx_config.update_log_interval(self.train_config.log_interval)
+        if self.train_config.eval_interval:
+            self.pdx_config.update_eval_interval(self.train_config.eval_interval)
+        if self.train_config.save_interval:
+            self.pdx_config.update_save_interval(self.train_config.save_interval)
+
+        self.pdx_config.update_dataset(
+            self.global_config.dataset_dir, "VideoClsDataset"
+        )
+        if self.train_config.num_classes is not None:
+            self.pdx_config.update_num_classes(self.train_config.num_classes)
+        if self.train_config.pretrain_weight_path != "":
+            self.pdx_config.update_pretrained_weights(
+                self.train_config.pretrain_weight_path
+            )
+
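+        # If the dataset provides a label.txt, copy it into the training output directory.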
+        label_dict_path = Path(self.global_config.dataset_dir).joinpath("label.txt")
+        if label_dict_path.exists():
+            self.dump_label_dict(label_dict_path)
+        if self.train_config.batch_size is not None:
+            self.pdx_config.update_batch_size(
+                self.train_config.batch_size, mode="train"
+            )
+        if self.eval_config.batch_size is not None:
+            self.pdx_config.update_batch_size(self.eval_config.batch_size, mode="eval")
+        if self.train_config.learning_rate is not None:
+            self.pdx_config.update_learning_rate(self.train_config.learning_rate)
+        if self.train_config.epochs_iters is not None:
+            self.pdx_config._update_epochs(self.train_config.epochs_iters)
+        if self.train_config.warmup_steps is not None:
+            self.pdx_config.update_warmup_epochs(self.train_config.warmup_steps)
+        if self.global_config.output is not None:
+            self.pdx_config._update_output_dir(self.global_config.output)
+
+    def get_train_kwargs(self) -> dict:
+        """get key-value arguments of model training function
+
+        Returns:
+            dict: the arguments of training function.
+        """
+        train_args = {"device": self.get_device()}
+        if (
+            self.train_config.resume_path is not None
+            and self.train_config.resume_path != ""
+        ):
+            train_args["resume_path"] = self.train_config.resume_path
+        train_args["dy2st"] = self.train_config.get("dy2st", False)
+        return train_args

+ 16 - 0
paddlex/repo_apis/PaddleVideo_api/__init__.py

@@ -0,0 +1,16 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .video_cls import VideoClsModel, VideoClsRunner, register

+ 51 - 0
paddlex/repo_apis/PaddleVideo_api/config_utils.py

@@ -0,0 +1,51 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+import yaml
+
+
+def load_config(file_path):
+    """load_config"""
+    _, ext = os.path.splitext(file_path)
+    assert ext in [".yml", ".yaml"], "only support yaml files for now"
+    config = yaml.load(open(file_path, "rb"), Loader=yaml.Loader)
+    return config
+
+
+def merge_config(config, opts):
+    """merge_config"""
+    for key, value in opts.items():
+        if "." not in key:
+            if isinstance(value, dict) and key in config:
+                config[key].update(value)
+            else:
+                config[key] = value
+        else:
+            sub_keys = key.split(".")
+            assert sub_keys[0] in config, (
+                "the sub_keys can only be one of global_config: {}, but get: "
+                "{}, please check your running command".format(
+                    config.keys(), sub_keys[0]
+                )
+            )
+            cur = config[sub_keys[0]]
+            for idx, sub_key in enumerate(sub_keys[1:]):
+                if idx == len(sub_keys) - 2:
+                    cur[sub_key] = value
+                else:
+                    cur = cur[sub_key]
+    return config

+ 156 - 0
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml

@@ -0,0 +1,156 @@
+Global:
+  checkpoints: null
+  pretrained_model: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSM-R50_8frames_uniform_pretrained.pdparams
+  output_dir: ./output/
+  device: gpu
+  use_visualdl: False
+  save_inference_dir: ./inference
+  # training model under @to_static
+  to_static: False
+  algorithm: PP-TSM-R50_8frames_uniform
+
+MODEL: #MODEL field
+    framework: "Recognizer2D" #Mandatory, indicate the type of network, associate to the 'paddlevideo/modeling/framework/' .
+    backbone: #Mandatory, indicate the type of backbone, associate to the 'paddlevideo/modeling/backbones/' .
+        name: "ResNetTweaksTSM" #Mandatory, The name of backbone.
+        pretrained: null
+        depth: 50 #Optional, the depth of backbone architecture.
+    head:
+        name: "ppTSMHead" #Mandatory, indicate the type of head, associate to the 'paddlevideo/modeling/heads'
+        num_classes: 400 #Optional, the number of classes to be classified.
+        in_channels: 2048 #input channel of the extracted feature.
+        drop_ratio: 0.5 #the ratio of dropout
+        std: 0.01 #std value in params initialization
+        ls_eps: 0.1 # label smooth factor
+
+DATASET: #DATASET field
+    batch_size: 16  #Mandatory, batch size
+    num_workers: 4 #Mandatory, the number of subprocesses on each GPU.
+    # test_batch_size: 1
+    train:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos"  #Mandatory, train data root path
+        file_path: "K400_dataset/K400/train.txt" #Mandatory, train data index file path
+    valid:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos"  #Mandatory, train data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+    test:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos"  #Mandatory, train data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+
+PIPELINE: #PIPELINE field
+    train: #Mandatory, indicate the pipeline to deal with the training data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: False
+        transform: #Mandatory, image transform operators
+            - Scale:
+                short_size: 256
+            - MultiScaleCrop:
+                target_size: 256
+            - RandomCrop:
+                target_size: 224
+            - RandomFlip:
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    valid: #Mandatory, indicate the pipeline to deal with the validation data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    test:  #Mandatory, indicate the pipeline to deal with the test data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+
+OPTIMIZER: #OPTIMIZER field
+  name: 'Momentum'
+  momentum: 0.9
+  learning_rate:
+    iter_step: True
+    name: 'CustomWarmupCosineDecay'
+    max_epoch: 80
+    warmup_epochs: 10
+    warmup_start_lr: 0.005
+    cosine_base_lr: 0.01
+  weight_decay:
+    name: 'L2'
+    value: 1e-4
+  use_nesterov: True
+
+MIX:
+    name: "Mixup"
+    alpha: 0.2
+
+PRECISEBN:
+  preciseBN_interval: 5     # epoch interval to do preciseBN, default 1.
+  num_iters_preciseBN: 200  # how many batches used to do preciseBN, default 200.
+
+
+METRIC:
+    name: 'CenterCropMetric'
+
+INFERENCE:
+    name: 'ppTSM_Inference_helper'
+    num_seg: 8
+    target_size: 224
+
+Infer:
+    transforms:
+        - ReadVideo:
+            num_seg: 8
+            sample_type: 'uniform'
+        - Scale:
+            short_size: 256
+        - CenterCrop:
+            target_size: 224
+        - Image2Array:
+            data_format: 'tchw'
+        - NormalizeVideo:
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225] 
+    PostProcess:
+        name: Topk
+        topk: 1
+        class_id_map_file: data/k400/Kinetics-400_label_list.txt     
+
+model_name: "ppTSM"
+log_interval: 10 #Optional, the interval of logger, default: 10
+epochs: 80 #Mandatory, total epochs
+log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 151 - 0
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml

@@ -0,0 +1,151 @@
+Global:
+    checkpoints: null
+    pretrained_model: "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_16frames_uniform_pretrained.pdparams"
+    output_dir: ./output/
+    device: gpu
+    use_visualdl: False
+    save_inference_dir: ./inference
+    # training model under @to_static
+    to_static: False
+    algorithm: PP-TSMv2-LCNetV2_16frames_uniform
+
+MODEL: #MODEL field
+    framework: "Recognizer2D" #Mandatory, indicate the type of network, associate to the 'paddlevideo/modeling/framework/' .
+    backbone: #Mandatory, indicate the type of backbone, associate to the 'paddlevideo/modeling/backbones/' .
+        name: "PPTSM_v2" #Mandatory, The name of backbone.
+        pretrained: null #Optional, pretrained model path.
+        num_seg: 16
+        class_num: 400
+    head:
+        name: "MoViNetHead" #Mandatory, indicate the type of head, associate to the 'paddlevideo/modeling/heads'
+
+
+DATASET: #DATASET field
+    batch_size: 16  #Mandatory, batch size
+    num_workers: 4 #Mandatory, the number of subprocesses on each GPU.
+    train:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, train data root path
+        file_path: "K400_dataset/K400/train.txt" #Mandatory, train data index file path
+
+    valid:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, valid data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+
+    test:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, valid data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+
+
+PIPELINE: #PIPELINE field
+    train: #Mandatory, indicate the pipeline to deal with the training data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 16
+            seg_len: 1
+            valid_mode: False
+        transform: #Mandatory, image transform operators
+            - Scale:
+                short_size: 256
+            - MultiScaleCrop:
+                target_size: 256
+            - RandomCrop:
+                target_size: 224
+            - RandomFlip:
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    valid: #Mandatory, indicate the pipeline to deal with the validation data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 16
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    test:  #Mandatory, indicate the pipeline to deal with the test data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 16
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+
+OPTIMIZER: #OPTIMIZER field
+  name: 'Momentum'
+  momentum: 0.9
+  learning_rate:
+    iter_step: True
+    name: 'CustomWarmupCosineDecay'
+    max_epoch: 120
+    warmup_epochs: 10
+    warmup_start_lr: 0.005
+    cosine_base_lr: 0.01
+  weight_decay:
+    name: 'L2'
+    value: 1e-4
+  use_nesterov: True
+
+MIX:
+    name: "Mixup"
+    alpha: 0.2
+
+
+METRIC:
+    name: 'CenterCropMetric'
+
+INFERENCE:
+    name: 'ppTSM_Inference_helper'
+    num_seg: 16
+    target_size: 224
+
+Infer:
+    transforms:
+        - ReadVideo:
+            num_seg: 16
+            sample_type: 'uniform'
+        - Scale:
+            short_size: 256
+        - CenterCrop:
+            target_size: 224
+        - Image2Array:
+            data_format: 'tchw'
+        - NormalizeVideo:
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225] 
+    PostProcess:
+        name: Topk
+        topk: 1
+        class_id_map_file: data/k400/Kinetics-400_label_list.txt      
+
+model_name: "ppTSMv2"
+log_interval: 10 #Optional, the interval of logger, default: 10
+epochs: 120  #Mandatory, total epochs
+log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 148 - 0
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml

@@ -0,0 +1,148 @@
+Global:
+    checkpoints: null
+    pretrained_model: "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PP-TSMv2-LCNetV2_8frames_uniform_pretrained.pdparams"
+    output_dir: ./output/
+    device: gpu
+    use_visualdl: False
+    save_inference_dir: ./inference
+    # training model under @to_static
+    to_static: False
+    algorithm: PP-TSMv2-LCNetV2_8frames_uniform
+
+MODEL: #MODEL field
+    framework: "Recognizer2D" #Mandatory, indicate the type of network, associate to the 'paddlevideo/modeling/framework/' .
+    backbone: #Mandatory, indicate the type of backbone, associate to the 'paddlevideo/modeling/backbones/' .
+        name: "PPTSM_v2" #Mandatory, The name of backbone.
+        pretrained: null #Optional, pretrained model path.
+        num_seg: 8
+        class_num: 400
+    head:
+        name: "MoViNetHead" #Mandatory, indicate the type of head, associate to the 'paddlevideo/modeling/heads'
+
+
+DATASET: #DATASET field
+    batch_size: 16  #Mandatory, batch size
+    num_workers: 4 #Mandatory, the number of subprocesses on each GPU.
+    train:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, train data root path
+        file_path: "K400_dataset/K400/train.txt" #Mandatory, train data index file path
+    valid:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, valid data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+    test:
+        format: "VideoDataset" #Mandatory, indicate the type of dataset, associate to the 'paddlevidel/loader/dateset'
+        data_prefix: "K400_dataset/K400/videos" #Mandatory, valid data root path
+        file_path: "K400_dataset/K400/val.txt" #Mandatory, valid data index file path
+
+PIPELINE: #PIPELINE field
+    train: #Mandatory, indicate the pipeline to deal with the training data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: False
+        transform: #Mandatory, image transform operators
+            - Scale:
+                short_size: 256
+            - MultiScaleCrop:
+                target_size: 256
+            - RandomCrop:
+                target_size: 224
+            - RandomFlip:
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    valid: #Mandatory, indicate the pipeline to deal with the validation data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+    test:  #Mandatory, indicate the pipeline to deal with the test data, associate to the 'paddlevideo/loader/pipelines/'
+        decode:
+            name: "VideoDecoder"
+            backend: "decord"
+        sample:
+            name: "Sampler"
+            num_seg: 8
+            seg_len: 1
+            valid_mode: True
+        transform:
+            - Scale:
+                short_size: 256
+            - CenterCrop:
+                target_size: 224
+            - Image2Array:
+            - Normalization:
+                mean: [0.485, 0.456, 0.406]
+                std: [0.229, 0.224, 0.225]
+
+OPTIMIZER: #OPTIMIZER field
+  name: 'Momentum'
+  momentum: 0.9
+  learning_rate:
+    iter_step: True
+    name: 'CustomWarmupCosineDecay'
+    max_epoch: 120
+    warmup_epochs: 10
+    warmup_start_lr: 0.005
+    cosine_base_lr: 0.01
+  weight_decay:
+    name: 'L2'
+    value: 1e-4
+  use_nesterov: True
+
+MIX:
+    name: "Mixup"
+    alpha: 0.2
+
+
+METRIC:
+    name: 'CenterCropMetric'
+
+INFERENCE:
+    name: 'ppTSM_Inference_helper'
+    num_seg: 8
+    target_size: 224
+
+Infer:
+    transforms:
+        - ReadVideo:
+            num_seg: 8
+            sample_type: 'uniform'
+        - Scale:
+            short_size: 256
+        - CenterCrop:
+            target_size: 224
+        - Image2Array:
+            data_format: 'tchw'
+        - NormalizeVideo:
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225] 
+    PostProcess:
+        name: Topk
+        topk: 1
+        class_id_map_file: data/k400/Kinetics-400_label_list.txt     
+
+model_name: "ppTSMv2"
+log_interval: 10 #Optional, the interval of logger, default: 10
+epochs: 120  #Mandatory, total epochs
+log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 19 - 0
paddlex/repo_apis/PaddleVideo_api/video_cls/__init__.py

@@ -0,0 +1,19 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .model import VideoClsModel
+from .runner import VideoClsRunner
+from .config import VideoClsConfig
+from . import register

+ 547 - 0
paddlex/repo_apis/PaddleVideo_api/video_cls/config.py

@@ -0,0 +1,547 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import yaml
+from typing import Union
+
+from ...base import BaseConfig
+from ....utils.misc import abspath
+from ..config_utils import merge_config
+
+
+class VideoClsConfig(BaseConfig):
+    """Image Classification Task Config"""
+
+    def update(self, dict_like_obj: dict):
+        """update self
+
+        Args:
+            dict_like_obj (dict): dict of pairs like {"key0.key1.key2": value}
+        """
+        dict_ = merge_config(self.dict, dict_like_obj)
+        self.reset_from_dict(dict_)
+
+    def load(self, config_file_path: str):
+        """load config from yaml file
+
+        Args:
+            config_file_path (str): the path of yaml file.
+
+        Raises:
+            TypeError: the content of yaml file `config_file_path` error.
+        """
+        dict_ = yaml.load(open(config_file_path, "rb"), Loader=yaml.Loader)
+        if not isinstance(dict_, dict):
+            raise TypeError
+        self.reset_from_dict(dict_)
+
+    def dump(self, config_file_path: str):
+        """dump self to yaml file
+
+        Args:
+            config_file_path (str): the path to save self as yaml file.
+        """
+        with open(config_file_path, "w", encoding="utf-8") as f:
+            yaml.dump(self.dict, f, default_flow_style=False, sort_keys=False)
+
+    def update_dataset(
+        self,
+        dataset_path: str,
+        dataset_type: str = None,
+        *,
+        train_list_path: str = None,
+    ):
+        """update dataset settings
+
+        Args:
+            dataset_path (str): the root path of dataset.
+            dataset_type (str, optional): dataset type. Defaults to None.
+            train_list_path (str, optional): the path of the train dataset annotation file. Defaults to None.
+
+        Raises:
+            ValueError: the dataset_type error.
+        """
+        dataset_path = abspath(dataset_path)
+        if dataset_type is None:
+            dataset_type = "VideoClsDataset"
+        if train_list_path:
+            train_list_path = f"{train_list_path}"
+        else:
+            train_list_path = f"{dataset_path}/train.txt"
+
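+        # The dataset directory is expected to provide train.txt, val.txt and label.txt index files.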
+        if dataset_type in ["VideoClsDataset"]:
+            _cfg = {
+                "DATASET.train.format": "VideoDataset",
+                "DATASET.train.data_prefix": dataset_path,
+                "DATASET.train.file_path": train_list_path,
+                "DATASET.valid.format": "VideoDataset",
+                "DATASET.valid.data_prefix": dataset_path,
+                "DATASET.valid.file_path": os.path.join(dataset_path, "val.txt"),
+                "DATASET.test.format": "VideoDataset",
+                "DATASET.test.data_prefix": dataset_path,
+                "DATASET.test.file_path": os.path.join(dataset_path, "val.txt"),
+                "Infer.PostProcess.class_id_map_file": os.path.join(
+                    dataset_path, "label.txt"
+                ),
+            }
+        else:
+            raise ValueError(f"{repr(dataset_type)} is not supported.")
+        self.update(_cfg)
+
+    def update_batch_size(self, batch_size: int, mode: str = "train"):
+        """update batch size setting
+
+        Args:
+            batch_size (int): the batch size number to set.
+            mode (str, optional): the mode that to be set batch size, must be one of 'train', 'eval', 'test'.
+                Defaults to 'train'.
+
+        Raises:
+            ValueError: `mode` error.
+        """
+
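+        # Training uses DATASET.batch_size; evaluation and test both map to DATASET.test_batch_size.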
+        if mode == "train":
+            _cfg = {"DATASET.batch_size": batch_size}
+        elif mode == "eval":
+            _cfg = {"DATASET.test_batch_size": batch_size}
+        elif mode == "test":
+            _cfg = {"DATASET.test_batch_size": batch_size}
+        else:
+            raise ValueError("The input `mode` should be train, eval or test.")
+        self.update(_cfg)
+
+    def update_learning_rate(self, learning_rate: float):
+        """update learning rate
+
+        Args:
+            learning_rate (float): the learning rate value to set.
+        """
+        if (
+            self._dict["OPTIMIZER"]["learning_rate"].get("cosine_base_lr", None)
+            is not None
+        ):
+            _cfg = {"OPTIMIZER.learning_rate.cosine_base_lr": learning_rate}
+        else:
+            raise ValueError("unsupported lr format")
+        self.update(_cfg)
+
+    def update_warmup_epochs(self, warmup_epochs: int):
+        """update warmup epochs
+
+        Args:
+            warmup_epochs (int): the warmup epochs value to set.
+        """
+        _cfg = {"OPTIMIZER.learning_rate.warmup_epochs": warmup_epochs}
+        self.update(_cfg)
+
+    def update_pretrained_weights(self, pretrained_model: str):
+        """update pretrained weight path
+
+        Args:
+            pretrained_model (str): the local path or url of pretrained weight file to set.
+        """
+        assert isinstance(
+            pretrained_model, (str, type(None))
+        ), "The 'pretrained_model' should be a string, indicating the path to the '*.pdparams' file, or 'None', \
+indicating that no pretrained model is to be used."
+
+        if pretrained_model is None:
+            self.update({"Global.pretrained_model": None})
+        else:
+            if pretrained_model.lower() == "default":
+                self.update({"Global.pretrained_model": None})
+            else:
+                if not pretrained_model.startswith(("http://", "https://")):
+                    pretrained_model = abspath(pretrained_model)
+                self.update({"Global.pretrained_model": pretrained_model})
+
+    def update_num_classes(self, num_classes: int):
+        """update classes number
+
+        Args:
+            num_classes (int): the classes number value to set.
+        """
+        if self._dict["model_name"] == "ppTSMv2":
+            update_str_list = {"MODEL.backbone.class_num": num_classes}
+            self.update(update_str_list)
+        else:
+            update_str_list = {"MODEL.head.num_classes": num_classes}
+            self.update(update_str_list)
+
+    def _update_slim_config(self, slim_config_path: str):
+        """update slim settings
+
+        Args:
+            slim_config_path (str): the path to slim config yaml file.
+        """
+        slim_config = yaml.load(open(slim_config_path, "rb"), Loader=yaml.Loader)[
+            "Slim"
+        ]
+        self.update({"Slim": slim_config})
+
+    def _update_amp(self, amp: Union[None, str]):
+        """update AMP settings
+
+        Args:
+            amp (None | str): the AMP settings.
+
+        Raises:
+            ValueError: AMP setting `amp` error, missing field `AMP`.
+        """
+        if amp is None or amp == "OFF":
+            if "AMP" in self.dict:
+                self._dict.pop("AMP")
+        else:
+            if "AMP" not in self.dict:
+                raise ValueError("Config must have AMP information.")
+            _cfg = {"AMP.use_amp": True, "AMP.level": amp}
+            self.update(_cfg)
+
+    def update_num_workers(self, num_workers: int):
+        """update workers number of train and eval dataloader
+
+        Args:
+            num_workers (int): the value of train and eval dataloader workers number to set.
+        """
+        _cfg = {
+            "DATASET.num_workers": num_workers,
+        }
+        self.update(_cfg)
+
+    def update_shared_memory(self, shared_memory: bool):
+        """update shared memory setting of train and eval dataloader
+
+        Args:
+            shared_memory (bool): whether or not to use shared memory
+        """
+        assert isinstance(shared_memory, bool), "shared_memory should be a bool"
+        _cfg = [
+            f"DataLoader.Train.loader.use_shared_memory={shared_memory}",
+            f"DataLoader.Eval.loader.use_shared_memory={shared_memory}",
+        ]
+        self.update(_cfg)
+
+    def update_shuffle(self, shuffle: bool):
+        """update shuffle setting of train and eval dataloader
+
+        Args:
+            shuffle (bool): whether or not to shuffle the data
+        """
+        assert isinstance(shuffle, bool), "shuffle should be a bool"
+        _cfg = [
+            f"DataLoader.Train.loader.shuffle={shuffle}",
+            f"DataLoader.Eval.loader.shuffle={shuffle}",
+        ]
+        self.update(_cfg)
+
+    def update_dali(self, dali: bool):
+        """enable DALI setting of train and eval dataloader
+
+        Args:
+            dali (bool): whether or not to use DALI
+        """
+        assert isinstance(dali, bool), "dali should be a bool"
+        _cfg = [
+            f"Global.use_dali={dali}",
+            f"Global.use_dali={dali}",
+        ]
+        self.update(_cfg)
+
+    def update_seed(self, seed: int):
+        """update seed
+
+        Args:
+            seed (int): the random seed value to set
+        """
+        _cfg = {"Global.seed": seed}
+        self.update(_cfg)
+
+    def update_device(self, device: str):
+        """update device setting
+
+        Args:
+            device (str): the running device to set
+        """
+        device = device.split(":")[0]
+        _cfg = {"Global.device": device}
+        self.update(_cfg)
+
+    def update_label_dict_path(self, dict_path: str):
+        """update label dict file path
+
+        Args:
+            dict_path (str): the path of label dict file to set
+        """
+        _cfg = {
+            "Infer.PostProcess.class_id_map_file": abspath(dict_path),
+        }
+        self.update(_cfg)
+
+    def _update_to_static(self, dy2st: bool):
+        """update config to set dynamic to static mode
+
+        Args:
+            dy2st (bool): whether or not to use the dynamic to static mode.
+        """
+        self.update({"Global.to_static": dy2st})
+
+    def _update_use_vdl(self, use_vdl: bool):
+        """update config to set VisualDL
+
+        Args:
+            use_vdl (bool): whether or not to use VisualDL.
+        """
+        self.update({"Global.use_visuald": use_vdl})
+
+    def _update_epochs(self, epochs: int):
+        """update epochs setting
+
+        Args:
+            epochs (int): the epochs number value to set
+        """
+        self.update({"epochs": epochs})
+
+    def _update_checkpoints(self, resume_path: Union[None, str]):
+        """update checkpoint setting
+
+        Args:
+            resume_path (None | str): the checkpoint path to resume training from. If `None`, train from scratch;
+                otherwise resume from the given `.pdparams` checkpoint file.
+        """
+        if resume_path is not None:
+            resume_path = resume_path.replace(".pdparams", "")
+        self.update({"Global.checkpoints": resume_path})
+
+    def _update_output_dir(self, save_dir: str):
+        """update output directory
+
+        Args:
+            save_dir (str): the path to save outputs.
+        """
+        self.update({"output_dir": abspath(save_dir)})
+
+    def update_log_interval(self, log_interval: int):
+        """update log interval(steps)
+
+        Args:
+            log_interval (int): the log interval value to set.
+        """
+        self.update({"log_interval": log_interval})
+
+    def update_eval_interval(self, eval_interval: int):
+        """update eval interval(epochs)
+
+        Args:
+            eval_interval (int): the eval interval value to set.
+        """
+        self.update({"val_interval": eval_interval})
+
+    def update_save_interval(self, save_interval: int):
+        """update eval interval(epochs)
+
+        Args:
+            save_interval (int): the save interval value to set.
+        """
+        self.update({"save_interval": save_interval})
+
+    def update_log_ranks(self, device):
+        """update log ranks
+
+        Args:
+            device (str): the running device to set
+        """
+        log_ranks = device.split(":")[1]
+        self.update({"Global.log_ranks": log_ranks})
+
+    def update_print_mem_info(self, print_mem_info: bool):
+        """setting print memory info"""
+        assert isinstance(print_mem_info, bool), "print_mem_info should be a bool"
+        self.update({"Global.print_mem_info": print_mem_info})
+
+    def _update_predict_video(self, infer_video: str, infer_list: str = None):
+        """update video to be predicted
+
+        Args:
+            infer_video (str): the path of the video to be predicted.
+            infer_list (str, optional): the path of a file listing videos to be predicted. Defaults to None.
+        """
+        if infer_list:
+            self.update({"Infer.infer_list": infer_list})
+        self.update({"Infer.infer_videos": infer_video})
+
+    def _update_save_inference_dir(self, save_inference_dir: str):
+        """update directory path to save inference model files
+
+        Args:
+            save_inference_dir (str): the directory path to set.
+        """
+        self.update({"Global.save_inference_dir": abspath(save_inference_dir)})
+
+    def _update_inference_model_dir(self, model_dir: str):
+        """update inference model directory
+
+        Args:
+            model_dir (str): the directory path of inference model files that are used to predict.
+        """
+        self.update({"Global.inference_model_dir": abspath(model_dir)})
+
+    def _update_infer_video(self, infer_video: str):
+        """update path of image that would be predict
+
+        Args:
+            infer_video (str): the image path.
+        """
+        self.update({"Global.infer_videos": infer_video})
+
+    def _update_infer_device(self, device: str):
+        """update the device used in predicting
+
+        Args:
+            device (str): the running device setting
+        """
+        self.update({"Global.use_gpu": device.split(":")[0] == "gpu"})
+
+    def _update_enable_mkldnn(self, enable_mkldnn: bool):
+        """update whether to enable MKLDNN
+
+        Args:
+            enable_mkldnn (bool): `True` is enable, otherwise is disable.
+        """
+        self.update({"Global.enable_mkldnn": enable_mkldnn})
+
+    def _update_infer_video_shape(self, img_shape: str):
+        """update image cropping shape in the preprocessing
+
+        Args:
+            img_shape (str): the shape of cropping in the preprocessing,
+                i.e. `PreProcess.transform_ops.1.CropImage.size`.
+        """
+        self.update({"INFERENCE.target_size": img_shape})
+
+    def _update_save_predict_result(self, save_dir: str):
+        """update directory that save predicting output
+
+        Args:
+            save_dir (str): the directory path that saves the predicting output.
+        """
+        self.update({"Infer.save_dir": save_dir})
+
+    def get_epochs_iters(self) -> int:
+        """get epochs
+
+        Returns:
+            int: the epochs value, i.e., `Global.epochs` in config.
+        """
+        return self.dict["Global"]["epochs"]
+
+    def get_log_interval(self) -> int:
+        """get log interval(steps)
+
+        Returns:
+            int: the log interval value, i.e., `Global.print_batch_step` in config.
+        """
+        return self.dict["Global"]["print_batch_step"]
+
+    def get_eval_interval(self) -> int:
+        """get eval interval(epochs)
+
+        Returns:
+            int: the eval interval value, i.e., `Global.eval_interval` in config.
+        """
+        return self.dict["Global"]["eval_interval"]
+
+    def get_save_interval(self) -> int:
+        """get save interval(epochs)
+
+        Returns:
+            int: the save interval value, i.e., `Global.save_interval` in config.
+        """
+        return self.dict["Global"]["save_interval"]
+
+    def get_learning_rate(self) -> float:
+        """get learning rate
+
+        Returns:
+            float: the learning rate value, i.e., `Optimizer.lr.learning_rate` in config.
+        """
+        return self.dict["Optimizer"]["lr"]["learning_rate"]
+
+    def get_warmup_epochs(self) -> int:
+        """get warmup epochs
+
+        Returns:
+            int: the warmup epochs value, i.e., `Optimizer.lr.warmup_epochs` in config.
+        """
+        return self.dict["Optimizer"]["lr"]["warmup_epoch"]
+
+    def get_label_dict_path(self) -> str:
+        """get label dict file path
+
+        Returns:
+            str: the label dict file path, i.e., `Infer.PostProcess.class_id_map_file` in config.
+        """
+        return self.dict["Infer"]["PostProcess"]["class_id_map_file"]
+
+    def get_batch_size(self, mode="train") -> int:
+        """get batch size
+
+        Args:
+            mode (str, optional): the mode that to be get batch size value, must be one of 'train', 'eval', 'test'.
+                Defaults to 'train'.
+
+        Returns:
+            int: the batch size value of `mode`, i.e., `DataLoader.{mode}.sampler.batch_size` in config.
+        """
+        return self.dict["DataLoader"]["Train"]["sampler"]["batch_size"]
+
+    def get_qat_epochs_iters(self) -> int:
+        """get qat epochs
+
+        Returns:
+            int: the epochs value.
+        """
+        return self.get_epochs_iters()
+
+    def get_qat_learning_rate(self) -> float:
+        """get qat learning rate
+
+        Returns:
+            float: the learning rate value.
+        """
+        return self.get_learning_rate()
+
+    def _get_arch_name(self) -> str:
+        """get architecture name of model
+
+        Returns:
+            str: the model arch name, i.e., `Arch.name` in config.
+        """
+        return self.dict["Arch"]["name"]
+
+    def _get_dataset_root(self) -> str:
+        """get root directory of dataset, i.e. `DataLoader.Train.dataset.video_root`
+
+        Returns:
+            str: the root directory of dataset
+        """
+        return self.dict["DataLoader"]["Train"]["dataset"]["video_root"]
+
+    def get_train_save_dir(self) -> str:
+        """get the directory to save output
+
+        Returns:
+            str: the directory to save output
+        """
+        return self["output_dir"]

+ 346 - 0
paddlex/repo_apis/PaddleVideo_api/video_cls/model.py

@@ -0,0 +1,346 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from ...base import BaseModel
+from ...base.utils.arg import CLIArgument
+from ...base.utils.subprocess import CompletedProcess
+from ....utils.misc import abspath
+from ....utils import logging
+
+
+class VideoClsModel(BaseModel):
+    """Video Classification Model"""
+
+    def train(
+        self,
+        batch_size: int = None,
+        learning_rate: float = None,
+        epochs_iters: int = None,
+        ips: str = None,
+        device: str = "gpu",
+        resume_path: str = None,
+        dy2st: bool = False,
+        amp: str = "OFF",
+        num_workers: int = None,
+        use_vdl: bool = True,
+        save_dir: str = None,
+        **kwargs,
+    ) -> CompletedProcess:
+        """train self
+
+        Args:
+            batch_size (int, optional): the train batch size value. Defaults to None.
+            learning_rate (float, optional): the train learning rate value. Defaults to None.
+            epochs_iters (int, optional): the train epochs value. Defaults to None.
+            ips (str, optional): the ip addresses of nodes when using distribution. Defaults to None.
+            device (str, optional): the running device. Defaults to 'gpu'.
+            resume_path (str, optional): the checkpoint file path to resume training. Train from scratch if it is set
+                to None. Defaults to None.
+            dy2st (bool, optional): Enable dynamic to static. Defaults to False.
+            amp (str, optional): the amp settings. Defaults to 'OFF'.
+            num_workers (int, optional): the workers number. Defaults to None.
+            use_vdl (bool, optional): enable VisualDL. Defaults to True.
+            save_dir (str, optional): the directory path to save train output. Defaults to None.
+
+        Returns:
+           CompletedProcess: the result of training subprocess execution.
+        """
+        if resume_path is not None:
+            resume_path = abspath(resume_path)
+
+        with self._create_new_config_file() as config_path:
+            # Update YAML config file
+            config = self.config.copy()
+            config.update_device(device)
+            config._update_to_static(dy2st)
+            config._update_use_vdl(use_vdl)
+
+            if batch_size is not None:
+                config.update_batch_size(batch_size)
+            if learning_rate is not None:
+                config.update_learning_rate(learning_rate)
+            if epochs_iters is not None:
+                config._update_epochs(epochs_iters)
+            config._update_checkpoints(resume_path)
+            if save_dir is not None:
+                save_dir = abspath(save_dir)
+            else:
+                # `save_dir` is None
+                save_dir = abspath(config.get_train_save_dir())
+            config._update_output_dir(save_dir)
+            if num_workers is not None:
+                config.update_num_workers(num_workers)
+
+            cli_args = []
+            do_eval = kwargs.pop("do_eval", True)
+            profile = kwargs.pop("profile", None)
+            if profile is not None:
+                cli_args.append(CLIArgument("--profiler_options", profile))
+
+            # Benchmarking mode settings
+            benchmark = kwargs.pop("benchmark", None)
+            if benchmark is not None:
+                envs = benchmark.get("env", None)
+                seed = benchmark.get("seed", None)
+                do_eval = benchmark.get("do_eval", False)
+                num_workers = benchmark.get("num_workers", None)
+                config.update_log_ranks(device)
+                config._update_amp(benchmark.get("amp", None))
+                config.update_dali(benchmark.get("dali", False))
+                config.update_shuffle(benchmark.get("shuffle", False))
+                config.update_shared_memory(benchmark.get("shared_memory", True))
+                config.update_print_mem_info(benchmark.get("print_mem_info", True))
+                if num_workers is not None:
+                    config.update_num_workers(num_workers)
+                if seed is not None:
+                    config.update_seed(seed)
+                if envs is not None:
+                    for env_name, env_value in envs.items():
+                        os.environ[env_name] = str(env_value)
+            else:
+                config._update_amp(amp)
+            # PDX related settings
+            device_type = device.split(":")[0]
+            uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+            config.update({"Global.uniform_output_enabled": uniform_output_enabled})
+            config.update({"Global.pdx_model_name": self.name})
+
+            config.dump(config_path)
+            self._assert_empty_kwargs(kwargs)
+            return self.runner.train(
+                config_path, cli_args, device, ips, save_dir, do_eval=do_eval
+            )
+
+    def evaluate(
+        self,
+        weight_path: str,
+        batch_size: int = None,
+        ips: str = None,
+        device: str = "gpu",
+        amp: str = "OFF",
+        num_workers: int = None,
+        **kwargs,
+    ) -> CompletedProcess:
+        """evaluate self using specified weight
+
+        Args:
+            weight_path (str): the path of model weight file to be evaluated.
+            batch_size (int, optional): the batch size value in evaluating. Defaults to None.
+            ips (str, optional): the ip addresses of nodes when using distribution. Defaults to None.
+            device (str, optional): the running device. Defaults to 'gpu'.
+            amp (str, optional): the AMP setting. Defaults to 'OFF'.
+            num_workers (int, optional): the workers number in evaluating. Defaults to None.
+
+        Returns:
+            CompletedProcess: the result of evaluating subprocess execution.
+        """
+
+        with self._create_new_config_file() as config_path:
+            # Update YAML config file
+            config = self.config.copy()
+            config._update_amp(amp)
+            config.update_device(device)
+            config.update_pretrained_weights(weight_path)
+            if batch_size is not None:
+                config.update_batch_size(batch_size)
+            if num_workers is not None:
+                config.update_num_workers(num_workers)
+
+            config.dump(config_path)
+
+            self._assert_empty_kwargs(kwargs)
+
+            cp = self.runner.evaluate(config_path, [], device, ips)
+            return cp
+
+    def predict(
+        self,
+        weight_path: str,
+        input_path: str,
+        input_list_path: str = None,
+        device: str = "gpu",
+        save_dir: str = None,
+        **kwargs,
+    ) -> CompletedProcess:
+        """predict using specified weight
+
+        Args:
+            weight_path (str): the path of model weight file used to predict.
+            input_path (str): the path of the video file to be predicted.
+            input_list_path (str, optional): the path of a file listing videos to be predicted, if not None. Defaults to None.
+            device (str, optional): the running device. Defaults to 'gpu'.
+            save_dir (str, optional): the directory path to save predict output. Defaults to None.
+
+        Returns:
+            CompletedProcess: the result of predicting subprocess execution.
+        """
+        input_path = abspath(input_path)
+        if input_list_path:
+            input_list_path = abspath(input_list_path)
+
+        with self._create_new_config_file() as config_path:
+            # Update YAML config file
+            config = self.config.copy()
+            config.update_pretrained_weights(weight_path)
+            config._update_predict_video(input_path, input_list_path)
+            config.update_device(device)
+            config._update_save_predict_result(save_dir)
+
+            config.dump(config_path)
+
+            self._assert_empty_kwargs(kwargs)
+
+            return self.runner.predict(config_path, [], device)
+
+    def export(self, weight_path: str, save_dir: str, **kwargs) -> CompletedProcess:
+        """export the dynamic model to static model
+
+        Args:
+            weight_path (str): the model weight file path that used to export.
+            save_dir (str): the directory path to save export output.
+
+        Returns:
+            CompletedProcess: the result of exporting subprocess execution.
+        """
+        if not weight_path.startswith(("http://", "https://")):
+            weight_path = abspath(weight_path)
+        save_dir = abspath(save_dir)
+
+        with self._create_new_config_file() as config_path:
+            # Update YAML config file
+            config = self.config.copy()
+            config.update_pretrained_weights(weight_path)
+            config._update_save_inference_dir(save_dir)
+            device = kwargs.pop("device", None)
+            if device:
+                config.update_device(device)
+            # PDX related settings
+            uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+            config.update({"Global.uniform_output_enabled": uniform_output_enabled})
+            config.update({"Global.pdx_model_name": self.name})
+
+            config.dump(config_path)
+
+            self._assert_empty_kwargs(kwargs)
+
+            return self.runner.export(config_path, [], None, save_dir)
+
+    def infer(
+        self,
+        model_dir: str,
+        input_path: str,
+        device: str = "gpu",
+        save_dir: str = None,
+        dict_path: str = None,
+        **kwargs,
+    ) -> CompletedProcess:
+        """predict image using infernece model
+
+        Args:
+            model_dir (str): the directory path of inference model files used to predict.
+            input_path (str): the path of the video to be predicted.
+            device (str, optional): the running device. Defaults to 'gpu'.
+            save_dir (str, optional): the directory path to save output. Defaults to None.
+            dict_path (str, optional): the label dict file path. Defaults to None.
+
+        Returns:
+            CompletedProcess: the result of the inference subprocess execution.
+        """
+        model_dir = abspath(model_dir)
+        input_path = abspath(input_path)
+        if save_dir is not None:
+            logging.warning("`save_dir` will not be used.")
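+        # The exported model directory carries its own inference.yml; load it in place of the training config.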
+        config_path = os.path.join(model_dir, "inference.yml")
+        config = self.config.copy()
+        config.load(config_path)
+        config._update_inference_model_dir(model_dir)
+        config._update_infer_video(input_path)
+        config._update_infer_device(device)
+        if dict_path is not None:
+            dict_path = abspath(dict_path)
+            config.update_label_dict_path(dict_path)
+        if "enable_mkldnn" in kwargs:
+            config._update_enable_mkldnn(kwargs.pop("enable_mkldnn"))
+
+        with self._create_new_config_file() as config_path:
+            config.dump(config_path)
+
+            self._assert_empty_kwargs(kwargs)
+
+            return self.runner.infer(config_path, [], device)
+
+    def compression(
+        self,
+        weight_path: str,
+        batch_size: int = None,
+        learning_rate: float = None,
+        epochs_iters: int = None,
+        device: str = "gpu",
+        use_vdl: bool = True,
+        save_dir: str = None,
+        **kwargs,
+    ) -> CompletedProcess:
+        """compression model
+
+        Args:
+            weight_path (str): the path to weight file of model.
+            batch_size (int, optional): the batch size value of compression training. Defaults to None.
+            learning_rate (float, optional): the learning rate value of compression training. Defaults to None.
+            epochs_iters (int, optional): the epochs or iters of compression training. Defaults to None.
+            device (str, optional): the device to run compression training. Defaults to 'gpu'.
+            use_vdl (bool, optional): whether or not to use VisualDL. Defaults to True.
+            save_dir (str, optional): the directory to save output. Defaults to None.
+
+        Returns:
+            CompletedProcess: the result of compression subprocess execution.
+        """
+
+        with self._create_new_config_file() as config_path:
+            # Update YAML config file
+            config = self.config.copy()
+            config._update_amp(None)
+            config.update_device(device)
+            config._update_use_vdl(use_vdl)
+            config._update_slim_config(self.model_info["auto_compression_config_path"])
+            config.update_pretrained_weights(weight_path)
+
+            if batch_size is not None:
+                config.update_batch_size(batch_size)
+            if learning_rate is not None:
+                config.update_learning_rate(learning_rate)
+            if epochs_iters is not None:
+                config._update_epochs(epochs_iters)
+            if save_dir is not None:
+                save_dir = abspath(save_dir)
+            else:
+                # `save_dir` is None
+                save_dir = abspath(config.get_train_save_dir())
+            config._update_output_dir(save_dir)
+            config.dump(config_path)
+
+            export_cli_args = []
+            export_cli_args.append(
+                CLIArgument(
+                    "-o",
+                    f"Global.save_inference_dir={os.path.join(save_dir, 'export')}",
+                )
+            )
+
+            self._assert_empty_kwargs(kwargs)
+
+            return self.runner.compression(
+                config_path, [], export_cli_args, device, save_dir
+            )

+ 71 - 0
paddlex/repo_apis/PaddleVideo_api/video_cls/register.py

@@ -0,0 +1,71 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path as osp
+from pathlib import Path
+
+from ...base.register import register_model_info, register_suite_info
+from .model import VideoClsModel
+from .runner import VideoClsRunner
+from .config import VideoClsConfig
+
+REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEVIDEO_PATH")
+PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+
+register_suite_info(
+    {
+        "suite_name": "VideoCls",
+        "model": VideoClsModel,
+        "runner": VideoClsRunner,
+        "config": VideoClsConfig,
+        "runner_root_path": REPO_ROOT_PATH,
+    }
+)
+
+################ Models Using Universal Config ################
+register_model_info(
+    {
+        "model_name": "PP-TSM-R50_8frames_uniform",
+        "suite": "VideoCls",
+        "config_path": osp.join(PDX_CONFIG_DIR, "PP-TSM-R50_8frames_uniform.yaml"),
+        "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "infer_config": "deploy/configs/inference_cls.yaml",
+    }
+)
+
+register_model_info(
+    {
+        "model_name": "PP-TSMv2-LCNetV2_8frames_uniform",
+        "suite": "VideoCls",
+        "config_path": osp.join(
+            PDX_CONFIG_DIR, "PP-TSMv2-LCNetV2_8frames_uniform.yaml"
+        ),
+        "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "infer_config": "deploy/configs/inference_cls.yaml",
+    }
+)
+
+
+register_model_info(
+    {
+        "model_name": "PP-TSMv2-LCNetV2_16frames_uniform",
+        "suite": "VideoCls",
+        "config_path": osp.join(
+            PDX_CONFIG_DIR, "PP-TSMv2-LCNetV2_16frames_uniform.yaml"
+        ),
+        "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "infer_config": "deploy/configs/inference_cls.yaml",
+    }
+)
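
The three entries above share the same shape; a further PP-TSM variant would be registered the same way. The model name and YAML file in the sketch below are hypothetical and only illustrate the pattern.

    register_model_info(
        {
            "model_name": "PP-TSM-R101_8frames_uniform",  # hypothetical variant
            "suite": "VideoCls",
            "config_path": osp.join(PDX_CONFIG_DIR, "PP-TSM-R101_8frames_uniform.yaml"),
            "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
            "infer_config": "deploy/configs/inference_cls.yaml",
        }
    )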

+ 205 - 0
paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py

@@ -0,0 +1,205 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+
+from ...base import BaseRunner
+from ...base.utils.subprocess import CompletedProcess
+
+
+class VideoClsRunner(BaseRunner):
+    """Cls Runner"""
+
+    def train(
+        self,
+        config_path: str,
+        cli_args: list,
+        device: str,
+        ips: str,
+        save_dir: str,
+        do_eval=True,
+    ) -> CompletedProcess:
+        """train model
+
+        Args:
+            config_path (str): the config file path used to train.
+            cli_args (list): the additional parameters.
+            device (str): the training device.
+            ips (str): the ip addresses of nodes when using distribution.
+            save_dir (str): the directory path to save training output.
+            do_eval (bool, optional): whether or not to evaluate model during training. Defaults to True.
+
+        Returns:
+            CompletedProcess: the result of training subprocess execution.
+        """
+        args, env = self.distributed(device, ips, log_dir=save_dir)
+        cmd = [*args, "main.py", "--validate", "-c", config_path, *cli_args]
+        cmd.extend(["-o", f"Global.eval_during_train={do_eval}"])
+        return self.run_cmd(
+            cmd,
+            env=env,
+            switch_wdir=True,
+            echo=True,
+            silent=False,
+            capture_output=True,
+            log_path=self._get_train_log_path(save_dir),
+        )
+
+    def evaluate(
+        self, config_path: str, cli_args: list, device: str, ips: str
+    ) -> CompletedProcess:
+        """run model evaluating
+
+        Args:
+            config_path (str): the config file path used to evaluate.
+            cli_args (list): the additional parameters.
+            device (str): the evaluating device.
+            ips (str): the ip addresses of nodes when using distribution.
+
+        Returns:
+            CompletedProcess: the result of evaluating subprocess execution.
+        """
+        args, env = self.distributed(device, ips)
+        cmd = [*args, "main.py", "--test", "-c", config_path, *cli_args]
+        cp = self.run_cmd(
+            cmd, env=env, switch_wdir=True, echo=True, silent=False, capture_output=True
+        )
+
+        if cp.returncode == 0:
+            metric_dict = _extract_eval_metrics(cp.stdout)
+            cp.metrics = metric_dict
+        return cp
+
+    def predict(
+        self, config_path: str, cli_args: list, device: str
+    ) -> CompletedProcess:
+        """run predicting using dynamic mode
+
+        Args:
+            config_path (str): the config file path used to predict.
+            cli_args (list): the additional parameters.
+            device (str): unused.
+
+        Returns:
+            CompletedProcess: the result of predicting subprocess execution.
+        """
+        # `device` unused
+        cmd = [self.python, "tools/infer.py", "-c", config_path, *cli_args]
+        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
+
+    def export(
+        self, config_path: str, cli_args: list, device: str, save_dir: str = None
+    ) -> CompletedProcess:
+        """run exporting
+
+        Args:
+            config_path (str): the path of config file used to export.
+            cli_args (list): the additional parameters.
+            device (str): unused.
+            save_dir (str, optional): the directory path to save exporting output. Defaults to None.
+
+        Returns:
+            CompletedProcess: the result of exporting subprocess execution.
+        """
+        # `device` unused
+
+        cmd = [
+            self.python,
+            "tools/export_model.py",
+            "-c",
+            config_path,
+            *cli_args,
+            "-o",
+            save_dir,
+        ]
+
+        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
+        return cp
+
+    def infer(self, config_path: str, cli_args: list, device: str) -> CompletedProcess:
+        """run predicting using inference model
+
+        Args:
+            config_path (str): the path of config file used to predict.
+            cli_args (list): the additional parameters.
+            device (str): unused.
+
+        Returns:
+            CompletedProcess: the result of inferring subprocess execution.
+        """
+        # `device` unused
+        cmd = [self.python, "python/predict_cls.py", "-c", config_path, *cli_args]
+        return self.run_cmd(cmd, switch_wdir="deploy", echo=True, silent=False)
+
+    def compression(
+        self,
+        config_path: str,
+        train_cli_args: list,
+        export_cli_args: list,
+        device: str,
+        train_save_dir: str,
+    ) -> tuple:
+        """run compression model
+
+        Args:
+            config_path (str): the path of the config file used for compression.
+            train_cli_args (list): the additional training parameters.
+            export_cli_args (list): the additional exporting parameters.
+            device (str): the running device.
+            train_save_dir (str): the directory path to save output.
+
+        Returns:
+            tuple: the results of the compression training and export subprocess executions.
+        """
+        # Step 1: Train model
+        cp_train = self.train(config_path, train_cli_args, device, None, train_save_dir)
+
+        # Step 2: Export model
+        weight_path = os.path.join(train_save_dir, "best_model", "model")
+        export_cli_args = [
+            *export_cli_args,
+            "-o",
+            f"Global.pretrained_model={weight_path}",
+        ]
+        cp_export = self.export(config_path, export_cli_args, device)
+
+        return cp_train, cp_export
+
+
+def _extract_eval_metrics(stdout: str) -> dict:
+    """extract evaluation metrics from training log
+
+    Args:
+        stdout (str): the training log
+
+    Returns:
+        dict: the training metric
+    """
+    import re
+
+    patterns = [r"avg_acc1=([\d.]+), avg_acc5=([\d.]+)"]
+    keys = [
+        ["val.top1", "val.top5"],
+    ]
+
+    metric_dict = dict()
+    for pattern, key in zip(patterns, keys):
+        pattern = re.compile(pattern)
+        for line in stdout.splitlines():
+            match = pattern.search(line)
+            if match:
+                for k, v in zip(key, map(float, match.groups())):
+                    metric_dict[k] = v
+    return metric_dict
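
As a self-contained illustration of the parsing above, applying the same regex to a made-up evaluation log line yields the val.top1/val.top5 entries. The log text here is illustrative only, not an actual PaddleVideo log.

    import re

    stdout = "[EVAL] epoch done, avg_acc1=0.8512, avg_acc5=0.9701"  # illustrative line

    pattern = re.compile(r"avg_acc1=([\d.]+), avg_acc5=([\d.]+)")
    metrics = {}
    for line in stdout.splitlines():
        match = pattern.search(line)
        if match:
            metrics["val.top1"], metrics["val.top5"] = map(float, match.groups())

    print(metrics)  # {'val.top1': 0.8512, 'val.top5': 0.9701}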

+ 0 - 1
paddlex/repo_apis/base/runner.py

@@ -48,7 +48,6 @@ class BaseRunner(metaclass=abc.ABCMeta):
             runner_root_path (str): Path of the directory where the scripts reside.
         """
         super().__init__()
-
         self.runner_root_path = abspath(runner_root_path)
         # Path to python interpreter
         self.python = sys.executable

+ 14 - 0
paddlex/repo_manager/meta.py

@@ -25,6 +25,7 @@ REPO_NAMES = [
     "PaddleSeg",
     "PaddleNLP",
     "PaddleTS",
+    "PaddleVideo",
 ]
 
 REPO_META = {
@@ -130,6 +131,19 @@ REPO_META = {
         "path_env": "PADDLE_PDX_PADDLEMIX_PATH",
         "requires": ["PaddleNLP"],
     },
+    "PaddleVideo": {
+        "git_path": "/PaddlePaddle/PaddleVideo.git",
+        "platform": "github",
+        "branch": "develop",
+        "pkg_name": "paddlevideo",
+        "lib_name": "ppvideo",
+        "pdx_pkg_name": "PaddleVideo_api",
+        "editable": False,
+        "extra_req_files": [
+            "requirements_paddlex.txt",
+        ],
+        "path_env": "PADDLE_PDX_PADDLEVIDEO_PATH",
+    },
 }
 
 

+ 20 - 5
paddlex/repo_manager/repo.py

@@ -115,13 +115,19 @@ class PPRepository(object):
         """install_deps"""
         return RepositoryGroupInstaller([self]).install_deps(*args, **kwargs)
 
-    def install_package(self, no_deps=False, clean=True):
+    def install_package(self, no_deps=False, clean=True, install_extra_only=False):
         """install_package"""
         editable = self.meta.get("editable", True)
         extra_editable = self.meta.get("extra_editable", None)
         if editable:
             logging.warning(f"{self.pkg_name} will be installed in editable mode.")
         with switch_working_dir(self.root_dir):
+            if install_extra_only:
+                src_requirements = os.path.join(self.root_dir, "requirements.txt")
+                paddlex_requirements = os.path.join(
+                    self.root_dir, "requirements_paddlex.txt"
+                )
+                shutil.copy(paddlex_requirements, src_requirements)
             try:
                 install_packages_using_pip(["."], editable=editable, no_deps=no_deps)
                 install_external_deps(self.name, self.root_dir)
@@ -211,10 +217,13 @@ class PPRepository(object):
         """get_pdx"""
         return importlib.import_module(self.pdx_mod_name)
 
-    def get_deps(self):
+    def get_deps(self, install_extra_only=False):
         """get_deps"""
         # Merge requirement files
-        req_list = [self.main_req_file]
+        if install_extra_only:
+            req_list = []
+        else:
+            req_list = [self.main_req_file]
         req_list.extend(self.meta.get("extra_req_files", []))
         deps = []
         for req in req_list:
@@ -270,7 +279,10 @@ class RepositoryGroupInstaller(object):
         # failure of one repo package aborts the entire installation process.
         for ins_flag, repo in zip(ins_flags, repos):
             if ins_flag:
-                repo.install_package(no_deps=True)
+                if repo.name in ["PaddleVideo"]:
+                    repo.install_package(no_deps=True, install_extra_only=True)
+                else:
+                    repo.install_package(no_deps=True)
 
     def uninstall(self):
         """uninstall"""
@@ -286,7 +298,10 @@ class RepositoryGroupInstaller(object):
         deps_list = []
         repos = self._sort_repos(self.repos, check_missing=True)
         for repo in repos:
-            deps = repo.get_deps()
+            if repo.name in ["PaddleVideo"]:
+                deps = repo.get_deps(install_extra_only=True)
+            else:
+                deps = repo.get_deps()
             deps = self._normalize_deps(deps, headline=f"# {repo.name} dependencies")
             deps_list.append(deps)
         # Add an extra new line to separate dependencies of different repos.

+ 1 - 0
requirements.txt

@@ -36,3 +36,4 @@ erniebot-agent == 0.5.0
 unstructured
 networkx
 faiss-cpu
+decord==0.6.0
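
decord is pulled in as the video decoding backend for the new video classification module. A minimal sketch of reading frames with it follows; the file name is hypothetical, and the uniform 8-frame sampling only echoes the "8frames_uniform" naming of the PP-TSM configs above.

    from decord import VideoReader, cpu

    vr = VideoReader("example_video.mp4", ctx=cpu(0))  # hypothetical input file
    num_frames = len(vr)

    # Pick 8 roughly uniformly spaced frame indices.
    indices = [int(i * num_frames / 8) for i in range(8)]
    frames = vr.get_batch(indices).asnumpy()  # ndarray of shape (8, H, W, 3)
    print(frames.shape)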