
add STFPM (#2021)

* add STFPM

* add requirements

* add require

---------

Co-authored-by: cuicheng01 <45199522+cuicheng01@users.noreply.github.com>
yongsheng yuan 1 year ago
parent commit
1f352707c6
28 changed files with 1573 additions and 11 deletions
  1. README.md (+6 -2)
  2. docs/tutorials/models/support_model_list.md (+4 -0)
  3. paddlex/configs/anomaly_detection/STFPM.yaml (+40 -0)
  4. paddlex/modules/__init__.py (+7 -2)
  5. paddlex/modules/anomaly_detection/__init__.py (+19 -0)
  6. paddlex/modules/anomaly_detection/dataset_checker/__init__.py (+95 -0)
  7. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/__init__.py (+19 -0)
  8. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py (+73 -0)
  9. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/check_dataset.py (+84 -0)
  10. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/convert_dataset.py (+210 -0)
  11. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/split_dataset.py (+87 -0)
  12. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/__init__.py (+13 -0)
  13. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/visualizer.py (+71 -0)
  14. paddlex/modules/anomaly_detection/evaluator.py (+58 -0)
  15. paddlex/modules/anomaly_detection/exportor.py (+22 -0)
  16. paddlex/modules/anomaly_detection/model_list.py (+18 -0)
  17. paddlex/modules/anomaly_detection/predictor/__init__.py (+17 -0)
  18. paddlex/modules/anomaly_detection/predictor/keys.py (+32 -0)
  19. paddlex/modules/anomaly_detection/predictor/predictor.py (+106 -0)
  20. paddlex/modules/anomaly_detection/predictor/transforms.py (+194 -0)
  21. paddlex/modules/anomaly_detection/predictor/utils.py (+85 -0)
  22. paddlex/modules/anomaly_detection/trainer.py (+236 -0)
  23. paddlex/modules/base/predictor/predictor.py (+10 -5)
  24. paddlex/modules/base/predictor/utils/official_models.py (+1 -0)
  25. paddlex/repo_apis/PaddleSeg_api/configs/STFPM.yaml (+49 -0)
  26. paddlex/repo_apis/PaddleSeg_api/seg/model.py (+6 -2)
  27. paddlex/repo_apis/PaddleSeg_api/seg/register.py (+10 -0)
  28. requirements.txt (+1 -0)

+ 6 - 2
README.md

@@ -73,8 +73,8 @@ PaddleX 3.0 覆盖了 16 条产业级模型产线,其中 9 条基础产线可
     <summary><b>more</b></summary><br/>PP-YOLOE_plus-S<br/>PP-YOLOE_plus-M<br/>PP-YOLOE_plus-L<br/>PP-YOLOE_plus-X<br/>RT-DETR-L<br/>RT-DETR-H<br/>RT-DETR-X<br/>RT-DETR-R18<br/>RT-DETR-R50<br/>YOLOv3-DarkNet53<br/>YOLOv3-MobileNetV3<br/>YOLOv3-ResNet50_vd_DCN<br/>YOLOX-L<br/>YOLOX-M<br/>YOLOX-N<br/>YOLOX-S<br/>YOLOX-T<br/>YOLOX-X<br/>FasterRCNN-ResNet34-FPN<br/>FasterRCNN-ResNet50<br/>FasterRCNN-ResNet50-FPN<br/>FasterRCNN-ResNet50-vd-FPN<br/>FasterRCNN-ResNet50-vd-SSLDv2-FPN<br/>FasterRCNN-ResNet101<br/>FasterRCNN-ResNet101-FPN<br/>FasterRCNN-ResNeXt101-vd-FPN<br/>FasterRCNN-Swin-Tiny-FPN<br/>Cascade-FasterRCNN-ResNet50-FPN<br/>Cascade-FasterRCNN-ResNet50-vd-SSLDv2-FPN</details></td>
   </tr>
   <tr>
-    <td>基础产线</td>
-    <td>通用语义分割</td>
+    <td rowspan="2">基础产线</td>
+    <td rowspan="2">通用语义分割</td>
     <td>语义分割</td>
     <td>OCRNet_HRNet-W48<br/>OCRNet_HRNet-W18<br/>PP-LiteSeg-T<details>
     <summary><b>more</b></summary><br/>Deeplabv3-R50<br/>Deeplabv3-R101<br/>Deeplabv3_Plus-R50<br/>Deeplabv3_Plus-R101<br/>SeaFormer_tiny<br/
@@ -83,6 +83,10 @@ PaddleX 3.0 覆盖了 16 条产业级模型产线,其中 9 条基础产线可
     >SegFormer-B3<br/>SegFormer-B4<br/>SegFormer-B5</details></td>
   </tr>
   <tr>
+    <td>无监督异常检测</td>
+    <td>STFPM</td>
+  </tr>
+  <tr>
     <td>基础产线</td>
     <td>通用实例分割</td>
     <td>实例分割</td>

+ 4 - 0
docs/tutorials/models/support_model_list.md

@@ -199,6 +199,10 @@
 | SeaFormer_small | [SeaFormer_small.yaml](../../../paddlex/configs/semantic_segmentation/SeaFormer_small.yaml)|
 | SeaFormer_base | [SeaFormer_base.yaml](../../../paddlex/configs/semantic_segmentation/SeaFormer_base.yaml)|
 | SeaFormer_large | [SeaFormer_large.yaml](../../../paddlex/configs/semantic_segmentation/SeaFormer_large.yaml)|
+### 6.STFPM 系列
+| 模型名称 | config |
+| :--- | :---: |
+| STFPM | [STFPM.yaml](../../../paddlex/configs/anomaly_detection/STFPM.yaml)|
 ## 五、表格识别
 | 模型名称 | config |
 | :--- | :---: |

+ 40 - 0
paddlex/configs/anomaly_detection/STFPM.yaml

@@ -0,0 +1,40 @@
+Global:
+  model: STFPM
+  mode: check_dataset # check_dataset/train/evaluate/predict
+  dataset_dir: "/mnt/yys/dataset/mv_dataset"
+  device: gpu:0,1,2,3
+  output: "output"
+
+CheckDataset:
+  convert: 
+    enable: False
+    src_dataset_type: null
+  split: 
+    enable: False
+    train_percent: null
+    val_percent: null
+
+Train:
+  epochs_iters: 50
+  num_classes: 1
+  batch_size: 1
+  learning_rate: #0.01
+  pretrain_weight_path: null
+  warmup_steps: #0
+  resume_path: null
+  log_interval: 10
+  eval_interval: 100
+
+Evaluate:
+  weight_path: "output/best_model/model.pdparams"
+  log_interval: 10
+
+Predict:
+  model_dir: "output/best_model/model"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_grid.png"
+  kernel_option:
+    run_mode: paddle
+    batch_size: 1
+  
+Export:
+  weight_path: https://bj.bcebos.com/paddleseg/dygraph/mvtec_ad/stfpm/model.pdparams
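
Note: a quick way to sanity-check this config from Python (a minimal sketch, assuming PyYAML is available and the file sits at the path shown in the file list above; the printed keys are the ones targeted by -o overrides such as -o CheckDataset.convert.enable=True mentioned in check_dataset.py below):

import yaml

# Load the STFPM config and inspect the fields most commonly overridden on the command line.
with open("paddlex/configs/anomaly_detection/STFPM.yaml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg["Global"]["model"])                    # STFPM
print(cfg["Global"]["mode"])                     # check_dataset / train / evaluate / predict
print(cfg["CheckDataset"]["convert"]["enable"])  # False by default; enable it for MVTec_AD conversion
print(cfg["Train"]["epochs_iters"], cfg["Train"]["batch_size"])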

+ 7 - 2
paddlex/modules/__init__.py

@@ -29,14 +29,19 @@ from .image_classification import (
     ClsExportor,
     ClsPredictor,
 )
-
+from .anomaly_detection import (
+    UadDatasetChecker,
+    UadTrainer,
+    UadEvaluator,
+    UadExportor,
+    UadPredictor,
+)
 from .general_recognition import (
     ShiTuRecDatasetChecker,
     ShiTuRecTrainer,
     ShiTuRecEvaluator,
     ShiTuRecExportor,
 )
-
 from .object_detection import (
     COCODatasetChecker,
     DetTrainer,

+ 19 - 0
paddlex/modules/anomaly_detection/__init__.py

@@ -0,0 +1,19 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .dataset_checker import UadDatasetChecker
+from .trainer import UadTrainer
+from .evaluator import UadEvaluator
+from .predictor import UadPredictor, transforms
+from .exportor import UadExportor

+ 95 - 0
paddlex/modules/anomaly_detection/dataset_checker/__init__.py

@@ -0,0 +1,95 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+
+from ...base import BaseDatasetChecker
+from .dataset_src import check_dataset, convert_dataset, split_dataset, analyse_dataset
+
+from ..model_list import MODELS
+
+
+class UadDatasetChecker(BaseDatasetChecker):
+    """Dataset Checker for Semantic Segmentation Model"""
+
+    entities = MODELS
+    sample_num = 10
+
+    def convert_dataset(self, src_dataset_dir: str) -> str:
+        """convert the dataset from other type to specified type
+
+        Args:
+            src_dataset_dir (str): the root directory of dataset.
+
+        Returns:
+            str: the root directory of converted dataset.
+        """
+        return convert_dataset(
+            self.check_dataset_config.convert.src_dataset_type, src_dataset_dir
+        )
+
+    def split_dataset(self, src_dataset_dir: str) -> str:
+        """repartition the train and validation dataset
+
+        Args:
+            src_dataset_dir (str): the root directory of dataset.
+
+        Returns:
+            str: the root directory of the split dataset.
+        """
+        return split_dataset(
+            src_dataset_dir,
+            self.check_dataset_config.split.train_percent,
+            self.check_dataset_config.split.val_percent,
+        )
+
+    def check_dataset(self, dataset_dir: str, sample_num: int = sample_num) -> dict:
+        """check if the dataset meets the specifications and get dataset summary
+
+        Args:
+            dataset_dir (str): the root directory of dataset.
+            sample_num (int): the number to be sampled.
+        Returns:
+            dict: dataset summary.
+        """
+        return check_dataset(dataset_dir, self.output, sample_num)
+
+    def analyse(self, dataset_dir: str) -> dict:
+        """deep analyse dataset
+
+        Args:
+            dataset_dir (str): the root directory of dataset.
+
+        Returns:
+            dict: the deep analysis results.
+        """
+        return analyse_dataset(dataset_dir, self.output)
+
+    def get_show_type(self) -> str:
+        """get the show type of dataset
+
+        Returns:
+            str: show type
+        """
+        return "image"
+
+    def get_dataset_type(self) -> str:
+        """return the dataset type
+
+        Returns:
+            str: dataset type
+        """
+        return "SegDataset"

+ 19 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/__init__.py

@@ -0,0 +1,19 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .check_dataset import check_dataset
+from .convert_dataset import convert_dataset
+from .split_dataset import split_dataset
+from .analyse_dataset import analyse_dataset

+ 73 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py

@@ -0,0 +1,73 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image, ImageOps
+
+from .....utils.file_interface import custom_open
+from .....utils.logging import info
+
+
+def analyse_dataset(dataset_dir, output):
+    """class analysis for dataset"""
+
+    split_tags = ["train", "val"]
+    label2count = {tag: dict() for tag in split_tags}
+    for tag in split_tags:
+        mapping_file = osp.join(dataset_dir, f"{tag}.txt")
+        with custom_open(mapping_file, "r") as fp:
+            lines = filter(None, (line.strip() for line in fp.readlines()))
+            for i, line in enumerate(lines):
+                _, ann_file = line.split(" ")
+                ann_file = osp.join(dataset_dir, ann_file)
+                ann = np.array(ImageOps.exif_transpose(Image.open(ann_file)), "uint8")
+
+                for idx in set(ann.reshape([-1]).tolist()):
+                    if idx == 255:
+                        continue
+                    if idx not in label2count[tag]:
+                        label2count[tag][idx] = 1
+                    else:
+                        label2count[tag][idx] += 1
+            if label2count[tag].get(0, None) is None:
+                label2count[tag][0] = 0
+
+    train_label_idx = np.array(list(label2count["train"].keys()))
+    val_label_idx = np.array(list(label2count["val"].keys()))
+    label_idx = np.array(sorted(set(train_label_idx) | set(val_label_idx)))
+    x = np.arange(len(label_idx))
+    train_list = []
+    val_list = []
+    for idx in label_idx:
+        train_list.append(label2count["train"].get(idx, 0))
+        val_list.append(label2count["val"].get(idx, 0))
+    fig, ax = plt.subplots(figsize=(max(8, int(len(label_idx) / 5)), 5), dpi=120)
+
+    width = 0.5
+    ax.bar(x, train_list, width=width, label="train")
+    ax.bar(x + width, val_list, width=width, label="val")
+
+    plt.xticks(x + width / 2, label_idx)
+    ax.set_xlabel("Label Index")
+    ax.set_ylabel("Sample Counts")
+    plt.legend()
+    fig.tight_layout()
+    fig_path = os.path.join(output, "histogram.png")
+    fig.savefig(fig_path)
+    return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 84 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/check_dataset.py

@@ -0,0 +1,84 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+
+import numpy as np
+from PIL import Image, ImageOps
+import cv2
+
+from .utils.visualizer import visualize
+from .....utils.errors import DatasetFileNotFoundError
+from .....utils.file_interface import custom_open
+from .....utils.logging import info
+
+
+def check_dataset(dataset_dir, output, sample_num=10):
+    """check dataset"""
+    dataset_dir = osp.abspath(dataset_dir)
+    if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
+        raise DatasetFileNotFoundError(file_path=dataset_dir)
+    vis_save_dir = osp.join(output, "demo_img")
+    if not osp.exists(vis_save_dir):
+        os.makedirs(vis_save_dir)
+    split_tags = ["train", "val"]
+    attrs = dict()
+    class_ids = set()
+    for tag in split_tags:
+        mapping_file = osp.join(dataset_dir, f"{tag}.txt")
+        if not osp.exists(mapping_file):
+            info(f"The mapping file ({mapping_file}) doesn't exist, ignored.")
+            info("If you are using MVTec_AD dataset, add args below in your training commands:")
+            info("-o CheckDataset.convert.enable=True")
+            info("-o CheckDataset.convert.src_dataset_type=MVTec_AD")
+            continue
+        with custom_open(mapping_file, "r") as fp:
+            lines = filter(None, (line.strip() for line in fp.readlines()))
+            for i, line in enumerate(lines):
+                img_file, ann_file = line.split(" ")
+                img_file = osp.join(dataset_dir, img_file)
+                ann_file = osp.join(dataset_dir, ann_file)
+                assert osp.exists(img_file), FileNotFoundError(
+                    f"{img_file} not exist, please check!"
+                )
+                assert osp.exists(ann_file), FileNotFoundError(
+                    f"{ann_file} not exist, please check!"
+                )
+                img = np.array(cv2.imread(img_file), "uint8")
+                ann = np.array(cv2.imread(ann_file), "uint8")[:, :, 0]
+                assert img.shape[:2] == ann.shape, ValueError(
+                    f"The shape of {img_file}:{img.shape[:2]} and "
+                    f"{ann_file}:{ann.shape} must be the same!"
+                )
+                class_ids = class_ids | set(ann.reshape([-1]).tolist())
+                if i < sample_num:
+                    vis_img = visualize(img, ann)
+                    vis_img = Image.fromarray(vis_img)
+                    vis_save_path = osp.join(vis_save_dir, osp.basename(img_file))
+                    vis_img.save(vis_save_path)
+                    vis_save_path = osp.join(
+                        "check_dataset", os.path.relpath(vis_save_path, output)
+                    )
+                    if f"{tag}_sample_paths" not in attrs:
+                        attrs[f"{tag}_sample_paths"] = [vis_save_path]
+                    else:
+                        attrs[f"{tag}_sample_paths"].append(vis_save_path)
+            if f"{tag}_samples" not in attrs:
+                attrs[f"{tag}_samples"] = i + 1
+    if 255 in class_ids:
+        class_ids.remove(255)
+    attrs["num_classes"] = len(class_ids)
+    return attrs
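
Note: each line of train.txt/val.txt is expected to hold an image path and an annotation path separated by a single space, both resolved against the dataset root. A minimal sketch of how such a line is parsed, mirroring the loop above (hypothetical paths):

import os.path as osp

dataset_dir = "/path/to/mvtec_ad/grid"          # hypothetical dataset root
line = "train/good/000.png train/good/000.png"  # for defect-free training images the image path is reused as the annotation path

img_file, ann_file = line.split(" ")
img_file = osp.join(dataset_dir, img_file)
ann_file = osp.join(dataset_dir, ann_file)
print(img_file, ann_file)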

+ 210 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/convert_dataset.py

@@ -0,0 +1,210 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import glob
+import json
+import os
+import os.path as osp
+import shutil
+
+import cv2
+import numpy as np
+from PIL import Image, ImageDraw
+
+from .....utils.file_interface import custom_open
+from .....utils import logging
+from .....utils.logging import info
+
+def convert_dataset(dataset_type, input_dir):
+    """convert to paddlex official format"""
+    if dataset_type == "LabelMe":
+        return convert_labelme_dataset(input_dir)
+    elif dataset_type == "MVTec_AD":
+        return convert_mvtec_dataset(input_dir)
+    else:
+        raise NotImplementedError(dataset_type)
+
+
+def convert_labelme_dataset(input_dir):
+    """convert labelme format to paddlex official format"""
+    bg_name = "_background_"
+    ignore_name = "__ignore__"
+
+    # prepare dir
+    output_img_dir = osp.join(input_dir, "images")
+    output_annot_dir = osp.join(input_dir, "annotations")
+    if not osp.exists(output_img_dir):
+        os.makedirs(output_img_dir)
+    if not osp.exists(output_annot_dir):
+        os.makedirs(output_annot_dir)
+
+    # collect class_names and set class_name_to_id
+    class_names = []
+    class_name_to_id = {}
+    split_tags = ["train", "val"]
+    for tag in split_tags:
+        mapping_file = osp.join(input_dir, f"{tag}_anno_list.txt")
+        with open(mapping_file, "r") as f:
+            label_files = [
+                osp.join(input_dir, line.strip("\n")) for line in f.readlines()
+            ]
+        for label_file in label_files:
+            with custom_open(label_file, "r") as fp:
+                data = json.load(fp)
+                for shape in data["shapes"]:
+                    cls_name = shape["label"]
+                    if cls_name not in class_names:
+                        class_names.append(cls_name)
+
+        if ignore_name in class_names:
+            class_name_to_id[ignore_name] = 255
+            class_names.remove(ignore_name)
+        if bg_name in class_names:
+            class_names.remove(bg_name)
+        class_name_to_id[bg_name] = 0
+        for i, name in enumerate(class_names):
+            class_name_to_id[name] = i + 1
+
+        if len(class_names) > 256:
+            raise ValueError(
+                f"There are {len(class_names)} categories in the annotation file, "
+                f"exceeding 256, Not compliant with paddlex official format!"
+            )
+
+        # create annotated images and copy origin images
+        color_map = get_color_map_list(256)
+        img_file_list = []
+        label_file_list = []
+        for i, label_file in enumerate(label_files):
+            filename = osp.splitext(osp.basename(label_file))[0]
+            annotated_img_path = osp.join(output_annot_dir, filename + ".png")
+            with custom_open(label_file, "r") as f:
+                data = json.load(f)
+                img_path = osp.join(osp.dirname(label_file), data["imagePath"])
+                if not os.path.exists(img_path):
+                    logging.info("%s does not exist, skipping this image" % img_path)
+                    continue
+                img_name = img_path.split("/")[-1]
+                img_file_list.append(f"images/{img_name}")
+                label_img_name = annotated_img_path.split("/")[-1]
+                label_file_list.append(f"annotations/{label_img_name}")
+
+                img = np.asarray(cv2.imread(img_path))
+                lbl = shape2label(
+                    img_size=img.shape,
+                    shapes=data["shapes"],
+                    class_name_mapping=class_name_to_id,
+                )
+                lbl_pil = Image.fromarray(lbl.astype(np.uint8), mode="P")
+                lbl_pil.putpalette(color_map)
+                lbl_pil.save(annotated_img_path)
+
+                shutil.copy(img_path, output_img_dir)
+        with custom_open(osp.join(input_dir, f"{tag}.txt"), "w") as fp:
+            for img_path, lbl_path in zip(img_file_list, label_file_list):
+                fp.write(f"{img_path} {lbl_path}\n")
+
+    with custom_open(osp.join(input_dir, "class_name.txt"), "w") as fp:
+        for name in class_names:
+            fp.write(f"{name}{os.linesep}")
+    with custom_open(osp.join(input_dir, "class_name_to_id.txt"), "w") as fp:
+        for key, val in class_name_to_id.items():
+            fp.write(f"{val}: {key}{os.linesep}")
+
+    return input_dir
+
+
+def get_color_map_list(num_classes):
+    """get color map list"""
+    num_classes += 1
+    color_map = num_classes * [0, 0, 0]
+    for i in range(0, num_classes):
+        j = 0
+        lab = i
+        while lab:
+            color_map[i * 3] |= ((lab >> 0) & 1) << (7 - j)
+            color_map[i * 3 + 1] |= ((lab >> 1) & 1) << (7 - j)
+            color_map[i * 3 + 2] |= ((lab >> 2) & 1) << (7 - j)
+            j += 1
+            lab >>= 3
+    color_map = color_map[3:]
+    return color_map
+
+
+def shape2label(img_size, shapes, class_name_mapping):
+    """根据输入的形状列表,将图像的标签矩阵填充为对应形状的类别编号"""
+    label = np.zeros(img_size[:2], dtype=np.int32)
+    for shape in shapes:
+        points = shape["points"]
+        class_name = shape["label"]
+        label_mask = polygon2mask(img_size[:2], points)
+        label[label_mask] = class_name_mapping[class_name]
+    return label
+
+
+def polygon2mask(img_size, points):
+    """将给定形状的点转换成对应的掩膜"""
+    label_mask = Image.fromarray(np.zeros(img_size[:2], dtype=np.uint8))
+    image_draw = ImageDraw.Draw(label_mask)
+    points_list = [tuple(point) for point in points]
+    assert len(points_list) > 2, ValueError("A polygon must have more than 2 points")
+    image_draw.polygon(xy=points_list, outline=1, fill=1)
+    return np.array(label_mask, dtype=bool)
+
+
+def save_item_to_txt(items, file_path):
+    """append a line (or lines) to a text file"""
+    try:
+        with open(file_path, 'a') as file:
+            file.write(items)
+    except Exception as e:
+        info(f"Failed to write {file_path}: {e}")
+
+
+def save_training_txt(cls_root, mode, cat):
+    """write image/annotation path pairs of one category into train.txt or val.txt"""
+    imgs = os.listdir(os.path.join(cls_root, mode, cat))
+    imgs.sort()
+    for img in imgs:
+        if mode == 'train':
+            item = os.path.join(cls_root, mode, cat, img)
+            items = item + ' ' + item + '\n'
+            save_item_to_txt(items, os.path.join(cls_root, 'train.txt'))
+        elif mode == 'test' and cat != 'good':
+            item1 = os.path.join(cls_root, mode, cat, img)
+            item2 = os.path.join(cls_root, 'ground_truth', cat, img.split('.')[0]+'_mask.png')
+            items = item1 + ' ' + item2 + '\n'
+            save_item_to_txt(items, os.path.join(cls_root, 'val.txt'))
+
+
+def check_old_txt(cls_pth, mode):
+    """remove a previously generated train.txt/val.txt if it exists"""
+    set_name = 'train.txt' if mode == 'train' else 'val.txt'
+    pth = os.path.join(cls_pth, set_name)
+    if os.path.exists(pth):
+        os.remove(pth)
+
+
+def convert_mvtec_dataset(input_dir):
+    """generate train.txt/val.txt for a single MVTec AD category directory"""
+    classes = ['bottle', 'cable', 'capsule', 'hazelnut', 'metal_nut', 'pill', 'screw',
+               'toothbrush', 'transistor', 'zipper', 'carpet', 'grid', 'leather', 'tile', 'wood']
+    clas = os.path.split(input_dir)[-1]
+    assert clas in classes, \
+        f"Make sure the last component of your dataset root ('{clas}') is one of\n {classes}"
+    modes = ['train', 'test']
+    cls_root = input_dir
+    for mode in modes:
+        check_old_txt(cls_root, mode)
+        cats = os.listdir(os.path.join(cls_root, mode))
+        for cat in cats:
+            save_training_txt(cls_root, mode, cat)
+    info(f"Add train.txt/val.txt successfully for {input_dir}")

+ 87 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/split_dataset.py

@@ -0,0 +1,87 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+import random
+import shutil
+
+from .....utils.file_interface import custom_open
+from .....utils import logging
+
+
+def split_dataset(root_dir, train_percent, val_percent):
+    """split dataset"""
+    assert train_percent > 0, ValueError(
+        f"The train_percent({train_percent}) must greater than 0!"
+    )
+    assert val_percent > 0, ValueError(
+        f"The val_percent({val_percent}) must greater than 0!"
+    )
+    if train_percent + val_percent != 100:
+        raise ValueError(
+            f"The sum of train_percent({train_percent})and val_percent({val_percent}) should be 100!"
+        )
+
+    img_dir = osp.join(root_dir, "images")
+    assert osp.exists(img_dir), FileNotFoundError(
+        f"The dir of images ({img_dir}) doesn't exist, please check!"
+    )
+    ann_dir = osp.join(root_dir, "annotations")
+    assert osp.exists(ann_dir), FileNotFoundError(
+        f"The dir of annotations ({ann_dir}) doesn't exist, please check!"
+    )
+
+    img_file_list = [osp.join("images", img_name) for img_name in os.listdir(img_dir)]
+    img_num = len(img_file_list)
+    ann_file_list = [
+        osp.join("annotations", ann_name) for ann_name in os.listdir(ann_dir)
+    ]
+    ann_num = len(ann_file_list)
+    assert img_num == ann_num, ValueError(
+        "The number of images and annotations must be equal!"
+    )
+
+    split_tags = ["train", "val"]
+    mapping_line_list = []
+    for tag in split_tags:
+        mapping_file = osp.join(root_dir, f"{tag}.txt")
+        if not osp.exists(mapping_file):
+            logging.info(f"The mapping file ({mapping_file}) doesn't exist, ignored.")
+            continue
+        with custom_open(mapping_file, "r") as fp:
+            lines = filter(None, (line.strip() for line in fp.readlines()))
+            mapping_line_list.extend(lines)
+
+    sample_num = len(mapping_line_list)
+    random.shuffle(mapping_line_list)
+    split_percents = [train_percent, val_percent]
+    start_idx = 0
+    for tag, percent in zip(split_tags, split_percents):
+        if tag == "test" and percent == 0:
+            continue
+        end_idx = start_idx + round(sample_num * percent / 100)
+        end_idx = min(end_idx, sample_num)
+        mapping_file = osp.join(root_dir, f"{tag}.txt")
+        if os.path.exists(mapping_file):
+            shutil.move(mapping_file, mapping_file + ".bak")
+            logging.info(
+                f"The original mapping file ({mapping_file}) "
+                f"has been backed up to ({mapping_file}.bak)"
+            )
+        with custom_open(mapping_file, "w") as fp:
+            fp.write("\n".join(mapping_line_list[start_idx:end_idx]))
+        start_idx = end_idx
+    return root_dir
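
Note: the split boundaries come from round(sample_num * percent / 100) applied to the shuffled, merged line list. A tiny standalone sketch of that arithmetic for an 80/20 split of 103 lines (assumed numbers):

sample_num = 103
start_idx = 0
for tag, percent in zip(["train", "val"], [80, 20]):
    end_idx = min(start_idx + round(sample_num * percent / 100), sample_num)
    print(tag, start_idx, end_idx)  # train 0 82, then val 82 103
    start_idx = end_idx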

+ 13 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/__init__.py

@@ -0,0 +1,13 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 71 - 0
paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/visualizer.py

@@ -0,0 +1,71 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import cv2
+import numpy as np
+
+
+def get_color_map_list(length):
+    """Returns the color map for visualizing the segmentation mask"""
+    length += 1
+    color_map = length * [0, 0, 0]
+    for i in range(0, length):
+        j = 0
+        lab = i
+        while lab:
+            color_map[i * 3] |= ((lab >> 0) & 1) << (7 - j)
+            color_map[i * 3 + 1] |= ((lab >> 1) & 1) << (7 - j)
+            color_map[i * 3 + 2] |= ((lab >> 2) & 1) << (7 - j)
+            j += 1
+            lab >>= 3
+    color_map = color_map[3:]
+    return color_map
+
+
+def visualize(image, result, weight=0.6, use_multilabel=False):
+    """Convert predict result to color image, and save added image."""
+    color_map = get_color_map_list(256)
+    color_map = [color_map[i : i + 3] for i in range(0, len(color_map), 3)]
+    color_map = np.array(color_map).astype("uint8")
+
+    if not use_multilabel:
+        # Use OpenCV LUT for color mapping
+        c1 = cv2.LUT(result, color_map[:, 0])
+        c2 = cv2.LUT(result, color_map[:, 1])
+        c3 = cv2.LUT(result, color_map[:, 2])
+        pseudo_img = np.dstack((c3, c2, c1))
+
+        vis_result = cv2.addWeighted(image, weight, pseudo_img, 1 - weight, 0)
+    else:
+        vis_result = image.copy()
+        for i in range(result.shape[0]):
+            mask = result[i]
+            c1 = np.where(mask, color_map[i, 0], vis_result[..., 0])
+            c2 = np.where(mask, color_map[i, 1], vis_result[..., 1])
+            c3 = np.where(mask, color_map[i, 2], vis_result[..., 2])
+            pseudo_img = np.dstack((c3, c2, c1)).astype("uint8")
+
+            contour, _ = cv2.findContours(
+                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+            )
+            vis_result = cv2.addWeighted(vis_result, weight, pseudo_img, 1 - weight, 0)
+            contour_color = (
+                int(color_map[i, 0]),
+                int(color_map[i, 1]),
+                int(color_map[i, 2]),
+            )
+            vis_result = cv2.drawContours(vis_result, contour, -1, contour_color, 1)
+
+    return vis_result
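
Note: the single-label branch above colorizes the mask with three per-channel lookup tables and alpha-blends the result onto the original image. A self-contained sketch of the same idea on synthetic data (OpenCV and NumPy only, independent of the module):

import cv2
import numpy as np

image = np.full((64, 64, 3), 200, dtype=np.uint8)  # plain gray input image
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1                             # one region labelled with class id 1

lut = np.zeros((256, 3), dtype=np.uint8)
lut[1] = (128, 0, 0)                               # class 1 -> a fixed color

pseudo = np.dstack([cv2.LUT(mask, lut[:, c]) for c in (2, 1, 0)])  # BGR pseudo-color image
overlay = cv2.addWeighted(image, 0.6, pseudo, 0.4, 0)
print(overlay.shape, overlay.dtype)  # (64, 64, 3) uint8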

+ 58 - 0
paddlex/modules/anomaly_detection/evaluator.py

@@ -0,0 +1,58 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from pathlib import Path
+
+from ..base import BaseEvaluator
+from .model_list import MODELS
+
+
+class UadEvaluator(BaseEvaluator):
+    """Semantic Segmentation Model Evaluator"""
+
+    entities = MODELS
+
+    def update_config(self):
+        """update evalution config"""
+        self.pdx_config.update_dataset(self.global_config.dataset_dir, "SegDataset")
+        self.pdx_config.update_pretrained_weights(None, is_backbone=True)
+
+    def get_config_path(self, weight_path):
+        """
+        get config path
+
+        Args:
+            weight_path (str): The path to the weight
+
+        Returns:
+            config_path (str): The path to the config
+
+        """
+
+        config_path = Path(weight_path).parent.parent / "config.yaml"
+
+        return config_path
+
+    def get_eval_kwargs(self) -> dict:
+        """get key-value arguments of model evalution function
+
+        Returns:
+            dict: the arguments of evaluation function.
+        """
+        device = self.get_device()
+        # XXX:
+        os.environ.pop("FLAGS_npu_jit_compile", None)
+        return {"weight_path": self.eval_config.weight_path, "device": device}

+ 22 - 0
paddlex/modules/anomaly_detection/exportor.py

@@ -0,0 +1,22 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..base import BaseExportor
+from .model_list import MODELS
+
+
+class UadExportor(BaseExportor):
+    """Semantic Segmentation Model Exportor"""
+
+    entities = MODELS

+ 18 - 0
paddlex/modules/anomaly_detection/model_list.py

@@ -0,0 +1,18 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+MODELS = [
+    "STFPM"
+]

+ 17 - 0
paddlex/modules/anomaly_detection/predictor/__init__.py

@@ -0,0 +1,17 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .predictor import UadPredictor
+from . import transforms

+ 32 - 0
paddlex/modules/anomaly_detection/predictor/keys.py

@@ -0,0 +1,32 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class SegKeys(object):
+    """
+    This class defines a set of keys used for communication of Seg predictors
+    and transforms. Both predictors and transforms accept a dict or a list of
+    dicts as input, and they get the objects of their interest from the dict, or
+    put the generated objects into the dict, all based on these keys.
+    """
+
+    # Common keys
+    IMAGE = "image"
+    IM_PATH = "input_path"
+    IM_SIZE = "image_size"
+
+    # Suite-specific keys
+    PROB_MAP = "prob_map"
+    SEG_MAP = "seg_map"
+    PC_MAP = "pc_map"

+ 106 - 0
paddlex/modules/anomaly_detection/predictor/predictor.py

@@ -0,0 +1,106 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+import numpy as np
+
+from ....utils import logging
+from ...base.predictor.transforms import image_common
+from ...base import BasePredictor
+from .keys import SegKeys as K
+from . import transforms as T
+from .utils import InnerConfig
+from ..model_list import MODELS
+
+
+class UadPredictor(BasePredictor):
+    """UadPredictor"""
+
+    entities = MODELS
+
+    def __init__(self, has_prob_map=False, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.has_prob_map = has_prob_map
+
+    def load_other_src(self):
+        """load the inner config file"""
+        infer_cfg_file_path = os.path.join(self.model_dir, "inference.yml")
+        if not os.path.exists(infer_cfg_file_path):
+            raise FileNotFoundError(f"Cannot find config file: {infer_cfg_file_path}")
+        return InnerConfig(infer_cfg_file_path)
+
+    @classmethod
+    def get_input_keys(cls):
+        """get input keys"""
+        return [[K.IMAGE], [K.IM_PATH]]
+
+    @classmethod
+    def get_output_keys(cls):
+        """get output keys"""
+        return [K.SEG_MAP]
+
+    def _run(self, batch_input):
+        """run"""
+        # XXX:
+        os.environ.pop("FLAGS_npu_jit_compile", None)
+        images = [data[K.IMAGE] for data in batch_input]
+        input_ = np.stack(images, axis=0)
+        if input_.ndim == 3:
+            input_ = input_[:, np.newaxis]
+        input_ = input_.astype(dtype=np.float32, copy=False)
+        outputs = self._predictor.predict([input_])
+        out_maps = outputs[0]
+        # In-place update
+        pred = batch_input
+        for dict_, out_map in zip(pred, out_maps):
+            if self.has_prob_map:
+                # `out_map` is prob map
+                dict_[K.PROB_MAP] = out_map
+                dict_[K.SEG_MAP] = np.argmax(out_map, axis=1)
+            else:
+                # `out_map` is seg map
+                dict_[K.SEG_MAP] = out_map
+        return pred
+
+    def _get_pre_transforms_from_config(self):
+        """_get_pre_transforms_from_config"""
+        # If `K.IMAGE` (the decoded image) is found, return a default list of
+        # transformation operators for the input (if possible).
+        # If `K.IMAGE` (the decoded image) is not found, `K.IM_PATH` (the image
+        # path) must be contained in the input. In this case, we infer
+        # transformation operators from the config file.
+        # In cases where the input contains both `K.IMAGE` and `K.IM_PATH`,
+        # `K.IMAGE` takes precedence over `K.IM_PATH`.
+        logging.info(
+            f"Transformation operators for data preprocessing will be inferred from config file."
+        )
+        pre_transforms = self.other_src.pre_transforms
+        pre_transforms.insert(0, image_common.ReadImage(format="RGB"))
+        pre_transforms.append(image_common.ToCHWImage())
+        return pre_transforms
+
+    def _get_post_transforms_from_config(self):
+        """_get_post_transforms_from_config"""
+        post_transforms = []
+        if not self.disable_print:
+            post_transforms.append(T.PrintResult())
+        if not self.disable_save:
+            post_transforms.extend([T.GeneratePCMap(), T.SaveSegResults(self.output)])
+        return post_transforms
+
+    def _get_STFPM_post_transforms_from_config(self):
+        """get the post transforms used for STFPM"""
+        return [
+            T.MapToMask(),
+            T.GeneratePCMap(),
+            T.SaveSegResults(self.output),
+            T.PrintResult(),
+        ]

+ 194 - 0
paddlex/modules/anomaly_detection/predictor/transforms.py

@@ -0,0 +1,194 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+import numpy as np
+from PIL import Image
+
+from ....utils import logging
+from ...base import BaseTransform
+from ...base.predictor.io.writers import ImageWriter
+from .keys import SegKeys as K
+from skimage import morphology
+
+__all__ = ["GeneratePCMap", "SaveSegResults", "PrintResult", "MapToMask"]
+
+
+class GeneratePCMap(BaseTransform):
+    """GeneratePCMap"""
+
+    def __init__(self, color_map=None):
+        super().__init__()
+        self.color_map = color_map
+
+    def apply(self, data):
+        """apply"""
+        pred = data[K.SEG_MAP]
+        pc_map = self.get_pseudo_color_map(pred)
+        data[K.PC_MAP] = pc_map
+        return data
+
+    @classmethod
+    def get_input_keys(cls):
+        """get input keys"""
+        return [K.SEG_MAP]
+
+    @classmethod
+    def get_output_keys(cls):
+        """get input keys"""
+        return [K.PC_MAP]
+
+    def get_pseudo_color_map(self, pred):
+        """get_pseudo_color_map"""
+        if pred.min() < 0 or pred.max() > 255:
+            raise ValueError("`pred` cannot be cast to uint8.")
+        pred = pred.astype(np.uint8)
+        pred_mask = Image.fromarray(pred, mode="P")
+        if self.color_map is None:
+            color_map = self._get_color_map_list(256)
+        else:
+            color_map = self.color_map
+        pred_mask.putpalette(color_map)
+        return pred_mask
+
+    @staticmethod
+    def _get_color_map_list(num_classes, custom_color=None):
+        """_get_color_map_list"""
+        num_classes += 1
+        color_map = num_classes * [0, 0, 0]
+        for i in range(0, num_classes):
+            j = 0
+            lab = i
+            while lab:
+                color_map[i * 3] |= ((lab >> 0) & 1) << (7 - j)
+                color_map[i * 3 + 1] |= ((lab >> 1) & 1) << (7 - j)
+                color_map[i * 3 + 2] |= ((lab >> 2) & 1) << (7 - j)
+                j += 1
+                lab >>= 3
+        color_map = color_map[3:]
+
+        if custom_color:
+            color_map[: len(custom_color)] = custom_color
+        return color_map
+
+
+class SaveSegResults(BaseTransform):
+    """SaveSegResults"""
+
+    _PC_MAP_SUFFIX = "_pc"
+    _FILE_EXT = ".png"
+
+    def __init__(self, save_dir, save_pc_map=True):
+        super().__init__()
+        self.save_dir = save_dir
+        self.save_pc_map = save_pc_map
+
+        # We use pillow backend to save both numpy arrays and PIL Image objects
+        self._writer = ImageWriter(backend="pillow")
+
+    def apply(self, data):
+        """apply"""
+        seg_map = data[K.SEG_MAP]
+        ori_path = data[K.IM_PATH]
+        file_name = os.path.basename(ori_path)
+        file_name = self._replace_ext(file_name, self._FILE_EXT)
+        seg_map_save_path = os.path.join(self.save_dir, file_name)
+        self._write_im(seg_map_save_path, seg_map)
+        if self.save_pc_map:
+            if K.PC_MAP in data:
+                pc_map_save_path = self._add_suffix(
+                    seg_map_save_path, self._PC_MAP_SUFFIX
+                )
+                pc_map = data[K.PC_MAP]
+                self._write_im(pc_map_save_path, pc_map)
+            else:
+                logging.warning(f"The {K.PC_MAP} result doesn't exist!")
+        return data
+
+    @classmethod
+    def get_input_keys(cls):
+        """get input keys"""
+        return [K.IM_PATH, K.SEG_MAP]
+
+    @classmethod
+    def get_output_keys(cls):
+        """get output keys"""
+        return []
+
+    def _write_im(self, path, im):
+        """write image"""
+        if os.path.exists(path):
+            logging.warning(f"{path} already exists. Overwriting it.")
+        self._writer.write(path, im)
+
+    @staticmethod
+    def _add_suffix(path, suffix):
+        """add suffix"""
+        stem, ext = os.path.splitext(path)
+        return stem + suffix + ext
+
+    @staticmethod
+    def _replace_ext(path, new_ext):
+        """replace ext"""
+        stem, _ = os.path.splitext(path)
+        return stem + new_ext
+
+
+class PrintResult(BaseTransform):
+    """Print Result Transform"""
+
+    def apply(self, data):
+        """apply"""
+        logging.info("The prediction result is:")
+        logging.info(f"keys: {data.keys()}")
+        return data
+
+    @classmethod
+    def get_input_keys(cls):
+        """get input keys"""
+        return [K.SEG_MAP]
+
+    @classmethod
+    def get_output_keys(cls):
+        """get output keys"""
+        return []
+
+
+class MapToMask(BaseTransform):
+    """Convert an anomaly score map into a binary segmentation mask"""
+
+    def apply(self, data):
+        """apply"""
+        score_map = data[K.SEG_MAP]
+        thresh = 0.01
+        mask = score_map[0]
+        # binarize the score map, then remove small speckles with a morphological opening
+        mask[mask > thresh] = 255
+        mask[mask <= thresh] = 0
+        kernel = morphology.disk(4)
+        mask = morphology.opening(mask, kernel)
+        mask = mask.astype(np.uint8)
+        data[K.SEG_MAP] = mask
+        return data
+
+    @classmethod
+    def get_input_keys(cls):
+        """get input keys"""
+        return [K.SEG_MAP]
+
+    @classmethod
+    def get_output_keys(cls):
+        """get output keys"""
+        return []
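
Note: MapToMask turns the raw STFPM anomaly score map into a binary mask by hard-thresholding the scores and cleaning up speckle with a morphological opening. A standalone sketch of the same two steps on a synthetic score map (NumPy and scikit-image only; 0.01 is the fixed threshold used above):

import numpy as np
from skimage import morphology

score_map = np.zeros((64, 64), dtype=np.float32)
score_map[20:40, 20:40] = 0.05  # simulated anomalous region
score_map[5, 5] = 0.05          # isolated noisy pixel

mask = np.where(score_map > 0.01, 255, 0).astype(np.uint8)
mask = morphology.opening(mask, morphology.disk(4))  # removes the isolated pixel, keeps the blob
print(mask.max(), mask[5, 5], mask[30, 30])          # 255 0 255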

+ 85 - 0
paddlex/modules/anomaly_detection/predictor/utils.py

@@ -0,0 +1,85 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import codecs
+import yaml
+
+from ....utils import logging
+from ...base.predictor.transforms import image_common
+
+
+class InnerConfig(object):
+    """Inner Config"""
+
+    def __init__(self, config_path):
+        self.inner_cfg = self.load(config_path)
+
+    def load(self, config_path):
+        """load config"""
+        with codecs.open(config_path, "r", "utf-8") as file:
+            dic = yaml.load(file, Loader=yaml.FullLoader)
+        return dic
+
+    @property
+    def pre_transforms(self):
+        """read preprocess transforms from  config file"""
+
+        def _process_incompct_args(cfg, arg_names, action):
+            for name in arg_names:
+                if name in cfg:
+                    if action == "ignore":
+                        logging.warning(f"Ignoring incompatible argument: {name}")
+                    elif action == "raise":
+                        raise RuntimeError(f"Incompatible argument detected: {name}")
+                    else:
+                        raise ValueError(f"Unknown action: {action}")
+
+        tfs_cfg = self.inner_cfg["Deploy"]["transforms"]
+        tfs = []
+        for cfg in tfs_cfg:
+            if cfg["type"] == "Normalize":
+                tf = image_common.Normalize(
+                    mean=cfg.get("mean", 0.5), std=cfg.get("std", 0.5)
+                )
+            elif cfg["type"] == "Resize":
+                tf = image_common.Resize(
+                    target_size=cfg.get("target_size", (512, 512)),
+                    keep_ratio=cfg.get("keep_ratio", False),
+                    size_divisor=cfg.get("size_divisor", None),
+                    interp=cfg.get("interp", "LINEAR"),
+                )
+            elif cfg["type"] == "ResizeByLong":
+                tf = image_common.ResizeByLong(
+                    target_long_edge=cfg["long_size"],
+                    size_divisor=None,
+                    interp="LINEAR",
+                )
+            elif cfg["type"] == "ResizeByShort":
+                _process_incompct_args(cfg, ["max_size"], action="raise")
+                tf = image_common.ResizeByShort(
+                    target_short_edge=cfg["short_size"],
+                    size_divisor=None,
+                    interp="LINEAR",
+                )
+            elif cfg["type"] == "Padding":
+                _process_incompct_args(cfg, ["label_padding_value"], action="ignore")
+                tf = image_common.Pad(
+                    target_size=cfg["target_size"],
+                    val=cfg.get("im_padding_value", 127.5),
+                )
+            else:
+                raise RuntimeError(f"Unsupported type: {cfg['type']}")
+            tfs.append(tf)
+        return tfs

+ 236 - 0
paddlex/modules/anomaly_detection/trainer.py

@@ -0,0 +1,236 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import glob
+from pathlib import Path
+import paddle
+
+from ..base import BaseTrainer, BaseTrainDeamon
+from ...utils.config import AttrDict
+from .model_list import MODELS
+
+
+class UadTrainer(BaseTrainer):
+    """Uad Model Trainer"""
+
+    entities = MODELS
+
+    def build_deamon(self, config: AttrDict) -> "SegTrainDeamon":
+        """build deamon thread for saving training outputs timely
+
+        Args:
+            config (AttrDict): PaddleX pipeline config, which is loaded from pipeline yaml file.
+
+        Returns:
+            SegTrainDeamon: the training daemon thread object that saves training outputs in a timely manner.
+        """
+        return SegTrainDeamon(config)
+
+    def update_config(self):
+        """update training config"""
+        self.pdx_config.update_dataset(self.global_config.dataset_dir, "SegDataset")
+        if self.train_config.num_classes is not None:
+            self.pdx_config.update_num_classes(self.train_config.num_classes)
+        if (
+            self.train_config.pretrain_weight_path
+            and self.train_config.pretrain_weight_path != ""
+        ):
+            self.pdx_config.update_pretrained_weights(
+                self.train_config.pretrain_weight_path, is_backbone=True
+            )
+
+    def get_train_kwargs(self) -> dict:
+        """get key-value arguments of model training function
+
+        Returns:
+            dict: the arguments of training function.
+        """
+        train_args = {"device": self.get_device()}
+        # XXX:
+        os.environ.pop("FLAGS_npu_jit_compile", None)
+        if self.train_config.batch_size is not None:
+            train_args["batch_size"] = self.train_config.batch_size
+        if self.train_config.learning_rate is not None:
+            train_args["learning_rate"] = self.train_config.learning_rate
+        if self.train_config.epochs_iters is not None:
+            train_args["epochs_iters"] = self.train_config.epochs_iters
+        if (
+            self.train_config.resume_path is not None
+            and self.train_config.resume_path != ""
+        ):
+            train_args["resume_path"] = self.train_config.resume_path
+        if self.global_config.output is not None:
+            train_args["save_dir"] = self.global_config.output
+        if self.train_config.log_interval:
+            train_args["log_iters"] = self.train_config.log_interval
+        if self.train_config.eval_interval:
+            train_args["do_eval"] = True
+            train_args["save_interval"] = self.train_config.eval_interval
+        train_args["dy2st"] = self.train_config.get("dy2st", False)
+        return train_args
+
+
+class SegTrainDeamon(BaseTrainDeamon):
+    """SegTrainResultDemon"""
+
+    last_k = 1
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def get_the_pdparams_suffix(self):
+        """get the suffix of pdparams file"""
+        return "pdparams"
+
+    def get_the_pdema_suffix(self):
+        """get the suffix of pdema file"""
+        return "pdema"
+
+    def get_the_pdopt_suffix(self):
+        """get the suffix of pdopt file"""
+        return "pdopt"
+
+    def get_the_pdstates_suffix(self):
+        """get the suffix of pdstates file"""
+        return "pdstates"
+
+    def get_ith_ckp_prefix(self, epoch_id):
+        """get the prefix of the epoch_id checkpoint file"""
+        return f"iter_{epoch_id}/model"
+
+    def get_best_ckp_prefix(self):
+        """get the prefix of the best checkpoint file"""
+        return "best_model/model"
+
+    def get_score(self, pdstates_path):
+        """get the score by pdstates file"""
+        if not Path(pdstates_path).exists():
+            return 0
+        return paddle.load(pdstates_path)["mIoU"]
+
+    def get_epoch_id_by_pdparams_prefix(self, pdparams_dir):
+        """get the epoch_id by pdparams file"""
+        return int(pdparams_dir.parent.name.split("_")[-1])
+
+    def update_result(self, result, train_output):
+        """update every result"""
+        train_output = Path(train_output).resolve()
+        config_path = train_output.joinpath("config.yaml").resolve()
+        if not config_path.exists():
+            return result
+
+        model_name = result["model_name"]
+        if (
+            model_name in self.config_recorder
+            and self.config_recorder[model_name] != config_path
+        ):
+            result["models"] = self.init_model_pkg()
+        result["config"] = config_path
+        self.config_recorder[model_name] = config_path
+
+        result["visualdl_log"] = self.update_vdl_log(train_output)
+        result["label_dict"] = self.update_label_dict(train_output)
+
+        model = self.get_model(result["model_name"], config_path)
+
+        params_path_list = list(
+            train_output.glob(
+                ".".join(
+                    [self.get_ith_ckp_prefix("[0-9]*"), self.get_the_pdparams_suffix()]
+                )
+            )
+        )
+        iter_ids = []
+        for params_path in params_path_list:
+            iter_id = self.get_epoch_id_by_pdparams_prefix(params_path)
+            iter_ids.append(iter_id)
+        iter_ids.sort()
+        # TODO(gaotingquan): avoid picking up the latest checkpoint files while they are still being saved
+        # epoch_ids = epoch_ids[:-1]
+        for i in range(1, self.last_k + 1):
+            if len(iter_ids) < i:
+                break
+            self.update_models(
+                result,
+                model,
+                train_output,
+                f"last_{i}",
+                self.get_ith_ckp_prefix(iter_ids[-i]),
+            )
+        self.update_models(
+            result, model, train_output, "best", self.get_best_ckp_prefix()
+        )
+        return result
+
+    def update_models(self, result, model, train_output, model_key, ckp_prefix):
+        """update info of the models to be saved"""
+        pdparams = train_output.joinpath(
+            ".".join([ckp_prefix, self.get_the_pdparams_suffix()])
+        )
+        if pdparams.exists():
+            recorder_key = f"{train_output.name}_{model_key}"
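+            # skip non-best checkpoints that have already been recorded and are unchanged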
+            if (
+                model_key != "best"
+                and recorder_key in self.model_recorder
+                and self.model_recorder[recorder_key] == pdparams
+            ):
+                return
+
+            self.model_recorder[recorder_key] = pdparams
+
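+            # the EMA weights, optimizer state and eval-state files are optional companions of the checkpoint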
+            pdema = ""
+            pdema_suffix = self.get_the_pdema_suffix()
+            if pdema_suffix:
+                pdema = pdparams.parents[1].joinpath(
+                    ".".join([ckp_prefix, pdema_suffix])
+                )
+                if not pdema.exists():
+                    pdema = ""
+
+            pdopt = ""
+            pdopt_suffix = self.get_the_pdopt_suffix()
+            if pdopt_suffix:
+                pdopt = pdparams.parents[1].joinpath(
+                    ".".join([ckp_prefix, pdopt_suffix])
+                )
+                if not pdopt.exists():
+                    pdopt = ""
+
+            pdstates = ""
+            pdstates_suffix = self.get_the_pdstates_suffix()
+            if pdstates_suffix:
+                pdstates = pdparams.parents[1].joinpath(
+                    ".".join([ckp_prefix, pdstates_suffix])
+                )
+                if not pdstates.exists():
+                    pdstates = ""
+
+            score = (
+                self.get_score(Path(pdstates).resolve().as_posix()) if pdstates else 0
+            )
+
+            result["models"][model_key] = {
+                "score": score,
+                "pdparams": pdparams,
+                "pdema": pdema,
+                "pdopt": pdopt,
+                "pdstates": pdstates,
+            }
+
+            self.update_inference_model(
+                model,
+                pdparams,
+                train_output.joinpath(ckp_prefix),
+                result["models"][model_key],
+            )

+ 10 - 5
paddlex/modules/base/predictor/predictor.py

@@ -75,11 +75,16 @@ Env: {self.kernel_option}"
             else self._get_pre_transforms_from_config()
         )
         logging.debug(f"Preprocess Ops: {self._format_transforms(pre_tfs)}")
-        post_tfs = (
-            post_transforms
-            if post_transforms is not None
-            else self._get_post_transforms_from_config()
-        )
+        if self.model_name == "STFPM":
+            post_tfs = (
+                self._get_STFPM_post_transforms_from_config()
+            )
+        else:
+            post_tfs = (
+                post_transforms
+                if post_transforms is not None
+                else self._get_post_transforms_from_config()
+            )
         logging.debug(f"Postprocessing: {self._format_transforms(post_tfs)}")
         return pre_tfs, post_tfs
 

+ 1 - 0
paddlex/modules/base/predictor/utils/official_models.py

@@ -212,6 +212,7 @@ openatom_rec_svtrv2_ch_infer.tar",
     "PatchTST_ad": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b1/PatchTST_ad_infer.tar",
     "TimesNet_ad": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b1/TimesNet_ad_infer.tar",
     "TimesNet_cls": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b1/TimesNet_cls_infer.tar",
+    "STFPM": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0b1/STFPM_infer.tar",
 }
 
 

+ 49 - 0
paddlex/repo_apis/PaddleSeg_api/configs/STFPM.yaml

@@ -0,0 +1,49 @@
+batch_size: 1
+iters: 20
+output_op: none
+
+model:
+  type: STFPM
+  backbone:
+    type: ResNet18
+
+train_dataset:
+  type: Dataset
+  num_classes: 1
+  dataset_root: /mv_dataset/hazelnut
+  transforms:
+    - type: Resize
+      target_size: [256, 256]
+    - type: Normalize
+      mean: [0.485, 0.456, 0.406]
+      std: [0.229, 0.224, 0.225]
+  mode: train
+
+val_dataset:
+  type: Dataset
+  num_classes: 1
+  dataset_root: /mv_dataset/hazelnut
+  transforms:
+    - type: Resize
+      target_size: [256, 256]
+    - type: Normalize
+      mean: [0.485, 0.456, 0.406]
+      std: [0.229, 0.224, 0.225]
+  mode: val
+
+loss:
+  types:
+    - type: DistillationLoss
+  coef: [1]
+
+optimizer:
+  type: SGD
+  momentum: 0.9
+  weight_decay: 1.0e-4
+
+lr_scheduler:
+  type: PolynomialDecay
+  learning_rate: 0.4
+  end_lr: 0.4
+  power: 0.9
+

+ 6 - 2
paddlex/repo_apis/PaddleSeg_api/seg/model.py

@@ -339,12 +339,16 @@ class SegModel(BaseModel):
         if input_shape is not None:
             cli_args.append(CLIArgument("--input_shape", *input_shape))
 
-        output_op = kwargs.pop("output_op", None)
+        try:
+            output_op = config["output_op"]
+        except KeyError:
+            output_op = kwargs.pop("output_op", None)
         if output_op is not None:
             assert output_op in [
                 "softmax",
                 "argmax",
-            ], "`output_op` must be 'softmax' or 'argmax'."
+                "none",
+            ], "`output_op` must be 'none', 'softmax' or 'argmax'."
             cli_args.append(CLIArgument("--output_op", output_op))
 
         # PDX related settings

+ 10 - 0
paddlex/repo_apis/PaddleSeg_api/seg/register.py

@@ -224,6 +224,16 @@ register_model_info(
 )
 
 
+register_model_info(
+    {
+        "model_name": "STFPM",
+        "suite": "Seg",
+        "config_path": osp.join(PDX_CONFIG_DIR, "STFPM.yaml"),
+        "supported_apis": ["train", "evaluate", "predict", "export"],
+    }
+)
+
+
 # For compatibility
 def _set_alias(model_name, alias):
     from ...base.register import get_registered_model_info

+ 1 - 0
requirements.txt

@@ -17,3 +17,4 @@ parsley
 requests
 tokenizers==0.19.1
 GPUtil>=1.4.0
+scikit-image