From bb77cd9855adff18de60a74d8a45dce964407099 Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Mon, 6 Mar 2023 14:49:37 +0800
Subject: [PATCH 01/32] [Doc] Add zh_cn add_transforms doc (#2673)
as title
---
docs/zh_cn/advanced_guides/add_transforms.md | 52 +++++++++++++++++++-
1 file changed, 50 insertions(+), 2 deletions(-)
diff --git a/docs/zh_cn/advanced_guides/add_transforms.md b/docs/zh_cn/advanced_guides/add_transforms.md
index 58a2485e04..d7206680d3 100644
--- a/docs/zh_cn/advanced_guides/add_transforms.md
+++ b/docs/zh_cn/advanced_guides/add_transforms.md
@@ -1,3 +1,51 @@
-# 新增数据增强(待更新)
+# 新增数据增强
-中文版文档支持中,请先阅读[英文版本](../../en/advanced_guides/add_transform.md)
+## 自定义数据增强
+
+自定义数据增强必须继承 `BaseTransform` 并实现 `transform` 函数。这里我们使用一个简单的翻转变换作为示例:
+
+```python
+import random
+import mmcv
+from mmcv.transforms import BaseTransform, TRANSFORMS
+
+@TRANSFORMS.register_module()
+class MyFlip(BaseTransform):
+ def __init__(self, direction: str):
+ super().__init__()
+ self.direction = direction
+
+ def transform(self, results: dict) -> dict:
+ img = results['img']
+ results['img'] = mmcv.imflip(img, direction=self.direction)
+ return results
+```
+
+此外,新的类需要被导入。
+
+```python
+from .my_pipeline import MyFlip
+```
+
+这样,我们就可以实例化一个 `MyFlip` 对象并使用它来处理数据字典。
+
+```python
+import numpy as np
+
+transform = MyFlip(direction='horizontal')
+data_dict = {'img': np.random.rand(224, 224, 3)}
+data_dict = transform(data_dict)
+processed_img = data_dict['img']
+```
+
+或者,我们可以在配置文件中的数据流程中使用 `MyFlip` 变换。
+
+```python
+pipeline = [
+ ...
+ dict(type='MyFlip', direction='horizontal'),
+ ...
+]
+```
+
+需要注意,如果要在配置文件中使用 `MyFlip`,必须确保在运行时导入了包含 `MyFlip` 的文件。
From e541d1acd48cac67d9a71a1bcadba7cce3bb9c4e Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Mon, 6 Mar 2023 18:03:12 +0800
Subject: [PATCH 02/32] [Doc] Add zh_cn add_models doc & fix link (#2702)
as title
---
docs/en/advanced_guides/add_models.md | 2 +-
docs/zh_cn/advanced_guides/add_models.md | 261 ++++++++++++++++++++++-
2 files changed, 260 insertions(+), 3 deletions(-)
diff --git a/docs/en/advanced_guides/add_models.md b/docs/en/advanced_guides/add_models.md
index 1f1969db39..e470e48ef2 100644
--- a/docs/en/advanced_guides/add_models.md
+++ b/docs/en/advanced_guides/add_models.md
@@ -202,7 +202,7 @@ In MMSegmentation 1.x versions, we use [SegDataPreProcessor](https://github.com/
## Develop new segmentors
-The segmentor is an algorithmic architecture in which users can customize their algorithms by adding customized components and defining the logic of algorithm execution. Please refer to [the model document](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/advanced_guides/models.md) for more details.
+The segmentor is an algorithmic architecture in which users can customize their algorithms by adding customized components and defining the logic of algorithm execution. Please refer to [the model document](./models.md) for more details.
Since the [BaseSegmentor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/segmentors/base.py#L15) in MMSegmentation unifies three modes for a forward process, to develop a new segmentor, users need to overwrite `loss`, `predict` and `_forward` methods corresponding to the `loss`, `predict` and `tensor` modes.
diff --git a/docs/zh_cn/advanced_guides/add_models.md b/docs/zh_cn/advanced_guides/add_models.md
index 3f86a0c7c6..2f0a5af0d1 100644
--- a/docs/zh_cn/advanced_guides/add_models.md
+++ b/docs/zh_cn/advanced_guides/add_models.md
@@ -1,3 +1,260 @@
-# 新增模块(待更新)
+# 新增模块
-中文版文档支持中,请先阅读[英文版本](../../en/advanced_guides/add_models.md)
+## 开发新组件
+
+我们可以自定义 [模型文档](./models.md) 中介绍的所有组件,例如**主干网络(backbone)**、**头(head)**、**损失函数(loss function)**和**数据预处理器(data preprocessor)**。
+
+### 添加新的主干网络(backbone)
+
+在这里,我们以 MobileNet 为例展示如何开发新的主干网络。
+
+1. 创建一个新文件 `mmseg/models/backbones/mobilenet.py`。
+
+ ```python
+ import torch.nn as nn
+
+ from mmseg.registry import MODELS
+
+
+ @MODELS.register_module()
+ class MobileNet(nn.Module):
+
+ def __init__(self, arg1, arg2):
+ pass
+
+ def forward(self, x): # should return a tuple
+ pass
+
+ def init_weights(self, pretrained=None):
+ pass
+ ```
+
+2. 在 `mmseg/models/backbones/__init__.py` 中引入模块。
+
+ ```python
+ from .mobilenet import MobileNet
+ ```
+
+3. 在配置文件中使用它。
+
+ ```python
+ model = dict(
+ ...
+ backbone=dict(
+ type='MobileNet',
+ arg1=xxx,
+ arg2=xxx),
+ ...
+ ```
+
+### 添加新的头(head)
+
+在 MMSegmentation 中,我们提供 [BaseDecodeHead](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/decode_heads/decode_head.py#L17) 用于开发所有分割头。
+所有新实现的解码头都应该从中派生出来。
+接下来我们以 [PSPNet](https://arxiv.org/abs/1612.01105) 为例说明如何开发新的头。
+
+首先,在 `mmseg/models/decode_heads/psp_head.py` 中添加一个新的解码头。
+PSPNet 实现了用于分割解码的解码头。
+为了实现解码头,在新模块中我们需要执行以下三个函数。
+
+```python
+from mmseg.registry import MODELS
+
+@MODELS.register_module()
+class PSPHead(BaseDecodeHead):
+
+ def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
+ super(PSPHead, self).__init__(**kwargs)
+
+ def init_weights(self):
+ pass
+
+ def forward(self, inputs):
+ pass
+```
+
+接下来,用户需要在 `mmseg/models/decode_heads/__init__.py` 中添加模块,这样相应的注册器就可以找到并加载它们。
+
+PSPNet 的配置文件如下
+
+```python
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='pretrain_model/resnet50_v1c_trick-2cccc1ad.pth',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='PSPHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ pool_scales=(1, 2, 3, 6),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
+
+```
+
+### 添加新的损失函数(loss)
+
+假设您想为分割解码添加一个叫做 `MyLoss` 的新的损失函数。
+要添加新的损失函数,用户需要在 `mmseg/models/losses/my_loss.py` 中实现它。
+修饰器 `weighted_loss` 可以对损失的每个元素进行加权。
+
+```python
+import torch
+import torch.nn as nn
+
+from mmseg.registry import MODELS
+from .utils import weighted_loss
+
+@weighted_loss
+def my_loss(pred, target):
+ assert pred.size() == target.size() and target.numel() > 0
+ loss = torch.abs(pred - target)
+ return loss
+
+@MODELS.register_module()
+class MyLoss(nn.Module):
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(MyLoss, self).__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss = self.loss_weight * my_loss(
+ pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+ return loss
+```
+
+然后,用户需要将其添加到 `mmseg/models/losses/__init__.py` 中。
+
+```python
+from .my_loss import MyLoss, my_loss
+
+```
+
+要使用它,请修改 `loss_xx` 字段。
+然后需要修改头中的 `loss_decode` 字段。
+`loss_weight` 可用于平衡多重损失。
+
+```python
+loss_decode=dict(type='MyLoss', loss_weight=1.0))
+```
+
+### 添加新的数据预处理器(data preprocessor)
+
+在 MMSegmentation 1.x 版本中,我们使用 [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/models/data_preprocessor.py#L13) 将数据复制到目标设备,并将数据预处理为默认的模型输入格式。这里我们将展示如何开发一个新的数据预处理器。
+
+1. 创建一个新文件 `mmseg/models/my_datapreprocessor.py`。
+
+ ```python
+ from mmengine.model import BaseDataPreprocessor
+
+ from mmseg.registry import MODELS
+
+ @MODELS.register_module()
+ class MyDataPreProcessor(BaseDataPreprocessor):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def forward(self, data: dict, training: bool=False) -> Dict[str, Any]:
+ # TODO Define the logic for data pre-processing in the forward method
+ pass
+ ```
+
+2. 在 `mmseg/models/__init__.py` 中导入数据预处理器
+
+ ```python
+ from .my_datapreprocessor import MyDataPreProcessor
+ ```
+
+3. 在配置文件中使用它。
+
+ ```python
+ model = dict(
+        data_preprocessor=dict(type='MyDataPreProcessor')
+ ...
+ )
+ ```
+
+## 开发新的分割器(segmentor)
+
+分割器是一种用户可以通过添加自定义组件和定义算法执行逻辑来自定义其算法的算法架构。请参考[模型文档](./models.md)了解更多详情。
+
+由于 MMSegmentation 中的 [BaseSegmentor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/segmentors/base.py#L15) 统一了前向过程的三种模式,为了开发新的分割器,用户需要重写与 `loss`、`predict` 和 `tensor` 相对应的 `loss`、`predict` 和 `_forward` 方法。
+
+这里我们将展示如何开发一个新的分割器。
+
+1. 创建一个新文件 `mmseg/models/segmentors/my_segmentor.py`。
+
+ ```python
+ from typing import Dict, Optional, Union
+
+ import torch
+
+ from mmseg.registry import MODELS
+ from mmseg.models import BaseSegmentor
+
+ @MODELS.register_module()
+ class MySegmentor(BaseSegmentor):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ # TODO users should build components of the network here
+
+ def loss(self, inputs: Tensor, data_samples: SampleList) -> dict:
+ """Calculate losses from a batch of inputs and data samples."""
+ pass
+
+ def predict(self, inputs: Tensor, data_samples: OptSampleList=None) -> SampleList:
+ """Predict results from a batch of inputs and data samples with post-
+ processing."""
+ pass
+
+ def _forward(self,
+ inputs: Tensor,
+ data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
+ """Network forward process.
+
+ Usually includes backbone, neck and head forward without any post-
+ processing.
+ """
+ pass
+ ```
+
+2. 在 `mmseg/models/segmentors/__init__.py` 中导入分割器。
+
+ ```python
+ from .my_segmentor import MySegmentor
+ ```
+
+3. 在配置文件中使用它。
+
+ ```python
+ model = dict(
+ type='MySegmentor'
+ ...
+ )
+ ```
From 6c3599bd9dd7106de7b9167e5e5113e72e9ea435 Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Tue, 7 Mar 2023 11:47:10 +0800
Subject: [PATCH 03/32] [Doc] Add zh_cn models doc and fix en doc typo (#2703)
as title
---------
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
---
docs/en/advanced_guides/models.md | 10 +-
docs/zh_cn/advanced_guides/models.md | 176 ++++++++++++++++++++++++++-
2 files changed, 179 insertions(+), 7 deletions(-)
diff --git a/docs/en/advanced_guides/models.md b/docs/en/advanced_guides/models.md
index 8202e95b7c..84e6cb6a9e 100644
--- a/docs/en/advanced_guides/models.md
+++ b/docs/en/advanced_guides/models.md
@@ -1,7 +1,5 @@
# Models
-# Models
-
We usually define a neural network in a deep learning task as a model, and this model is the core of an algorithm. [MMEngine](https://github.com/open-mmlab/mmengine) abstracts a unified model [BaseModel](https://github.com/open-mmlab/mmengine/blob/main/mmengine/model/base_model/base_model.py#L16) to standardize the interfaces for training, testing and other processes. All models implemented by MMSegmentation inherit from `BaseModel`, and in MMSegmentation we implemented forward and added some functions for the semantic segmentation algorithm.
## Common components
@@ -22,9 +20,9 @@ In MMSegmentation, we abstract the network architecture as a **Segmentor**, it i
**Neck** is the part that connects the backbone and heads. It performs some refinements or reconfigurations on the raw feature maps produced by the backbone. An example is **Feature Pyramid Network (FPN)**.
-### Decode Head
+### Decode head
-**Decode Head** is the part that transforms the feature maps into a segmentation mask, such as **PSPNet**.
+**Decode head** is the part that transforms the feature maps into a segmentation mask, such as **PSPNet**.
### Auxiliary head
@@ -110,7 +108,7 @@ Parameters:
- data (dict or tuple or list) - Data sampled from the dataset. In MMSegmentation, the data dict contains `inputs` and `data_samples` two fields.
- optim_wrapper (OptimWrapper) - OptimWrapper instance used to update model parameters.
-**Note:** [OptimWrapper](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/optimizer_wrapper.py#L17) provides a common interface for updating parameters, please refer to optimizer wrapper [documentation](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information.
+**Note:** [OptimWrapper](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/optimizer_wrapper.py#L17) provides a common interface for updating parameters, please refer to optimizer wrapper [documentation](https://mmengine.readthedocs.io/en/latest/tutorials/optim_wrapper.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information.
Returns:
@@ -157,7 +155,7 @@ The parameters of the `SegDataPreProcessor` constructor:
- pad_val (float, optional) - Padding value. Default: 0.
- seg_pad_val (float, optional) - Padding value of segmentation map. Default: 255.
- bgr_to_rgb (bool) - whether to convert image from BGR to RGB. Defaults to False.
-- rgb_to_bgr (bool) - whether to convert image from RGB to RGB. Defaults to False.
+- rgb_to_bgr (bool) - whether to convert image from RGB to BGR. Defaults to False.
- batch_augments (list\[dict\], optional) - Batch-level augmentations. Default to None.
The data will be processed as follows:
diff --git a/docs/zh_cn/advanced_guides/models.md b/docs/zh_cn/advanced_guides/models.md
index 62dbea38c4..408a57863c 100644
--- a/docs/zh_cn/advanced_guides/models.md
+++ b/docs/zh_cn/advanced_guides/models.md
@@ -1,3 +1,177 @@
# 模型
-中文版文档支持中,请先阅读[英文版本](../../en/advanced_guides/models.md)
+我们通常将深度学习任务中的神经网络定义为模型,这个模型即是算法的核心。[MMEngine](https://github.com/open-mmlab/mmengine) 抽象出了一个统一模型 [BaseModel](https://github.com/open-mmlab/mmengine/blob/main/mmengine/model/base_model/base_model.py#L16) 以标准化训练、测试和其他过程。MMSegmentation 实现的所有模型都继承自 `BaseModel`,并且在 MMSegmentation 中,我们实现了前向传播并为语义分割算法添加了一些功能。
+
+## 常用组件
+
+### 分割器(Segmentor)
+
+在 MMSegmentation 中,我们将网络架构抽象为**分割器**,它是一个包含网络所有组件的模型。我们已经实现了**编码器解码器(EncoderDecoder)**和**级联编码器解码器(CascadeEncoderDecoder)**,它们通常由**数据预处理器**、**主干网络**、**解码头**和**辅助头**组成。
+
+### 数据预处理器(Data preprocessor)
+
+**数据预处理器**是将数据复制到目标设备并将数据预处理为模型输入格式的部分。
+
+### 主干网络(Backbone)
+
+**主干网络**是将图像转换为特征图的部分,例如没有最后全连接层的 **ResNet-50**。
+
+### 颈部(Neck)
+
+**颈部**是连接主干网络和头的部分。它对主干网络生成的原始特征图进行一些改进或重新配置。例如 **Feature Pyramid Network(FPN)**。
+
+### 解码头(Decode head)
+
+**解码头**是将特征图转换为分割掩膜的部分,例如 **PSPNet**。
+
+### 辅助头(Auxiliary head)
+
+**辅助头**是一个可选组件,它将特征图转换为仅用于计算辅助损失的分割掩膜。
+
+## 基本接口
+
+MMSegmentation 封装 `BaseModel` 并实现了 [BaseSegmentor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/segmentors/base.py#L15) 类,主要提供 `forward`、`train_step`、`val_step` 和 `test_step` 接口。接下来将详细介绍这些接口。
+
+### forward
+
+
+
+ 编码器解码器数据流
+
+
+
+
+ 级联编码器解码器数据流
+
+
+`forward` 方法返回训练、验证、测试和简单推理过程的损失或预测。
+
+该方法应接受三种模式:“tensor”、“predict” 和 “loss”:
+
+- “tensor”:前向推理整个网络并返回张量或张量数组,无需任何后处理,与常见的 `nn.Module` 相同。
+- “predict”:前向推理并返回预测值,这些预测值将被完全处理到 `SegDataSample` 列表中。
+- “loss”:前向推理并根据给定的输入和数据样本返回损失的`字典`。
+
+**注:**[SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py) 是 MMSegmentation 的数据结构接口,用作不同组件之间的接口。`SegDataSample` 实现了抽象数据元素 `mmengine.structures.BaseDataElement`,请参阅 [MMEngine](https://github.com/open-mmlab/mmengine) 中的 [SegDataSample 文档](https://mmsegmentation.readthedocs.io/zh_CN/1.x/advanced_guides/structures.html)和[数据元素文档](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/data_element.html)了解更多信息。
+
+注意,此方法不处理在 `train_step` 方法中完成的反向传播或优化器更新。
+
+参数:
+
+- inputs(torch.Tensor)- 通常为形状是(N, C, ...) 的输入张量。
+- data_sample(list\[[SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py)\]) - 分割数据样本。它通常包括 `metainfo` 和 `gt_sem_seg` 等信息。默认值为 None。
+- mode (str) - 返回什么类型的值。默认为 'tensor'。
+
+返回值:
+
+- `dict` 或 `list`:
+ - 如果 `mode == "loss"`,则返回用于反向过程和日志记录的损失张量`字典`。
+  - 如果 `mode == "predict"`,则返回 `SegDataSample` 的`列表`,推理结果将被递增地添加到传递给 forward 方法的 `data_sample` 参数中,每个 `SegDataSample` 包含以下关键词:
+    - pred_sem_seg (`PixelData`):语义分割的预测。
+ - seg_logits (`PixelData`):标准化前语义分割的预测指标。
+ - 如果 `mode == "tensor"`,则返回`张量`或`张量数组`的`字典`以供自定义使用。
+
+### 预测模式
+
+我们在[配置文档](../user_guides/1_config.md)中简要描述了模型配置的字段,这里我们详细介绍 `model.test_cfg` 字段。`model.test_cfg` 用于控制前向行为,`"predict"` 模式下的 `forward` 方法可以在两种模式下运行:
+
+- `whole_inference`:如果 `cfg.model.test_cfg.mode == 'whole'`,则模型将使用完整图像进行推理。
+
+ `whole_inference` 模式的一个示例配置:
+
+ ```python
+ model = dict(
+ type='EncoderDecoder'
+ ...
+ test_cfg=dict(mode='whole')
+ )
+ ```
+
+- `slide_inference`:如果 `cfg.model.test_cfg.mode == 'slide'`,则模型将通过滑动窗口进行推理。**注意:** 如果选择 `slide` 模式,还应指定 `cfg.model.test_cfg.stride` 和 `cfg.model.test_cfg.crop_size`。
+
+ `slide_inference` 模式的一个示例配置:
+
+ ```python
+ model = dict(
+ type='EncoderDecoder'
+ ...
+ test_cfg=dict(mode='slide', crop_size=256, stride=170)
+ )
+ ```
+
+### train_step
+
+`train_step` 方法调用 `loss` 模式的前向接口以获得损失`字典`。`BaseModel` 类实现默认的模型训练过程,包括预处理、模型前向传播、损失计算、优化和反向传播。
+
+参数:
+
+- data (dict or tuple or list) - 从数据集采样的数据。在 MMSegmentation 中,数据字典包含 `inputs` 和 `data_samples` 两个字段。
+- optim_wrapper (OptimWrapper) - 用于更新模型参数的 OptimWrapper 实例。
+
+**注:**[OptimWrapper](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/optimizer_wrapper.py#L17) 提供了一个用于更新参数的通用接口,请参阅 [MMEngine](https://github.com/open-mmlab/mmengine) 中的优化器封装[文档](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html)了解更多信息。
+
+返回值:
+
+- Dict\[str, `torch.Tensor`\]:用于记录日志的张量的`字典`。
+
+
+
+ train_step 数据流
+
+
+### val_step
+
+`val_step` 方法调用 `predict` 模式的前向接口并返回预测结果,预测结果将进一步被传递给评测器的进程接口和钩子的 `after_val_iter` 接口。
+
+参数:
+
+- data (`dict` or `tuple` or `list`) - 从数据集中采样的数据。在 MMSegmentation 中,数据字典包含 `inputs` 和 `data_samples` 两个字段。
+
+返回值:
+
+- `list` - 给定数据的预测结果。
+
+
+
+ test_step/val_step 数据流
+
+
+### test_step
+
+`BaseModel` 中 `test_step` 与 `val_step` 的实现相同。
+
+## 数据预处理器(Data Preprocessor)
+
+MMSegmentation 实现的 [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/data_preprocessor.py#L13) 继承自由 [MMEngine](https://github.com/open-mmlab/mmengine) 实现的 [BaseDataPreprocessor](https://github.com/open-mmlab/mmengine/blob/main/mmengine/model/base_model/data_preprocessor.py#L18),提供数据预处理和将数据复制到目标设备的功能。
+
+Runner 在构建阶段将模型传送到指定的设备,而 [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/data_preprocessor.py#L13) 在 `train_step`、`val_step` 和 `test_step` 中将数据传送到指定设备,之后处理后的数据将被进一步传递给模型。
+
+`SegDataPreProcessor` 构造函数的参数:
+
+- mean (Sequence\[Number\], 可选) - R、G、B 通道的像素平均值。默认为 None。
+- std (Sequence\[Number\], 可选) - R、G、B 通道的像素标准差。默认为 None。
+- size (tuple, 可选) - 固定的填充大小。
+- size_divisor (int, 可选) - 填充大小的除法因子。
+- pad_val (float, 可选) - 填充值。默认值:0。
+- seg_pad_val (float, 可选) - 分割图的填充值。默认值:255。
+- bgr_to_rgb (bool) - 是否将图像从 BGR 转换为 RGB。默认为 False。
+- rgb_to_bgr (bool) - 是否将图像从 RGB 转换为 BGR。默认为 False。
+- batch_augments (list\[dict\], 可选) - 批量化的数据增强。默认值为 None。
+
+数据将按如下方式处理:
+
+- 收集数据并将其移动到目标设备。
+- 用定义的 `pad_val` 将输入填充到输入大小,并用定义的 `seg_pad_val` 填充分割图。
+- 将输入堆栈到 batch_input。
+- 如果输入的形状为 (3, H, W),则将输入从 BGR 转换为 RGB。
+- 使用定义的标准差和平均值标准化图像。
+- 在训练期间进行如 Mixup 和 Cutmix 的批量化数据增强。
+
+`forward` 方法的参数:
+
+- data (dict) - 从数据加载器采样的数据。
+- training (bool) - 是否启用训练时数据增强。
+
+`forward` 方法的返回值:
+
+- Dict:与模型输入格式相同的数据。
From a8aafdd902b9fe73596fb4b8c03ec693416acf42 Mon Sep 17 00:00:00 2001
From: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Date: Tue, 7 Mar 2023 17:57:37 +0800
Subject: [PATCH 04/32] [Fix] Support format_result and fix prefix param in
cityscape metric, and rename CitysMetric to CityscapesMetric (#2660)
as title
---
mmseg/evaluation/__init__.py | 4 +-
mmseg/evaluation/metrics/__init__.py | 4 +-
mmseg/evaluation/metrics/citys_metric.py | 116 ++++++++++--------
.../test_metrics/test_citys_metric.py | 44 ++++---
4 files changed, 94 insertions(+), 74 deletions(-)
diff --git a/mmseg/evaluation/__init__.py b/mmseg/evaluation/__init__.py
index c28bb75cb4..a82008f3ad 100644
--- a/mmseg/evaluation/__init__.py
+++ b/mmseg/evaluation/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from .metrics import CitysMetric, IoUMetric
+from .metrics import CityscapesMetric, IoUMetric
-__all__ = ['IoUMetric', 'CitysMetric']
+__all__ = ['IoUMetric', 'CityscapesMetric']
diff --git a/mmseg/evaluation/metrics/__init__.py b/mmseg/evaluation/metrics/__init__.py
index aec08bb071..0aa39e480c 100644
--- a/mmseg/evaluation/metrics/__init__.py
+++ b/mmseg/evaluation/metrics/__init__.py
@@ -1,5 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from .citys_metric import CitysMetric
+from .citys_metric import CityscapesMetric
from .iou_metric import IoUMetric
-__all__ = ['IoUMetric', 'CitysMetric']
+__all__ = ['IoUMetric', 'CityscapesMetric']
diff --git a/mmseg/evaluation/metrics/citys_metric.py b/mmseg/evaluation/metrics/citys_metric.py
index 50e9ea68a0..a2c008b99d 100644
--- a/mmseg/evaluation/metrics/citys_metric.py
+++ b/mmseg/evaluation/metrics/citys_metric.py
@@ -1,30 +1,41 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
-from typing import Dict, List, Optional, Sequence
+import shutil
+from collections import OrderedDict
+from typing import Dict, Optional, Sequence
+
+try:
+
+ import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa
+ import cityscapesscripts.helpers.labels as CSLabels
+except ImportError:
+ CSLabels = None
+ CSEval = None
import numpy as np
+from mmengine.dist import is_main_process, master_only
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger, print_log
-from mmengine.utils import mkdir_or_exist, scandir
+from mmengine.utils import mkdir_or_exist
from PIL import Image
from mmseg.registry import METRICS
@METRICS.register_module()
-class CitysMetric(BaseMetric):
+class CityscapesMetric(BaseMetric):
"""Cityscapes evaluation metric.
Args:
+ output_dir (str): The directory for output prediction
ignore_index (int): Index that will be ignored in evaluation.
Default: 255.
- citys_metrics (list[str] | str): Metrics to be evaluated,
- Default: ['cityscapes'].
- to_label_id (bool): whether convert output to label_id for
- submission. Default: True.
- suffix (str): The filename prefix of the png files.
- If the prefix is "somepath/xxx", the png files will be
- named "somepath/xxx.png". Default: '.format_cityscapes'.
+ format_only (bool): Only format result for results commit without
+ perform evaluation. It is useful when you want to format the result
+ to a specific format and submit it to the test server.
+ Defaults to False.
+ keep_results (bool): Whether to keep the results. When ``format_only``
+ is True, ``keep_results`` must be True. Defaults to False.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
@@ -35,19 +46,34 @@ class CitysMetric(BaseMetric):
"""
def __init__(self,
+ output_dir: str,
ignore_index: int = 255,
- citys_metrics: List[str] = ['cityscapes'],
- to_label_id: bool = True,
- suffix: str = '.format_cityscapes',
+ format_only: bool = False,
+ keep_results: bool = False,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
-
+ if CSEval is None:
+ raise ImportError('Please run "pip install cityscapesscripts" to '
+ 'install cityscapesscripts first.')
+ self.output_dir = output_dir
self.ignore_index = ignore_index
- self.metrics = citys_metrics
- assert self.metrics[0] == 'cityscapes'
- self.to_label_id = to_label_id
- self.suffix = suffix
+
+ self.format_only = format_only
+ if format_only:
+ assert keep_results, (
+ 'When format_only is True, the results must be keep, please '
+ f'set keep_results as True, but got {keep_results}')
+ self.keep_results = keep_results
+ self.prefix = prefix
+ if is_main_process():
+ mkdir_or_exist(self.output_dir)
+
+ @master_only
+ def __del__(self) -> None:
+ """Clean up."""
+ if not self.keep_results:
+ shutil.rmtree(self.output_dir)
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data and data_samples.
@@ -59,26 +85,23 @@ def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
- mkdir_or_exist(self.suffix)
+ mkdir_or_exist(self.output_dir)
for data_sample in data_samples:
pred_label = data_sample['pred_sem_seg']['data'][0].cpu().numpy()
- # results2img
- if self.to_label_id:
- pred_label = self._convert_to_label_id(pred_label)
+ # when evaluating with official cityscapesscripts,
+ # labelIds should be used
+ pred_label = self._convert_to_label_id(pred_label)
basename = osp.splitext(osp.basename(data_sample['img_path']))[0]
- png_filename = osp.join(self.suffix, f'{basename}.png')
+ png_filename = osp.abspath(
+ osp.join(self.output_dir, f'{basename}.png'))
output = Image.fromarray(pred_label.astype(np.uint8)).convert('P')
- import cityscapesscripts.helpers.labels as CSLabels
- palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
- for label_id, label in CSLabels.id2label.items():
- palette[label_id] = label.color
- output.putpalette(palette)
output.save(png_filename)
-
- ann_dir = osp.join(data_samples[0]['seg_map_path'].split('val')[0],
- 'val')
- self.results.append(ann_dir)
+ # when evaluating with official cityscapesscripts,
+ # **_gtFine_labelIds.png is used
+ gt_filename = data_sample['seg_map_path'].replace(
+ 'labelTrainIds.png', 'labelIds.png')
+ self.results.append((png_filename, gt_filename))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
@@ -90,38 +113,28 @@ def compute_metrics(self, results: list) -> Dict[str, float]:
dict[str: float]: Cityscapes evaluation results.
"""
logger: MMLogger = MMLogger.get_current_instance()
- try:
- import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa
- except ImportError:
- raise ImportError('Please run "pip install cityscapesscripts" to '
- 'install cityscapesscripts first.')
- msg = 'Evaluating in Cityscapes style'
+ if self.format_only:
+ logger.info(f'results are saved to {osp.dirname(self.output_dir)}')
+ return OrderedDict()
+ msg = 'Evaluating in Cityscapes style'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
- result_dir = self.suffix
-
eval_results = dict()
- print_log(f'Evaluating results under {result_dir} ...', logger=logger)
+ print_log(
+ f'Evaluating results under {self.output_dir} ...', logger=logger)
CSEval.args.evalInstLevelScore = True
- CSEval.args.predictionPath = osp.abspath(result_dir)
+ CSEval.args.predictionPath = osp.abspath(self.output_dir)
CSEval.args.evalPixelAccuracy = True
CSEval.args.JSONOutput = False
- seg_map_list = []
- pred_list = []
- ann_dir = results[0]
- # when evaluating with official cityscapesscripts,
- # **_gtFine_labelIds.png is used
- for seg_map in scandir(ann_dir, 'gtFine_labelIds.png', recursive=True):
- seg_map_list.append(osp.join(ann_dir, seg_map))
- pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
+ pred_list, gt_list = zip(*results)
metric = dict()
eval_results.update(
- CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
+ CSEval.evaluateImgLists(pred_list, gt_list, CSEval.args))
metric['averageScoreCategories'] = eval_results[
'averageScoreCategories']
metric['averageScoreInstCategories'] = eval_results[
@@ -133,7 +146,6 @@ def _convert_to_label_id(result):
"""Convert trainId to id for cityscapes."""
if isinstance(result, str):
result = np.load(result)
- import cityscapesscripts.helpers.labels as CSLabels
result_copy = result.copy()
for trainId, label in CSLabels.trainId2label.items():
result_copy[result == trainId] = label.id
diff --git a/tests/test_evaluation/test_metrics/test_citys_metric.py b/tests/test_evaluation/test_metrics/test_citys_metric.py
index a6d6db5caa..0a20b41aee 100644
--- a/tests/test_evaluation/test_metrics/test_citys_metric.py
+++ b/tests/test_evaluation/test_metrics/test_citys_metric.py
@@ -1,15 +1,17 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
from unittest import TestCase
import numpy as np
+import pytest
import torch
from mmengine.structures import BaseDataElement, PixelData
-from mmseg.evaluation import CitysMetric
+from mmseg.evaluation import CityscapesMetric
from mmseg.structures import SegDataSample
-class TestCitysMetric(TestCase):
+class TestCityscapesMetric(TestCase):
def _demo_mm_inputs(self,
batch_size=1,
@@ -42,9 +44,8 @@ def _demo_mm_inputs(self,
gt_sem_seg_data = dict(data=gt_semantic_seg)
data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
mm_inputs['data_sample'] = data_sample.to_dict()
- mm_inputs['data_sample']['seg_map_path'] = \
- 'tests/data/pseudo_cityscapes_dataset/gtFine/val/\
- frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png'
+ mm_inputs['data_sample'][
+ 'seg_map_path'] = 'tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png' # noqa
mm_inputs['seg_map_path'] = mm_inputs['data_sample'][
'seg_map_path']
@@ -86,9 +87,8 @@ def _demo_mm_model_output(self,
for pred in batch_datasampes:
if isinstance(pred, BaseDataElement):
test_data = pred.to_dict()
- test_data['img_path'] = \
- 'tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/\
- frankfurt/frankfurt_000000_000294_leftImg8bit.png'
+ test_data[
+ 'img_path'] = 'tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png' # noqa
_predictions.append(test_data)
else:
@@ -104,15 +104,23 @@ def test_evaluate(self):
dict(**data, **result)
for data, result in zip(data_batch, predictions)
]
- iou_metric = CitysMetric(citys_metrics=['cityscapes'])
- iou_metric.process(data_batch, data_samples)
- res = iou_metric.evaluate(6)
- self.assertIsInstance(res, dict)
- # test to_label_id = True
- iou_metric = CitysMetric(
- citys_metrics=['cityscapes'], to_label_id=True)
- iou_metric.process(data_batch, data_samples)
- res = iou_metric.evaluate(6)
+ # test keep_results should be True when format_only is True
+ with pytest.raises(AssertionError):
+ CityscapesMetric(
+ output_dir='tmp', format_only=True, keep_results=False)
+
+ # test evaluate with cityscape metric
+ metric = CityscapesMetric(output_dir='tmp')
+ metric.process(data_batch, data_samples)
+ res = metric.evaluate(2)
self.assertIsInstance(res, dict)
+
+ # test format_only
+ metric = CityscapesMetric(
+ output_dir='tmp', format_only=True, keep_results=True)
+ metric.process(data_batch, data_samples)
+ metric.evaluate(2)
+ assert osp.exists('tmp')
+ assert osp.isfile('tmp/frankfurt_000000_000294_leftImg8bit.png')
import shutil
- shutil.rmtree('.format_cityscapes')
+ shutil.rmtree('tmp')
From 91c62abcf44528f007ffa220af8873499790337d Mon Sep 17 00:00:00 2001
From: MengzhangLI
Date: Tue, 7 Mar 2023 21:21:01 +0800
Subject: [PATCH 05/32] [Fix] Fix the correct location of FAQ in dev-1.x
(#2717)
---
docs/en/get_started.md | 2 +-
docs/zh_cn/get_started.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/en/get_started.md b/docs/en/get_started.md
index cf861b1fe8..b082508693 100644
--- a/docs/en/get_started.md
+++ b/docs/en/get_started.md
@@ -197,5 +197,5 @@ docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmsegmentation/data mmseg
## Trouble shooting
-If you have some issues during the installation, please first view the [FAQ](faq.md) page.
+If you have some issues during the installation, please first view the [FAQ](notes/faq.md) page.
You may [open an issue](https://github.com/open-mmlab/mmsegmentation/issues/new/choose) on GitHub if no solution is found.
diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md
index da6d728a15..38e93e9cb4 100644
--- a/docs/zh_cn/get_started.md
+++ b/docs/zh_cn/get_started.md
@@ -197,4 +197,4 @@ docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmsegmentation/data mmseg
## 问题解答
-如果您在安装过程中遇到了其他问题,请第一时间查阅 [FAQ](faq.md) 文件。如果没有找到答案,您也可以在 GitHub 上提出 [issue](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
+如果您在安装过程中遇到了其他问题,请第一时间查阅 [FAQ](notes/faq.md) 文件。如果没有找到答案,您也可以在 GitHub 上提出 [issue](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
From 7e41b5af8d769b6b4e0aa95943068b25ef7e3c75 Mon Sep 17 00:00:00 2001
From: jinxianwei <81373517+jinxianwei@users.noreply.github.com>
Date: Tue, 7 Mar 2023 21:23:04 +0800
Subject: [PATCH 06/32] en doc of visualization_feature_map.md (#2715)
## Motivation
En doc for visualization_feature_map.md and index.rst
## Modification
Add new file and change index.rst
---
docs/en/user_guides/index.rst | 1 +
.../user_guides/visualization_feature_map.md | 201 ++++++++++++++++++
2 files changed, 202 insertions(+)
create mode 100644 docs/en/user_guides/visualization_feature_map.md
diff --git a/docs/en/user_guides/index.rst b/docs/en/user_guides/index.rst
index 9e7d365925..1feb1271ae 100644
--- a/docs/en/user_guides/index.rst
+++ b/docs/en/user_guides/index.rst
@@ -18,3 +18,4 @@ Useful Tools
visualization.md
useful_tools.md
deployment.md
+ visualization_feature_map.md
diff --git a/docs/en/user_guides/visualization_feature_map.md b/docs/en/user_guides/visualization_feature_map.md
new file mode 100644
index 0000000000..d61226f055
--- /dev/null
+++ b/docs/en/user_guides/visualization_feature_map.md
@@ -0,0 +1,201 @@
+# Wandb Feature Map Visualization
+
+MMSegmentation 1.x provides backend support for Weights & Biases to facilitate visualization and management of project code results.
+
+## Wandb Configuration
+
+Install Weights & Biases following [official instructions](https://docs.wandb.ai/quickstart) e.g.
+
+```shell
+pip install wandb
+wandb login
+```
+
+Add `WandbVisBackend` in `vis_backend` of `visualizer` in `default_runtime.py` config file:
+
+```python
+vis_backends=[dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend'),
+ dict(type='WandbVisBackend')]
+```
+
+## Examining feature map visualization in Wandb
+
+`SegLocalVisualizer` is a child class that inherits from `Visualizer` in MMEngine and works for MMSegmentation visualization, for more details about `Visualizer` please refer to [visualization tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/visualization.md) in MMEngine.
+
+Here is an example about `SegLocalVisualizer`, first you may download example data below by following commands:
+
+
+
+
+
+```shell
+wget https://user-images.githubusercontent.com/24582831/189833109-eddad58f-f777-4fc0-b98a-6bd429143b06.png --output-document aachen_000000_000019_leftImg8bit.png
+wget https://user-images.githubusercontent.com/24582831/189833143-15f60f8a-4d1e-4cbb-a6e7-5e2233869fac.png --output-document aachen_000000_000019_gtFine_labelTrainIds.png
+
+wget https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth
+
+```
+
+```python
+# Copyright (c) OpenMMLab. All rights reserved.
+from argparse import ArgumentParser
+from typing import Type
+
+import mmcv
+import torch
+import torch.nn as nn
+
+from mmengine.model import revert_sync_batchnorm
+from mmengine.structures import PixelData
+from mmseg.apis import inference_model, init_model
+from mmseg.structures import SegDataSample
+from mmseg.utils import register_all_modules
+from mmseg.visualization import SegLocalVisualizer
+
+
+class Recorder:
+ """record the forward output feature map and save to data_buffer."""
+
+ def __init__(self) -> None:
+ self.data_buffer = list()
+
+ def __enter__(self, ):
+ self._data_buffer = list()
+
+ def record_data_hook(self, model: nn.Module, input: Type, output: Type):
+ self.data_buffer.append(output)
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+
+def visualize(args, model, recorder, result):
+ seg_visualizer = SegLocalVisualizer(
+ vis_backends=[dict(type='WandbVisBackend')],
+ save_dir='temp_dir',
+ alpha=0.5)
+ seg_visualizer.dataset_meta = dict(
+ classes=model.dataset_meta['classes'],
+ palette=model.dataset_meta['palette'])
+
+ image = mmcv.imread(args.img, 'color')
+
+ seg_visualizer.add_datasample(
+ name='predict',
+ image=image,
+ data_sample=result,
+ draw_gt=False,
+ draw_pred=True,
+ wait_time=0,
+ out_file=None,
+ show=False)
+
+ # add feature map to wandb visualizer
+ for i in range(len(recorder.data_buffer)):
+ feature = recorder.data_buffer[i][0] # remove the batch
+ drawn_img = seg_visualizer.draw_featmap(
+ feature, image, channel_reduction='select_max')
+ seg_visualizer.add_image(f'feature_map{i}', drawn_img)
+
+ if args.gt_mask:
+ sem_seg = mmcv.imread(args.gt_mask, 'unchanged')
+ sem_seg = torch.from_numpy(sem_seg)
+ gt_mask = dict(data=sem_seg)
+ gt_mask = PixelData(**gt_mask)
+ data_sample = SegDataSample()
+ data_sample.gt_sem_seg = gt_mask
+
+ seg_visualizer.add_datasample(
+ name='gt_mask',
+ image=image,
+ data_sample=data_sample,
+ draw_gt=True,
+ draw_pred=False,
+ wait_time=0,
+ out_file=None,
+ show=False)
+
+ seg_visualizer.add_image('image', image)
+
+
+def main():
+ parser = ArgumentParser(
+ description='Draw the Feature Map During Inference')
+ parser.add_argument('img', help='Image file')
+ parser.add_argument('config', help='Config file')
+ parser.add_argument('checkpoint', help='Checkpoint file')
+ parser.add_argument('--gt_mask', default=None, help='Path of gt mask file')
+ parser.add_argument('--out-file', default=None, help='Path to output file')
+ parser.add_argument(
+ '--device', default='cuda:0', help='Device used for inference')
+ parser.add_argument(
+ '--opacity',
+ type=float,
+ default=0.5,
+ help='Opacity of painted segmentation map. In (0, 1] range.')
+ parser.add_argument(
+ '--title', default='result', help='The image identifier.')
+ args = parser.parse_args()
+
+ register_all_modules()
+
+ # build the model from a config file and a checkpoint file
+ model = init_model(args.config, args.checkpoint, device=args.device)
+ if args.device == 'cpu':
+ model = revert_sync_batchnorm(model)
+
+ # show all named module in the model and use it in source list below
+ for name, module in model.named_modules():
+ print(name)
+
+ source = [
+ 'decode_head.fusion.stages.0.query_project.activate',
+ 'decode_head.context.stages.0.key_project.activate',
+ 'decode_head.context.bottleneck.activate'
+ ]
+ source = dict.fromkeys(source)
+
+ count = 0
+ recorder = Recorder()
+ # registry the forward hook
+ for name, module in model.named_modules():
+ if name in source:
+ count += 1
+ module.register_forward_hook(recorder.record_data_hook)
+ if count == len(source):
+ break
+
+ with recorder:
+ # test a single image, and record feature map to data_buffer
+ result = inference_model(model, args.img)
+
+ visualize(args, model, recorder, result)
+
+
+if __name__ == '__main__':
+ main()
+
+```
+
+Save the above code as feature_map_visual.py and execute the following code in terminal
+
+```shell
+python feature_map_visual.py ${image} ${config} ${checkpoint} [optional args]
+```
+
+e.g.
+
+```shell
+python feature_map_visual.py \
+aachen_000000_000019_leftImg8bit.png \
+configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py \
+ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth \
+--gt_mask aachen_000000_000019_gtFine_labelTrainIds.png
+```
+
+The visualized image result and its corresponding feature map will appear in the wandb account.
+
+
+
+
From 645dcf8c49e78529176f9f9893faa878f843cc65 Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Thu, 9 Mar 2023 22:27:42 +0800
Subject: [PATCH 07/32] [Doc] Add zh_cn evaluation doc and fix en typo (#2701)
as title
---------
Signed-off-by: csatsurnh
---
docs/en/advanced_guides/evaluation.md | 28 ++--
docs/zh_cn/advanced_guides/evaluation.md | 157 ++++++++++++++++++++++-
2 files changed, 170 insertions(+), 15 deletions(-)
diff --git a/docs/en/advanced_guides/evaluation.md b/docs/en/advanced_guides/evaluation.md
index ee5a927ff7..1e42db2a10 100644
--- a/docs/en/advanced_guides/evaluation.md
+++ b/docs/en/advanced_guides/evaluation.md
@@ -1,6 +1,6 @@
# Evaluation
-The evaluation procedure would be executed at [ValLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L300) and [TestLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L373), users can evaluate model performance during training or using the test script with simple settings in the configuration file. The `ValLoop` and `TestLoop` are properties of [Runner](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py#L59), they will be built the first time they are called. To build the `ValLoop` successfully, the `val_dataloader` and `val_evaluator` must be set when building `Runner` since `dataloder` and `evaluator` are required parameters, and the same goes for `TestLoop`. For more information about the Runner's design, please refer to the [documentoation](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md) of [MMEngine](https://github.com/open-mmlab/mmengine).
+The evaluation procedure would be executed at [ValLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L300) and [TestLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L373), users can evaluate model performance during training or using the test script with simple settings in the configuration file. The `ValLoop` and `TestLoop` are properties of [Runner](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py#L59), they will be built the first time they are called. To build the `ValLoop` successfully, the `val_dataloader` and `val_evaluator` must be set when building `Runner` since `dataloader` and `evaluator` are required parameters, and the same goes for `TestLoop`. For more information about the Runner's design, please refer to the [documentation](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md) of [MMEngine](https://github.com/open-mmlab/mmengine).
@@ -61,7 +61,7 @@ In MMSegmentation, the settings of `test_dataloader` and `test_evaluator` are th
## IoUMetric
-MMSegmentation implements [IoUMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/iou_metric.py) and [CitysMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/citys_metric.py) for evaluating the performance of models, based on the [BaseMetric](https://github.com/open-mmlab/mmengine/blob/main/mmengine/evaluator/metric.py) provided by [MMEngine](https://github.com/open-mmlab/mmengine). Please refer to [the documentation](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) for more details about the unified evaluation interface.
+MMSegmentation implements [IoUMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/iou_metric.py) and [CityscapesMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/citys_metric.py) for evaluating the performance of models, based on the [BaseMetric](https://github.com/open-mmlab/mmengine/blob/main/mmengine/evaluator/metric.py) provided by [MMEngine](https://github.com/open-mmlab/mmengine). Please refer to [the documentation](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) for more details about the unified evaluation interface.
Here we briefly describe the arguments and the two main methods of `IoUMetric`.
@@ -102,9 +102,9 @@ Returns:
- Dict\[str, float\] - The computed metrics. The keys are the names of the metrics, and the values are corresponding results. The key mainly includes **aAcc**, **mIoU**, **mAcc**, **mDice**, **mFscore**, **mPrecision**, **mRecall**.
-## CitysMetric
+## CityscapesMetric
-`CitysMetric` uses the official [CityscapesScripts](https://github.com/mcordts/cityscapesScripts) provided by Cityscapes to evaluate model performance.
+`CityscapesMetric` uses the official [CityscapesScripts](https://github.com/mcordts/cityscapesScripts) provided by Cityscapes to evaluate model performance.
### Usage
@@ -114,10 +114,10 @@ Before using it, please install the `cityscapesscripts` package first:
pip install cityscapesscripts
```
-Since the `IoUMetric` is used as the default evaluator in MMSegmentation, if you would like to use `CitysMetric`, customizing the config file is required. In your customized config file, you should overwrite the default evaluator as follows.
+Since the `IoUMetric` is used as the default evaluator in MMSegmentation, if you would like to use `CityscapesMetric`, customizing the config file is required. In your customized config file, you should overwrite the default evaluator as follows.
```python
-val_evaluator = dict(type='CitysMetric', citys_metrics=['cityscapes'])
+val_evaluator = dict(type='CityscapesMetric', output_dir='tmp')
test_evaluator = val_evaluator
```
@@ -125,27 +125,27 @@ test_evaluator = val_evaluator
The arguments of the constructor:
+- output_dir (str) - The directory for output prediction
- ignore_index (int) - Index that will be ignored in evaluation. Default: 255.
-- citys_metrics (list\[str\] | str) - Metrics to be evaluated, Default: \['cityscapes'\].
-- to_label_id (bool) - whether convert output to label_id for submission. Default: True.
-- suffix (str): The filename prefix of the png files. If the prefix is "somepath/xxx", the png files will be named "somepath/xxx.png". Default: '.format_cityscapes'.
-- collect_device (str): Device name used for collecting results from different ranks during distributed training. Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
-- prefix (str, optional): The prefix that will be added in the metric names to disambiguate homonymous metrics of different evaluators. If the prefix is not provided in the argument, self.default_prefix will be used instead. Defaults to None.
+- format_only (bool) - Only format result for results commit without perform evaluation. It is useful when you want to format the result to a specific format and submit it to the test server. Defaults to False.
+- keep_results (bool) - Whether to keep the results. When `format_only` is True, `keep_results` must be True. Defaults to False.
+- collect_device (str) - Device name used for collecting results from different ranks during distributed training. Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
+- prefix (str, optional) - The prefix that will be added in the metric names to disambiguate homonymous metrics of different evaluators. If prefix is not provided in the argument, self.default_prefix will be used instead. Defaults to None.
-#### CitysMetric.process
+#### CityscapesMetric.process
This method would draw the masks on images and save the painted images to `work_dir`.
Parameters:
-- data_batch (Any) - A batch of data from the dataloader.
+- data_batch (dict) - A batch of data from the dataloader.
- data_samples (Sequence\[dict\]) - A batch of outputs from the model.
Returns:
This method doesn't have returns, the annotations' path would be stored in `self.results`, which will be used to compute the metrics when all batches have been processed.
-#### CitysMetric.compute_metrics
+#### CityscapesMetric.compute_metrics
This method would call `cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling` tool to calculate metrics.
diff --git a/docs/zh_cn/advanced_guides/evaluation.md b/docs/zh_cn/advanced_guides/evaluation.md
index a82311ccc7..dc93a46e13 100644
--- a/docs/zh_cn/advanced_guides/evaluation.md
+++ b/docs/zh_cn/advanced_guides/evaluation.md
@@ -1,3 +1,158 @@
# 模型评测
-中文版文档支持中,请先阅读[英文版本](../../en/advanced_guides/evaluation.md)
+模型评测过程会分别在 [ValLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L300) 和 [TestLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L373) 中被执行,用户可以在训练期间或使用配置文件中简单设置的测试脚本进行模型性能评估。`ValLoop` 和 `TestLoop` 属于 [Runner](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py#L59),它们会在第一次被调用时构建。由于 `dataloader` 与 `evaluator` 是必需的参数,所以要成功构建 `ValLoop`,在构建 `Runner` 时必须设置 `val_dataloader` 和 `val_evaluator`,`TestLoop` 亦然。有关 Runner 设计的更多信息,请参阅 [MMEngine](https://github.com/open-mmlab/mmengine) 的[文档](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/design/runner.md)。
+
+
+
+ 测试/验证 数据流
+
+
+在 MMSegmentation 中,默认情况下,我们将 dataloader 和 metrics 的设置写在数据集配置文件中,并将 evaluation loop 的配置写在 `schedule_x` 配置文件中。
+
+例如,在 ADE20K 配置文件 `configs/_base_/dataset/ADE20K.py` 中,在第37到48行,我们配置了 `val_dataloader`,在第51行,我们选择 `IoUMetric` 作为 evaluator,并设置 `mIoU` 作为指标:
+
+```python
+val_dataloader = dict(
+ batch_size=1,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='images/validation',
+ seg_map_path='annotations/validation'),
+ pipeline=test_pipeline))
+
+val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
+```
+
+为了能够在训练期间进行评估模型,我们将评估配置添加到了 `configs/schedules/schedule_40k.py` 文件的第15至16行:
+
+```python
+train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000)
+val_cfg = dict(type='ValLoop')
+```
+
+使用以上两种设置,MMSegmentation 在 40K 迭代训练期间,每 4000 次迭代进行一次模型 **mIoU** 指标的评估。
+
+如果我们希望在训练后测试模型,则需要将 `test_dataloader`、`test_evaluator` 和 `test_cfg` 配置添加到配置文件中。
+
+```python
+test_dataloader = dict(
+ batch_size=1,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='images/validation',
+ seg_map_path='annotations/validation'),
+ pipeline=test_pipeline))
+
+test_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
+test_cfg = dict(type='TestLoop')
+```
+
+在 MMSegmentation 中,默认情况下,`test_dataloader` 和 `test_evaluator` 的设置与 `ValLoop` 的 dataloader 和 evaluator 相同,我们可以修改这些设置以满足我们的需要。
+
+## IoUMetric
+
+MMSegmentation 基于 [MMEngine](https://github.com/open-mmlab/mmengine) 提供的 [BaseMetric](https://github.com/open-mmlab/mmengine/blob/main/mmengine/evaluator/metric.py) 实现 [IoUMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/iou_metric.py) 和 [CityscapesMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/citys_metric.py),以评估模型的性能。有关统一评估接口的更多详细信息,请参阅[文档](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/evaluation.html)。
+
+这里我们简要介绍 `IoUMetric` 的参数和两种主要方法。
+
+除了 `collect_device` 和 `prefix` 之外,`IoUMetric` 的构建还包含一些其他参数。
+
+构造函数的参数:
+
+- ignore_index(int)- 将在评估中忽略的类别索引。默认值:255。
+- iou_metrics(list\[str\] | str)- 需要计算的指标,可选项包括 'mIoU'、'mDice' 和 'mFscore'。
+- nan_to_num(int,可选)- 如果指定,NaN 值将被用户定义的数字替换。默认值:None。
+- beta(int)- 决定综合评分中 recall 的权重。默认值:1。
+- collect_device(str)- 用于在分布式训练期间从不同进程收集结果的设备名称。必须是 'cpu' 或 'gpu'。默认为 'cpu'。
+- prefix(str,可选)- 将添加到指标名称中的前缀,以消除不同 evaluator 的同名指标的歧义。如果参数中未提供前缀,则将使用 self.default_prefix 进行替代。默认为 None。
+
+`IoUMetric` 实现 IoU 指标的计算,`IoUMetric` 的两个核心方法是 `process` 和 `compute_metrics`。
+
+- `process` 方法处理一批 data 和 data_samples。
+- `compute_metrics` 方法根据处理的结果计算指标。
+
+### IoUMetric.process
+
+参数:
+
+- data_batch(Any)- 来自 dataloader 的一批数据。
+- data_samples(Sequence\[dict\])- 模型的一批输出。
+
+返回值:
+
+此方法没有返回值,因为处理的结果将存储在 `self.results` 中,以在处理完所有批次后进行指标的计算。
+
+### IoUMetric.compute_metrics
+
+参数:
+
+- results(list)- 每个批次的处理结果。
+
+返回值:
+
+- Dict\[str,float\] - 计算的指标。指标的名称为 key,值是相应的结果。key 主要包括 **aAcc**、**mIoU**、**mAcc**、**mDice**、**mFscore**、**mPrecision**、**mRecall**。
+
+## CityscapesMetric
+
+`CityscapesMetric` 使用由 Cityscapes 官方提供的 [CityscapesScripts](https://github.com/mcordts/cityscapesScripts) 进行模型性能的评估。
+
+### 使用方法
+
+在使用之前,请先安装 `cityscapesscripts` 包:
+
+```shell
+pip install cityscapesscripts
+```
+
+由于 `IoUMetric` 在 MMSegmentation 中作为默认的 evaluator 使用,如果您想使用 `CityscapesMetric`,则需要自定义配置文件。在自定义配置文件中,应按如下方式替换默认 evaluator。
+
+```python
+val_evaluator = dict(type='CityscapesMetric', output_dir='tmp')
+test_evaluator = val_evaluator
+```
+
+### 接口
+
+构造函数的参数:
+
+- output_dir (str) - 预测结果输出的路径
+- ignore_index (int) - 将在评估中忽略的类别索引。默认值:255。
+- format_only (bool) - 只为提交进行结果格式化而不进行评估。当您希望将结果格式化为特定格式并将其提交给测试服务器时有用。默认为 False。
+- keep_results (bool) - 是否保留结果。当 `format_only` 为 True 时,`keep_results` 必须为 True。默认为 False。
+- collect_device (str) - 用于在分布式训练期间从不同进程收集结果的设备名称。必须是 'cpu' 或 'gpu'。默认为 'cpu'。
+- prefix (str,可选) - 将添加到指标名称中的前缀,以消除不同 evaluator 的同名指标的歧义。如果参数中未提供前缀,则将使用 self.default_prefix 进行替代。默认为 None。
+
+#### CityscapesMetric.process
+
+该方法将在图像上绘制 mask,并将绘制的图像保存到 `work_dir` 中。
+
+参数:
+
+- data_batch(dict)- 来自 dataloader 的一批数据。
+- data_samples(Sequence\[dict\])- 模型的一批输出。
+
+返回值:
+
+此方法没有返回值,因为处理的结果将存储在 `self.results` 中,以在处理完所有批次后进行指标的计算。
+
+#### CityscapesMetric.compute_metrics
+
+此方法将调用 `cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling` 工具来计算指标。
+
+参数:
+
+- results(list)- 数据集的测试结果。
+
+返回值:
+
+- dict\[str:float\] - Cityscapes 评测结果。
From 6468d3150a0a02c212bd1515b9b52f9a74a95f94 Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Thu, 9 Mar 2023 22:28:58 +0800
Subject: [PATCH 08/32] [Doc] Add zh_cn transforms doc and format en doc
(#2722)
as title
---
docs/en/advanced_guides/transforms.md | 24 ++---
docs/zh_cn/advanced_guides/transforms.md | 118 ++++++++++++++++++++++-
2 files changed, 124 insertions(+), 18 deletions(-)
diff --git a/docs/en/advanced_guides/transforms.md b/docs/en/advanced_guides/transforms.md
index e0c4155b57..68b1f44bd3 100644
--- a/docs/en/advanced_guides/transforms.md
+++ b/docs/en/advanced_guides/transforms.md
@@ -12,15 +12,10 @@ The structure of this guide is as follows:
## Design of Data pipelines
-Following typical conventions, we use `Dataset` and `DataLoader` for data loading
-with multiple workers. `Dataset` returns a dict of data items corresponding
-the arguments of models' forward method.
-Since the data in semantic segmentation may not be the same size,
-we introduce a new `DataContainer` type in MMCV to help collect and distribute
-data of different size.
-See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details.
+Following typical conventions, we use `Dataset` and `DataLoader` for data loading with multiple workers. `Dataset` returns a dict of data items corresponding the arguments of models' forward method. Since the data in semantic segmentation may not be the same size, we introduce a new `DataContainer` type in MMCV to help collect and distribute data of different size. See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details.
In 1.x version of MMSegmentation, all data transformations are inherited from [`BaseTransform`](https://github.com/open-mmlab/mmcv/blob/2.x/mmcv/transforms/base.py#L6).
+
The input and output types of transformations are both dict. A simple example is as follows:
```python
@@ -38,13 +33,11 @@ The input and output types of transformations are both dict. A simple example is
dict_keys(['img_path', 'seg_map_path', 'reduce_zero_label', 'seg_fields', 'gt_seg_map'])
```
-The data preparation pipeline and the dataset are decomposed. Usually a dataset
-defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict.
-A pipeline consists of a sequence of operations. Each operation takes a dict as input and also outputs a dict for the next transform.
+The data preparation pipeline and the dataset are decomposed. Usually a dataset defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. A pipeline consists of a sequence of operations. Each operation takes a dict as input and also outputs a dict for the next transform.
The operations are categorized into data loading, pre-processing, formatting and test-time augmentation.
-Here is a pipeline example for PSPNet.
+Here is a pipeline example for PSPNet:
```python
crop_size = (512, 1024)
@@ -71,8 +64,7 @@ test_pipeline = [
]
```
-For each operation, we list the related dict fields that are `added`/`updated`/`removed`.
-Before pipelines, the information we can directly obtain from the datasets are `img_path` and `seg_map_path`.
+For each operation, we list the related dict fields that are `added`/`updated`/`removed`. Before pipelines, the information we can directly obtain from the datasets are `img_path` and `seg_map_path`.
### Data loading
@@ -98,16 +90,14 @@ Before pipelines, the information we can directly obtain from the datasets are `
`RandomCrop`: Random crop image & segmentation map.
-- update: `img`, `gt_seg_map`, `img_shape`.
+- update: `img`, `gt_seg_map`, `img_shape`
`RandomFlip`: Flip the image & segmentation map.
- add: `flip`, `flip_direction`
- update: `img`, `gt_seg_map`
-`PhotoMetricDistortion`: Apply photometric distortion to image sequentially,
-every transformation is applied with a probability of 0.5.
-The position of random contrast is in second or second to last(mode 0 or 1 below, respectively).
+`PhotoMetricDistortion`: Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last(mode 0 or 1 below, respectively).
```
1. random brightness
diff --git a/docs/zh_cn/advanced_guides/transforms.md b/docs/zh_cn/advanced_guides/transforms.md
index 1cbe79ba49..e5f3bebf6d 100644
--- a/docs/zh_cn/advanced_guides/transforms.md
+++ b/docs/zh_cn/advanced_guides/transforms.md
@@ -1,3 +1,119 @@
# 数据增强变化
-中文版文档支持中,请先阅读[英文版本](../../en/advanced_guides/transforms.md)
+在本教程中,我们将介绍 MMSegmentation 中数据增强变化流程的设计。
+
+本指南的结构如下:
+
+- [数据增强变化](#数据增强变化)
+ - [数据增强变化流程设计](#数据增强变化流程设计)
+ - [数据加载](#数据加载)
+ - [预处理](#预处理)
+ - [格式修改](#格式修改)
+
+## 数据增强变化流程设计
+
+按照惯例,我们使用 `Dataset` 和 `DataLoader` 多进程地加载数据。`Dataset` 返回与模型 forward 方法的参数相对应的数据项的字典。由于语义分割中的数据可能大小不同,我们在 MMCV 中引入了一种新的 `DataContainer` 类型,以帮助收集和分发不同大小的数据。参见[此处](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py)了解更多详情。
+
+在 MMSegmentation 的 1.x 版本中,所有数据转换都继承自 [`BaseTransform`](https://github.com/open-mmlab/mmcv/blob/2.x/mmcv/transforms/base.py#L6).
+
+转换的输入和输出类型都是字典。一个简单的示例如下:
+
+```python
+>>> from mmseg.datasets.transforms import LoadAnnotations
+>>> transforms = LoadAnnotations()
+>>> img_path = './data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png.png'
+>>> gt_path = './data/cityscapes/gtFine/train/aachen/aachen_000015_000019_gtFine_instanceTrainIds.png'
+>>> results = dict(
+>>> img_path=img_path,
+>>> seg_map_path=gt_path,
+>>> reduce_zero_label=False,
+>>> seg_fields=[])
+>>> data_dict = transforms(results)
+>>> print(data_dict.keys())
+dict_keys(['img_path', 'seg_map_path', 'reduce_zero_label', 'seg_fields', 'gt_seg_map'])
+```
+
+数据准备流程和数据集是解耦的。通常,数据集定义如何处理标注,数据流程定义准备数据字典的所有步骤。流程由一系列操作组成。每个操作都将字典作为输入,并为接下来的转换输出字典。
+
+操作分为数据加载、预处理、格式修改和测试数据增强。
+
+这里是 PSPNet 的流程示例:
+
+```python
+crop_size = (512, 1024)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(
+ type='RandomResize',
+ scale=(2048, 1024),
+ ratio_range=(0.5, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='PackSegInputs')
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
+ # add loading annotation after ``Resize`` because ground truth
+ # does not need to resize data transform
+ dict(type='LoadAnnotations'),
+ dict(type='PackSegInputs')
+]
+```
+
+对于每个操作,我们列出了 `添加`/`更新`/`删除` 相关的字典字段。在流程前,我们可以从数据集直接获得的信息是 `img_path` 和 `seg_map_path`。
+
+### 数据加载
+
+`LoadImageFromFile`:从文件加载图像。
+
+- 添加:`img`,`img_shape`,`ori_shape`
+
+`LoadAnnotations`:加载数据集提供的语义分割图。
+
+- 添加:`seg_fields`,`gt_seg_map`
+
+### 预处理
+
+`RandomResize`:随机调整图像和分割图大小。
+
+- 添加:`scale`,`scale_factor`,`keep_ratio`
+- 更新:`img`,`img_shape`,`gt_seg_map`
+
+`Resize`:调整图像和分割图的大小。
+
+- 添加:`scale`,`scale_factor`,`keep_ratio`
+- 更新:`img`,`gt_seg_map`,`img_shape`
+
+`RandomCrop`:随机裁剪图像和分割图。
+
+- 更新:`img`,`gt_seg_map`,`img_shape`
+
+`RandomFlip`:翻转图像和分割图。
+
+- 添加:`flip`,`flip_direction`
+- 更新:`img`,`gt_seg_map`
+
+`PhotoMetricDistortion`:按顺序对图像应用光度失真,每个变换的应用概率为 0.5。随机对比度的位置是第二或倒数第二(分别为下面的模式 0 或 1)。
+
+```
+1.随机亮度
+2.随机对比度(模式 0)
+3.将颜色从 BGR 转换为 HSV
+4.随机饱和度
+5.随机色调
+6.将颜色从 HSV 转换为 BGR
+7.随机对比度(模式 1)
+```
+
+- 更新:`img`
+
+### 格式修改
+
+`PackSegInputs`:为语义分割打包输入数据。
+
+- 添加:`inputs`,`data_sample`
+- 删除:由 `meta_keys` 指定的 keys(合并到 data_sample 的 metainfo 中),所有其他 keys
From 45fae72de5d3bf933504348daba5c848f752d4a1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?=
Date: Fri, 10 Mar 2023 19:25:47 +0800
Subject: [PATCH 09/32] [Feature] Support calculating FLOPs of segmentors
(#2706)
## Motivation
fix compute flops problems
## Modification
Please briefly describe what modification is made in this PR.
---
tools/analysis_tools/get_flops.py | 108 ++++++++++++++++++++++++------
1 file changed, 86 insertions(+), 22 deletions(-)
diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py
index 1e8f188e18..66b2d52fcd 100644
--- a/tools/analysis_tools/get_flops.py
+++ b/tools/analysis_tools/get_flops.py
@@ -1,10 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
+import tempfile
+from pathlib import Path
-from mmcv.cnn import get_model_complexity_info
-from mmengine import Config
+import torch
+from mmengine import Config, DictAction
+from mmengine.logging import MMLogger
+from mmengine.model import revert_sync_batchnorm
+from mmengine.registry import init_default_scope
-from mmseg.models import build_segmentor
+from mmseg.models import BaseSegmentor
+from mmseg.registry import MODELS
+from mmseg.structures import SegDataSample
+
+try:
+ from mmengine.analysis import get_model_complexity_info
+ from mmengine.analysis.print_helper import _format_size
+except ImportError:
+ raise ImportError('Please upgrade mmengine >= 0.6.0 to use this script.')
def parse_args():
@@ -17,13 +30,33 @@ def parse_args():
nargs='+',
default=[2048, 1024],
help='input image size')
+ parser.add_argument(
+ '--cfg-options',
+ nargs='+',
+ action=DictAction,
+ help='override some settings in the used config, the key-value pair '
+ 'in xxx=yyy format will be merged into config file. If the value to '
+ 'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+ 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
+ 'Note that the quotation marks are necessary and that no white space '
+ 'is allowed.')
args = parser.parse_args()
return args
-def main():
+def inference(args: argparse.Namespace, logger: MMLogger) -> dict:
+ config_name = Path(args.config)
- args = parse_args()
+ if not config_name.exists():
+ logger.error(f'Config file {config_name} does not exist')
+
+ cfg: Config = Config.fromfile(config_name)
+ cfg.work_dir = tempfile.TemporaryDirectory().name
+ cfg.log_level = 'WARN'
+ if args.cfg_options is not None:
+ cfg.merge_from_dict(args.cfg_options)
+
+ init_default_scope(cfg.get('scope', 'mmseg'))
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
@@ -31,29 +64,60 @@ def main():
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
+ result = {}
- cfg = Config.fromfile(args.config)
- cfg.model.pretrained = None
- model = build_segmentor(
- cfg.model,
- train_cfg=cfg.get('train_cfg'),
- test_cfg=cfg.get('test_cfg')).cuda()
+ model: BaseSegmentor = MODELS.build(cfg.model)
+ if hasattr(model, 'auxiliary_head'):
+ model.auxiliary_head = None
+ if torch.cuda.is_available():
+ model.cuda()
+ model = revert_sync_batchnorm(model)
+ result['ori_shape'] = input_shape[-2:]
+ result['pad_shape'] = input_shape[-2:]
+ data_batch = {
+ 'inputs': [torch.rand(input_shape)],
+ 'data_samples': [SegDataSample(metainfo=result)]
+ }
+ data = model.data_preprocessor(data_batch)
model.eval()
+ if cfg.model.decode_head.type in ['MaskFormerHead', 'Mask2FormerHead']:
+ # TODO: Support MaskFormer and Mask2Former
+ raise NotImplementedError('MaskFormer and Mask2Former are not '
+ 'supported yet.')
+ outputs = get_model_complexity_info(
+ model,
+ input_shape,
+ inputs=data['inputs'],
+ show_table=False,
+ show_arch=False)
+ result['flops'] = _format_size(outputs['flops'])
+ result['params'] = _format_size(outputs['params'])
+ result['compute_type'] = 'direct: randomly generate a picture'
+ return result
- if hasattr(model, 'forward_dummy'):
- model.forward = model.forward_dummy
- else:
- raise NotImplementedError(
- 'FLOPs counter is currently not currently supported with {}'.
- format(model.__class__.__name__))
- flops, params = get_model_complexity_info(model, input_shape)
+def main():
+
+ args = parse_args()
+ logger = MMLogger.get_instance(name='MMLogger')
+
+ result = inference(args, logger)
split_line = '=' * 30
- print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
- split_line, input_shape, flops, params))
+ ori_shape = result['ori_shape']
+ pad_shape = result['pad_shape']
+ flops = result['flops']
+ params = result['params']
+ compute_type = result['compute_type']
+
+ if pad_shape != ori_shape:
+ print(f'{split_line}\nUse size divisor set input shape '
+ f'from {ori_shape} to {pad_shape}')
+ print(f'{split_line}\nCompute type: {compute_type}\n'
+ f'Input shape: {pad_shape}\nFlops: {flops}\n'
+ f'Params: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
- 'You may need to check if all ops are supported and verify that the '
- 'flops computation is correct.')
+ 'You may need to check if all ops are supported and verify '
+ 'that the flops computation is correct.')
if __name__ == '__main__':
From 684d79fedc2d144c8ac9e4ce16cd3b0dbd524ccb Mon Sep 17 00:00:00 2001
From: CSH <40987381+csatsurnh@users.noreply.github.com>
Date: Mon, 13 Mar 2023 19:23:22 +0800
Subject: [PATCH 10/32] [Doc] add zh_cn migration doc (#2733)
as title
---------
Co-authored-by: MeowZheng
---
README_zh-CN.md | 2 +-
docs/en/migration/interface.md | 39 +--
docs/en/migration/package.md | 7 +-
docs/zh_cn/migration.md | 3 -
docs/zh_cn/migration/index.rst | 8 +
docs/zh_cn/migration/interface.md | 496 ++++++++++++++++++++++++++++++
docs/zh_cn/migration/package.md | 113 +++++++
7 files changed, 637 insertions(+), 31 deletions(-)
delete mode 100644 docs/zh_cn/migration.md
create mode 100644 docs/zh_cn/migration/index.rst
create mode 100644 docs/zh_cn/migration/interface.md
create mode 100644 docs/zh_cn/migration/package.md
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 858485fd54..f7c3b0b18e 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -76,7 +76,7 @@ MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 O
同时,我们提供了 Colab 教程。你可以在[这里](demo/MMSegmentation_Tutorial.ipynb)浏览教程,或者直接在 Colab 上[运行](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/1.x/demo/MMSegmentation_Tutorial.ipynb)。
-若需要将0.x版本的代码迁移至新版,请参考[迁移文档](docs/zh_cn/migration.md)。
+若需要将0.x版本的代码迁移至新版,请参考[迁移文档](docs/zh_cn/migration)。
## 基准测试和模型库
diff --git a/docs/en/migration/interface.md b/docs/en/migration/interface.md
index d75f8ec3ef..3dab125ebc 100644
--- a/docs/en/migration/interface.md
+++ b/docs/en/migration/interface.md
@@ -6,14 +6,17 @@ This guide describes the fundamental differences between MMSegmentation 0.x and
## New dependencies
-MMSegmentation 1.x depends on some new packages, you can prepare a new clean environment and install again according to the [installation tutorial](get_started.md).
+MMSegmentation 1.x depends on some new packages, you can prepare a new clean environment and install again according to the [installation tutorial](../get_started.md).
+
Or install the below packages manually.
1. [MMEngine](https://github.com/open-mmlab/mmengine): MMEngine is the core the OpenMMLab 2.0 architecture, and we splited many compentents unrelated to computer vision from MMCV to MMEngine.
2. [MMCV](https://github.com/open-mmlab/mmcv): The computer vision package of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **2.0.0rc1** version.
-3. [MMClassification](https://github.com/open-mmlab/mmclassification)(Optional): The image classification toolbox and benchmark of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **1.0.0rc0** version.
+3. [MMClassification](https://github.com/open-mmlab/mmclassification)(Optional): The image classification toolbox and benchmark of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **1.0.0rc0** version.
+
+4. [MMDetection](https://github.com/open-mmlab/mmdetection)(Optional): The object detection toolbox and benchmark of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **3.0.0rc0** version.
## Train launch
@@ -86,7 +89,7 @@ Add `model.data_preprocessor` field to configure the `DataPreProcessor`, includi
- `bgr_to_rgb` (bool): whether to convert image from BGR to RGB.Defaults to False.
-- `rgb_to_bgr` (bool): whether to convert image from RGB to RGB. Defaults to False.
+- `rgb_to_bgr` (bool): whether to convert image from RGB to BGR. Defaults to False.
**Note:**
Please refer [models documentation](../advanced_guides/models.md) for more details.
@@ -260,8 +263,7 @@ tta_pipeline = [
Changes in **`evaluation`**:
- The **`evaluation`** field is split to `val_evaluator` and `test_evaluator`. And it won't support `interval` and `save_best` arguments.
- The `interval` is moved to `train_cfg.val_interval`, and the `save_best`
- is moved to `default_hooks.checkpoint.save_best`. `pre_eval` has been removed.
+ The `interval` is moved to `train_cfg.val_interval`, and the `save_best` is moved to `default_hooks.checkpoint.save_best`. `pre_eval` has been removed.
- `'mIoU'` has been changed to `'IoUMetric'`.
@@ -291,8 +293,7 @@ test_evaluator = val_evaluator
Changes in **`optimizer`** and **`optimizer_config`**:
-- Now we use `optim_wrapper` field to specify all configuration about the optimization process. And the
- `optimizer` is a sub field of `optim_wrapper` now.
+- Now we use `optim_wrapper` field to specify all configuration about the optimization process. And the `optimizer` is a sub field of `optim_wrapper` now.
- `paramwise_cfg` is also a sub field of `optim_wrapper`, instead of `optimizer`.
- `optimizer_config` is removed now, and all configurations of it are moved to `optim_wrapper`.
- `grad_clip` is renamed to `clip_grad`.
@@ -326,11 +327,9 @@ optim_wrapper = dict(
Changes in **`lr_config`**:
- The `lr_config` field is removed and we use new `param_scheduler` to replace it.
-- The `warmup` related arguments are removed, since we use schedulers combination to implement this
- functionality.
+- The `warmup` related arguments are removed, since we use schedulers combination to implement this functionality.
-The new schedulers combination mechanism is very flexible, and you can use it to design many kinds of learning
-rate / momentum curves. See [the tutorial](TODO) for more details.
+The new schedulers combination mechanism is very flexible, and you can use it to design many kinds of learning rate / momentum curves. See [the tutorial](TODO) for more details.
@@ -374,8 +373,7 @@ param_scheduler = [
Changes in **`runner`**:
-Most configuration in the original `runner` field is moved to `train_cfg`, `val_cfg` and `test_cfg`, which
-configure the loop in training, validation and test.
+Most configuration in the original `runner` field is moved to `train_cfg`, `val_cfg` and `test_cfg`, which configure the loop in training, validation and test.
@@ -402,8 +400,7 @@ test_cfg = dict(type='TestLoop') # Use the default test loop.
-In fact, in OpenMMLab 2.0, we introduced `Loop` to control the behaviors in training, validation and test. The functionalities of `Runner` are also changed. You can find more details of [runner tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md)
-in [MMEngine](https://github.com/open-mmlab/mmengine/).
+In fact, in OpenMMLab 2.0, we introduced `Loop` to control the behaviors in training, validation and test. The functionalities of `Runner` are also changed. You can find more details of [runner tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md) in [MMEngine](https://github.com/open-mmlab/mmengine/).
### Runtime settings
@@ -433,8 +430,7 @@ default_hooks = dict(
visualization=dict(type='SegVisualizationHook'))
```
-In addition, we split the original logger to logger and visualizer. The logger is used to record
-information and the visualizer is used to show the logger in different backends, like terminal and TensorBoard.
+In addition, we split the original logger to logger and visualizer. The logger is used to record information and the visualizer is used to show the logger in different backends, like terminal and TensorBoard.
@@ -478,8 +474,7 @@ Changes in **`load_from`** and **`resume_from`**:
- If `resume=False` and `load_from` is **not None**, only load the checkpoint, not resume training.
- If `resume=False` and `load_from` is **None**, do not load nor resume.
-Changes in **`dist_params`**: The `dist_params` field is a sub field of `env_cfg` now. And there are some new
-configurations in the `env_cfg`.
+Changes in **`dist_params`**: The `dist_params` field is a sub field of `env_cfg` now. And there are some new configurations in the `env_cfg`.
```python
env_cfg = dict(
@@ -496,8 +491,6 @@ env_cfg = dict(
Changes in **`workflow`**: `workflow` related functionalities are removed.
-New field **`visualizer`**: The visualizer is a new design in OpenMMLab 2.0 architecture. We use a
-visualizer instance in the runner to handle results & log visualization and save to different backends.
-See the [visualization tutorial](user_guides/visualization.md) for more details.
+New field **`visualizer`**: The visualizer is a new design in OpenMMLab 2.0 architecture. We use a visualizer instance in the runner to handle results & log visualization and save to different backends. See the [visualization tutorial](../user_guides/visualization.md) for more details.
-New field **`default_scope`**: The start point to search module for all registries. The `default_scope` in MMSegmentation is `mmseg`. See [the registry tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md) for more details.
+New field **`default_scope`**: The start point to search module for all registries. The `default_scope` in MMSegmentation is `mmseg`. See [the registry tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/registry.md) for more details.
diff --git a/docs/en/migration/package.md b/docs/en/migration/package.md
index c0aa1d6e31..728e9a9bb6 100644
--- a/docs/en/migration/package.md
+++ b/docs/en/migration/package.md
@@ -74,9 +74,8 @@ We moved registry implementations for all kinds of modules in MMSegmentation in
### `mmseg.apis`
-OpenMMLab 2.0 tries to support unified interface for multitasking of Computer Vision,
-and releases much stronger [`Runner`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md),
-so MMSeg 1.x removed modules in `train.py` and `test.py` renamed `init_segmentor` to `init_model` and `inference_segmentor` to `inference_model`
+OpenMMLab 2.0 tries to support unified interface for multitasking of Computer Vision, and releases much stronger [`Runner`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md), so MMSeg 1.x removed modules in `train.py` and `test.py` renamed `init_segmentor` to `init_model` and `inference_segmentor` to `inference_model`.
+
Here is the changes of `mmseg.apis`:
| Function | Changes |
@@ -92,7 +91,7 @@ Here is the changes of `mmseg.apis`:
### `mmseg.datasets`
-OpenMMLab 2.0 defines the `BaseDataset` to function and interface of dataset, and MMSegmentation 1.x also follow this protocol and defines the `BaseSegDataset` inherited from `BaseDataset`. MMCV 2.x collects general data transforms for multiple tasks e.g. classification, detection, segmentation, so MMSegmentation 1.x uses these data transforms and removes them from mmseg.datasets
+OpenMMLab 2.0 defines the `BaseDataset` to function and interface of dataset, and MMSegmentation 1.x also follow this protocol and defines the `BaseSegDataset` inherited from `BaseDataset`. MMCV 2.x collects general data transforms for multiple tasks e.g. classification, detection, segmentation, so MMSegmentation 1.x uses these data transforms and removes them from mmseg.datasets.
| Packages/Modules | Changes |
| :-------------------: | :------------------------------------------------------------------------------------------ |
diff --git a/docs/zh_cn/migration.md b/docs/zh_cn/migration.md
deleted file mode 100644
index 3f19b26714..0000000000
--- a/docs/zh_cn/migration.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# 迁移文档
-
-中文迁移文档在支持中,请先阅读[英文版迁移文档](../en/migration/)
diff --git a/docs/zh_cn/migration/index.rst b/docs/zh_cn/migration/index.rst
new file mode 100644
index 0000000000..854b9e61d0
--- /dev/null
+++ b/docs/zh_cn/migration/index.rst
@@ -0,0 +1,8 @@
+迁移
+***************
+
+.. toctree::
+ :maxdepth: 1
+
+ interface.md
+ package.md
diff --git a/docs/zh_cn/migration/interface.md b/docs/zh_cn/migration/interface.md
new file mode 100644
index 0000000000..a329ce38a8
--- /dev/null
+++ b/docs/zh_cn/migration/interface.md
@@ -0,0 +1,496 @@
+# 从 MMSegmentation 0.x 迁移
+
+## 引言
+
+本指南介绍了 MMSegmentation 0.x 和 MMSegmentation 1.x 在行为和 API 方面的基本区别,以及这些区别与您的迁移过程的关系。
+
+## 新的依赖
+
+MMSegmentation 1.x 依赖于一些新的软件包,您可以准备一个新的干净环境,然后根据[安装教程](../get_started.md)重新安装。
+
+或手动安装以下软件包。
+
+1. [MMEngine](https://github.com/open-mmlab/mmengine):MMEngine 是 OpenMMLab 2.0 架构的核心,我们将许多与计算机视觉无关的内容从 MMCV 拆分到 MMEngine 中。
+
+2. [MMCV](https://github.com/open-mmlab/mmcv):OpenMMLab 的计算机视觉包。这不是一个新的依赖,但您需要将其升级到 **2.0.0rc1** 以上的版本。
+
+3. [MMClassification](https://github.com/open-mmlab/mmclassification)(可选):OpenMMLab 的图像分类工具箱和基准。这不是一个新的依赖,但您需要将其升级到 **1.0.0rc0** 以上的版本。
+
+4. [MMDetection](https://github.com/open-mmlab/mmdetection)(可选): OpenMMLab 的目标检测工具箱和基准。这不是一个新的依赖,但您需要将其升级到 **3.0.0rc0** 以上的版本。
+
+## 启动训练
+
+OpenMMLab 2.0 的主要改进是发布了 MMEngine,它为启动训练任务的统一接口提供了通用且强大的执行器。
+
+与 MMSeg 0.x 相比,MMSeg 1.x 在 `tools/train.py` 中提供的命令行参数更少
+
+
+
+功能 |
+原版 |
+新版 |
+
+
+加载预训练模型 |
+--load_from=$CHECKPOINT |
+--cfg-options load_from=$CHECKPOINT |
+
+
+从特定检查点恢复训练 |
+--resume-from=$CHECKPOINT |
+--resume=$CHECKPOINT |
+
+
+从最新的检查点恢复训练 |
+--auto-resume |
+--resume='auto' |
+
+
+训练期间是否不评估检查点 |
+--no-validate |
+--cfg-options val_cfg=None val_dataloader=None val_evaluator=None |
+
+
+指定训练设备 |
+--gpu-id=$DEVICE_ID |
+- |
+
+
+是否为不同进程设置不同的种子 |
+--diff-seed |
+--cfg-options randomness.diff_rank_seed=True |
+
+是否为 CUDNN 后端设置确定性选项 |
+--deterministic |
+--cfg-options randomness.deterministic=True |
+
+
+## 配置文件
+
+### 模型设置
+
+`model.backbone`、`model.neck`、`model.decode_head` 和 `model.loss` 字段没有更改。
+
+添加 `model.data_preprocessor` 字段以配置 `DataPreProcessor`,包括:
+
+- `mean`(Sequence,可选):R、G、B 通道的像素平均值。默认为 None。
+
+- `std`(Sequence,可选):R、G、B通道的像素标准差。默认为 None。
+
+- `size`(Sequence,可选):固定的填充大小。
+
+- `size_divisor`(int,可选):填充大小的除法因子。
+
+- `seg_pad_val`(float,可选):分割图的填充值。默认值:255。
+
+- `padding_mode`(str):填充类型。默认值:'constant'。
+
+ - constant:常量值填充,值由 pad_val 指定。
+
+- `bgr_to_rgb`(bool):是否将图像从 BGR 转换为 RGB。默认为 False。
+
+- `rgb_to_bgr`(bool):是否将图像从 RGB 转换为 BGR。默认为 False。
+
+**注:**
+有关详细信息,请参阅[模型文档](../advanced_guides/models.md)。
+
+### 数据集设置
+
+**data** 的更改:
+
+原版 `data` 字段被拆分为 `train_dataloader`、`val_dataloader` 和 `test_dataloader`。这允许我们以细粒度配置它们。例如,您可以在训练和测试期间指定不同的采样器和批次大小。
+`samples_per_gpu` 重命名为 `batch_size`。
+`workers_per_gpu` 重命名为 `num_workers`。
+
+
+
+原版 |
+
+
+```python
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(...),
+ val=dict(...),
+ test=dict(...),
+)
+```
+
+ |
+
+新版 |
+
+
+```python
+train_dataloader = dict(
+ batch_size=4,
+ num_workers=4,
+ dataset=dict(...),
+ sampler=dict(type='DefaultSampler', shuffle=True) # necessary
+)
+
+val_dataloader = dict(
+ batch_size=4,
+ num_workers=4,
+ dataset=dict(...),
+ sampler=dict(type='DefaultSampler', shuffle=False) # necessary
+)
+
+test_dataloader = val_dataloader
+```
+
+ |
+
+
+
+**流程**变更
+
+- 原始格式转换 **`ToTensor`**、**`ImageToTensor`**、**`Collect`** 组合为 [`PackSegInputs`](mmseg.datasets.transforms.PackSegInputs)
+- 我们不建议在数据集流程中执行 **`Normalize`** 和 **Pad**。请将其从流程中删除,并将其设置在 `data_preprocessor` 字段中。
+- MMSeg 1.x 中原始的 **`Resize`** 已更改为 **`RandomResize`**,输入参数 `img_scale` 重命名为 `scale`,`keep_ratio` 的默认值修改为 False。
+- 原始的 `test_pipeline` 将单尺度和多尺度测试结合在一起,在 MMSeg 1.x 中,我们将其分为 `test_pipeline` 和 `tta_pipeline`。
+
+**注:**
+我们将一些数据转换工作转移到数据预处理器中,如归一化,请参阅[文档](package.md)了解更多详细信息。
+
+训练流程
+
+
+
+原版 |
+
+
+```python
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', reduce_zero_label=True),
+ dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+```
+
+ |
+
+新版 |
+
+
+```python
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', reduce_zero_label=True),
+ dict(
+ type='RandomResize',
+ scale=(2560, 640),
+ ratio_range=(0.5, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='PackSegInputs')
+]
+```
+
+ |
+
+
+
+测试流程
+
+
+
+原版 |
+
+
+```python
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2560, 640),
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+```
+
+ |
+
+新版 |
+
+
+```python
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', scale=(2560, 640), keep_ratio=True),
+ dict(type='LoadAnnotations', reduce_zero_label=True),
+ dict(type='PackSegInputs')
+]
+img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
+tta_pipeline = [
+ dict(type='LoadImageFromFile', backend_args=None),
+ dict(
+ type='TestTimeAug',
+ transforms=[
+ [
+ dict(type='Resize', scale_factor=r, keep_ratio=True)
+ for r in img_ratios
+ ],
+ [
+ dict(type='RandomFlip', prob=0., direction='horizontal'),
+ dict(type='RandomFlip', prob=1., direction='horizontal')
+ ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
+ ])
+]
+```
+
+ |
+
+
+
+**`evaluation`** 中的更改:
+
+- **`evaluation`** 字段被拆分为 `val_evaluator` 和 `test_evaluator`。而且不再支持 `interval` 和 `save_best` 参数。
+ `interval` 已移动到 `train_cfg.val_interval`,`save_best` 已移动到 `default_hooks.checkpoint.save_best`。`pre_eval` 已删除。
+- `'mIoU'` 已更改为 `'IoUMetric'`。
+
+
+
+原版 |
+
+
+```python
+evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)
+```
+
+ |
+
+新版 |
+
+
+```python
+val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
+test_evaluator = val_evaluator
+```
+
+ |
+
+
+
+### Optimizer 和 Schedule 设置
+
+**`optimizer`** 和 **`optimizer_config`** 中的更改:
+
+- 现在我们使用 `optim_wrapper` 字段来指定优化过程的所有配置。以及 `optimizer` 是 `optim_wrapper` 的一个子字段。
+- `paramwise_cfg` 也是 `optim_wrapper` 的一个子字段,而不再是 `optimizer` 的子字段。
+- `optimizer_config` 现在被删除,它的所有配置都被移动到 `optim_wrapper` 中。
+- `grad_clip` 重命名为 `clip_grad`。
+
+
+
+原版 |
+
+
+```python
+optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0005)
+optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
+```
+
+ |
+
+新版 |
+
+
+```python
+optim_wrapper = dict(
+ type='OptimWrapper',
+ optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005),
+ clip_grad=dict(max_norm=1, norm_type=2))
+```
+
+ |
+
+
+
+**`lr_config`** 中的更改:
+
+- 我们将 `lr_config` 字段删除,并使用新的 `param_scheduler` 替代。
+- 我们删除了与 `warmup` 相关的参数,因为我们使用 scheduler 组合来实现该功能。
+
+新的 scheduler 组合机制非常灵活,您可以使用它来设计多种学习率/动量曲线。有关详细信息,请参见[教程](TODO)。
+
+
+
+原版 |
+
+
+```python
+lr_config = dict(
+ policy='poly',
+ warmup='linear',
+ warmup_iters=1500,
+ warmup_ratio=1e-6,
+ power=1.0,
+ min_lr=0.0,
+ by_epoch=False)
+```
+
+ |
+
+新版 |
+
+
+```python
+param_scheduler = [
+ dict(
+ type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+ dict(
+ type='PolyLR',
+ power=1.0,
+ begin=1500,
+ end=160000,
+ eta_min=0.0,
+ by_epoch=False,
+ )
+]
+```
+
+ |
+
+
+
+**`runner`** 中的更改:
+
+原版 `runner` 字段中的大多数配置被移动到 `train_cfg`、`val_cfg` 和 `test_cfg` 中,以在训练、验证和测试中配置 loop。
+
+
+
+原版 |
+
+
+```python
+runner = dict(type='IterBasedRunner', max_iters=20000)
+```
+
+ |
+
+新版 |
+
+
+```python
+# The `val_interval` is the original `evaluation.interval`.
+train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000)
+val_cfg = dict(type='ValLoop') # Use the default validation loop.
+test_cfg = dict(type='TestLoop') # Use the default test loop.
+```
+
+ |
+
+
+
+事实上,在 OpenMMLab 2.0 中,我们引入了 `Loop` 来控制训练、验证和测试中的行为。`Runner` 的功能也发生了变化。您可以在 [MMEngine](https://github.com/open-mmlab/mmengine/) 的[执行器教程](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/design/runner.md) 中找到更多的详细信息。
+
+### 运行时设置
+
+**`checkpoint_config`** 和 **`log_config`** 中的更改:
+
+`checkpoint_config` 被移动到 `default_hooks.checkpoint` 中,`log_config` 被移动到 `default_hooks.logger` 中。
+并且我们将许多钩子设置从脚本代码移动到运行时配置的 `default_hooks` 字段中。
+
+```python
+default_hooks = dict(
+ # record the time of every iterations.
+ timer=dict(type='IterTimerHook'),
+
+ # print log every 50 iterations.
+ logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
+
+ # enable the parameter scheduler.
+ param_scheduler=dict(type='ParamSchedulerHook'),
+
+ # save checkpoint every 2000 iterations.
+ checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000),
+
+ # set sampler seed in distributed environment.
+ sampler_seed=dict(type='DistSamplerSeedHook'),
+
+ # validation results visualization.
+ visualization=dict(type='SegVisualizationHook'))
+```
+
+此外,我们将原版 logger 拆分为 logger 和 visualizer。logger 用于记录信息,visualizer 用于在不同的后端显示 logger,如 terminal 和 TensorBoard。
+
+
+
+原版 |
+
+
+```python
+log_config = dict(
+ interval=100,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ dict(type='TensorboardLoggerHook'),
+ ])
+```
+
+ |
+
+新版 |
+
+
+```python
+default_hooks = dict(
+ ...
+ logger=dict(type='LoggerHook', interval=100),
+)
+vis_backends = [dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')]
+visualizer = dict(
+ type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+```
+
+ |
+
+
+
+**`load_from`** 和 **`resume_from`** 中的更改:
+
+- 删除 `resume_from`。我们使用 `resume` 和 `load_from` 来替换它。
+ - 如果 `resume=True` 且 `load_from` 为 **not None**,则从 `load_from` 中的检查点恢复训练。
+ - 如果 `resume=True` 且 `load_from` 为 **None**,则尝试从工作目录中的最新检查点恢复。
+ - 如果 `resume=False` 且 `load_from` 为 **not None**,则只加载检查点,而不继续训练。
+ - 如果 `resume=False` 且 `load_from` 为 **None**,则不加载或恢复。
+
+**`dist_params`** 中的更改:`dist_params` 字段现在是 `env_cfg` 的子字段。并且 `env_cfg` 中还有一些新的配置。
+
+```python
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+```
+
+**`workflow`** 的改动:`workflow` 相关功能被删除。
+
+新字段 **`visualizer`**:visualizer 是 OpenMMLab 2.0 体系结构中的新设计。我们在 runner 中使用 visualizer 实例来处理结果和日志可视化,并保存到不同的后端。更多详细信息,请参阅[可视化教程](../user_guides/visualization.md)。
+
+新字段 **`default_scope`**:搜索所有注册模块的起点。MMSegmentation 中的 `default_scope` 为 `mmseg`。请参见[注册器教程](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/advanced_tutorials/registry.md)了解更多详情。
diff --git a/docs/zh_cn/migration/package.md b/docs/zh_cn/migration/package.md
new file mode 100644
index 0000000000..d8d2245bed
--- /dev/null
+++ b/docs/zh_cn/migration/package.md
@@ -0,0 +1,113 @@
+# 包结构更改
+
+本节包含您对 MMSeg 0.x 和 1.x 之间的变化感到好奇的内容。
+
+
+
+MMSegmentation 0.x |
+MMSegmentation 1.x |
+
+
+mmseg.apis |
+mmseg.apis |
+
+
+- mmseg.core |
++ mmseg.engine |
+
+
+mmseg.datasets |
+mmseg.datasets |
+
+
+mmseg.models |
+mmseg.models |
+
+
+- mmseg.ops |
++ mmseg.structures |
+
+
+mmseg.utils |
+mmseg.utils |
+
+
+ |
++ mmseg.evaluation |
+
+
+ |
++ mmseg.registry |
+
+
+
+## 已删除的包
+
+### `mmseg.core`
+
+在 OpenMMLab 2.0 中,`core` 包已被删除。`core` 的 `hooks` 和 `optimizers` 被移动到了 `mmseg.engine` 中,而 `core` 中的 `evaluation` 目前是 mmseg.evaluation。
+
+## `mmseg.ops`
+
+`ops` 包包含 `encoding` 和 `wrappers`,它们被移到了 `mmseg.models.utils` 中。
+
+## 增加的包
+
+### `mmseg.engine`
+
+OpenMMLab 2.0 增加了一个新的深度学习训练基础库 MMEngine。它是所有 OpenMMLab 代码库的训练引擎。
+mmseg 的 `engine` 包是一些用于语义分割任务的定制模块,如 `SegVisualizationHook` 用于可视化分割掩膜。
+
+### `mmseg.structures`
+
+在 OpenMMLab 2.0 中,我们为计算机视觉任务设计了数据结构,在 mmseg 中,我们在 `structures` 包中实现了 `SegDataSample`。
+
+### `mmseg.evaluation`
+
+我们将所有评估指标都移动到了 `mmseg.evaluation` 中。
+
+### `mmseg.registry`
+
+我们将 MMSegmentation 中所有类型模块的注册实现移动到 `mmseg.registry` 中。
+
+## 修改的包
+
+### `mmseg.apis`
+
+OpenMMLab 2.0 尝试支持计算机视觉的多任务统一接口,并发布了更强的 [`Runner`](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/design/runner.md),因此 MMSeg 1.x 删除了 `train.py` 和 `test.py` 中的模块,并将 `init_segmentor` 重命名为 `init_model`,将 `inference_segmentor` 重命名为 `inference_model`。
+
+以下是 `mmseg.apis` 的更改:
+
+| 函数 | 变化 |
+| :-------------------: | :--------------------------------------------- |
+| `init_segmentor` | 重命名为 `init_model` |
+| `inference_segmentor` | 重命名为 `inference_model` |
+| `show_result_pyplot` | 基于 `SegLocalVisualizer` 实现 |
+| `train_model` | 删除,使用 `runner.train` 训练。 |
+| `multi_gpu_test` | 删除,使用 `runner.test` 测试。 |
+| `single_gpu_test` | 删除,使用 `runner.test` 测试。 |
+| `set_random_seed` | 删除,使用 `mmengine.runner.set_random_seed`。 |
+| `init_random_seed` | 删除,使用 `mmengine.dist.sync_random_seed`。 |
+
+### `mmseg.datasets`
+
+OpenMMLab 2.0 将 `BaseDataset` 定义为数据集的函数和接口,MMSegmentation 1.x 也遵循此协议,并定义了从 `BaseDataset` 继承的 `BaseSegDataset`。MMCV 2.x 收集多种任务的通用数据转换,例如分类、检测、分割,因此 MMSegmentation 1.x 使用这些数据转换并将其从 mmseg.datasets 中删除。
+
+| 包/模块 | 更改 |
+| :-------------------: | :----------------------------------------------------------------------------------- |
+| `mmseg.pipelines` | 移动到 `mmcv.transforms` 中 |
+| `mmseg.sampler` | 移动到 `mmengine.dataset.sampler` 中 |
+| `CustomDataset` | 重命名为 `BaseSegDataset` 并从 MMEngine 中的 `BaseDataset` 继承 |
+| `DefaultFormatBundle` | 替换为 `PackSegInputs` |
+| `LoadImageFromFile` | 移动到 `mmcv.transforms.LoadImageFromFile` 中 |
+| `LoadAnnotations` | 移动到 `mmcv.transforms.LoadAnnotations` 中 |
+| `Resize` | 移动到 `mmcv.transforms` 中并拆分为 `Resize`,`RandomResize` 和 `RandomChoiceResize` |
+| `RandomFlip` | 移动到 `mmcv.transforms.RandomFlip` 中 |
+| `Pad` | 移动到 `mmcv.transforms.Pad` 中 |
+| `Normalize` | 移动到 `mmcv.transforms.Normalize` 中 |
+| `Compose` | 移动到 `mmcv.transforms.Compose` 中 |
+| `ImageToTensor` | 移动到 `mmcv.transforms.ImageToTensor` 中 |
+
+### `mmseg.models`
+
+`models` 没有太大变化,只是从以前的 `mmseg.ops` 中添加了 `encoding` 和 `wrappers`
From 6ba4696648c7bef22ccfcd19eb7031925e982f82 Mon Sep 17 00:00:00 2001
From: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Date: Tue, 14 Mar 2023 23:14:41 +0800
Subject: [PATCH 11/32] [Enhancement] Support input gt seg map is not 2D
(#2739)
Thanks for your contribution and we appreciate it a lot. The following
instructions would make your pull request more healthy and more easily
get feedback. If you do not understand some items, don't worry, just
make the pull request and seek help from maintainers.
## Motivation
fix #2593
## Modification
1. Only when gt seg map is 2D, extend its shape to 3D PixelData
2. If seg map is not 2D, we raised warning for users.
---------
Co-authored-by: xiexinch
---
mmseg/datasets/transforms/formatting.py | 15 ++++++++++++---
tests/test_datasets/test_formatting.py | 5 +++++
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/mmseg/datasets/transforms/formatting.py b/mmseg/datasets/transforms/formatting.py
index 4391161dfd..57cda9b10e 100644
--- a/mmseg/datasets/transforms/formatting.py
+++ b/mmseg/datasets/transforms/formatting.py
@@ -1,4 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
import numpy as np
from mmcv.transforms import to_tensor
from mmcv.transforms.base import BaseTransform
@@ -72,9 +74,16 @@ def transform(self, results: dict) -> dict:
data_sample = SegDataSample()
if 'gt_seg_map' in results:
- gt_sem_seg_data = dict(
- data=to_tensor(results['gt_seg_map'][None,
- ...].astype(np.int64)))
+        if len(results['gt_seg_map'].shape) == 2:
+            data = to_tensor(results['gt_seg_map'][None,
+                                                   ...].astype(np.int64))
+        else:
+            warnings.warn('Please pay attention your ground truth '
+                          'segmentation map, usually the segmentation '
+                          'map is 2D, but got '
+                          f'{results["gt_seg_map"].shape}')
+            data = to_tensor(results['gt_seg_map'].astype(np.int64))
+ gt_sem_seg_data = dict(data=data)
data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
if 'gt_edge_map' in results:
diff --git a/tests/test_datasets/test_formatting.py b/tests/test_datasets/test_formatting.py
index 4babaad269..51fd90d048 100644
--- a/tests/test_datasets/test_formatting.py
+++ b/tests/test_datasets/test_formatting.py
@@ -45,6 +45,11 @@ def test_transform(self):
BaseDataElement)
self.assertEqual(results['data_samples'].ori_shape,
results['data_samples'].gt_sem_seg.shape)
+ results = copy.deepcopy(self.results)
+ results['gt_seg_map'] = np.random.rand(3, 300, 400)
+ results = transform(results)
+ self.assertEqual(results['data_samples'].ori_shape,
+ results['data_samples'].gt_sem_seg.shape)
def test_repr(self):
transform = PackSegInputs(meta_keys=self.meta_keys)
From 447a398c24f76be71b8851183a347ff181e5a1c1 Mon Sep 17 00:00:00 2001
From: Junhwa Song
Date: Wed, 15 Mar 2023 00:56:40 +0900
Subject: [PATCH 12/32] [Typo] Change indexes to indices (#2747)
## Modification
I just replaced the `indexes` variable name with `indices` for naming
consistency.
---
mmseg/datasets/dataset_wrappers.py | 8 ++++----
mmseg/datasets/transforms/transforms.py | 8 ++++----
.../tools/dataset_converters/mapillary.py | 2 +-
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/mmseg/datasets/dataset_wrappers.py b/mmseg/datasets/dataset_wrappers.py
index 933eb50d99..082c116ff4 100644
--- a/mmseg/datasets/dataset_wrappers.py
+++ b/mmseg/datasets/dataset_wrappers.py
@@ -106,11 +106,11 @@ def __getitem__(self, idx):
continue
if hasattr(transform, 'get_indices'):
- indexes = transform.get_indices(self.dataset)
- if not isinstance(indexes, collections.abc.Sequence):
- indexes = [indexes]
+ indices = transform.get_indices(self.dataset)
+ if not isinstance(indices, collections.abc.Sequence):
+ indices = [indices]
mix_results = [
- copy.deepcopy(self.dataset[index]) for index in indexes
+ copy.deepcopy(self.dataset[index]) for index in indices
]
results['mix_results'] = mix_results
diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py
index ef4e78dd8c..4f5316026f 100644
--- a/mmseg/datasets/transforms/transforms.py
+++ b/mmseg/datasets/transforms/transforms.py
@@ -1029,17 +1029,17 @@ def transform(self, results: dict) -> dict:
return results
def get_indices(self, dataset: MultiImageMixDataset) -> list:
- """Call function to collect indexes.
+ """Call function to collect indices.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
- list: indexes.
+ list: indices.
"""
- indexes = [random.randint(0, len(dataset)) for _ in range(3)]
- return indexes
+ indices = [random.randint(0, len(dataset)) for _ in range(3)]
+ return indices
@cache_randomness
def generate_mosaic_center(self):
diff --git a/projects/mapillary_dataset/tools/dataset_converters/mapillary.py b/projects/mapillary_dataset/tools/dataset_converters/mapillary.py
index 3ccb2d67b3..a881564cab 100644
--- a/projects/mapillary_dataset/tools/dataset_converters/mapillary.py
+++ b/projects/mapillary_dataset/tools/dataset_converters/mapillary.py
@@ -106,7 +106,7 @@ def mapillary_colormap2label(colormap: np.ndarray) -> list:
Returns:
list: values are mask labels,
- indexes are palette's convert results.、
+ indices are palette's convert results.
"""
colormap2label = np.zeros(256**3, dtype=np.longlong)
for i, colormap_ in enumerate(colormap):
From 8c89ff3dd1409445858399cc346ff2518ddbe5df Mon Sep 17 00:00:00 2001
From: Tianlong Ai <50650583+AI-Tianlong@users.noreply.github.com>
Date: Wed, 15 Mar 2023 14:44:38 +0800
Subject: [PATCH 13/32] [Datasets] Add Mapillary Vistas Datasets to MMSeg Core
Package. (#2576)
## [Datasets] Add Mapillary Vistas Datasets to MMSeg Core Package .
## Motivation
Add Mapillary Vistas Datasets to core package.
Old PR #2484
## Modification
- Add Mapillary Vistas Datasets to core package.
- Delete `tools/datasets_convert/mapillary.py`, dataset doesn't need
converting.
- Add `schedule_240k.py` config.
- Add configs files.
```none
deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
deeplabv3plus_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
maskformer_swin-s_4xb2-240k_mapillary_v1-512x1024.py
maskformer_swin-s_4xb2-240k_mapillary_v2-512x1024.py
maskformer_r101-d8_4xb2-240k_mapillary_v1-512x1024.py
maskformer_r101-d8_4xb2-240k_mapillary_v2-512x1024.py
pspnet_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
pspnet_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
```
- Synchronized changes to `projects/mapillary_datasets`
---------
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: xiexinch
---
README.md | 1 +
README_zh-CN.md | 1 +
.../_base_/datasets/mapillary_v1.py | 7 +-
configs/_base_/datasets/mapillary_v1_65.py | 37 +++
.../_base_/datasets/mapillary_v2.py | 7 +-
configs/_base_/schedules/schedule_240k.py | 25 ++
configs/deeplabv3plus/README.md | 6 +
configs/deeplabv3plus/deeplabv3plus.yml | 22 ++
...0-d8_4xb2-300k_mapillay_v1_65-1280x1280.py | 58 ++++
docs/en/user_guides/2_dataset_prepare.md | 74 +++++
mmseg/datasets/__init__.py | 4 +-
.../datasets/mapillary.py | 66 +++-
mmseg/utils/class_names.py | 124 +++++++-
projects/mapillary_dataset/README.md | 23 +-
.../configs/_base_/datasets/mapillary_v1.py | 68 ++++
.../_base_/datasets/mapillary_v1_65.py | 37 +++
.../configs/_base_/datasets/mapillary_v2.py | 68 ++++
...lus_r101-d8_4xb2-240k_mapillay-512x1024.py | 103 ------
..._r101-d8_4xb2-240k_mapillay_v1-512x1024.py | 17 +
..._r101-d8_4xb2-240k_mapillay_v2-512x1024.py | 16 +
..._r101-d8_4xb2-240k_mapillay_v1-512x1024.py | 16 +
..._r101-d8_4xb2-240k_mapillay_v2-512x1024.py | 16 +
.../docs/en/user_guides/2_dataset_prepare.md | 300 +++++++++++++-----
.../mmseg/datasets/mapillary.py | 177 +++++++++++
.../mmseg/datasets/mapillary_v1_2.py | 65 ----
.../tools/dataset_converters/mapillary.py | 245 --------------
.../images/__CRyFzoDOXn6unQ6a3DnQ.jpg | Bin 0 -> 1227776 bytes
.../v1.2/__CRyFzoDOXn6unQ6a3DnQ.png | Bin 0 -> 75488 bytes
.../v2.0/__CRyFzoDOXn6unQ6a3DnQ.png | Bin 0 -> 75316 bytes
tests/test_datasets/test_dataset.py | 24 +-
30 files changed, 1089 insertions(+), 518 deletions(-)
rename projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py => configs/_base_/datasets/mapillary_v1.py (91%)
create mode 100644 configs/_base_/datasets/mapillary_v1_65.py
rename projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py => configs/_base_/datasets/mapillary_v2.py (91%)
create mode 100644 configs/_base_/schedules/schedule_240k.py
create mode 100644 configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py
rename projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py => mmseg/datasets/mapillary.py (66%)
create mode 100644 projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1.py
create mode 100644 projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_65.py
create mode 100644 projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2.py
delete mode 100644 projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py
create mode 100644 projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
create mode 100644 projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
create mode 100644 projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
create mode 100644 projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
create mode 100644 projects/mapillary_dataset/mmseg/datasets/mapillary.py
delete mode 100644 projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py
delete mode 100644 projects/mapillary_dataset/tools/dataset_converters/mapillary.py
create mode 100644 tests/data/pseudo_mapillary_dataset/images/__CRyFzoDOXn6unQ6a3DnQ.jpg
create mode 100644 tests/data/pseudo_mapillary_dataset/v1.2/__CRyFzoDOXn6unQ6a3DnQ.png
create mode 100644 tests/data/pseudo_mapillary_dataset/v2.0/__CRyFzoDOXn6unQ6a3DnQ.png
diff --git a/README.md b/README.md
index 9b4a580f39..52eda092c8 100644
--- a/README.md
+++ b/README.md
@@ -181,6 +181,7 @@ Results and models are available in the [model zoo](docs/en/model_zoo.md).
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isprs-potsdam)
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isprs-vaihingen)
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isaid)
+- [x] [Mapillary Vistas](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#mapillary-vistas-datasets)
diff --git a/README_zh-CN.md b/README_zh-CN.md
index f7c3b0b18e..167ecbdc40 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -162,6 +162,7 @@ MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 O
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isprs-potsdam)
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isprs-vaihingen)
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isaid)
+- [x] [Mapillary Vistas](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#mapillary-vistas-datasets)
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py b/configs/_base_/datasets/mapillary_v1.py
similarity index 91%
rename from projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py
rename to configs/_base_/datasets/mapillary_v1.py
index a0e7d14b52..611aa4741b 100644
--- a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py
+++ b/configs/_base_/datasets/mapillary_v1.py
@@ -1,5 +1,5 @@
# dataset settings
-dataset_type = 'MapillaryDataset_v1_2'
+dataset_type = 'MapillaryDataset_v1'
data_root = 'data/mapillary/'
crop_size = (512, 1024)
train_pipeline = [
@@ -48,8 +48,7 @@
type=dataset_type,
data_root=data_root,
data_prefix=dict(
- img_path='training/images',
- seg_map_path='training/v1.2/labels_mask'),
+ img_path='training/images', seg_map_path='training/v1.2/labels'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
@@ -61,7 +60,7 @@
data_root=data_root,
data_prefix=dict(
img_path='validation/images',
- seg_map_path='validation/v1.2/labels_mask'),
+ seg_map_path='validation/v1.2/labels'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
diff --git a/configs/_base_/datasets/mapillary_v1_65.py b/configs/_base_/datasets/mapillary_v1_65.py
new file mode 100644
index 0000000000..f594f37333
--- /dev/null
+++ b/configs/_base_/datasets/mapillary_v1_65.py
@@ -0,0 +1,37 @@
+# dataset settings
+_base_ = './mapillary_v1.py'
+metainfo = dict(
+ classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', 'Barrier',
+ 'Wall', 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Parking',
+ 'Pedestrian Area', 'Rail Track', 'Road', 'Service Lane',
+ 'Sidewalk', 'Bridge', 'Building', 'Tunnel', 'Person', 'Bicyclist',
+ 'Motorcyclist', 'Other Rider', 'Lane Marking - Crosswalk',
+ 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
+ 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', 'Bike Rack',
+ 'Billboard', 'Catch Basin', 'CCTV Camera', 'Fire Hydrant',
+ 'Junction Box', 'Mailbox', 'Manhole', 'Phone Booth', 'Pothole',
+ 'Street Light', 'Pole', 'Traffic Sign Frame', 'Utility Pole',
+ 'Traffic Light', 'Traffic Sign (Back)', 'Traffic Sign (Front)',
+ 'Trash Can', 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan',
+ 'Motorcycle', 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
+ 'Wheeled Slow', 'Car Mount', 'Ego Vehicle'),
+ palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
+ [180, 165, 180], [90, 120, 150], [102, 102, 156], [128, 64, 255],
+ [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232],
+ [150, 100, 100], [70, 70, 70], [150, 120, 90], [220, 20, 60],
+ [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
+ [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180],
+ [190, 255, 255], [152, 251, 152], [107, 142, 35], [0, 170, 30],
+ [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220],
+ [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40],
+ [33, 33, 33], [100, 128, 160], [142, 0, 0], [70, 100, 150],
+ [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
+ [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20],
+ [119, 11, 32], [150, 0, 255], [0, 60, 100], [0, 0, 142],
+ [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]])
+
+train_dataloader = dict(dataset=dict(metainfo=metainfo))
+val_dataloader = dict(dataset=dict(metainfo=metainfo))
+test_dataloader = val_dataloader
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py b/configs/_base_/datasets/mapillary_v2.py
similarity index 91%
rename from projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py
rename to configs/_base_/datasets/mapillary_v2.py
index 7332d43fad..7cb7a958e5 100644
--- a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py
+++ b/configs/_base_/datasets/mapillary_v2.py
@@ -1,5 +1,5 @@
# dataset settings
-dataset_type = 'MapillaryDataset_v2_0'
+dataset_type = 'MapillaryDataset_v2'
data_root = 'data/mapillary/'
crop_size = (512, 1024)
train_pipeline = [
@@ -48,8 +48,7 @@
type=dataset_type,
data_root=data_root,
data_prefix=dict(
- img_path='training/images',
- seg_map_path='training/v2.0/labels_mask'),
+ img_path='training/images', seg_map_path='training/v2.0/labels'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
@@ -61,7 +60,7 @@
data_root=data_root,
data_prefix=dict(
img_path='validation/images',
- seg_map_path='validation/v2.0/labels_mask'),
+ seg_map_path='validation/v2.0/labels'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
diff --git a/configs/_base_/schedules/schedule_240k.py b/configs/_base_/schedules/schedule_240k.py
new file mode 100644
index 0000000000..feb2ce9637
--- /dev/null
+++ b/configs/_base_/schedules/schedule_240k.py
@@ -0,0 +1,25 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None)
+# learning policy
+param_scheduler = [
+ dict(
+ type='PolyLR',
+ eta_min=1e-4,
+ power=0.9,
+ begin=0,
+ end=240000,
+ by_epoch=False)
+]
+# training schedule for 240k
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=240000, val_interval=24000)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+default_hooks = dict(
+ timer=dict(type='IterTimerHook'),
+ logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
+ param_scheduler=dict(type='ParamSchedulerHook'),
+ checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=24000),
+ sampler_seed=dict(type='DistSamplerSeedHook'),
+ visualization=dict(type='SegVisualizationHook'))
diff --git a/configs/deeplabv3plus/README.md b/configs/deeplabv3plus/README.md
index b3d3ce7678..f60f547391 100644
--- a/configs/deeplabv3plus/README.md
+++ b/configs/deeplabv3plus/README.md
@@ -124,6 +124,12 @@ Spatial pyramid pooling module or encode-decoder structure are used in deep neur
| DeepLabV3+ | R-18-D8 | 896x896 | 80000 | 6.19 | 24.81 | 61.35 | 62.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526-7059991d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) |
| DeepLabV3+ | R-50-D8 | 896x896 | 80000 | 21.45 | 8.42 | 67.06 | 68.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) |
+### Mapillary Vistas v1.2
+
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | Device | mIoU | mIoU(ms+flip) | config | download |
+| ---------- | -------- | --------- | ------: | -------- | -------------- | ------ | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| DeepLabV3+ | R-50-D8 | 1280x1280 | 300000 | 24.04 | 17.92 | A100 | 47.35 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280_20230301_110504-655f8e43.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280_20230301_110504.json) |
+
Note:
- `D-8`/`D-16` here corresponding to the output stride 8/16 setting for DeepLab series.
diff --git a/configs/deeplabv3plus/deeplabv3plus.yml b/configs/deeplabv3plus/deeplabv3plus.yml
index 755c1fd4be..949cbcecc0 100644
--- a/configs/deeplabv3plus/deeplabv3plus.yml
+++ b/configs/deeplabv3plus/deeplabv3plus.yml
@@ -11,6 +11,7 @@ Collections:
- Potsdam
- Vaihingen
- iSAID
+ - Mapillary Vistas v1.2
Paper:
URL: https://arxiv.org/abs/1802.02611
Title: Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation
@@ -848,3 +849,24 @@ Models:
mIoU(ms+flip): 68.02
Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py
Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth
+- Name: deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280
+ In Collection: DeepLabV3+
+ Metadata:
+ backbone: R-50-D8
+ crop size: (1280,1280)
+ lr schd: 300000
+ inference time (ms/im):
+ - value: 55.8
+ hardware: V100
+ backend: PyTorch
+ batch size: 1
+ mode: FP32
+ resolution: (1280,1280)
+ Training Memory (GB): 24.04
+ Results:
+ - Task: Semantic Segmentation
+ Dataset: Mapillary Vistas v1.2
+ Metrics:
+ mIoU: 47.35
+ Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py
+ Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280_20230301_110504-655f8e43.pth
diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py
new file mode 100644
index 0000000000..133c45ae1d
--- /dev/null
+++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-300k_mapillay_v1_65-1280x1280.py
@@ -0,0 +1,58 @@
+_base_ = [
+ '../_base_/models/deeplabv3plus_r50-d8.py',
+ '../_base_/datasets/mapillary_v1_65.py',
+ '../_base_/default_runtime.py',
+]
+
+crop_size = (1280, 1280)
+data_preprocessor = dict(size=crop_size)
+model = dict(
+ data_preprocessor=data_preprocessor,
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(depth=50),
+ decode_head=dict(num_classes=65),
+ auxiliary_head=dict(num_classes=65))
+
+iters = 300000
+# optimizer
+optimizer = dict(
+ type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.0001)
+# optimizer
+optim_wrapper = dict(
+ type='OptimWrapper',
+ optimizer=optimizer,
+ clip_grad=dict(max_norm=0.01, norm_type=2),
+ paramwise_cfg=dict(
+ custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
+param_scheduler = [
+ dict(
+ type='PolyLR',
+ eta_min=0,
+ power=0.9,
+ begin=0,
+ end=iters,
+ by_epoch=False)
+]
+
+# training schedule for 300k
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=iters, val_interval=iters // 10)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+default_hooks = dict(
+ timer=dict(type='IterTimerHook'),
+ logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
+ param_scheduler=dict(type='ParamSchedulerHook'),
+ checkpoint=dict(
+ type='CheckpointHook', by_epoch=False, interval=iters // 10),
+ sampler_seed=dict(type='DistSamplerSeedHook'),
+ visualization=dict(type='SegVisualizationHook'))
+
+train_dataloader = dict(batch_size=2)
+
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+# or not by default.
+# - `base_batch_size` = (4 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=8)
diff --git a/docs/en/user_guides/2_dataset_prepare.md b/docs/en/user_guides/2_dataset_prepare.md
index 5d36061d89..6c81d60f41 100644
--- a/docs/en/user_guides/2_dataset_prepare.md
+++ b/docs/en/user_guides/2_dataset_prepare.md
@@ -154,6 +154,29 @@ mmsegmentation
│ │ │ ├── training
│ │ │ ├── validation
│ │ │ ├── test
+│ ├── mapillary
+│ │ ├── training
+│ │ │ ├── images
+│ │ │ ├── v1.2
+| │ │ │ ├── instances
+| │ │ │ ├── labels
+| │ │ │ └── panoptic
+│ │ │ ├── v2.0
+| │ │ │ ├── instances
+| │ │ │ ├── labels
+| │ │ │ ├── panoptic
+| │ │ │ └── polygons
+│ │ ├── validation
+│ │ │ ├── images
+| │ │ ├── v1.2
+| │ │ │ ├── instances
+| │ │ │ ├── labels
+| │ │ │ └── panoptic
+│ │ │ ├── v2.0
+| │ │ │ ├── instances
+| │ │ │ ├── labels
+| │ │ │ ├── panoptic
+| │ │ │ └── polygons
```
### Cityscapes
@@ -551,3 +574,54 @@ The script will make directory structure below:
```
It includes 400 images for training, 400 images for validation and 400 images for testing which is the same as REFUGE 2018 dataset.
+
+## Mapillary Vistas Datasets
+
+- The dataset can be downloaded [here](https://www.mapillary.com/dataset/vistas) after registration.
+
+- Mapillary Vistas Dataset uses 8-bit labels with a color palette. No conversion operation is required.
+
+- Assuming you have put the dataset zip file in `mmsegmentation/data/mapillary`
+
+- Please run the following commands to unzip dataset.
+
+ ```bash
+ cd data/mapillary
+ unzip An-ZjB1Zm61yAZG0ozTymz8I8NqI4x0MrYrh26dq7kPgfu8vf9ImrdaOAVOFYbJ2pNAgUnVGBmbue9lTgdBOb5BbKXIpFs0fpYWqACbrQDChAA2fdX0zS9PcHu7fY8c-FOvyBVxPNYNFQuM.zip
+ ```
+
+- After unzipping, you will get the Mapillary Vistas Dataset in the following structure. Semantic segmentation mask labels are in the `labels` folder.
+
+ ```none
+ mmsegmentation
+ ├── mmseg
+ ├── tools
+ ├── configs
+ ├── data
+ │ ├── mapillary
+ │ │ ├── training
+ │ │ │ ├── images
+ │ │ │ ├── v1.2
+ | │ │ │ ├── instances
+ | │ │ │ ├── labels
+ | │ │ │ └── panoptic
+ │ │ │ ├── v2.0
+ | │ │ │ ├── instances
+ | │ │ │ ├── labels
+ | │ │ │ ├── panoptic
+ | │ │ │ └── polygons
+ │ │ ├── validation
+ │ │ │ ├── images
+ | │ │ ├── v1.2
+ | │ │ │ ├── instances
+ | │ │ │ ├── labels
+ | │ │ │ └── panoptic
+ │ │ │ ├── v2.0
+ | │ │ │ ├── instances
+ | │ │ │ ├── labels
+ | │ │ │ ├── panoptic
+ | │ │ │ └── polygons
+ ```
+
+- You can select the dataset version with `MapillaryDataset_v1` or `MapillaryDataset_v2` in your configs.
+  View the Mapillary Vistas Datasets config files here: [V1.2](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/_base_/datasets/mapillary_v1.py) and [V2.0](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/_base_/datasets/mapillary_v2.py)
diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py
index 0dd19ee312..a90d53c88e 100644
--- a/mmseg/datasets/__init__.py
+++ b/mmseg/datasets/__init__.py
@@ -14,6 +14,7 @@
from .isprs import ISPRSDataset
from .lip import LIPDataset
from .loveda import LoveDADataset
+from .mapillary import MapillaryDataset_v1, MapillaryDataset_v2
from .night_driving import NightDrivingDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
from .potsdam import PotsdamDataset
@@ -49,5 +50,6 @@
'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge',
'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur',
'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip',
- 'SynapseDataset', 'REFUGEDataset'
+ 'SynapseDataset', 'REFUGEDataset', 'MapillaryDataset_v1',
+ 'MapillaryDataset_v2'
]
diff --git a/projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py b/mmseg/datasets/mapillary.py
similarity index 66%
rename from projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py
rename to mmseg/datasets/mapillary.py
index 9c67a8b212..6c2947338e 100644
--- a/projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py
+++ b/mmseg/datasets/mapillary.py
@@ -1,10 +1,72 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from mmseg.datasets.basesegdataset import BaseSegDataset
from mmseg.registry import DATASETS
+from .basesegdataset import BaseSegDataset
@DATASETS.register_module()
-class MapillaryDataset_v2_0(BaseSegDataset):
+class MapillaryDataset_v1(BaseSegDataset):
+ """Mapillary Vistas Dataset.
+
+ Dataset paper link:
+ http://ieeexplore.ieee.org/document/8237796/
+
+ v1.2 contain 66 object classes.
+ (37 instance-specific)
+
+ v2.0 contain 124 object classes.
+ (70 instance-specific, 46 stuff, 8 void or crowd).
+
+ The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
+ fixed to '.png' for Mapillary Vistas Dataset.
+ """
+ METAINFO = dict(
+ classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail',
+ 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain',
+ 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track',
+ 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building',
+ 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist',
+ 'Other Rider', 'Lane Marking - Crosswalk',
+ 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
+ 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench',
+ 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera',
+ 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole',
+ 'Phone Booth', 'Pothole', 'Street Light', 'Pole',
+ 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light',
+ 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can',
+ 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan', 'Motorcycle',
+ 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
+ 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'),
+ palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
+ [180, 165, 180], [90, 120, 150], [102, 102, 156],
+ [128, 64, 255], [140, 140, 200], [170, 170, 170],
+ [250, 170, 160], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110],
+ [244, 35, 232], [150, 100, 100], [70, 70, 70], [150, 120, 90],
+ [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
+ [200, 128, 128], [255, 255, 255], [64, 170,
+ 64], [230, 160, 50],
+ [70, 130, 180], [190, 255, 255], [152, 251, 152],
+ [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
+ [100, 140, 180], [220, 220, 220], [220, 128, 128],
+ [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33],
+ [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100],
+ [153, 153, 153], [128, 128, 128], [0, 0, 80], [250, 170, 30],
+ [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
+ [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
+ [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10,
+ 10], [0, 0, 0]])
+
+ def __init__(self,
+ img_suffix='.jpg',
+ seg_map_suffix='.png',
+ **kwargs) -> None:
+ super().__init__(
+ img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
+
+
+@DATASETS.register_module()
+class MapillaryDataset_v2(BaseSegDataset):
"""Mapillary Vistas Dataset.
Dataset paper link:
diff --git a/mmseg/utils/class_names.py b/mmseg/utils/class_names.py
index 662199f21e..961a08520d 100644
--- a/mmseg/utils/class_names.py
+++ b/mmseg/utils/class_names.py
@@ -126,6 +126,126 @@ def stare_classes():
return ['background', 'vessel']
+def mapillary_v1_classes():
+ """mapillary_v1 class names for external use."""
+ return [
+ 'Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', 'Barrier',
+ 'Wall', 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Parking',
+ 'Pedestrian Area', 'Rail Track', 'Road', 'Service Lane', 'Sidewalk',
+ 'Bridge', 'Building', 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist',
+ 'Other Rider', 'Lane Marking - Crosswalk', 'Lane Marking - General',
+ 'Mountain', 'Sand', 'Sky', 'Snow', 'Terrain', 'Vegetation', 'Water',
+ 'Banner', 'Bench', 'Bike Rack', 'Billboard', 'Catch Basin',
+ 'CCTV Camera', 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole',
+ 'Phone Booth', 'Pothole', 'Street Light', 'Pole', 'Traffic Sign Frame',
+ 'Utility Pole', 'Traffic Light', 'Traffic Sign (Back)',
+ 'Traffic Sign (Front)', 'Trash Can', 'Bicycle', 'Boat', 'Bus', 'Car',
+ 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle', 'Trailer',
+ 'Truck', 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'
+ ]
+
+
+def mapillary_v1_palette():
+    """mapillary_v1 palette for external use."""
+ return [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
+ [180, 165, 180], [90, 120, 150], [102, 102, 156], [128, 64, 255],
+ [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232],
+ [150, 100, 100], [70, 70, 70], [150, 120, 90], [220, 20, 60],
+ [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
+ [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180],
+ [190, 255, 255], [152, 251, 152], [107, 142, 35], [0, 170, 30],
+ [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220],
+ [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40],
+ [33, 33, 33], [100, 128, 160], [142, 0, 0], [70, 100, 150],
+ [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
+ [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20],
+ [119, 11, 32], [150, 0, 255], [0, 60, 100], [0, 0, 142],
+ [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10], [0, 0, 0]]
+
+
+def mapillary_v2_classes():
+ """mapillary_v2 class names for external use."""
+ return [
+ 'Bird', 'Ground Animal', 'Ambiguous Barrier', 'Concrete Block', 'Curb',
+ 'Fence', 'Guard Rail', 'Barrier', 'Road Median', 'Road Side',
+ 'Lane Separator', 'Temporary Barrier', 'Wall', 'Bike Lane',
+ 'Crosswalk - Plain', 'Curb Cut', 'Driveway', 'Parking',
+ 'Parking Aisle', 'Pedestrian Area', 'Rail Track', 'Road',
+ 'Road Shoulder', 'Service Lane', 'Sidewalk', 'Traffic Island',
+ 'Bridge', 'Building', 'Garage', 'Tunnel', 'Person', 'Person Group',
+ 'Bicyclist', 'Motorcyclist', 'Other Rider',
+ 'Lane Marking - Dashed Line', 'Lane Marking - Straight Line',
+ 'Lane Marking - Zigzag Line', 'Lane Marking - Ambiguous',
+ 'Lane Marking - Arrow (Left)', 'Lane Marking - Arrow (Other)',
+ 'Lane Marking - Arrow (Right)',
+ 'Lane Marking - Arrow (Split Left or Straight)',
+ 'Lane Marking - Arrow (Split Right or Straight)',
+ 'Lane Marking - Arrow (Straight)', 'Lane Marking - Crosswalk',
+ 'Lane Marking - Give Way (Row)', 'Lane Marking - Give Way (Single)',
+ 'Lane Marking - Hatched (Chevron)',
+ 'Lane Marking - Hatched (Diagonal)', 'Lane Marking - Other',
+ 'Lane Marking - Stop Line', 'Lane Marking - Symbol (Bicycle)',
+ 'Lane Marking - Symbol (Other)', 'Lane Marking - Text',
+ 'Lane Marking (only) - Dashed Line', 'Lane Marking (only) - Crosswalk',
+ 'Lane Marking (only) - Other', 'Lane Marking (only) - Test',
+ 'Mountain', 'Sand', 'Sky', 'Snow', 'Terrain', 'Vegetation', 'Water',
+ 'Banner', 'Bench', 'Bike Rack', 'Catch Basin', 'CCTV Camera',
+ 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole', 'Parking Meter',
+ 'Phone Booth', 'Pothole', 'Signage - Advertisement',
+ 'Signage - Ambiguous', 'Signage - Back', 'Signage - Information',
+ 'Signage - Other', 'Signage - Store', 'Street Light', 'Pole',
+ 'Pole Group', 'Traffic Sign Frame', 'Utility Pole', 'Traffic Cone',
+ 'Traffic Light - General (Single)', 'Traffic Light - Pedestrians',
+ 'Traffic Light - General (Upright)',
+ 'Traffic Light - General (Horizontal)', 'Traffic Light - Cyclists',
+ 'Traffic Light - Other', 'Traffic Sign - Ambiguous',
+ 'Traffic Sign (Back)', 'Traffic Sign - Direction (Back)',
+ 'Traffic Sign - Direction (Front)', 'Traffic Sign (Front)',
+ 'Traffic Sign - Parking', 'Traffic Sign - Temporary (Back)',
+ 'Traffic Sign - Temporary (Front)', 'Trash Can', 'Bicycle', 'Boat',
+ 'Bus', 'Car', 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle',
+ 'Trailer', 'Truck', 'Vehicle Group', 'Wheeled Slow', 'Water Valve',
+ 'Car Mount', 'Dynamic', 'Ego Vehicle', 'Ground', 'Static', 'Unlabeled'
+ ]
+
+
+def mapillary_v2_palette():
+    """mapillary_v2 palette for external use."""
+ return [[165, 42, 42], [0, 192, 0], [250, 170, 31], [250, 170, 32],
+ [196, 196, 196], [190, 153, 153], [180, 165, 180], [90, 120, 150],
+ [250, 170, 33], [250, 170, 34], [128, 128, 128], [250, 170, 35],
+ [102, 102, 156], [128, 64, 255], [140, 140, 200], [170, 170, 170],
+ [250, 170, 36], [250, 170, 160], [250, 170, 37], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110], [110, 110, 110],
+ [244, 35, 232], [128, 196, 128], [150, 100, 100], [70, 70, 70],
+ [150, 150, 150], [150, 120, 90], [220, 20, 60], [220, 20, 60],
+ [255, 0, 0], [255, 0, 100], [255, 0, 200], [255, 255, 255],
+ [255, 255, 255], [250, 170, 29], [250, 170, 28], [250, 170, 26],
+ [250, 170, 25], [250, 170, 24], [250, 170, 22], [250, 170, 21],
+ [250, 170, 20], [255, 255, 255], [250, 170, 19], [250, 170, 18],
+ [250, 170, 12], [250, 170, 11], [255, 255, 255], [255, 255, 255],
+ [250, 170, 16], [250, 170, 15], [250, 170, 15], [255, 255, 255],
+ [255, 255, 255], [255, 255, 255], [255, 255, 255], [64, 170, 64],
+ [230, 160, 50], [70, 130, 180], [190, 255, 255], [152, 251, 152],
+ [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
+ [100, 140, 180], [220, 128, 128], [222, 40, 40], [100, 170, 30],
+ [40, 40, 40], [33, 33, 33], [100, 128, 160], [20, 20, 255],
+ [142, 0, 0], [70, 100, 150], [250, 171, 30], [250, 172, 30],
+ [250, 173, 30], [250, 174, 30], [250, 175, 30], [250, 176, 30],
+ [210, 170, 100], [153, 153, 153], [153, 153, 153], [128, 128, 128],
+ [0, 0, 80], [210, 60, 60], [250, 170, 30], [250, 170, 30],
+ [250, 170, 30], [250, 170, 30], [250, 170, 30], [250, 170, 30],
+ [192, 192, 192], [192, 192, 192], [192, 192, 192], [220, 220, 0],
+ [220, 220, 0], [0, 0, 196], [192, 192, 192], [220, 220, 0],
+ [140, 140, 20], [119, 11, 32], [150, 0, 255], [0, 60, 100],
+ [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64],
+ [0, 0, 110], [0, 0, 70], [0, 0, 142], [0, 0, 192], [170, 170, 170],
+ [32, 32, 32], [111, 74, 0], [120, 10, 10], [81, 0, 81],
+ [111, 111, 0], [0, 0, 0]]
+
+
def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
@@ -313,7 +433,9 @@ def lip_palette():
],
'isaid': ['isaid', 'iSAID'],
'stare': ['stare', 'STARE'],
- 'lip': ['LIP', 'lip']
+ 'lip': ['LIP', 'lip'],
+ 'mapillary_v1': ['mapillary_v1'],
+ 'mapillary_v2': ['mapillary_v2']
}
diff --git a/projects/mapillary_dataset/README.md b/projects/mapillary_dataset/README.md
index 2b3099522e..cdc61d53a9 100644
--- a/projects/mapillary_dataset/README.md
+++ b/projects/mapillary_dataset/README.md
@@ -34,6 +34,7 @@ Preparing `Mapillary Vistas Dataset` dataset following [Mapillary Vistas Dataset
| │ │ │ └── polygons
│ │ ├── validation
│ │ │ ├── images
+ │ │ │ ├── v1.2
| │ │ │ ├── instances
| │ │ │ ├── labels
| │ │ │ ├── labels_mask
@@ -46,12 +47,12 @@ Preparing `Mapillary Vistas Dataset` dataset following [Mapillary Vistas Dataset
| │ │ │ └── polygons
```
-### Training commands with `deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py`
+### Training commands
```bash
# Dataset train commands
# at `mmsegmentation` folder
-bash tools/dist_train.sh projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py 4
+bash tools/dist_train.sh projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py 4
```
## Checklist
@@ -66,20 +67,20 @@ bash tools/dist_train.sh projects/mapillary_dataset/configs/deeplabv3plus_r101-d
- [x] A full README
-- [ ] Milestone 2: Indicates a successful model implementation.
+- [x] Milestone 2: Indicates a successful model implementation.
- - [ ] Training-time correctness
+ - [x] Training-time correctness
-- [ ] Milestone 3: Good to be a part of our core package!
+- [x] Milestone 3: Good to be a part of our core package!
- - [ ] Type hints and docstrings
+ - [x] Type hints and docstrings
- - [ ] Unit tests
+ - [x] Unit tests
- - [ ] Code polishing
+ - [x] Code polishing
- - [ ] Metafile.yml
+ - [x] Metafile.yml
-- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
+- [x] Move your modules into the core package following the codebase's file hierarchy structure.
-- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.
+- [x] Refactor your modules into the core package following the codebase's file hierarchy structure.
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1.py b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1.py
new file mode 100644
index 0000000000..611aa4741b
--- /dev/null
+++ b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1.py
@@ -0,0 +1,68 @@
+# dataset settings
+dataset_type = 'MapillaryDataset_v1'
+data_root = 'data/mapillary/'
+crop_size = (512, 1024)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(
+ type='RandomResize',
+ scale=(2048, 1024),
+ ratio_range=(0.5, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='PackSegInputs')
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
+ # add loading annotation after ``Resize`` because ground truth
+ # does not need to do resize data transform
+ dict(type='LoadAnnotations'),
+ dict(type='PackSegInputs')
+]
+img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
+tta_pipeline = [
+ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
+ dict(
+ type='TestTimeAug',
+ transforms=[
+ [
+ dict(type='Resize', scale_factor=r, keep_ratio=True)
+ for r in img_ratios
+ ],
+ [
+ dict(type='RandomFlip', prob=0., direction='horizontal'),
+ dict(type='RandomFlip', prob=1., direction='horizontal')
+ ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
+ ])
+]
+train_dataloader = dict(
+ batch_size=2,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='training/images', seg_map_path='training/v1.2/labels'),
+ pipeline=train_pipeline))
+val_dataloader = dict(
+ batch_size=1,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='validation/images',
+ seg_map_path='validation/v1.2/labels'),
+ pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
+test_evaluator = val_evaluator
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_65.py b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_65.py
new file mode 100644
index 0000000000..f594f37333
--- /dev/null
+++ b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_65.py
@@ -0,0 +1,37 @@
+# dataset settings
+_base_ = './mapillary_v1.py'
+metainfo = dict(
+ classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', 'Barrier',
+ 'Wall', 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Parking',
+ 'Pedestrian Area', 'Rail Track', 'Road', 'Service Lane',
+ 'Sidewalk', 'Bridge', 'Building', 'Tunnel', 'Person', 'Bicyclist',
+ 'Motorcyclist', 'Other Rider', 'Lane Marking - Crosswalk',
+ 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
+ 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', 'Bike Rack',
+ 'Billboard', 'Catch Basin', 'CCTV Camera', 'Fire Hydrant',
+ 'Junction Box', 'Mailbox', 'Manhole', 'Phone Booth', 'Pothole',
+ 'Street Light', 'Pole', 'Traffic Sign Frame', 'Utility Pole',
+ 'Traffic Light', 'Traffic Sign (Back)', 'Traffic Sign (Front)',
+ 'Trash Can', 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan',
+ 'Motorcycle', 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
+ 'Wheeled Slow', 'Car Mount', 'Ego Vehicle'),
+ palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
+ [180, 165, 180], [90, 120, 150], [102, 102, 156], [128, 64, 255],
+ [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232],
+ [150, 100, 100], [70, 70, 70], [150, 120, 90], [220, 20, 60],
+ [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
+ [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180],
+ [190, 255, 255], [152, 251, 152], [107, 142, 35], [0, 170, 30],
+ [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220],
+ [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40],
+ [33, 33, 33], [100, 128, 160], [142, 0, 0], [70, 100, 150],
+ [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
+ [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20],
+ [119, 11, 32], [150, 0, 255], [0, 60, 100], [0, 0, 142],
+ [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]])
+
+train_dataloader = dict(dataset=dict(metainfo=metainfo))
+val_dataloader = dict(dataset=dict(metainfo=metainfo))
+test_dataloader = val_dataloader
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2.py b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2.py
new file mode 100644
index 0000000000..7cb7a958e5
--- /dev/null
+++ b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2.py
@@ -0,0 +1,68 @@
+# dataset settings
+dataset_type = 'MapillaryDataset_v2'
+data_root = 'data/mapillary/'
+crop_size = (512, 1024)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(
+ type='RandomResize',
+ scale=(2048, 1024),
+ ratio_range=(0.5, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='PackSegInputs')
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
+ # add loading annotation after ``Resize`` because ground truth
+ # does not need to do resize data transform
+ dict(type='LoadAnnotations'),
+ dict(type='PackSegInputs')
+]
+img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
+tta_pipeline = [
+ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
+ dict(
+ type='TestTimeAug',
+ transforms=[
+ [
+ dict(type='Resize', scale_factor=r, keep_ratio=True)
+ for r in img_ratios
+ ],
+ [
+ dict(type='RandomFlip', prob=0., direction='horizontal'),
+ dict(type='RandomFlip', prob=1., direction='horizontal')
+ ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
+ ])
+]
+train_dataloader = dict(
+ batch_size=2,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='training/images', seg_map_path='training/v2.0/labels'),
+ pipeline=train_pipeline))
+val_dataloader = dict(
+ batch_size=1,
+ num_workers=4,
+ persistent_workers=True,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ data_prefix=dict(
+ img_path='validation/images',
+ seg_map_path='validation/v2.0/labels'),
+ pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
+test_evaluator = val_evaluator
diff --git a/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py
deleted file mode 100644
index 6f7ad65ed8..0000000000
--- a/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py
+++ /dev/null
@@ -1,103 +0,0 @@
-_base_ = ['./_base_/datasets/mapillary_v1_2.py'] # v 1.2 labels
-# _base_ = ['./_base_/datasets/mapillary_v2_0.py'] # v2.0 labels
-custom_imports = dict(imports=[
- 'projects.mapillary_dataset.mmseg.datasets.mapillary_v1_2',
- 'projects.mapillary_dataset.mmseg.datasets.mapillary_v2_0',
-])
-
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-data_preprocessor = dict(
- type='SegDataPreProcessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True,
- pad_val=0,
- seg_pad_val=255,
- size=(512, 1024))
-
-model = dict(
- type='EncoderDecoder',
- data_preprocessor=data_preprocessor,
- pretrained=None,
- backbone=dict(
- type='ResNet',
- depth=101,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='DepthwiseSeparableASPPHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- dilations=(1, 12, 24, 36),
- c1_in_channels=256,
- c1_channels=48,
- dropout_ratio=0.1,
- num_classes=66, # v1.2
- # num_classes=124, # v2.0
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=66, # v1.2
- # num_classes=124, # v2.0
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
-default_scope = 'mmseg'
-env_cfg = dict(
- cudnn_benchmark=True,
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
- dist_cfg=dict(backend='nccl'))
-vis_backends = [dict(type='LocalVisBackend')]
-visualizer = dict(
- type='SegLocalVisualizer',
- vis_backends=[dict(type='LocalVisBackend')],
- name='visualizer')
-log_processor = dict(by_epoch=False)
-log_level = 'INFO'
-load_from = None
-resume = False
-tta_model = dict(type='SegTTAModel')
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
-optim_wrapper = dict(
- type='OptimWrapper',
- optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
- clip_grad=None)
-param_scheduler = [
- dict(
- type='PolyLR',
- eta_min=0.0001,
- power=0.9,
- begin=0,
- end=240000,
- by_epoch=False)
-]
-train_cfg = dict(
- type='IterBasedTrainLoop', max_iters=240000, val_interval=24000)
-val_cfg = dict(type='ValLoop')
-test_cfg = dict(type='TestLoop')
-default_hooks = dict(
- timer=dict(type='IterTimerHook'),
- logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
- param_scheduler=dict(type='ParamSchedulerHook'),
- checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=24000),
- sampler_seed=dict(type='DistSamplerSeedHook'),
- visualization=dict(type='SegVisualizationHook'))
diff --git a/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
new file mode 100644
index 0000000000..b559e0d6aa
--- /dev/null
+++ b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
@@ -0,0 +1,17 @@
+_base_ = [
+ '../../../configs/_base_/models/deeplabv3plus_r50-d8.py',
+ './_base_/datasets/mapillary_v1.py',
+ '../../../configs/_base_/default_runtime.py',
+ '../../../configs/_base_/schedules/schedule_240k.py'
+]
+custom_imports = dict(
+ imports=['projects.mapillary_dataset.mmseg.datasets.mapillary'])
+
+crop_size = (512, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(
+ data_preprocessor=data_preprocessor,
+ pretrained='open-mmlab://resnet101_v1c',
+ backbone=dict(depth=101),
+ decode_head=dict(num_classes=66),
+ auxiliary_head=dict(num_classes=66))
diff --git a/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v2-512x1024.py b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
new file mode 100644
index 0000000000..cfe31a2c12
--- /dev/null
+++ b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
@@ -0,0 +1,16 @@
+_base_ = [
+ '../../../configs/_base_/models/deeplabv3plus_r50-d8.py',
+ './_base_/datasets/mapillary_v2.py',
+ '../../../configs/_base_/default_runtime.py',
+ '../../../configs/_base_/schedules/schedule_240k.py'
+]
+custom_imports = dict(
+ imports=['projects.mapillary_dataset.mmseg.datasets.mapillary'])
+crop_size = (512, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(
+ data_preprocessor=data_preprocessor,
+ pretrained='open-mmlab://resnet101_v1c',
+ backbone=dict(depth=101),
+ decode_head=dict(num_classes=124),
+ auxiliary_head=dict(num_classes=124))
diff --git a/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v1-512x1024.py b/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
new file mode 100644
index 0000000000..1ca2b57f73
--- /dev/null
+++ b/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v1-512x1024.py
@@ -0,0 +1,16 @@
+_base_ = [
+ '../../../configs/_base_/models/pspnet_r50-d8.py',
+ './_base_/datasets/mapillary_v1.py',
+ '../../../configs/_base_/default_runtime.py',
+ '../../../configs/_base_/schedules/schedule_240k.py'
+]
+custom_imports = dict(
+ imports=['projects.mapillary_dataset.mmseg.datasets.mapillary'])
+crop_size = (512, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(
+ data_preprocessor=data_preprocessor,
+ pretrained='open-mmlab://resnet101_v1c',
+ backbone=dict(depth=101),
+ decode_head=dict(num_classes=66),
+ auxiliary_head=dict(num_classes=66))
diff --git a/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v2-512x1024.py b/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
new file mode 100644
index 0000000000..c04746a3dc
--- /dev/null
+++ b/projects/mapillary_dataset/configs/pspnet_r101-d8_4xb2-240k_mapillay_v2-512x1024.py
@@ -0,0 +1,16 @@
+_base_ = [
+ '../../../configs/_base_/models/pspnet_r50-d8.py',
+ './_base_/datasets/mapillary_v2.py',
+ '../../../configs/_base_/default_runtime.py',
+ '../../../configs/_base_/schedules/schedule_240k.py'
+]
+custom_imports = dict(
+ imports=['projects.mapillary_dataset.mmseg.datasets.mapillary'])
+crop_size = (512, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(
+ data_preprocessor=data_preprocessor,
+ pretrained='open-mmlab://resnet101_v1c',
+ backbone=dict(depth=101),
+ decode_head=dict(num_classes=124),
+ auxiliary_head=dict(num_classes=124))
diff --git a/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md b/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md
index 405e533156..fa07454330 100644
--- a/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md
+++ b/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md
@@ -1,87 +1,20 @@
-## Prepare datasets
+## Mapillary Vistas Datasets
-It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`.
-If your folder structure is different, you may need to change the corresponding paths in config files.
+- The dataset can be downloaded [here](https://www.mapillary.com/dataset/vistas) after registration.
-```none
-mmsegmentation
-├── mmseg
-├── tools
-├── configs
-├── data
-│ ├── mapillary
-│ │ ├── training
-│ │ │ ├── images
-│ │ │ ├── v1.2
-| │ │ │ ├── instances
-| │ │ │ ├── labels
-| │ │ │ ├── labels_mask
-| │ │ │ └── panoptic
-│ │ │ ├── v2.0
-| │ │ │ ├── instances
-| │ │ │ ├── labels
-| │ │ │ ├── labels_mask
-| │ │ │ ├── panoptic
-| │ │ │ └── polygons
-│ │ ├── validation
-│ │ │ ├── images
-| │ │ │ ├── instances
-| │ │ │ ├── labels
-| │ │ │ ├── labels_mask
-| │ │ │ └── panoptic
-│ │ │ ├── v2.0
-| │ │ │ ├── instances
-| │ │ │ ├── labels
-| │ │ │ ├── labels_mask
-| │ │ │ ├── panoptic
-| │ │ │ └── polygons
-```
+- The Mapillary Vistas Dataset uses 8-bit indexed color-palette labels, so no conversion operation is required.
-## Mapillary Vistas Datasets
+- Assuming you have put the dataset zip file in `mmsegmentation/data/mapillary`
-- The dataset could be download [here](https://www.mapillary.com/dataset/vistas) after registration.
-- Assumption you have put the dataset zip file in `mmsegmentation/data`
- Please run the following commands to unzip dataset.
+
```bash
- cd data
- mkdir mapillary
- unzip -d mapillary An-ZjB1Zm61yAZG0ozTymz8I8NqI4x0MrYrh26dq7kPgfu8vf9ImrdaOAVOFYbJ2pNAgUnVGBmbue9lTgdBOb5BbKXIpFs0fpYWqACbrQDChAA2fdX0zS9PcHu7fY8c-FOvyBVxPNYNFQuM.zip
- ```
-- After unzip, you will get Mapillary Vistas Dataset like this structure.
- ```none
- ├── data
- │ ├── mapillary
- │ │ ├── training
- │ │ │ ├── images
- │ │ │ ├── v1.2
- | │ │ │ ├── instances
- | │ │ │ ├── labels
- | │ │ │ └── panoptic
- │ │ │ ├── v2.0
- | │ │ │ ├── instances
- | │ │ │ ├── labels
- | │ │ │ ├── panoptic
- | │ │ │ └── polygons
- │ │ ├── validation
- │ │ │ ├── images
- | │ │ │ ├── instances
- | │ │ │ ├── labels
- | │ │ │ └── panoptic
- │ │ │ ├── v2.0
- | │ │ │ ├── instances
- | │ │ │ ├── labels
- | │ │ │ ├── panoptic
- | │ │ │ └── polygons
- ```
-- run following commands to convert RGB labels to mask labels
- ```bash
- # --nproc optional, default 1, whether use multi-progress
- # --version optional, 'v1.2', 'v2.0','all', default 'all', choose convert which version labels
- # run this command at 'mmsegmentation/projects/Mapillary_dataset' folder
- cd mmsegmentation/projects/mapillary_dataset
- python tools/dataset_converters/mapillary.py ../../data/mapillary --nproc 8 --version all
+ cd data/mapillary
+ unzip An-ZjB1Zm61yAZG0ozTymz8I8NqI4x0MrYrh26dq7kPgfu8vf9ImrdaOAVOFYbJ2pNAgUnVGBmbue9lTgdBOb5BbKXIpFs0fpYWqACbrQDChAA2fdX0zS9PcHu7fY8c-FOvyBVxPNYNFQuM.zip
```
- After then, you will get this structure
+
+- After unzipping, you will get the Mapillary Vistas Dataset in the following structure. Semantic segmentation mask labels are in the `labels` folders.
+
```none
mmsegmentation
├── mmseg
@@ -94,24 +27,229 @@ mmsegmentation
│ │ │ ├── v1.2
| │ │ │ ├── instances
| │ │ │ ├── labels
- | │ │ │ ├── labels_mask
| │ │ │ └── panoptic
│ │ │ ├── v2.0
| │ │ │ ├── instances
| │ │ │ ├── labels
- | │ │ │ ├── labels_mask
| │ │ │ ├── panoptic
| │ │ │ └── polygons
│ │ ├── validation
│ │ │ ├── images
+ | │ │ ├── v1.2
| │ │ │ ├── instances
| │ │ │ ├── labels
- | │ │ │ ├── labels_mask
| │ │ │ └── panoptic
│ │ │ ├── v2.0
| │ │ │ ├── instances
| │ │ │ ├── labels
- | │ │ │ ├── labels_mask
| │ │ │ ├── panoptic
| │ │ │ └── polygons
```
+
+- You can set the dataset version with `MapillaryDataset_v1` and `MapillaryDataset_v2` in your configs.
+ View the Mapillary Vistas Datasets config file here [V1.2](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/_base_/datasets/mapillary_v1.py) and [V2.0](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/_base_/datasets/mapillary_v2.py)
+
+- **View datasets labels index and palette**
+
+- **Mapillary Vistas Datasets labels information**
+ **v1.2 information**
+
+ ```none
+ There are 66 labels classes in v1.2
+ 0--Bird--[165, 42, 42],
+ 1--Ground Animal--[0, 192, 0],
+ 2--Curb--[196, 196, 196],
+ 3--Fence--[190, 153, 153],
+ 4--Guard Rail--[180, 165, 180],
+ 5--Barrier--[90, 120, 150],
+ 6--Wall--[102, 102, 156],
+ 7--Bike Lane--[128, 64, 255],
+ 8--Crosswalk - Plain--[140, 140, 200],
+ 9--Curb Cut--[170, 170, 170],
+ 10--Parking--[250, 170, 160],
+ 11--Pedestrian Area--[96, 96, 96],
+ 12--Rail Track--[230, 150, 140],
+ 13--Road--[128, 64, 128],
+ 14--Service Lane--[110, 110, 110],
+ 15--Sidewalk--[244, 35, 232],
+ 16--Bridge--[150, 100, 100],
+ 17--Building--[70, 70, 70],
+ 18--Tunnel--[150, 120, 90],
+ 19--Person--[220, 20, 60],
+ 20--Bicyclist--[255, 0, 0],
+ 21--Motorcyclist--[255, 0, 100],
+ 22--Other Rider--[255, 0, 200],
+ 23--Lane Marking - Crosswalk--[200, 128, 128],
+ 24--Lane Marking - General--[255, 255, 255],
+ 25--Mountain--[64, 170, 64],
+ 26--Sand--[230, 160, 50],
+ 27--Sky--[70, 130, 180],
+ 28--Snow--[190, 255, 255],
+ 29--Terrain--[152, 251, 152],
+ 30--Vegetation--[107, 142, 35],
+ 31--Water--[0, 170, 30],
+ 32--Banner--[255, 255, 128],
+ 33--Bench--[250, 0, 30],
+ 34--Bike Rack--[100, 140, 180],
+ 35--Billboard--[220, 220, 220],
+ 36--Catch Basin--[220, 128, 128],
+ 37--CCTV Camera--[222, 40, 40],
+ 38--Fire Hydrant--[100, 170, 30],
+ 39--Junction Box--[40, 40, 40],
+ 40--Mailbox--[33, 33, 33],
+ 41--Manhole--[100, 128, 160],
+ 42--Phone Booth--[142, 0, 0],
+ 43--Pothole--[70, 100, 150],
+ 44--Street Light--[210, 170, 100],
+ 45--Pole--[153, 153, 153],
+ 46--Traffic Sign Frame--[128, 128, 128],
+ 47--Utility Pole--[0, 0, 80],
+ 48--Traffic Light--[250, 170, 30],
+ 49--Traffic Sign (Back)--[192, 192, 192],
+ 50--Traffic Sign (Front)--[220, 220, 0],
+ 51--Trash Can--[140, 140, 20],
+ 52--Bicycle--[119, 11, 32],
+ 53--Boat--[150, 0, 255],
+ 54--Bus--[0, 60, 100],
+ 55--Car--[0, 0, 142],
+ 56--Caravan--[0, 0, 90],
+ 57--Motorcycle--[0, 0, 230],
+ 58--On Rails--[0, 80, 100],
+ 59--Other Vehicle--[128, 64, 64],
+ 60--Trailer--[0, 0, 110],
+ 61--Truck--[0, 0, 70],
+ 62--Wheeled Slow--[0, 0, 192],
+ 63--Car Mount--[32, 32, 32],
+ 64--Ego Vehicle--[120, 10, 10],
+ 65--Unlabeled--[0, 0, 0]
+ ```
+
+ **v2.0 information**
+
+ ```none
+ There are 124 labels classes in v2.0
+ 0--Bird--[165, 42, 42],
+ 1--Ground Animal--[0, 192, 0],
+ 2--Ambiguous Barrier--[250, 170, 31],
+ 3--Concrete Block--[250, 170, 32],
+ 4--Curb--[196, 196, 196],
+ 5--Fence--[190, 153, 153],
+ 6--Guard Rail--[180, 165, 180],
+ 7--Barrier--[90, 120, 150],
+ 8--Road Median--[250, 170, 33],
+ 9--Road Side--[250, 170, 34],
+ 10--Lane Separator--[128, 128, 128],
+ 11--Temporary Barrier--[250, 170, 35],
+ 12--Wall--[102, 102, 156],
+ 13--Bike Lane--[128, 64, 255],
+ 14--Crosswalk - Plain--[140, 140, 200],
+ 15--Curb Cut--[170, 170, 170],
+ 16--Driveway--[250, 170, 36],
+ 17--Parking--[250, 170, 160],
+ 18--Parking Aisle--[250, 170, 37],
+ 19--Pedestrian Area--[96, 96, 96],
+ 20--Rail Track--[230, 150, 140],
+ 21--Road--[128, 64, 128],
+ 22--Road Shoulder--[110, 110, 110],
+ 23--Service Lane--[110, 110, 110],
+ 24--Sidewalk--[244, 35, 232],
+ 25--Traffic Island--[128, 196, 128],
+ 26--Bridge--[150, 100, 100],
+ 27--Building--[70, 70, 70],
+ 28--Garage--[150, 150, 150],
+ 29--Tunnel--[150, 120, 90],
+ 30--Person--[220, 20, 60],
+ 31--Person Group--[220, 20, 60],
+ 32--Bicyclist--[255, 0, 0],
+ 33--Motorcyclist--[255, 0, 100],
+ 34--Other Rider--[255, 0, 200],
+ 35--Lane Marking - Dashed Line--[255, 255, 255],
+ 36--Lane Marking - Straight Line--[255, 255, 255],
+ 37--Lane Marking - Zigzag Line--[250, 170, 29],
+ 38--Lane Marking - Ambiguous--[250, 170, 28],
+ 39--Lane Marking - Arrow (Left)--[250, 170, 26],
+ 40--Lane Marking - Arrow (Other)--[250, 170, 25],
+ 41--Lane Marking - Arrow (Right)--[250, 170, 24],
+ 42--Lane Marking - Arrow (Split Left or Straight)--[250, 170, 22],
+ 43--Lane Marking - Arrow (Split Right or Straight)--[250, 170, 21],
+ 44--Lane Marking - Arrow (Straight)--[250, 170, 20],
+ 45--Lane Marking - Crosswalk--[255, 255, 255],
+ 46--Lane Marking - Give Way (Row)--[250, 170, 19],
+ 47--Lane Marking - Give Way (Single)--[250, 170, 18],
+ 48--Lane Marking - Hatched (Chevron)--[250, 170, 12],
+ 49--Lane Marking - Hatched (Diagonal)--[250, 170, 11],
+ 50--Lane Marking - Other--[255, 255, 255],
+ 51--Lane Marking - Stop Line--[255, 255, 255],
+ 52--Lane Marking - Symbol (Bicycle)--[250, 170, 16],
+ 53--Lane Marking - Symbol (Other)--[250, 170, 15],
+ 54--Lane Marking - Text--[250, 170, 15],
+ 55--Lane Marking (only) - Dashed Line--[255, 255, 255],
+ 56--Lane Marking (only) - Crosswalk--[255, 255, 255],
+ 57--Lane Marking (only) - Other--[255, 255, 255],
+ 58--Lane Marking (only) - Test--[255, 255, 255],
+ 59--Mountain--[64, 170, 64],
+ 60--Sand--[230, 160, 50],
+ 61--Sky--[70, 130, 180],
+ 62--Snow--[190, 255, 255],
+ 63--Terrain--[152, 251, 152],
+ 64--Vegetation--[107, 142, 35],
+ 65--Water--[0, 170, 30],
+ 66--Banner--[255, 255, 128],
+ 67--Bench--[250, 0, 30],
+ 68--Bike Rack--[100, 140, 180],
+ 69--Catch Basin--[220, 128, 128],
+ 70--CCTV Camera--[222, 40, 40],
+ 71--Fire Hydrant--[100, 170, 30],
+ 72--Junction Box--[40, 40, 40],
+ 73--Mailbox--[33, 33, 33],
+ 74--Manhole--[100, 128, 160],
+ 75--Parking Meter--[20, 20, 255],
+ 76--Phone Booth--[142, 0, 0],
+ 77--Pothole--[70, 100, 150],
+ 78--Signage - Advertisement--[250, 171, 30],
+ 79--Signage - Ambiguous--[250, 172, 30],
+ 80--Signage - Back--[250, 173, 30],
+ 81--Signage - Information--[250, 174, 30],
+ 82--Signage - Other--[250, 175, 30],
+ 83--Signage - Store--[250, 176, 30],
+ 84--Street Light--[210, 170, 100],
+ 85--Pole--[153, 153, 153],
+ 86--Pole Group--[153, 153, 153],
+ 87--Traffic Sign Frame--[128, 128, 128],
+ 88--Utility Pole--[0, 0, 80],
+ 89--Traffic Cone--[210, 60, 60],
+ 90--Traffic Light - General (Single)--[250, 170, 30],
+ 91--Traffic Light - Pedestrians--[250, 170, 30],
+ 92--Traffic Light - General (Upright)--[250, 170, 30],
+ 93--Traffic Light - General (Horizontal)--[250, 170, 30],
+ 94--Traffic Light - Cyclists--[250, 170, 30],
+ 95--Traffic Light - Other--[250, 170, 30],
+ 96--Traffic Sign - Ambiguous--[192, 192, 192],
+ 97--Traffic Sign (Back)--[192, 192, 192],
+ 98--Traffic Sign - Direction (Back)--[192, 192, 192],
+ 99--Traffic Sign - Direction (Front)--[220, 220, 0],
+ 100--Traffic Sign (Front)--[220, 220, 0],
+ 101--Traffic Sign - Parking--[0, 0, 196],
+ 102--Traffic Sign - Temporary (Back)--[192, 192, 192],
+ 103--Traffic Sign - Temporary (Front)--[220, 220, 0],
+ 104--Trash Can--[140, 140, 20],
+ 105--Bicycle--[119, 11, 32],
+ 106--Boat--[150, 0, 255],
+ 107--Bus--[0, 60, 100],
+ 108--Car--[0, 0, 142],
+ 109--Caravan--[0, 0, 90],
+ 110--Motorcycle--[0, 0, 230],
+ 111--On Rails--[0, 80, 100],
+ 112--Other Vehicle--[128, 64, 64],
+ 113--Trailer--[0, 0, 110],
+ 114--Truck--[0, 0, 70],
+ 115--Vehicle Group--[0, 0, 142],
+ 116--Wheeled Slow--[0, 0, 192],
+ 117--Water Valve--[170, 170, 170],
+ 118--Car Mount--[32, 32, 32],
+ 119--Dynamic--[111, 74, 0],
+ 120--Ego Vehicle--[120, 10, 10],
+ 121--Ground--[81, 0, 81],
+ 122--Static--[111, 111, 0],
+ 123--Unlabeled--[0, 0, 0]
+ ```
diff --git a/projects/mapillary_dataset/mmseg/datasets/mapillary.py b/projects/mapillary_dataset/mmseg/datasets/mapillary.py
new file mode 100644
index 0000000000..f49bd54451
--- /dev/null
+++ b/projects/mapillary_dataset/mmseg/datasets/mapillary.py
@@ -0,0 +1,177 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from mmseg.datasets.basesegdataset import BaseSegDataset
+
+# from mmseg.registry import DATASETS
+
+
+# @DATASETS.register_module()
+class MapillaryDataset_v1(BaseSegDataset):
+ """Mapillary Vistas Dataset.
+
+ Dataset paper link:
+ http://ieeexplore.ieee.org/document/8237796/
+
+ v1.2 contain 66 object classes.
+ (37 instance-specific)
+
+ v2.0 contain 124 object classes.
+ (70 instance-specific, 46 stuff, 8 void or crowd).
+
+ The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
+ fixed to '.png' for Mapillary Vistas Dataset.
+ """
+ METAINFO = dict(
+ classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail',
+ 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain',
+ 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track',
+ 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building',
+ 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist',
+ 'Other Rider', 'Lane Marking - Crosswalk',
+ 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
+ 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench',
+ 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera',
+ 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole',
+ 'Phone Booth', 'Pothole', 'Street Light', 'Pole',
+ 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light',
+ 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can',
+ 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan', 'Motorcycle',
+ 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
+ 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'),
+ palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
+ [180, 165, 180], [90, 120, 150], [102, 102, 156],
+ [128, 64, 255], [140, 140, 200], [170, 170, 170],
+ [250, 170, 160], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110],
+ [244, 35, 232], [150, 100, 100], [70, 70, 70], [150, 120, 90],
+ [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
+ [200, 128, 128], [255, 255, 255], [64, 170,
+ 64], [230, 160, 50],
+ [70, 130, 180], [190, 255, 255], [152, 251, 152],
+ [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
+ [100, 140, 180], [220, 220, 220], [220, 128, 128],
+ [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33],
+ [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100],
+ [153, 153, 153], [128, 128, 128], [0, 0, 80], [250, 170, 30],
+ [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
+ [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
+ [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10,
+ 10], [0, 0, 0]])
+
+ def __init__(self,
+ img_suffix='.jpg',
+ seg_map_suffix='.png',
+ **kwargs) -> None:
+ super().__init__(
+ img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
+
+
+# @DATASETS.register_module()
+class MapillaryDataset_v2(BaseSegDataset):
+ """Mapillary Vistas Dataset.
+
+ Dataset paper link:
+ http://ieeexplore.ieee.org/document/8237796/
+
+ v1.2 contain 66 object classes.
+ (37 instance-specific)
+
+ v2.0 contain 124 object classes.
+ (70 instance-specific, 46 stuff, 8 void or crowd).
+
+ The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
+ fixed to '.png' for Mapillary Vistas Dataset.
+ """
+ METAINFO = dict(
+ classes=(
+ 'Bird', 'Ground Animal', 'Ambiguous Barrier', 'Concrete Block',
+ 'Curb', 'Fence', 'Guard Rail', 'Barrier', 'Road Median',
+ 'Road Side', 'Lane Separator', 'Temporary Barrier', 'Wall',
+ 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Driveway',
+ 'Parking', 'Parking Aisle', 'Pedestrian Area', 'Rail Track',
+ 'Road', 'Road Shoulder', 'Service Lane', 'Sidewalk',
+ 'Traffic Island', 'Bridge', 'Building', 'Garage', 'Tunnel',
+ 'Person', 'Person Group', 'Bicyclist', 'Motorcyclist',
+ 'Other Rider', 'Lane Marking - Dashed Line',
+ 'Lane Marking - Straight Line', 'Lane Marking - Zigzag Line',
+ 'Lane Marking - Ambiguous', 'Lane Marking - Arrow (Left)',
+ 'Lane Marking - Arrow (Other)', 'Lane Marking - Arrow (Right)',
+ 'Lane Marking - Arrow (Split Left or Straight)',
+ 'Lane Marking - Arrow (Split Right or Straight)',
+ 'Lane Marking - Arrow (Straight)', 'Lane Marking - Crosswalk',
+ 'Lane Marking - Give Way (Row)',
+ 'Lane Marking - Give Way (Single)',
+ 'Lane Marking - Hatched (Chevron)',
+ 'Lane Marking - Hatched (Diagonal)', 'Lane Marking - Other',
+ 'Lane Marking - Stop Line', 'Lane Marking - Symbol (Bicycle)',
+ 'Lane Marking - Symbol (Other)', 'Lane Marking - Text',
+ 'Lane Marking (only) - Dashed Line',
+ 'Lane Marking (only) - Crosswalk', 'Lane Marking (only) - Other',
+ 'Lane Marking (only) - Test', 'Mountain', 'Sand', 'Sky', 'Snow',
+ 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', 'Bike Rack',
+ 'Catch Basin', 'CCTV Camera', 'Fire Hydrant', 'Junction Box',
+ 'Mailbox', 'Manhole', 'Parking Meter', 'Phone Booth', 'Pothole',
+ 'Signage - Advertisement', 'Signage - Ambiguous', 'Signage - Back',
+ 'Signage - Information', 'Signage - Other', 'Signage - Store',
+ 'Street Light', 'Pole', 'Pole Group', 'Traffic Sign Frame',
+ 'Utility Pole', 'Traffic Cone', 'Traffic Light - General (Single)',
+ 'Traffic Light - Pedestrians', 'Traffic Light - General (Upright)',
+ 'Traffic Light - General (Horizontal)', 'Traffic Light - Cyclists',
+ 'Traffic Light - Other', 'Traffic Sign - Ambiguous',
+ 'Traffic Sign (Back)', 'Traffic Sign - Direction (Back)',
+ 'Traffic Sign - Direction (Front)', 'Traffic Sign (Front)',
+ 'Traffic Sign - Parking', 'Traffic Sign - Temporary (Back)',
+ 'Traffic Sign - Temporary (Front)', 'Trash Can', 'Bicycle', 'Boat',
+ 'Bus', 'Car', 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle',
+ 'Trailer', 'Truck', 'Vehicle Group', 'Wheeled Slow', 'Water Valve',
+ 'Car Mount', 'Dynamic', 'Ego Vehicle', 'Ground', 'Static',
+ 'Unlabeled'),
+ palette=[[165, 42, 42], [0, 192, 0], [250, 170, 31], [250, 170, 32],
+ [196, 196, 196], [190, 153, 153], [180, 165, 180],
+ [90, 120, 150], [250, 170, 33], [250, 170, 34],
+ [128, 128, 128], [250, 170, 35], [102, 102, 156],
+ [128, 64, 255], [140, 140, 200], [170, 170, 170],
+ [250, 170, 36], [250, 170, 160], [250, 170, 37], [96, 96, 96],
+ [230, 150, 140], [128, 64, 128], [110, 110, 110],
+ [110, 110, 110], [244, 35, 232], [128, 196,
+ 128], [150, 100, 100],
+ [70, 70, 70], [150, 150, 150], [150, 120, 90], [220, 20, 60],
+ [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
+ [255, 255, 255], [255, 255, 255], [250, 170, 29],
+ [250, 170, 28], [250, 170, 26], [250, 170,
+ 25], [250, 170, 24],
+ [250, 170, 22], [250, 170, 21], [250, 170,
+ 20], [255, 255, 255],
+ [250, 170, 19], [250, 170, 18], [250, 170,
+ 12], [250, 170, 11],
+ [255, 255, 255], [255, 255, 255], [250, 170, 16],
+ [250, 170, 15], [250, 170, 15], [255, 255, 255],
+ [255, 255, 255], [255, 255, 255], [255, 255, 255],
+ [64, 170, 64], [230, 160, 50],
+ [70, 130, 180], [190, 255, 255], [152, 251, 152],
+ [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
+ [100, 140, 180], [220, 128, 128], [222, 40,
+ 40], [100, 170, 30],
+ [40, 40, 40], [33, 33, 33], [100, 128, 160], [20, 20, 255],
+ [142, 0, 0], [70, 100, 150], [250, 171, 30], [250, 172, 30],
+ [250, 173, 30], [250, 174, 30], [250, 175,
+ 30], [250, 176, 30],
+ [210, 170, 100], [153, 153, 153], [153, 153, 153],
+ [128, 128, 128], [0, 0, 80], [210, 60, 60], [250, 170, 30],
+ [250, 170, 30], [250, 170, 30], [250, 170,
+ 30], [250, 170, 30],
+ [250, 170, 30], [192, 192, 192], [192, 192, 192],
+ [192, 192, 192], [220, 220, 0], [220, 220, 0], [0, 0, 196],
+ [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
+ [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
+ [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
+ [0, 0, 70], [0, 0, 142], [0, 0, 192], [170, 170, 170],
+ [32, 32, 32], [111, 74, 0], [120, 10, 10], [81, 0, 81],
+ [111, 111, 0], [0, 0, 0]])
+
+ def __init__(self,
+ img_suffix='.jpg',
+ seg_map_suffix='.png',
+ **kwargs) -> None:
+ super().__init__(
+ img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
diff --git a/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py b/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py
deleted file mode 100644
index 975d07b24e..0000000000
--- a/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmseg.datasets.basesegdataset import BaseSegDataset
-from mmseg.registry import DATASETS
-
-
-@DATASETS.register_module()
-class MapillaryDataset_v1_2(BaseSegDataset):
- """Mapillary Vistas Dataset.
-
- Dataset paper link:
- http://ieeexplore.ieee.org/document/8237796/
-
- v1.2 contain 66 object classes.
- (37 instance-specific)
-
- v2.0 contain 124 object classes.
- (70 instance-specific, 46 stuff, 8 void or crowd).
-
- The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
- fixed to '.png' for Mapillary Vistas Dataset.
- """
- METAINFO = dict(
- classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail',
- 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain',
- 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track',
- 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building',
- 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist',
- 'Other Rider', 'Lane Marking - Crosswalk',
- 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
- 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench',
- 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera',
- 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole',
- 'Phone Booth', 'Pothole', 'Street Light', 'Pole',
- 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light',
- 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can',
- 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan', 'Motorcycle',
- 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
- 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'),
- palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
- [180, 165, 180], [90, 120, 150], [102, 102, 156],
- [128, 64, 255], [140, 140, 200], [170, 170, 170],
- [250, 170, 160], [96, 96, 96],
- [230, 150, 140], [128, 64, 128], [110, 110, 110],
- [244, 35, 232], [150, 100, 100], [70, 70, 70], [150, 120, 90],
- [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
- [200, 128, 128], [255, 255, 255], [64, 170,
- 64], [230, 160, 50],
- [70, 130, 180], [190, 255, 255], [152, 251, 152],
- [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
- [100, 140, 180], [220, 220, 220], [220, 128, 128],
- [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33],
- [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100],
- [153, 153, 153], [128, 128, 128], [0, 0, 80], [250, 170, 30],
- [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
- [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
- [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
- [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10,
- 10], [0, 0, 0]])
-
- def __init__(self,
- img_suffix='.jpg',
- seg_map_suffix='.png',
- **kwargs) -> None:
- super().__init__(
- img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
diff --git a/projects/mapillary_dataset/tools/dataset_converters/mapillary.py b/projects/mapillary_dataset/tools/dataset_converters/mapillary.py
deleted file mode 100644
index a881564cab..0000000000
--- a/projects/mapillary_dataset/tools/dataset_converters/mapillary.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import argparse
-import os.path as osp
-from functools import partial
-
-import mmcv
-import numpy as np
-from mmengine.utils import (mkdir_or_exist, scandir, track_parallel_progress,
- track_progress)
-
-colormap_v1_2 = np.array([[165, 42, 42], [0, 192, 0], [196, 196, 196],
- [190, 153, 153], [180, 165, 180], [90, 120, 150],
- [102, 102, 156], [128, 64, 255], [140, 140, 200],
- [170, 170, 170], [250, 170, 160], [96, 96, 96],
- [230, 150, 140], [128, 64, 128], [110, 110, 110],
- [244, 35, 232], [150, 100, 100], [70, 70, 70],
- [150, 120, 90], [220, 20, 60], [255, 0, 0],
- [255, 0, 100], [255, 0, 200], [200, 128, 128],
- [255, 255, 255], [64, 170, 64], [230, 160, 50],
- [70, 130, 180], [190, 255, 255], [152, 251, 152],
- [107, 142, 35], [0, 170, 30], [255, 255, 128],
- [250, 0, 30], [100, 140, 180], [220, 220, 220],
- [220, 128, 128], [222, 40, 40], [100, 170, 30],
- [40, 40, 40], [33, 33, 33], [100, 128, 160],
- [142, 0, 0], [70, 100, 150], [210, 170, 100],
- [153, 153, 153], [128, 128, 128], [0, 0, 80],
- [250, 170, 30], [192, 192, 192], [220, 220, 0],
- [140, 140, 20], [119, 11, 32], [150, 0, 255],
- [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230],
- [0, 80, 100], [128, 64, 64], [0, 0, 110], [0, 0, 70],
- [0, 0, 192], [32, 32, 32], [120, 10, 10], [0, 0, 0]])
-
-colormap_v2_0 = np.array([[165, 42, 42], [0, 192, 0], [250, 170, 31],
- [250, 170, 32], [196, 196, 196], [190, 153, 153],
- [180, 165, 180], [90, 120, 150], [250, 170, 33],
- [250, 170, 34], [128, 128, 128], [250, 170, 35],
- [102, 102, 156], [128, 64, 255], [140, 140, 200],
- [170, 170, 170], [250, 170, 36], [250, 170, 160],
- [250, 170, 37], [96, 96, 96], [230, 150, 140],
- [128, 64, 128], [110, 110, 110], [110, 110, 110],
- [244, 35, 232], [128, 196, 128], [150, 100, 100],
- [70, 70, 70], [150, 150, 150], [150, 120, 90],
- [220, 20, 60], [220, 20, 60], [255, 0, 0],
- [255, 0, 100], [255, 0, 200], [255, 255, 255],
- [255, 255, 255], [250, 170, 29], [250, 170, 28],
- [250, 170, 26], [250, 170, 25], [250, 170, 24],
- [250, 170, 22], [250, 170, 21], [250, 170, 20],
- [255, 255, 255], [250, 170, 19], [250, 170, 18],
- [250, 170, 12], [250, 170, 11], [255, 255, 255],
- [255, 255, 255], [250, 170, 16], [250, 170, 15],
- [250, 170, 15], [255, 255, 255], [255, 255, 255],
- [255, 255, 255], [255, 255, 255], [64, 170, 64],
- [230, 160, 50], [70, 130, 180], [190, 255, 255],
- [152, 251, 152], [107, 142, 35], [0, 170, 30],
- [255, 255, 128], [250, 0, 30], [100, 140, 180],
- [220, 128, 128], [222, 40, 40], [100, 170, 30],
- [40, 40, 40], [33, 33, 33], [100, 128, 160],
- [20, 20, 255], [142, 0, 0], [70, 100, 150],
- [250, 171, 30], [250, 172, 30], [250, 173, 30],
- [250, 174, 30], [250, 175, 30], [250, 176, 30],
- [210, 170, 100], [153, 153, 153], [153, 153, 153],
- [128, 128, 128], [0, 0, 80], [210, 60, 60],
- [250, 170, 30], [250, 170, 30], [250, 170, 30],
- [250, 170, 30], [250, 170, 30], [250, 170, 30],
- [192, 192, 192], [192, 192, 192], [192, 192, 192],
- [220, 220, 0], [220, 220, 0], [0, 0, 196],
- [192, 192, 192], [220, 220, 0], [140, 140, 20],
- [119, 11, 32], [150, 0, 255], [0, 60, 100],
- [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100],
- [128, 64, 64], [0, 0, 110], [0, 0, 70], [0, 0, 142],
- [0, 0, 192], [170, 170, 170], [32, 32, 32],
- [111, 74, 0], [120, 10, 10], [81, 0, 81],
- [111, 111, 0], [0, 0, 0]])
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Convert Mapillary dataset to mmsegmentation format')
- parser.add_argument('dataset_path', help='Mapillary folder path')
- parser.add_argument(
- '--version',
- default='all',
- help="Mapillary labels version, 'v1.2','v2.0','all'")
- parser.add_argument('-o', '--out_dir', help='output path')
- parser.add_argument(
- '--nproc', default=1, type=int, help='number of process')
- args = parser.parse_args()
- return args
-
-
-def mapillary_colormap2label(colormap: np.ndarray) -> list:
- """Create a `list` shaped (256^3, 1), convert each color palette to a
- number, which can use to find the correct label value.
-
- For example labels 0--Bird--[165, 42, 42]
- (165*256 + 42) * 256 + 42 = 10824234 (This is list's index])
- `colormap2label[10824234] = 0`
-
- In converting, if a RGB pixel value is [165, 42, 42],
- through colormap2label[10824234]-->can quickly find
- this labels value is 0.
- Through matrix multiply to compute a img is very fast.
-
- Args:
- colormap (np.ndarray): Mapillary Vistas Dataset palette
-
- Returns:
- list: values are mask labels,
- indices are palette's convert results.
- """
- colormap2label = np.zeros(256**3, dtype=np.longlong)
- for i, colormap_ in enumerate(colormap):
- colormap2label[(colormap_[0] * 256 + colormap_[1]) * 256 +
- colormap_[2]] = i
- return colormap2label
-
-
-def mapillary_masklabel(rgb_label: np.ndarray,
- colormap2label: list) -> np.ndarray:
- """Computing a img mask label through `colormap2label` get in
- `mapillary_colormap2label(COLORMAP: np.ndarray)`
-
- Args:
- rgb_label (np.array): a RGB labels img.
- colormap2label (list): get in mapillary_colormap2label(colormap)
-
- Returns:
- np.ndarray: mask labels array.
- """
- colormap_ = rgb_label.astype('uint32')
- idx = np.array((colormap_[:, :, 0] * 256 + colormap_[:, :, 1]) * 256 +
- colormap_[:, :, 2]).astype('uint32')
- return colormap2label[idx]
-
-
-def RGB2Mask(rgb_label_path: str, colormap2label: list) -> None:
- """Mapillary Vistas Dataset provide 8-bit with color-palette class-specific
- labels for semantic segmentation. However, semantic segmentation needs
- single channel mask labels.
-
- This code is about converting mapillary RGB labels
- {traing,validation/v1.2,v2.0/labels} to mask labels
- {{traing,validation/v1.2,v2.0/labels_mask}
-
- Args:
- rgb_label_path (str): image absolute path.
- dataset_version (str): v1.2 or v2.0 to choose color_map .
- """
- rgb_label = mmcv.imread(rgb_label_path, channel_order='rgb')
-
- masks_label = mapillary_masklabel(rgb_label, colormap2label)
-
- mmcv.imwrite(
- masks_label.astype(np.uint8),
- rgb_label_path.replace('labels', 'labels_mask'))
-
-
-def main():
- colormap2label_v1_2 = mapillary_colormap2label(colormap_v1_2)
- colormap2label_v2_0 = mapillary_colormap2label(colormap_v2_0)
-
- dataset_path = args.dataset_path
- if args.out_dir is None:
- out_dir = dataset_path
- else:
- out_dir = args.out_dir
-
- RGB_labels_path = []
- RGB_labels_v1_2_path = []
- RGB_labels_v2_0_path = []
- print('Scanning labels path....')
- for label_path in scandir(dataset_path, suffix='.png', recursive=True):
- if 'labels' in label_path:
- rgb_label_path = osp.join(dataset_path, label_path)
- RGB_labels_path.append(rgb_label_path)
- if 'v1.2' in label_path:
- RGB_labels_v1_2_path.append(rgb_label_path)
- elif 'v2.0' in label_path:
- RGB_labels_v2_0_path.append(rgb_label_path)
-
- if args.version == 'all':
- print(f'Totaly found {len(RGB_labels_path)} {args.version} RGB labels')
- elif args.version == 'v1.2':
- print(f'Found {len(RGB_labels_v1_2_path)} {args.version} RGB labels')
- elif args.version == 'v2.0':
- print(f'Found {len(RGB_labels_v2_0_path)} {args.version} RGB labels')
- print('Making directories...')
- mkdir_or_exist(osp.join(out_dir, 'training', 'v1.2', 'labels_mask'))
- mkdir_or_exist(osp.join(out_dir, 'validation', 'v1.2', 'labels_mask'))
- mkdir_or_exist(osp.join(out_dir, 'training', 'v2.0', 'labels_mask'))
- mkdir_or_exist(osp.join(out_dir, 'validation', 'v2.0', 'labels_mask'))
- print('Directories Have Made...')
-
- if args.nproc > 1:
- if args.version == 'all':
- print('Converting v1.2 ....')
- track_parallel_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v1_2),
- RGB_labels_v1_2_path,
- nproc=args.nproc)
- print('Converting v2.0 ....')
- track_parallel_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v2_0),
- RGB_labels_v2_0_path,
- nproc=args.nproc)
- elif args.version == 'v1.2':
- print('Converting v1.2 ....')
- track_parallel_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v1_2),
- RGB_labels_v1_2_path,
- nproc=args.nproc)
- elif args.version == 'v2.0':
- print('Converting v2.0 ....')
- track_parallel_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v2_0),
- RGB_labels_v2_0_path,
- nproc=args.nproc)
-
- else:
- if args.version == 'all':
- print('Converting v1.2 ....')
- track_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v1_2),
- RGB_labels_v1_2_path)
- print('Converting v2.0 ....')
- track_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v2_0),
- RGB_labels_v2_0_path)
- elif args.version == 'v1.2':
- print('Converting v1.2 ....')
- track_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v1_2),
- RGB_labels_v1_2_path)
- elif args.version == 'v2.0':
- print('Converting v2.0 ....')
- track_progress(
- partial(RGB2Mask, colormap2label=colormap2label_v2_0),
- RGB_labels_v2_0_path)
-
- print('Have convert Mapillary Vistas Datasets RGB labels to Mask labels!')
-
-
-if __name__ == '__main__':
- args = parse_args()
- main()
diff --git a/tests/data/pseudo_mapillary_dataset/images/__CRyFzoDOXn6unQ6a3DnQ.jpg b/tests/data/pseudo_mapillary_dataset/images/__CRyFzoDOXn6unQ6a3DnQ.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..deed3177806e443cee9b00e0fb0e80315488742e
GIT binary patch
literal 1227776
zcmcG#Wl&sE7bV)z1Sf>x!QBGE-3hLZdlKA(Yan)4?Io5WMlm8;2l)#ymz3kL@q7oUUx9}f?o@+C1L3C$}yTAEkX)bz|eZ1jw;nW(ARML1sb@(BtG
z(y@t2iSkSG2nh23CkPr24h}vZJ_P{*1wR8d1ONZA{p$n}VdGU0CIHbG0q8_%Kq9n%
zzW{Vk@A(YvzZ&4b8yY$g;~6FvHV!V{(*VQ^06H2Fh>ih#_6!5#X>{<@a{va>vzH8f
za+t)R4_J&IB>Z8CdDu+yHC?2-6DQ09R-WNFxMbuMl&@G=+1NQa1%-q~M8(7v-YP07
ztEj5!=^Gdt8Jn0|+t}LKJ2*OddHeYK`3D3>L`Fq_iTMgnN=`{l`<|Ww%`Yf~6&071
zmetnPBN`f;np?Vie)aYt`v(Rmr%=-~vvczcYwH`ETiZLkd;6zn=NFe(*EhF!|G|X@
z0RA^vPxt=@?0>>V^n?o?0|SVG^&ebl=zdQQB*J*cz=!!#4uti=gP4&&44XtgF|Vcz
zhe<&9gw)D&0+)#{&`pWB}LCFVLVK
zwDnd9F{Slzf7qUqXf9?VRN+{=vych;UVxHBuV&v-JeXt2khq;@PZ=`~1<2Wfvr~`D
z_OkFkZ!g(~gb3=;7a&M<&_X#lv^8{f(7^(#Ss+&Mh&>IC`xfBr!7DJO%z_fw-7W>J~+{TuuP@e+k>=e87
zsII~S+GTb+7=WTcYA-O`VP_`v#bp7t?!`p`96(QU67_|8vQRM|9j;6S8hHz|+Q3VI
zHGgcMgmXqwknD4@9=R2~&j)jmkbo2c|9?Aa#Ja`H4Rb^sw%c3I;%!5^pi}rhJCI!I
zWG1Izkx}_0@hJIDdhu{y)6cJ(?`f(057$H}duoe^(dhn`T}&O$G{a76$0{3&OIox#
zw*^6=gf5d#`{V?0-nWTK5_~JvbStC_L+dnbvZ@SZ>y%TJDky^KS$k^l
zN-CtI+oU}0kXPzIfH~jP?RDu5Q!Y&^Y}ly5eW_>Iv&42Rui{WiLfvAD@>y##BqG%5
zOE|&cjT>RTily|Y_MyS54<^~dWD7ltMWzKqC48%S^z8otPKZc&sld01UjM3J9DZ!c
z)Zqs=3f;oz=7(*3i^~E|4d*f1%6;-zJEVuwv?qo?@)x;OamT!sN0{Qb_H>=IpI6W1
z5!oz$n|B5Cqb$U{_@%PHd}BXZE~=qOLTP?v#elSx?X>&cMMo5v>~ROfEQ7PjE%O?G
zea1bikbL7W^Tmdl!Y@EU}KEgK`)$MG7$Mj{M>aQU-J@!Rg
z+pRKq{_%=YHKojn>euRIuM1}c9eVe;aeqv?DTpHo*Nx3WR%@Za&xXxX$sew4qZp{`UN|q(=hx020_*N(`pIZGX1SDNf(-}
zkMzWTx7`*xR(z;G_u00kaAT2)G@|nR%(>G4+tL1F1LphfWcJt9%*#S@mCpFf_p{l*
z=PmZWYdiaA>(^wx=&OcnBJu@`j`KW(f?|FLE)7=x^uuBjYAyQocBcN8fzFv5(bL(G4%^uehpL`Y&=+vj&vgve&jU&17bTl!FME`k`xh&Gu(BGe
z#F32X4e%{p8_`oA_=%W*^*F1SXmK5V!R@CuIc_G@){%#bpOtcsJ2e9P%pK8o9}%Om
zMcNYH&1x|cxi^0kM#UKhm#O^&cuzYrCl>g`$%^39aw6v!R=+0@Ev*+o`EaSdEY-zh
z{;aWNW(65s8boZ5dd$t(BK|mgRM(V#tW31JeaCzabB_8lR{F=PEH}rr%x%XbE1|gE
zAY3>DDR!*8bQs6Wk7J3tRZ1jYW^j@4f_nCMG#{H(7B+u0^~`er{6jA;LWmZ-NFF3F
z`tVK1F`Hsuey!t(@YEnSpn8ztop_){BE90RR)Et;P;1z{L!f<>&<}J0?`}Mpc1nM5
zz}?BKX0uooxZ4V8D(g2KbU9Np^6%n|&l3MXW8+HCB{=HqM<7>^&xLVz!
zLug5&W9^y*tQoqN2U4wOC&Ceat{;Q#H|4}hJ(ZP!jR%$=*OAe9{yGZ*dSv(g{>Llz
zK8H-EQx_~yybM@Pr=7l6J-5eqTAPW=tpw5?{3WB{z^obsgacWMlgadv+WXd#ViV$#
zqjgCk?_sn$RRn$c5RB6s;5xU@j+VSdXbb7-kThld&V`!3|^bQt>sKuOnDwTZ7``rGvbCrTJGxG$b2Z)
z)wNx307?l5zV|NEOClN?dd=IIRvpPSZqUy8*CF-=enO)GjbZuz`@*HLsRTvch7Pji
zugP?WUKT{j)x^%iF+9>!`0ts2e4(CkS0W7Ss;qQwn=w6_a+;&)mJ_KG7PD0g@wmc9a~g5IE4@d5(U2FD
z=MA#DmrAT(@R-B6@-&i-+V{HD7cDVDgcFZw&hT3jmKCI;LOj}BkHM5n#)`yZ$&~Ax-c6IZ`NTisvW@
zC9ky*dd$l})iRZ>pZ@-@4>Nm`2#b5L6JAVSL-Muf+dE@*2Yn_MKU
zXWS{KofbM%C{cRfg7}siSh_=}w&8MARF}c5#N_yJP^js~m7(WZRu_Vpz#S;(S36vd
z#cLH24q8vXQFZY$uc>dEs*)b~WJG^x(Sg{%BEaXh-R^!oavp25_99++D9up#1Q8oC
zQ)J@OuOpI=D>J&O6&vA(4@c97_0aES6R6E5soH_QiuDPY42Ty6Ja9<}R|q={g+Jw&
zdlXL-jgyK6TdXIaV8*$$c6W81wys(wuh_$WeWh1#vUi13(3J1S$N7NW8r
zNVcE?-|O4p$M#s1oasm=+e?TurcRaOT%B;^wwVtG%5D?|O}7SltJb!?njU$VlIE;X
z;MfTIA{>(x7**D28qi3k<0)0{yEt?&>U--8F`$t@5;%)EN1OR#+Z5*!!SOeMgoI$g
zG|lWI
zrmE;7pLqOZQFce*gl#w6((fUw^$j{o+%y9-TdC5hn+I367kCBXV@k!2^AK
z@qrgq@#OwC(cm*@yOp13ITt`1GUTPrj*5*8KCG;)SG{7&euThA23V$c?o
zTt~QEn7moMCa+FK7K>GIS(zVuIlI{R0!J6gsbYiW*fbm-?C|miP|o*;{_lx4&>{uYw%p>mR$Q
zAOX*Bpc{7&Hc|a-H~J{tN~W~)v)vFctPhVCGuUZ=_9||Tbg+WCL5GLZUq~h;$t;xs
zf^R*Ec+)1GI3HWs!s)x&x4|THY>1!S!tt&pnCn%dMN{WHv9N1ioD#)T@#1V6U*$}~`mE%syD#ODTW)9?O}N~o6`R)}n|IAWR$;a8L(Tof|um0+TE5D$;
zE4_mYh8NIx4vgSOM{pA6Nb#Ccs%&Y*aDl684GhCljkp;kHOj?#cZ*+9D@G?WQ*Ms*
z@Sr?X5tH$rah_i)87|QemV@uXSjvf9j|vQKDTsHS$}NOLj!Cg0J2V7JS}TZ_qJ%%D
zuFCJxd6mS0`bW0>HP)SCB{mXXI`xIn8No?P`$In#-}>RfAX=v&RrP`TvmirvJ8Q_f
znYck7_9TC}YNwzob7-{7>sGb(AG9M)k~1kazYJ>IpkuajPnkl^NZv&rJl{?0q{4tM
z>wAI)dqGbOj4y%j!VT9*ipJ+Qg*28ENGGy|0IRvw5HGEaf+QgummCG%p^w!mGC?zS
zojr%F&Ete0TcaMGBBOm%!lm_gxiq<#ZGa#4vcScDl2>&p3y^mGsBJ{_I&)*#M2Neb
z?_xN2aWTV|4ds1of+G_;?_om8>@gf}*za#E1T+M8vI8&RF
zHp_vlx=i%_Qjx`KSGjW?_N=W9Yp60?TtFrZ_E*ZZgG+^+0YdeZD`I#0#Mq$=m|li4
z=M)v6#qYqMI6gKy4;V|0+KyFrX4I94Q2O$!0l_Z+d2LL+!X0}SxVz&prSz9>Dl2(c_ro2a^VUFHc5STs;rJla@1tF
z07PGN$(LIKGo;;r%=BC8_a>%=1e#p^$fN>l9mM!s{4GVs6+!!taMi8NhaScxXMLZ?
zIi_8HN7EkK@1E2jNWhKH`co2=2w}QWwD<)_1V^09HH}M#$_rAOf}LspiR4}?ISTp~
z3Eo1ULB=Y-NxDL>M-0d&B9cwb-#VWsL#TuFCph@a=yw*rTPd}tLF?q>@I)TZJoK2;
z$#<6Vl}lXwDmYk|=0#a6E#BdLw<7D4n8v3wP48kTu-1ak*cfjmE|3?h%=`PuiKhux>O%s43lgx*gOAPp(Q7esMePrzfFFvRo;lz@Qkm@
ziXb8(*qJp`z-BF;VvLk!`tf3BoAL)GAq5^7Ou6AZxIr_bE!1V)z`03;x+;r=k$#{&
zHS61X=}j9BJ$v_0H?ZH1ZU~v`p?g!_0d&2&5^U7)@Ze8
z>v{advKmQB?y7yk9G}`=HSp=Am8NjL`5eMQ^qfC8%eORFmf|)P;#*=3D14z(Dxk_n
zjr(ns1o~rujUSz`3Skrk5n1Uk!ADM!{JKrQ6l9+xz%EqkL|U%upQ&RT5MUSd%NxNaQ;M9)grHS|hN`{YY)|ghQdx}Hr
zKHD)i4sQ501|soe_{m>OId%v9kX<4T=5XONF^7ajnEHZshu!*|W+K)IynZS8WEsa-
zUP;oyBrHy5U~6Aryen|==;`wmP}GoI@{w;JgwsKr*nin0_}2$EA_A30kRhB5{{1;z
z_B7`j7M8)i7Gd(dN?v+e)DbnwoLezIU3(EA`)pA-*FV5-)*EBtz*Vcdk%}!p&X-co
zu6@|xM=j2gACEJs-fXqst^F`;g!E(GMctJ+mAK*pSfQD*;{=-8zeJ$x#fvfamP(k<
z>~XxHbd5ei>jxi^*9}uIyB~&wN}mI7XNDxi7s|9GyR)oWJ8J*DiOp
zrtn#S#c#2_12tGETV5%RiKp_iRLNW4cTIuzf}P`aM%=XBew#of>C}k5;8iAZ8qImH
zCs||m+t_U7n(fB-DBZOr?0*0srYsIGBQ5S-72})0TiDw(RpUjjG1Fc9lPN@YXXaD;
zfm=VDTV1~H-upVC?GrVtDrE`$!Ii6DjsJxN(Lh<2z6f4M(q1Dd0ghi0v}*WiMz=tC
zYcX!v8P;^l(S$Ds-u=->E9RgLaGt(Z1YQbnpVyAmIgxzb;38h
z3Dn#0m&0`fMcY|DPNZ-=o6Pu5amt{Su-Ck~Otr?!UF#r9Y7Q~C;?pXL<{9dxkuB4{m`UoFo|=aAD$rp0c^=T0W8Z9F
z@QlgE%*9zv?^fd3R7PzW-Wr7@wa-4xfybsoszvka7*r8#`E
_Vok1EV)qw=;?WbZHj@hN+nb
z3c`?!KlUpSk1oNS^js`pHmZ;~D}E3v9+t$RO~b*=%ZxUeEM|^BF-u|teb07Q9K0Sp
z>L?Ib-{vl`>z^yvNU|mAT{tGTLsGsM=Lk9YM9j*=yu)I4bmuL!#}1(31E2i^VD(4n
zx_cz$Ui{%g>2FZ>IV*5ENo0+S(q3ddl(e`aPt+q~0xrkLz~-vXJ5{6wdqpC@0Mb!Sk&nN#Hi
zXvd+pCN;fh-dWO5q{}2(>_W<^pf9=Z6glGRN~z|S@yW>?>73dI1h_&@CCEI
zKO+;$=xxME=!TQ{qLS!aQHhZHm4di0ysDfs-SsXLktQ3^vB-d=viM2FgfM;A&J;;!
zvYYs1STE~F*54HS^NT_~pjP1`aj3P??1mq0y9bOzaxQPUjCf&zSld|_i78cR3_p%S13fS4d)
zeU7q_O%e@hu7?+*lpJ`pFQD;S#f+wejV;@Wm~-dYy9m+!{#?cPHRev0U{r0eQ%#HB
z2=DTnV!=u>ZhM)(EwYbg8Es<#po&}n;jJD~kl%>PJLVkIf~y8(pmu$Y%w&Rb8>Rn6
zfj<`9{&sZv9{^XIZ{Q$IZ9`@JO4rPKfPFTWoaNA(B3@A?6*EArGUkExqgRlm*VIC4
zfZ&hTSEhh>YpjXx9NM%g5plnxc*$v+)}vN8fRhnZ2tnR{%3PeT_3urOjko}1k#FVHO2Pohvxiiemw=y9cs0)PkAD$R
zb>wXzDr2mME42@d)N5g1O9en&M`b3lk23iOH6V>ry5=EvaX{
zBASHVbzOw^=c>s$igB|KJ2WH^kng=i6B5LQQuLfHnrnrI3u
zurnMY{mU~AUE({C{eqUG#*4EJC2XhKm36!E2;XOGrl+2*2ykXeu^hgW6GgRe+r-*LeGTCi2b!4nRqXiGn~PoirGp!vl9gWR
zo+5LEH9Rpt_g6CT*f>6asQW0!e5PGm0W0;D9x7M?3qX^33r-3IbwJ=)0SQt~JRKgZPAcypx8Wd<@8&Gz1e0Q=~F7ULV{f!G)
z4IrTGb`e6Y;~1lbYPy?%YUrRtwc&4^)BxrlN>YwkYU1azxS9;GLqz~9J5tMmF+~S0
z?gFF^fub=;-03wW>-Iih6_~83P?IopvrCK`Ok_2!EL+ui0qhm8^K7pF@IJng7_iX`UfVD{YCLMZXoY04J`R`nc7O3c*LN`{Oc%|E~r`n5_rDEnM^
za`WhPWG_jGw_g!`!GUt14gr9@lrjEubxHRI!ZOM|8xs{chQzygLea`CHT)^+
z2gN7$^Z%lw(>5Pkx^7q~tz4Z>l5uY;4@L9^mzSNY9S|1R?j@E3ue)<{PO3zv9@wg!
z{6>?yxLbC10vg}PZ-E3CYJsRW5sKKJ1_1Ro>BnLHt2=MdyRpED;!HL`SU)N24+j_T
z#UT0nVZJiAJ^^r}em$U<^=alhHE?rVY&@3gPlxCUrf~*mqDZD3G|k7-mXKqhL*s?M
zBq3*|a5*Zj&tq>#e590xOw9~J^jGf;tIH-Jd%mLOpV1st-D=E@6&XN~s7kE9e*i*;
zNhXmyy?S@D+YQOE9JQ#HrfhL=?9{;6~mtH9hn%mc7s_-k0K{=%OT^<
zI3p@Gh}*kgMnOES#qVSnLJ*GJvHh)T<~$^iuiAe0(c&a4`=SJUc8mwcj&2uZq#XzK
zHmBT8hxA}K_Uo!U=iawFMrWfh=-T6}0$WcR`^+*0KUdecY8~@Se)>7n7BYCZqt$HI
zDpnY7KrJl#P+paZY)L(WGBx)JKR%`#J~)#aXgW7*5vc8MM@WAT2oKuSceJN@yfCaa
zhc!aoD>s;gFlN6oe$=~e7Oj}CT(Rg6@~*^IUTGB(w{TudX)v_gC(vbk=2c%x`RL5n
zV`>mx$5->S>8!GAA-7rQx7rYNd*3+njgQ1zakg|19S$2VL>B4hivZGvElr8Dq!f6$
zc<{S!@@!fH-o_*q0CPVzwGp3Sx8df-xKY4r`-jV+%ZW?f>lXmw2H(U>r0J2Q!byQj
z+Z0q&SU7qUKfJh#HoDB)8pNSo$_e{X=+6=FaWDxJ7Ga!QD&!v^_bd>t%c2IGHH{D|MgCM*K;81{nDxF%weysOU3#u^^@B^*JA*^qoCQKXU{ZYq(Td)-x$rzFIai8GdJe%@S#ry>h<*mbI?
zYoeUPzOIbudnKG|TY@!NnX)*@4~DnqIBi?dSoernW>h$VbbL7lG)W7a`Q4QnGZ~rt
zl{K;=)QJ3iwzI6~FtG)>aV6q$Z!Wz@sP5l@8j}3=Qwgt|u1SaY$qi>kUOuK`NBCj)
z01Q1Fo^gEk$TC2|Id+Fpx^royMokbR)T|k7F)w!Tb(vY7)Iq}6>fHwjt#ffh@M$TX
zZ?h9yvo%5#!q=ir;^KSfdp9e(3#4<~a&a#|y+w)nrJ^Z3zkx$zz%`xIi@uU)n*ka}8xhB`1$XWrX=u
z`&KR6e%KjG3=uDZN6vJ$qP+GrtbW~1&;i-#XTBQafg~0ydwmyYNoEtlN!lBlsuAu;
zibF%jZ`2PPp0s?~?CKy&&p~yM$w;0Jr%%YKc1qjw7G&$9Lx`xmeRXZGk{
zE>jksZR$b--Kbx+Tpg2-v|^zQN^JBfp7RHJ=t>XxAA`43jXd&t%sx18X}x2VJjFH+O#t<({AP~F@9pE2m4ES(tqWeV_4eP)x-eoVM2D~BZrTYhwv|NzQ
z(B2{~^FyD4x?5#6+K4k)kxQV}&8s=yYb7nni?|w;n9XP8Ju_vZW-(kn@Sx70MR%>T
zXTkSvkn8P17HtuX-6MgQv#nbBD(#-{Rv_-V?A@!`y2iMzxBROwMFj=b1!ApFNRZD%
z+_Wd_-MpLOUzZf<|1xNTDbcsdbsnGU^sfSuF9*NX8l7WF81J0H8rq-}gImUahU*ne
zl;F*>!iaJ-?PM)-_rd{oLnIUBh&^k^_`spt$oM&ta8~frG|H>%VuuM~Z({bCapoRm
zRnISwor=_5TG{=k-x~La(PH>C!JQ`1@W*s&!Q9DL@$5eW=B0sC_0#x<+sl1c$RWJ|
z0tzDrSDQqp(QYK|3Tm10#@K03nzYNJNO)HnZkb{AW6{%A_2Iz2`yhAWcZSd#ju!-F
zU-C$IKWvRKN3rE;{R4pN+tXED9jSSYw>(%nbuMUhcU1T5`pAlNF{eR#ddfgvJwn(^t+oWajgJ2hLh1>tj8`h>5cP%gt$z&VUiWO
zPq9@$3iOn*pgafgzfQh&q3df6${`yoHF=EJBUBS5SIZmI4XHqgGk)FR0hy$YOEjmIqPE*6ZKlx%MY6z>k443@^1Uo;YwIA`olP=ig
zP&jAPpiNDd`sIg@?P5PuWwF?8&8kFI(^XL(q!8pkbE1~*U|yDe@t6YRqHpS>WZD4)
zO}>67>=&W^cL>k>^|7V1`p?znb1sxf8Z;HMqTWk8@nF(-+&LX=3A*{>t$Ah4Ec){0
z$`_c;K2iFX^t9uPIX0jl
z$?+UIhj~AFmO*)p8wTz+ztNu2KS1?SZm2d(O84)|lV&=YYJk_3v&;L3CG=%Zm%BU)
zkea|4P1ys#6s%j5i2i7YRGn%rO_8n10De!yb*RIYcfIhio@_)jnl=RrI3yijvHJXC
z&w!_UxKbw$)7_bK=g>3|W3g+4OMm1dvmIMPbxd--^cJGsZr0?*LD-Kp
zDAULhnGECCt{>X^!(XFuv|z?0Si_gvAOXBA`y(c}M$Fsae_iIbE||7V0*vE0N~o}Q
zepq(r5*)OW&x;2(SIk=~B|J;*9Mf?q#(C$FDb}^AGtNO(&2i@z8&K-ly}fV?gdBl2
zd@d?(-W)MB3t#gc+On8oF*aW5#kVn@DKsTNHGh|dB?PBk$y}>Qk|N!Md-b#5;!@ZX
z?CJBc4xVOZ}qsWr-%E3^ggB
z5kq5sD0M%T*OZ$6;|A$Vv+uC+!42q@9XV@ovLo9tix`kbb+{0Po!N&w>V6Tdc?h_DL0u=9AG}*Gh!N2j+S}jGze2zEo+`tN$mi3I#l5cLtg@>c^fN*%gOz*}9AW4(
z3QJd|MjA_>dfb~qSzPa@7t8gbQkjFPyF;KpHN{$6qlJ(g>h&fR3L)Q2td!I#Txa7w
z!ibOmQuUo4h)iYd&U#nnV)l2fwaS
z4;$x-`$DR8ppLDunK*t(!&x#)l$NiE&25{;KZqHOQ+nu9^X5)=0gYfxtlWbW`vsT1
zD_?rru4=gt^A&npv7e|jSI~c}fQ7|Gd09TbubaE}6$v*m{%h``i7U6MGu`J5+gUKf
zm$jaI>bjC0Q=VgK?0|D;wiP>9r>yHy8y}NapIvY#-s*Gu&8&LA`jqYDskGLGG3{!~
z(C83Ff>_G;7U1Cf3I^tNp9l-CyyuhrU$ZtOz*a8a?ZOa?meC^Q)W(w>w4|
zql&V4v$Ttn_96;Mi;0E!NXDvZ4b$X%PLud~e$RrSA^H85U(b&E)N8I+1bTxK;?{X=
zl;sU8Owpt7E@J%v{{XKZIt{CxN$WnPKtApginbg>bq#d$?HeE7>c->&WSSF|p~3uM6?k82~=RBvRqje2ZW6FW;6
z)|OAJIrVFJ$IK0dSU0MP%oc}Sq%Coz5lnsXb7_93g*4k9{CgCC-+f)2lr#3UE2PXW
z$c~#5xG$hi(($P`vuZLgWBf%$1tFYYYJX6V{)l?!%6Htl>cs3#g2(U4lB7s#{70@3
z^_l?k?;aTbV}r||#Y<*Dj-25FH@olJ-NQjVXp`B$j
zdST@2ranc3&`G%_4=wdC4es)BoNaEP@FW>pPP53@>AOKzRo)DZv*`mfkcBu_NId9hR$PmvID$Cwt#0L)B9+W#6Ds%XoAj@X-;Y|JWY*lb+6A=;fN5?q-
zfdd1g2-T?T64mW*^E8f8D1(=3Fsj1MdR&;ttX>9v$W8&MFQ;#c#8AT(IY5f#`qrF)n(gY9K^lcMZ>=DHkk7_xE}+{DJmDf}pn5)`Bq2#j}h4<1jr1
zw&YG4PB#fi(EI0dZa)a#j@RfClKSlEif}dr+2tjfNAyXjq=Q-KYo0V;PAVI?Iqr0U
zH5862y1&8}s#KlPWu-i_?m+JR5Nk=jbu=x$8x-tC68R)7db$?gwNq2#g6Yb%O-j{D
zJgVeu=dilol@vZ`WjuLwdU(x;0mcwl>|r
z9YX~XtyC?H>@p|PT&xF`vM59_EXnTat9AUjD5scP%p@6#4dWO^Q5me=b0SBqvq31?}p2*UnT$Tfxm?5DpqZjGh+?pY68o@jrsjLsK6H!SGDlxuuS75D4{kn=sqL4<~}
znrH6gGX*jr{&WLMvLplKfpjYOmS^(ef}8}Gk{r3_+CaxA>oX9VV+>3b{YOq$dAUey
z+D$u030zG`4`0(75)*32>+NznS%K(R^;OG}aaZKuX68dLQqqXvX=!rQH`T)%`xG^rbfCrK{?~D?a1zNalLz&uxh06V3+!QOwLSfwn
zY)jnDW(UyIy$*8ch821X7Er*mNqL-S&Sl=WKgMT}eD(pa-;&=}iyPH_5<3z6ZZ~Do
z^g}$2C~{PZZ>E{+II5*6lA-zMqEIf=sLg8b<#m+sb2j#UlWfo@iIa4n1Dy#DhSbe)=#^qEt$
zvf|SKZQV-U+kolVBT=N<`J=@8rWttOFrH$c+QSIm!8ETOyj|!iRL5hG#vgE920kZ-
z+UL{3-`d3pRR$I)@{x>qTSayp2fN~^em-l
zS7+U#42Qb;mHcoog96%a42HESKm4fVRWvWKHf~J@DUz~0BLT_&kDg)|`szu7IEz<)
zUMAtUfXUUI9JA15@%9LHL|E{Q)MG9hB=cpRqaaM%O}ajPXEK=-GGb_vhD=O6Fc0Gi
zxeh^$JD+diqIJ3PI%C9tpK0Dc8lrLOwOwJdBDpHN`v-`%36mb7X+SQX)lqLZ)SCsL
zNsx|x$acz!mE^qUxQfZpnlzCuX8xltxv+ibTuQkk5nm*(M+TY?9V;mwa@{=H?f3L-
zKiA>kx%o_FT=Lsfb>%`SN5D3?yzb;GN?<;A7yf{B1Lag#3QwYLCq@stv;H_8!`qxm
z(|&9QXWC7K^~3XqFjQmVzn5d<88_=4dOoJXd+m@yZ2^Ca%ALfUezrT>`rsnB45+89
z#*ep@nz6eXMexsiwR=m3Uas^kQWVRH%RCF!Lv7cFdOZ*zL3)ebBaBC)l$$`~MIDLX
z1bYoP!&)qJUiTRk{#7%$#qirz{RNiO_
z6U`q~F9v6PjpBbQT?qrvxsoP^J3Yo=F3UIJe{azxipC6tV*Qfjq!B+9DZM!eR3DW%
z7c`1qibUPb*g1>_8LEM`cFSKK);jym&A;Y-qH~#DC9unVbpea~N{(+B7gUKCJ6HN115i82WiL8V$is51wOADTQKO8$lGVYpZ!jsg*X1?|2|=ch+u
zB#FT&>Aef7eRpQE34p?FJjTupesRQM@g9~Gb?$5PYbW#3qcIM4u=9>^Vx0HM14q64{@8wkJ)j$huLg-F3ra_rUYY3kz+
zot~^fy*YQMCOSSr{8nPjcpzn|l?>mGknYmv3#TP12lPpI)b~$)BV)^jdi|ZSrXhtJ
z9SjgPZx=k)|7l^tKXi(;#DgqIa2i;0v8(ij>g<}#a>>@t3h50YPwl=BA)kmP4>HBUo$QE(1Pk^(XvE+Wp_4W{Q8qg|asdVGD#{!c
z*JtDv=j|0O3(M)(;oIK(La%3fB+u8l*UL65lZw^6YQK>BOJpoH?$0^I<3M$LI51m^
z<8}`cs7rQ*ago2u2bCAduFv5UeEj7@ab3tCCF8Hd*GgC=7!dpnU6J|Q(zwlDl$|un
z*pKiH0u4Yn89Msw5UgEV?3#T4WhEBdK~@=cCT28aoymeZ(!4uY}x~b
z-ptcAGo2LUNuDQbWTuXy4Ls2{fkX@N;IB9Xo4T3>7
zies?YsDs&QhEIM~qKj;(0e6o6D%F3=YsBz|5tY!U81oo5#Obj=&K;njSMLZoUIp&N
ze@6u9du&$nOMmFEGCBDNcnIL=+%%k+8)?^c=Tj4tzF7HuhdN!F1{8(z2W5dq1Ye=A
zCnB97iQphwLO3sgN(Kge&6p&$_si$6Nul1^MOQKYa|hB0F_KADCXzpBnlA4H$ALyL
zj@2psi@)AKzmovG)B>-^0u=_v)HGJTWbvPrA8_U*487ze$KEeu>$x9xRI9xUdB6IJ
zq;E+29&1zDkMc_G-2ic=cPU`2Z>GCr(MN9B){0S;e7o}agLj4y+Yrm
zYQO-eI1ZH&`qev7;zB6RMIn=5?&U~FW&
z*9yywZXe<67nEV!u#`BjKGVs8mfsSclBbSeXm!r5+io>
zh&^*yC)XRsc08lVfZ@d;T4mGK<0*2jL0+nDI<*lfV~!T8JW=^`pbmc^@kJh2wcv%Z
z<%aH8V*0GZ&qq0?SFTdjj;BeyD!Wo#VJE&{5gqfq@5!-f|EBKNXvGu7@pW19?OD8W
zrVu%%?kFd-NJZ6N4y;~QlI!wy2`{as6N4`_HJ@*HVYqB&BeoYtPkomNUxzA}2*nC1
zyfl56oN%n~Wer)h~?`Istqr3J;Y6VDHIE(hI~21dY)X)+1CLL+eAg!CDVG3?3?u
zUP@v`=r7m{s6iUQc2?;WP#im;08a9}K2(=KAQv*BdYOwJcj0@TpN=C7J64?uLtBT6
z(l{=k%8_*UsKCSunP_&U#7||>HgXHvvEb;T_}g(Q^(g>dB6Yd#^vFXcc9MEP3J5Ye
zjvhjiAMx~8&BsQ$p6vVTtt_!2V`O6qDZg3%xgJ`WiJIL+5o3u#CoCU4JolvEc{;85
zzjmSz%=dfMp)4VdPk>s5S+UP+a0n0jq%oeX^I}j(^fN{t0Za11?@9E%WK!|^F?~y20#Rh*Q5^+XnI9P&ufI!{
zUI?POf!$6h-cT~p>u!gVK&I5f#q~;Z4Fy&lTrYI**jRH{6c0_Hr&X>-GL)r%!b@ze8FU=w&{tGwyHud+UhS{#x5qmx4CZ
zP$u?|O{S)EYyd4)x{~f&W+F>_*Hanux<^zu#})Ob9<1{b*Wx!qqb3exM$;zGAlZS+
z%>G+8=ChQJ`6DIFwGM&Zy?Aucg|{oA*A!A*Sh_dRmbC1NHbVUNR`5>!80oSj6ujKU
zl=_#Mjp55ho*~4$SrWeQ;QWqI^6l*NiFr9D5yPkx!d0iyXn5Y7bY|tErh~_s`fIa<
zdmAIz-m9&SRv2u$HH^bNm0%o2p5HmYbnZ9
zXbn076A~8wnMkMRL;rfcy(J%tPA!akC_9)V3_Bym3pv>+7?gxY&~8vI7xE9F
z_A6~UymoaHd`bAsDVyx)II$**hQ3LE@qX>m*?7{xAgq{TkTz5Os
zCS1olBg`_=NDnSY8g<^8q%8TY7DDK>
z5xmwh_-;@NIIFl8jLvPO^<2n*X4R%mm;3#`p{JSUfLpJOU!TsYp13C%r84#;{&WxS
z%+=@-Jf{&X(HpTFA25{Nv~_m^xz%zoR6A9_r!a_A6+SW@+2xCYeEVBtwx?VB6%&)6
ztK-D7eC@02voGM+4E)779*vZ=o8HAYnQ|v7M@mudr!u;vv2V7W3sODLS(!#HC;Y-6
zgB6V}PE@H}UPo@ktHtUdIL^2P#IxJK>8Q76`f%3rBJE1!V6OsGYrZ&l4;N5_Z5v8#
zEt0|stq)AmwGY2NyMm|`D}FlEh>Y^~ix+cX`w_bQl}#v5pEiV3olBWGPuj
zfU^1O#QMa+Nn6*=>EMw#O(fh@gskkO*&B?hCVl&4KJ(U%1M#p&{+N6`FP~@%81r4-v?A)2WYEhG$2@t`xU}s}tOj
z-hmAVZUv_!4%-zQL4^D5rNdE`$V|MxBFQlP#;87<=yL=$Tm0)yc
zlcBXLd#qiD5N`gJ+qbmQZmCQVQwZRtO~oj7Y45+xFZ@UYhfQc}J?_#bO%{H)zZs!YWS957456c;)Tyd!=xe
zj~8?7=Y%5jlfnD6h_=TFpk6&s3x#*+Q@@|+)5SB-<-Y3u(=JSk=0M%p
z7OEcKa}({VO3Tl7oy9T&QPyf=^E*H)hIPzNGfPrvi}kWE+{@kN;?0(s}{^_
zDp}~;2DDGzo$y-=k)U6fV~jxK1~PxTuOJBJ(_&1>`bksc%-BC&n|F6hbmrBca}~7f
zDdk_&xhLXi-4yLWex{(a`kt{VSuja-vk~-}8es)Rdl0r2nGnYPSZ(gGQdY_TAX4%(
zVOjZgmjBsWFvi44%fMJZCLVoHf6`+(CLh8?OfYCsmszEBZgSAeg=2W~k;B(73}flD
z1Q~kuznjX#8C@G|)q*^j5GBSu_V?#<2RaqX@qIqZw}hi;!;)pLpQ?*Ivw*$54}1v?
zwABE;$=3O8(=RF>L$yCddTve(w_PuyU65(m($3@Emw*ahpP0X2mo0vbYBQUpV67WL
z4NluqfEbVDxXbElk2}i;*h9?F$Sf$`Lsc9Y>bZs?WcZ7jx_`5|f2u}JKwxe3Z@~6p
z-qcr{^TK5_EBrB22Mdi#JGSwXA`ifUO~D1Rz(xlx^5->gP<%OkERuNeN-4u?LX*u9
z#c);L9mQQ8+75F0UyD)YIBg!E@E%zwzQ8l{c))$7-He-;ta}}T>ASxZWUJvSuOZyF
zHAL9{)I%z=`C(^Ge~mEC?d9BNqzCaI0RJyCYL?B#?&|Nrgn;Ay!7zUX%bt@*NlzX(
z3UqdLoUxpf=OU$c#E5qQVaIODCdZLSq!8Tp>ViE^6HB*|b;J5zgj%5I?C
z4Y#uP-*i`<1zA}^AB_`Z-D7tZx7>;tYIo;ibb*a0#(Bq^b(w9p^i)2&Js}8u%W2j#
z7evKmKMb;
za2^GSQhCJ2tN_Wb7$Z$fS0xG6D6r20QNm;!XeAzM#09l#LM;J8VO!-Y8?8u^o6dvx
z3EYDaJ&$qub*fPVvQevvMGA);ah_9cqZWgYOfbZ(x*!PxBDw6{clJ>PpuMs;=8Ka(
zF9VvG;rpu-KV<2`MG@eDttLqCP|J~|TUCYk=0AY#0b!}9ZGutxRfW%Og%W`js^zY`
z9`W+=+qsMRdVB3!tijjWTX96_fAH@&;oC$N(X5#?BU>lP
zV{(sEak5dyxtrLu5$ihffE>G#q63lcU2Yl6W5f}Zm2W4)1P{|qPSJDr)9KEXobA_e
z97=pz)mX^t88A+*=hTlWzdccJZoIbAoN>A@Z^q-R9gwC<;eBru6)tH=DV7AA*%sn>
z;%02tKDgV-zD@8gHnyvG^w{V~XOo{*P1iWDHszHH=K~qeqx=UL>`N5-k~!Ce_Sr%S
zpM@+?*Fwz8=OXuD3WlI8rHC2);TqJR-O=v>Fj7HJ$C(I6tgRfPGdKq`kpGS|toL>^
zxC|aNSDT~dQH3PHCpo>{Y&*JUn-Cz(LT(LNP=DP>TPtZ_q{;5Tcj)967301T>omQS
z=PmezN6s>-bq`k5+AOJN?sZ(HgE3+*SKD1AS`PaNWq^l#?!D%
zIWKC+E0_6`c+ymkIfQsX=V|@eA1xyG`$B`;pO1WnG}vp;%&|H;|1vI13Y^{4rAR+v
zjJf*UR}8ltRn6_vMI-5Z^VTLs^5qOPeNj}V~HXo_vHzr1bWz3Yh*S$fiqqa!gSKT!A4uQJy&^Vh_n)_g`r~5|)L_fI|i5(1p@9IVGgEfHz9c
zJQM0_H01Zi)V&Z{R3A-4%-2Wq*Mq
z4hWTqM3G6F$uXMh%7uPbY|uGc&MsJx(N(JEsqpJN?4YO){Bx(}sc90X8raD9a!oX&qa!^-ZaoSto6$}|6m)oHS5rO;
zNxw+YkfcSN{RI11n*Ha9wm0<25_WD#HNoJ=T~hy6KYqVOwuClRZ3dzf8f@t{Jh@(RbsrB&I&fD(#e5%wFdA
z5t4bS*31a3oJwt>*9jByz%UNk<`Fzvw&5DYL9ZYj(BDxu9roS^jN`5(7^EiIv$BH{
zNRR%rs{T`B$XvOx6x)>ZHX=>37q!P4b9~C|rYfYHe_Xogh>_&?hxsyZdSyP)6pZ=)1JuJkzzh&~;BYz=ORop2`*%i-8
zO`o^7i_fpi3|O~L
z)Z3RY6S+1?&TbkDi837h?oKYe4XZgWju7DHrjfDiW8sp{D>PJ_Ddr~6G7^?-1MAe`
zbr3_)v|vO_yNAfxjTWvIKj^2V_=QPTTJ>TxYG;3%aPZj_X}anb77rxkOZo05=8#eF
z*eo)LF1{#d1$HTsYqe35{tu7}hV6kJRfZ-s2n#VpM<|8Z+Dg!B?^4$|h4gBgnv>rv
zdT}4~!3&?W+%6jOh*mm=D)n@}W?dMO&yNdAHEgb~l02m;^=AG?G6b4VAF5bFzedv4
z)-O#3J=boOt5nz5+I^^T_B~{q-T5n9a16CrTiqDxf3etVMyqDKr2nx3s`kwP7p@a=
zidpu?{vy(ze}q@PSx+v5YR9mY@^FBgElnjgR<=WNHKyyXDs@B`zQ!-z{Hqb72aIY(
ziYw~sOFdv}^vEW-c==JLdbbGwu@p!40g_-qqhy*Trt4QJri-Ba(===Lv>G3r(ieEC@vM~61-%`Q(c*wgFM3<@<;WPY*
zs3z!8siXxv(d{K?6_SNhobZZ8MHs8R-}-)u0s7^e;o0lrc_4A}ZZuOt7CWVS(&g^k
z>=~+rcMBNaQM9SxLdG$O-679bJp00A*q_Ug3_+w@tPV{|-u5b5B3gYVb?vCcFW#8z
zUJWxxgx>=bw~vP69Bs}*4O&JY)Or~C$7R&7ZWI-4~&LlkUdTa0E=p{c+1a*sEO
zq#8Y%n_uazx_BiDw)Cm}`V=WY=T3_o$@y%il##=dW64n&xn@4&T!x$W*iiD_&!}UcV*I8{OqlTK|HusuS_;
zcB!rVNIkhWqPor#0}V{6u<}*&6s8!lFu#%`)deo5_;jU9_Ob;ebw9J8qz~)J1F1Fd-pwFY;c%dSXDy
zN{>27q{?Y&e4G-AoTRH)*M)TGj#NC+=(i%8!rp&?lS(THDr@Al--2m9vgwM_LCnc)
zlHqWk2F0~y3z$8f{h$K}wcGn2JAM_!XiA?Q{_U~P&V{509G<@7Q!hLAhqS8=lU$F-4d|b>TU~br(^&cQ`kBkm-uu$CG
z>JzzI(_BhmVxZ#K31%98-NIqWYFPa7j)3*Oko!#Cw=djLclT}|F%?no%R|IW6ns{M
zVhc#u>n_;i@P;LN&|KQlAbJN=jvnQF--k*5Ww&-@)Ks|69=RDNPS=ax(KYILJ4d_6
z87?ImC>kUT4rR?*m)K*WF2gZzCY7TI60+5p4CD6wU8UWogR?uw
zIr%3<8k1yF_5@~pJxlNh1*WP=gAEB*x;DEj#*J+$elv8&64a$n*}<~q)-q0fu5xvq
z$b#MfOd(?*-?TqTCm9O66O&)JkA`MD4Dn|el~!+ggf*4Y=h)x25m?s87?nlq7O8N8
zj}+wkHxR8cDOKp%vun<_S9(Prw(>c{mrU5
zfyjL&aUMi4%
z_j4)L_z{#amyvv5tkx@1z*}}};Rk{l%K!2NI3xO%sWsi3_iy`6!rYn0C?GWd0fI=~
zBFPGz6=s@_M+=YpN^*QY`{6
zsKSKxM>HNbdkHJ(20Q2!VC-V)U=5Oeg(nud5R|EdjfVF2>3WKPxq5sH10NJd;oi$S
znBNFJ7^Mb&nDf6G30ar9K$A@FG}U+yf?Wlainl*%zfXDkWp5IMQS$+MlkdEd9J1#Y
z^nig3*(}s(z_x3-IgyUIv%Jl>RxnKdNnyK^Ux-4BA)W8yzhAVPqb$=>fGQ?$w{)s8
zAPPJknMLMqzs(2eWVk|SYX|ABa*@})6@xrM6m0Vi^s5#Hx7A!tBg$1s6Oe@t4Y|oB
zf|r!Mr6ffgi8rj-P#}sr<;{R_tUdKx{%togxSSMEE=u!45^*8@-7e0no{_{{@_>Rx
zK{9zxipfxG4kC?V)Wj+%asu~KmpCde%|+YJ3Wdu}C`bg@xhF}70X%=kFd0(RNLA9L
zRMCsMiq39pk)p%~unG_SQ%zxgI+e!ON@(#Y#!WaqQO6)Ko1sMiRmlB;QcW@q{p(ED
z$&vIo@fn^IWi-3D+d;JCjY`oNF^X6krfL8#L!i6bPP4sntmqVqMxjDXrH1n@YW>?q
zcaPgr7FsO%OAa$%iXh?N;7VWgh{hy8*E8c}@*uHAh3&}?0)0iylf$#wY!k)FBTvuM
zT?M=!Eq-5a{G>x!D4-*sCDx=?4WHaGE!~pG0ncJW_uE?;-qiVaRV!R1m%gcxw;-s)
z*pPacIqX@`%P4#hH`0P-%C6RiUbT7sZ?Vw6OIcD~fllDyVOP#-A`mXY4%pjMI#J(txfnnG2guxSjA~ac
zYT~kwkQEXcT2ZPNy`)#dKU)t81a?4J^|!OR&yt!Nym7zTiu9L78jAV@3B@8Efh4mD&xwrxRU
z9whj?=#B&et$CNaDK?U&0B%k|9KGlR!lmusPzY`{^P@q)lySQE*spLOR>=P$2t#XG
zYgAjr=_-%z!uZCO&gh3x@aJ>NKw*=s{tKaKU-^{wVZr*q|2yyI|IXt{_-1T>WxYT@
z+ob(ey-|XsAK<&twtZ5R=yQ7Chx_&0{lODk*t~#W`QK>XaW*DT8+y`HN$R6bONj5A
z`aenMt=Q-i{{alIS*oObTz#vQVM@nuq8yfUe?Xi@)Lp-RKQe86jnNG=v>Mf@wzgxstNT+|t$3GybwR<#zEKMVg9;m32A)wZ*c^?x6|$
z`C=I7&2PEJ(LC}!QJ7XK=TG;Io4Akq$MDLD?02#HX@AzAWLk;D2;+<~w>x#zEPM{|
zD<L`5_nwt
z%6}UdB=A}X7GE+ep++)Qk)eqeVPhPJYnJQxF6;8Y{bJCTNX{4METDk>Pdt^NJOmT8;TWA=jVNI@$~5H=$A&
zrJgx>Dqop$5c9C-skIl+a4ZG3@&ReWb{_m@j-lO1(;dxJ8xn7}!*O^i`!BgxS;CjWq
zT9D_CkMQJ{Ayls)zg*dA7ZE$KBSqt}Rv{s}_-?MHTQI!)Qp1oWgBRw++|-fB1PjZdr(GA(4Z*>-Q2zW&C3<+6fcp;r^+8=mkEWV>Zu
zaQsYuJSoT74&a6)Z6G0C4vqa@c3AbxrO7OAddd=X5YjG76zN%a{MNa`(j`q8DQTl2
zhQo$OE;&qTq1AdW-K-LU*IskBs3SG=I-uR2X{j%}$4&3y^
zdji1r?;*fpDpd;e80w}$U5|+1lqDDNQo%tdm>*l4%<%taDjJ)fE03T2CCy8MZG-Cx
zX)DWXe5LzMdrVfHT&B&DsY&HuDZMGqF?VUTTBmZ!wQAGGZzcS&=qh*RyL;b_Y@-{A
z;~rU5yg>+NzbRkJEn%(S#d+U_5yR``)7PmPVyn0tAk|YN(*nltrByC=pfUYaWtfmD
zz&t(IXf;Qp<7b84jz(2!-q9`>g?b>_XugCQ+5%aJXeG5^e-%COq+efVlldd0-N3-8
zUjP&y@$bDP)X6K|q5EY#7wLwJQFrjzKF1}D&X^-!*fM17iY+=18kW*fuBR{SO_FPY
z8DQW~I?mQB?|q=>VxrzT(ff%FLwi;ENPomAz=1jsnDXTmnzI;9*)Z-2uM5eg#N
z`TPYTJaco4Q}4x^qv(UD;KlnWQy3Pw-$lx~oAz}Bc{XxUJk^|HHMD06f5LI|SRdws
zYmZ=giH=U0!AG#4H?@$Pa@IdB!F2Xklgn5NTbY3KQ0#+%6
za}P5C*(CZ6%+km9Z_Qi%EiJJTmBa*}^J@pC9rAeU=qLyWCL+7hExJ%h!Cp9Ec;
zhN8}^uL|M?l`l2R(=7iPp|7;k>WOos_36@FoyjmWvk`K=+aVh)(Mj0x6#>;z
zR7mb6OpE6sMxAd-w3RtMIyX>ev8TCKo(eKH!;hX7|E)kC+pgQ#nsJsDoz;s`N#C@a
zX)Y0F$%D6*Y*r=rHaAhapX}3P!7C&1mbSn$x;!&!+}2=2BAjx?j@IRl3}vA%#Tzm;
zOQ|P!wM+~#noWxB8#bPAS*R{HR&!@>KX9m}ew@qiy*h^asoh1~iQZ~Y&eU#W??^~C
z+#3t(u?|*|kyQRP+PJ^{$k9YsbCF$Hx}o9XM2to=-+A<)LmD1gVtfD9rZcE~hq{`M
z@UW9=p@I0u9X}&V?4ms(DfQ*P!yOs{zuJphYcE3|F{%=?*=Ri;z>u^6I%O7h<+@6r
zXc(+hc`5a)dE@uexe(t8s5Te6>*vcz$oe8Jr&b2EGXHq?k}@3B&sr^1ktoGQF8%T6
z4HGU|xgTNAKgdLd1}bIa%prVyKf`fKzILNafxGUibAIFytT;PgFxNhxK0sm>9sCz!
zgzHnUF?~jpqt;}gE$k`sThD}9coj%?=&_;@)!;h9G+GX-ilfs;jskkO>I5X
zNP(_Cs_kmfegj>LOpYJr?X>bKQtZMEN@zB8!D`IZ2>aiO3IAbLTT0L4g4}rM8V?Wm
zn$)-1lz2L7S9@DMU$o+v;f`(&?5PfYIBk>F>s4GOoj`E}^DFaRSw(J68*E15v$3Vk
zk=7^%&)T;T=u{dBXNv?&qq9!eQM9fFw%Z%s0f3*T6sab0XzhD&6*}SE(N~cv;^mW^IyC*dx{V+Lhvq2oRqI$Ai;O_9F3Ml5`~GF=sW&G
zH?~x$auX_2q&s-au3-os
zuIKdAF-n)8S*C2nACifeuGpcFE<;~EOB1CCx)ZMCs+C0!CTqj|N~qPGpcPk${{_#r
zGT5lC;{)ELLyHY6<$Q{}nL#LBykDqZep|NmAw9<*KOHqu=T*i%y1(BAJpSe9MsPGN
zdQPUL9eyfITs9LQA?
zEu*6}aihHe85Y_}`cvCcK58N%@jXu0yx%wY{#x?4ltHXTSW)RaD!x<~Lqz^&VP#Lj
zw}`H8`b9ZWdv?sYhP9<}1bI11w9~kL+hXF&nu5Dy`76@C(#oN{(X&k@DctRK0JyDS
zgdt_c2O9;aMe9+M`UAzCLVWHDvgVW_Gx>VJE+CkMb!eu^Xzx_35y4bxV=0F9~gm9*da%o1san@3TYRWZ2GlfD7gK$2j8iLOL
z`y%|dq<+=A8QfnYJK{Q@%`CBPBc^72?#neJ@VHOJd|UP~E_D@eP@kENXIqQ|hLq}Q
zp9;KgOXzkg701Iw2tQszlz0Rc{{wK16~GhO5|5ry7RkNg(Ob`~-g9Ro
zye%Mf1UU@hkoR16#PPCB?)Z$&rl3DnabrY1YZ7VFy(Im+xzN6JxIAn^_m|IV2GeEL
zk6vB{)EI(qZ=`q+8r?q)p!o7YrjeeM$6#>D8nks(Ew7F0P%wr9j#)8?X4}$pUPL
zQl=z$mdou$EU0s%E9cgK{-az&|3I3q3uEkq4`gz#1*DlZ1K+s+X!UR6ePgqp%@=z+
z*X>`6Fi@YrJvBb7EzLLiZSnb}A81#JCW~51EHX$@Cg_n8IUt#0az>kMrR~zlE=!%D
z-pkRK2hyRyA+$d3lEe0SVx!3Tr93@bHD|>{A9|wPrLEKbuQZX{^l9Pm6y-j?KpR$p
zYks4<9(RLwgL9yjF#ptWUOGtCX-MGf*c)CP;);kgx~FxC<7>;}k=Fwn(uT`BhWWkF
zR@P0`PbMMzHk?i(S%A?)}FxDb|x46S`iv=X$MQg8Nr?LMk2gwY8Zz6YcbB
zNmutbrQ6vL`?f!o)_w^0HhsLTTH4N7llu=qZg&UnYmRi({V8UG@s;&<63fuXMdzZ4
zh*+h;T%Q0?JR*E#DqCuMzd+>RDL$oK)vXBHQ<$mv`_=Gev}J5(tO-2kS3AK6#OL9M62xOd?Lq_<_<8w`+_
zepmS{eT{XL8NKbMLZ@P4)prQ(5Vm2@EEy*iP)lcevv^k5iALMp%fjCv#Sc%LLSG7P
zqM~atMenK(M}U97@v9#TM(?c6p6o!vqjfbx=1Yo
z+UOz3hSg4IcTKiXIleQ*#h+RmCm$}v60?WKhN2!~^ObQkQKY*NNd8FBAj*%hu^<$*
ziC%2*A!PW1BX1@1T^R777bY238qHS|6+3?th<0h3K?zG0(nBMuOlBkTd)#kjmQAG@
zv-8>MuJQSyZXKh`MYl&q_}eLOqqpQaM8_jz>}r@UcsxL~wKKby_6{W>r;`mIRCm!-
z>`UBsMY*k!B8WhJ(QT%;Jf~?&PuThuz9LtYfp3D|k{mO(3JVELS=P6!Ihu;7Oz)G;
zt9R}f)2R8b(WGz2>uWEJ`B&t5IOd5rTB60>r|5bBMce+AMmf8wXtcwvA-g^`3=RtQ
zJ)o_l7!=)t9v~y6h)y=m>h<&D(frGzy~|s}Yj=sl*GcUzqdiEfB^N8s;9n$y1E6{mgzZ1@>8agzu`~6`Y)O^vR(!+)
zozf&jqSlheZY|-1z*P2A)x7|S$>cZjPj(VN628`|er3Vg+(2-BOeQTO6i;F+~H3RI~@?J4=!-FFZE}r2r8B%lT1T@sK4XXIJG8E!(!s!
zkxS>sugqu9o5mjFEOpzjkgZWBn9N;oTZ4D(Ynu+3{jTgQMu+8UMQx^M1qO@b{nO!U
z?9CN`AG{G9(a4YI3oQ32>FTHQaeITp&jrRaJXbf6kYE$85{m{EM*WK?UM;(vg#P50
zXHn*+qijI>upRALI5=mWB=P{Aq7f;4H55w^9aI`N&AM5*D)Zc7f#1UFD4IQv)7&4epQEigsI&9-Z`<})z-7CLI5
z#~qZ`QVk|VCJ7J(p@m%bOYuzn*3p;pz7Mk>fT5vEaLoV~%y{xRLoA_xz#A8PK-D0=
zMJ1DcsnEl8?7Xv7tLU^V+R^Z-MpM*;Q$EVipHoeOu(rb0z-!+@^<<=K0#l&VxD-J<
zRBJKLFd;?YU4KV%9o5;ra5WF?&zJR4R}H2;twTPOk9}&!`gYinjLXDkdN^AM|5YnD
z_kJr0n1IU)Z6Pv1
z=O1vF{@_RuCTjDFZl|C$3fb?>y)M(ohjs7HoDS{)agzp^S10+qzB_|{IO7<`SC0fB
z<#JU+rR9QS(T^0h8H?D0W77)gC~uI6CJ?zLrONwBwv)=}ZX6w0j2j}QO|xDv1R=Xt
zkXg!`magqAnBcQ5Qb%VpV#2OpeIq&7Y^RW$p`NUYj>2~w>Kv`HgdfdQE!ZUHOxHzy
zc50ec$jLqnZ>)(5J<&klND`)C=_M}WmI~vigu=AfbOOiZ{g_g$fa*AS_SG)gnoHVmJ&x@TT@DUP;lk(Nhv;AJSLR$22R6qn0Cbs)F@NKXh1~1PV{#8%L)_#o8MI1A;eqm;K(f4!DepM}5zAV$@3_>AK7U`xN2t5)qw8-?kx
zmOO#`(F($0=}ziMQL!qq-9O(+vT@l*l`Q?^M0`A1^x{YO$rJIT$=sC+Y$3Y@2B&M7
zJ58iNl-adzQFHs4Fhs?z`I6K%m|T1?dg*z)Hz&Z16FCfHz$)bU+Ce9?o=kU~hm@k0`S
zj;i2?v;FsB!SkY5uo
zMD7%_)*SG@V1OA3QJ5PCWeKu~fBks+Kfw0pYdK!%YaL%PDr5W%1^PqxsorEwp0K2VA}s5u=aoJ`*8go_(zzfUQ9TNz9p_H
z#r7(NpoHm{T1En!gs#Uy*xl$qQ?hs2!r2NKUS*g`raBu~uY#k!lsqF(XPwc0Fo@F#
z>EN9B&5!D>*MKX>z@ZXbjn@|`alV$ADKTeP@1x&uE?ff>cKPWdc?&$n7qh_5ltal6
z7am@nfG2TsGk8KM~
zG5hlkQTg^O4KrQ6d#KlOmlai&N)qpQ$24-2M>yN@qzTK_Xh%_n}_mYO2fP-crZ*arquuez{0Xm*150y!rDO
zgNX=DzDy(#!*t{4dZWgAxC+RmM{&`$u(gS~iUK3&?7rIXuNh6-1w(}!YLfFV^>wV;
zhnRlw0pZ$7Ch5XdBhw65>KRL6$mhe$xNG?4->`fmyFkV)Com72(E1;x16c3HMvhbI
z-`I^_&9q+BO=efW2VJ+eL%M>6PP$`wRsqquK>IKWwlPjeY@ZIEJtUsHvoGX_zIG)e>&FhdbnSWt7J0{oG&^gIpyYFr_&+B=<
zYhUW;P1Ro?vhLOpqWUYrY()iB(u-k%XP}BzQ{ur`q;K!Fmf;D$;Yhz_yz^PKeMg=N
zs#l=!1!qPsqO}&WQt%(3>=+Beh1Di|1t9h;7g`+m$=KM|o4ja)kNDLmBEJYEH+su3
z*8BN5_pZyI0#2GLkAE%m=K4{rPTqrRSI)79DgPCb%-3|qZyMU^zg?GRs$y-Q4n7Cb
zXZTrWKF+2%6$I7u(PwvjDwEcZ3P*u8;}v*owvSA{{hG;TtbJO!l@3b}`DL!=s+r7*
zz41*qSID!3->Y7wQuKm{oH(Mp!bx%lOBM*5*gkRsswZ;q@Xr}=8i3zYJTQ^J+EZN6
zk3W*;NxVdQN@PaGCAcbdI_NVn)#%^4yzv)op4xeoZjC)WO16hK#m1Gz&7FQ
z9{8%)Mr}$?2R=*TY*)M9{RT7bQl-b!4R^RUqr&V)=#uk%xpWecW9Zz9SI5`fiE(q`
z7^BpfAFOKbM*;G>b6n!mVHMTI@fH5&UE%=_Tw^@V7{XQS0xmc{sd%1Yenv0Z7B(bR
zf@g1?&P;_1uIaSHT(q==s&-TzKh_(q@lSOQIFsR>fD`F&9Yrw^)sPT%sL#+SBGAofG5B715x0
zQ$F70onT_b{f`1ixT?HglIkK1PBhEJ!tyVK?hLKO$EMrxO9ZvlrGVg~czi~yo`kLY
zW9i!ik1yDhWhMo6^hIOjQmH~zL++-p-G<^9Ewy^D4g+sts;}aK0c>NpPGo(7NOPYW
zIyPO}+>~Pmx7naL(S~77nNFTDA`EM7{#}A+1V^*cgxNc_MuWewKFTP}d6b9wsag(f
z_TC7L9c2@={AzGm_U?9xOVlMfe7y+8^==aSxR}mnsLU-eB)L1qy7wZx8+I~W^d6Er
z{^%q@d>BCDvLA#B&Azrbz4es#aXQUkTJx2+N(QqkKw|}6G$tzT4J#Y>ul>wwHdwax
zTQCDtGCAE4yN`Mqris5=wZIb->Z}bv=rfUYNxiPUuXEJrtq5&MOvzd*B(lD-Dyr-k
zBoB&if6^jO^V1Bvr=Mz`k|}IO>MEuYtgjdamX18mC3Ic!00OO>)m0!xhI0)Cb2~7f
z!nc2SA9X)pgpK@6aHdID5I&T%T_P_xsu2tNr>52F5MgD7*8+G&hgS3*dk{g(YmDa`
zv@PtN`7Ko~N7*FpwMH?5&%aw$!=2e2j=$Jwcio}vHq`S)Fd#g;)C-3E+)E0FmdicZkMnWdNTFknvd)w#iW
zmY)lTH;M)I;dYsvlH&Nir|o2w{S&V5
z_t;#IpJGTgi_X(;m|qW4AB^ff4Vn>biL&`Rd@|odnH>@VWsfW?#)*KF=iU<|xjGV7
zqq@xc?DT$#-ALirO;-mZzqX6l!VU$)a;6m<+5JR?4*d$-$#PjT$0ag>49@_9<#)KK
z_hogjueV1k3m$8u}~jLeWvw}f6R^5Cq#{rYc?`TN_IM)h~+`Z{w~ubWjO
z!f2%u@HXdheNVHlwoD+mw1k|h0N2{y;V_new^%=7j;=-FRr)q4
z5vM$p4gt7{(20XA?Pv+Dzr1wlJ2df}0P%m;s1OFOnhp6f6PXzqQi9mm8-VWNae&}wuCLRQr$OyhW;5TR6O|Q0ReS}R<
zB0z+UZ(3_C+~XZS8LU~Y)l(ic3X;)KiMgMHh{R*ls(dGYt5zj2vyVtN@~#cCo@$R<
zt{JxSxbouIXZLVrQufrlf?7zm`oly@Fe$Z@_W4kj?#eYrc7EQL?4dGZI|zty=Go@E
zGMZlv$;9?9q}xH*_1M}VScB>*Y&_`Xuxyf+f6vf)kr4Uhdh5CH$uZFdnfxH-Sm?qC
zK{kM-_FU)eG;o{}v_lgo!yTByzgH@!zmEMGCPm7i^GYE&dBrTTwnz+1IprtsC$
zIe#0AbZvHDo_Z%&_;dCzSSL=lZJR%P;Y+9TF6_`#3a_Jm!z+dUt96R&boB-NYSl^I
z2(&`Z`_*HizhhS$P__h~=v7)QR~IiC@HiX2_olTldAu4ZLym2WPsCCL&|&goDG>1F
zr^M8e)mD*h0;e%2=zsxlLe%&B
z^$=OUv@q@HIlB;6UyM6K;+UmGJ(Pt~Bo}4Nk1=G?XXH9UZ8X0;J&C2bNlw)w6Uox{
zECRJxDo^%j?ud&U5)3W3YDD^l*%UE-ao_!W^;T-3eS|z{6E<09cP96;@l<}}9K)T{
z?)?px%Hp#O!k+^YueWK6!OGqr&_C)-(~hhmOB~aa3ELwuzKJEz)zBqM{Cb8)zGx+Z
z80dewD}q-LR{XSz9L^-1+k@}3)y4E2-t=)HHwck^nQ2){<`343UR1D^9LwL;Gxn(b
z@WI~H&S~OT47g#*nFoHnZ)B7K>gvUvL9h*pM}=!8GRKcPK#xPwsbqBv5;7B^*^L;;
z3O7H8O&zjw8T5yb^vqFQex4h+s&9CSLFe~5QU)Ot%zG@j6AZjWu`=xkgxsxcMD)1|
zDV*zmx)L$`-)w>n^FK!vB}DNI8h%H8t=VL4bSKTba)ky87LeJo_gyCw=sT8u+3z|KuwHf@gmfRl^ujcs5JN?$Bopqi$k%N+Oqqr7bj#){
z@6LYH&f8QZ6tpTsHCjyeRmiC(?f6S9EqJ%+$qTJsa-3I)Rn9JIAySFgg)ukvX)7tf%MaqhdS0=dfyuiW&KFQfIA=Mk6GmfNPlAIy3OQ2)%nW
zFcX{kJ$
z(|z2U_F=g8qn!vrb&2ZqsSVBR?v4Nxid3QEN9udZWNHC^E^G(G=dPEbrbHcdJAX61
zV!L0cw<>=m%b^^Oo4-Tvycp5?+|(v(cC)qx$n&w-cKwu^yuhbvCy
z&+t9Q96MHY<%@H|zboeh#IQ5!c(&L6K>VzHYrbzhsp91GQ98EjZqhfURi9-kmH*rl
zVz9LgXw2!+c_ZIICf!pbQMmk@mA_+F5A|{x#;*7}6}WIr;B1uA+Y$Q~P*gzS=`s(W
zz4*6Vrep~V^Y9B|NsSo~VLaO<_R+Yo
z;84z!C#tF|IX=#R6%UQRHY$hp^{j~n*qvHMQ%X-7dS)$7;9gFW6m-J$i?A~y1U`Xl
zrid7uaoZn_a=R;j;T<;BHm~RogP%7~^cTD}2!6&&-x8LVZ}xJee@=40@R~|vbnhXM
z4V_?J!~4+k+DgdNu}Yg=JE7RaJ?XF61^;a2UpD(QtO`dw-W}HJxF9S(7$5T5#LJqe
zTAqpt(VzRZi~4#!3#7x#nE=b6*$wpsKxRw`Lch
zTHK(R<~2d$h#!2R&e69NFXkYu^e^z<_iVM&ki>R`isc!)I13!MUa?770=el;FIvAQ
z$`B2&?B)VD=WcUbHt1VcEq?lpBs{bG#fDz)_D)OoXwF-9r9Xi9gAHek{&Z^G{I$EG
zO8#hAc2}ijXcguw#0WQ7UmZkkzwu~Cc^K5kWM454Q|{E}*9Vcmj*G#+!Cf*%dUUG^
z@wo-aa=lzt0{_f>w=Ec@I%gzxD!pzV8jUMSX^zY}sc=wkXiqfro0i8pI<8%hxS~X`
zbsaO=so#CG5?S!2IQM!0ZiZtc&$>~AK5h6&i0GUn5pwpSF7dg|WLOPoX?oq2o#}^<
z;Xx){r{&aOreY~Os;S3d28-`rJkR=Hgdf3_Urm<GE7^G#f)9rAAXLSXcFr`K7f$A;G5Rpn|6)TFGq$)+?mFVwPy_4WoQlk5Oc
z%d<A%MjI|C(y_NFeXbQ|#$_<6^+N5}9D0R#qLjYV**ECJ_KwRW4YN$t
zfzBz#S+WLNnd*BWk2{)r^7PVu_ZQ0FU`=rC!a_U
z02uKtSJO&idGBVF7UQv*K4|1B3TrF|b2I-3Sb%QT7$@$0!xq3Z{@cgJc~?!QgYG;_
zbs_M@Xh9$^8vTw~bNBDd6jV<{zFa{x&Z9R|NQ`T?^f5&lEVealEr#mn9Ot^Eh`!N>
z>3?Ok%NN#>lm*tl_*@*q7a5HzNKgTG$Es>jVg;0oEYU4#ni`W0xvWQr8TV8vO`{si
zA!{@?e7?XTVTbHjZvZvrXjdywo)fyVWYN*71Ru(k5_w)v6JbjA1}Ie>UBf=!wzC}*
z;*&=!RQw78PRDC}rhTLRSD5A!ZrC)aSV8;k2W=f=
zLTtr$#j@+dTYBx~-xhRT-me-fTMv!P88jxTL5s
z9-?oIp`1nAX|GV>CPV3$2tKmEJS12lhSi}cIcPZ8#GkY2b@6jGL8}!0K!W8igeq`#
z-x8cm{ZW9IZ{KWzNtOrPJde*y5vCK7P>*(r*8g*V-
z-hNV1R|7<1H0A~^zpF3!jeoGxTH=fUAlO%XTOt1t8ILHmS&RCAqa>ZcCTq>(DOJWPyclzAj0uSv8J`L>CJ;+m?gWrm{mxhIECW9-mKZ@;{x)F&VtzIX<$_ludAH
zun$B0hg~!`-*Rq_Mas4+iY8U-w?hh6d1nS(Mv=M!)x0d6V+Ep(`wBEpHG=7A{a=k8
zl4Z^6L`cV~zuRz9u3x5U0S{%rc6oo8ZK)GcGlynrDPp>>mYAoWLL}NwB|h+4;?Jg-
zRjYl6@#e9|-iGV3Ce4O~Lb768i_dne0MboDyVu2yR+M)_$W$`UP2Fdj=|sM`&2h0N7uX=%Hw
zO$pxSA;#Arrkya8m8j06*?{M&M6)m=@1`8pFRslfRX)YgjW7sf_`A9?>qt?fmK7cU
z;|+2*7nu#;n{}0|LSHOJ&9OU^WV|jC&1poF9s^H^kcWU;9|q*{EqMWR5<>Sdy1ODq
z2q0Yg#wH7GfN!XZC3J`vQ_%k$=njr!(fzUcX{k=|7s?
zHcSpZj@q#4yi+z?!m3^&ixMb(O8eygHH0)H;e>NP<7c93p!MF|e}Lvy&cN|tinvYD
zEByv4`sR^&KkY9LYnui8%ku$J*XQ5wCQSp?Enyx&bbkedbK>&4$zG#xq$Tm_Cf%)7
zF*v#oKhNr|qQOsBdZ=!6aZ;_F0-Pu-v7JkN6*OZG)E06iyHy@Xk&DYF-9>r=WjDxK
z?o;?&eU`_5SR_Tf#HjYD>P)af<{(Y%@Zcf1o-NB0#*jolEdyXalgIHh(vU|czfoku
z*8(si27!bMlO&9QOyue+)EK6dP*j>!cv@!(5uTXHWqK+^dJjJWI6I7dy9}x!xVsYXjzi0+S0Pv0+w!hA>
zs8W#A563JgjLxo=L7gmwRc&V^zAfk%*wd;+p
zg9z(=^bvmdhEy2G-IUFV4zj7s<>v_nZU{pyiY-NUq~F8YR-7bOh5o1GRS6$OBK2`Q
zS#?mV$e`67ce&0zU`bSxB;V`Co)dK`UElofsKkVtXGE~m7UR9mP*qghK6{QbZ|9Bh
zV65U-65g2oZiO^}W_RMFCo9Ky#g+N3)Pvvp+ivwva1xdZYw*@;T1-PV%|I=dUl!cwFG})B=H%h%GL>5lL2E!I8d8+;^v5_+@nmu
zn)cYS`kCn*Id#(b4px_bvlBY}%&h;)CBRY!KOV#R&NPdSWav(Te~OKy;HPAs0k`s2
z5yKYyV^O&x$s!Xn2a&zDDE;f6KBLop1wp=?@o*Wo@BG`V=^e=k6AEnUrtQmj1N9>V@h$-*R>$+M!zu)-8-+o}~Y=TkPYQQ3mhr+q95bOqL#=#}&_c
z)N%ycE_Dc5Cc-p+DzTJn>W1ZGA
z;c4;xH})$dqAxc5Y@=k(ceDwo&{^J%zanJ0?>pa{?>LZr_Qc_d;>`CUrrL!*IPu8V
z&zvp37yN3+%G29Tu7+jDf3Ns%->6xn#qXF)kI}``QG}eazl%s|o>w*p{>~q$qiN)a
zvz@Gpv%nYqmfGkB*>l`1jOS{2?rOoM_ECZ4q`HkDIfrzX{ZrcCX0CxAwoTw2ugt*e
z4p`7hY1=bvmE(mva53TES*WM|p6T{M<>JwUULA0KFSyC~KR|bIjmqU_b{W}AM;l`+
z1Z&?SxRoq0yutIZJ$K-K)g#b0QM+-ghC&$9_1Rqi%E?9k3TMr7?wkW?rl9{I0Gyxy
zygDTNZ(^yD8Mm|cqdGV7n?wF%8jL^J5;@+8P>r*B=meXUW_O75q#frzu*?FeBN>52
z4ITB861_AOb?W{&vv*D`#xm!V+0QApI2Pb)K}&|t#`4!o0M9VP=`4Gj0_drROoV&2`dPr>y+(9Vl!WfF?uQ_{OKIs3ov
zDZ^)qMpc;88SE4-lC)Iz+COdR5SpKZbubO(hQSl|AmERC9;7xP&qHBg$^_Fo+CdiM
z^JGMR`9E9FV_?gKbt$6eXXfWEj|rRf#m@9NnRJJr68q@>UjsAur4&ZGNw?_Sf>0g1^uj{zAbQc7yAXiKg(S
zp$##RbtFM=srx+Qrlbgq(64`2WmdtK@k{}~9teyrm)Fu-fY}OzelMhHd&p5vDVvF`
zmUX3=*D{<|f&9qZM3`f#~O1#D8fr6eK0j0$rPO5Yi%H!P&%zfxcRI1h?AWl$s}ztPQAixE`M8EU2oK
z?nOg>)Z}6N?z@MJVlU7+Ru|c#ZkDIo9;@OGe+h+@d|*revl{Z_JT3qdyco`CAk#
zx#g$()tgeUeG#JCAk+6jjhOF8Vbz$7NJ8tyfX>Do(z~jTi_u(@CZpfOTbD9M9>h-R
zmaP?Bgse@aAF*FC{Mzw)c?Y@DLWH4J%q));t;la`epT)y0Dd0Gh_k}Skw&~B#oJtz
zM?AG9-lL0i=W6zIY;C8Vwi@I|4mJFZH0XB@d`Em#=zaKQcUj(1Y%0;!6ghaCCYw_)
z?gHbEb6#`NSeZqsNDco=H}hRw^^<~#sT>*BXdf!Es7;5l$ToK7j@U=m4h56J?P-40
z@ixB0v{~in=zs2bj2Sl{SDwEM|Lr8W!oK-^#aA`%;%Oty^vn8m?zVs@PPCwZJ+W={
zG3l2H_2rLnhfH_+T9;J2rzv3UQa@qh+LL$rObdcxDc^a^CeQS%T)qlH)5(CyCra%e
zA9DYt$@Z?qpEoX!*FuTX*W9;4XEO3KCoLR2nzA`COV`?fa#oPZO54ZM;Yxiz683ZC
z=`)K?C-6W3;MyaQOgAQ03oBvJA3h86A7Fc;Igai*4E88-uHP^B?Y2Axg&!4d
zk@Nt{%a`tIlXjz)z~mvDP?>rf`!ZQMe)L@tThYscPWoUvU>4Q(G(p3R
zEz%-+w+zNVS}{djyjJc!<*u=z($u_%aIDEaj~$Bs>wynDAAO@IaQm-MxuQ3vs$>-!
ze9i{(b(uE@8a=h{=XIe(SOPYu9Cj}L>avBE@T8l4o?bvMll{_8c^6Z91l>HMJ8AEjeVV9ksHF
ztU#E1RJ5`Py3`99T5RH5cI$nx+O4ua{G
z$g^fqkrcl6Tv-;mo1CT2SL_l8%>#sE{OY0~U&6GZ{0go_qFZ}jRcKJVk8aszYAN{5Sx6kDP+@8W5`Pw^m()Q
z4NpK$N*yI2z8m7L>t4ReX{-!x%eS<7i5wRTBkyCEP;XWO$W_USb}Ywi&cFDBq)`)G
zbMC-tstU+qZl}w0%DPm=ASDJmmc4%kO{%&4Xp!Ig8WwyyB{7+BHA4I#9fUn4&1$*f
z{{f0a2?{LglDt7>y{Y;{loJNmhDJs>n=(yr4$iyk{w;5Fvb*!7R<*P$A8&GX)((?Vx_4|(XZK1)^~b@<9m{Z`lmfz>d9uPCH^
z@+vlK!P}cXd$3XQIfiJ}*GlufC52#l$Ui&c(8g!5<|kWao1ACA(G1(wz!ZgQm^*&X
z(G-Ifh3`(pH*##5Pt8p0Svx_YjAASU;QIACm7YFjsme}BqMdeh9v!Yu2xqy3QE&ng
z36gl@Yq$v~OE4X2ij|jc)FlTu2(C@AoEru5i8Vz@0A(2A6D>#D$`(c?
ze3!qDCEE!ibI(fT(%@}9gcj4&S#~Mb6Ak+8l&CZQ>ABGAPbj~en%0&EfOA-c(u3nn
z=37YEJCEAKIuPGkllMs8-Pc&nH@RRMJ*w<>7)sL0G!^|obU$J33qsxw*W0XK`x-k&
zxl^%j3t3P~(YGoici;gmP3kf5l427d3S
zLfi0B+4gKTsIq#}nc93;P-NXsdH9)aM(x9my6wEKW2)ERy<#~y>9;i{*G}+Rdz$C(
z%gXcoN%$dnEn~{tYSgC1(d^Uw+>HL1
z{1gcny8bnS*ceSWU^!|t(OC8Vvah6z>x#CT40!&BaX=MvyB%t$48TUna!10UPq0_V<~B
zDMnELoxqfZ6-PeU7qyZcBDT}d|U*BIV_;tp_#74GP>8r*Zr0T@|B$uVjh+Ff=H3zo~uFoIFqoWxlnnw39`4Vx3xBA9v2
z!izC$y+}?d#7w!A20n&@sB>Q{BhP`y@gd91|`BU$f
zB%1F$;J?D83l+h|e5`g+xUA&Qi^aJ<#1*XOU0g=V36bfJlOhOujW@LNB-}#B8p6UR
zG`9JmoymWDa;57d_#$oAw+}T^?K7e{*pTrU1a~T|4t5g}2Um%wp*;s#b}vIa$nQ4P
zQhW8PvGWnM+3DL>297fi2~8a4u+XOs(UMP91FeL_XILEb}hJgW?#3Fu@K$N>G_1E=0x00N^Mx
zoys!N$FIlN*42nDz+PnfgQ5Yw;jHcR^!+lD2d#!%6|Q>;Q&j@B(d<(5#$S&%u-r{u
z(xj|8GUAH;5FKh-pY(kHV!7MQy?E`ROLEd4Qe;8~q9-!dbNh*KWyy!I-E^Bg`9g-2
z>{`lSvsFx0_(zusW9d~&YO289H0`O^N2_bUjTnt$#VSa;gDg=ImZ>riM4=s);pahJqeB
z+zMxYfd~EP$ph^7U(Gp&O*>*)Y6&E$i7Wg9)4-*Q1s~Sj!yUmu;FpMl9?wK;R-8OB
zhmQKWPj$`OQHVi80}Sst61I<=w0R-MQ*0{EG<5b-(Hgv0h8#xQ0p&bQ(2W&r!OG>4
zA5qh|(D9z0MD!HERxFps@rq%%a-zb8<_TxTa7WvKJR#44VInx3Kxz)%J7FnBu%da3
z7_WzXK|Rx1&Dn>5mffg~E6d|urd?}H#EbUB7gp@HqG0fC9-FZCrsOM&HnF6fxx^$h
zV4t?jxKWUJLIX+kAA#HSH7n3C3QB!78y!0zwCCyrqwv#HK$xDE6Do*JMgt50;@zP}
zmR(3tKzsm5@>kIy3e({$2s74tL`zFbGUBpWu~p}Gf<0|gol@4-4#`2KMWq9-VqDDq
zHqW2H<{GU%AI_+I2=kR2!15U)Zi*T4#r}eEdnUEd?nJ3eX9hMEdTh}ZSRJ37#Hjgx
zEZ^uZpvkK8C#OhHWoz0ZAvwCnes5Qe+tmJ4RPbA7H%`gEqT$CWX_r>;9KqxX(!=9F
z&lya#A-~>vy~C+0_vGj$suWI0a`Znw65`<^^>Hgp!SWVbO!+h2x+HZY4r0#|iV@2^
zB>T9*%7V}#zYuJ^wlm!sSBGi_W%Zsa!A~^Vu{^3nd|wl|5q)>zHM(PHnk_GkXG&}U
zku~}BjfpwzpC`vUW%qn0&-!fb2g}-`wat=M0WFLx`GSvMfFRQ{!u(ZJ
z*V(QyTF|lDRuJ;BZwY26mxn0XmQ73X``Z3xL&&&Xs-^%EoDoToSt>$!=G6FNfSN{?
zKJ=IgJz+QGWI95%-#f#(m?u3s3%-`zX(>LBw2o7AS1m@^Oum}@jxmkuEOz`Odafsh
zH=5K;6cckcX{nmI?P7xEZh#YE$Q`0fH%r;L)i%_chk`r}0^b&$2u~w(fB5;v93Qgc
zzWe}7a~BStM|ic@9}0g86GT|b(zfTiC7^z31a$dN2ieZXheGjR7bN7}WYzG~
zM^4q?XWB#2onTBhB#WF9nnIiVl6-ice43=fNEIW}%pzdH%_V^usKS!N932RJUkOgL
z0G!I(Rg~MmHwV!Yu5NjXu?~KdtO%(0CGWnoG{%V*l|EEnEd7fLHo~0sjX9GqsBq7n
zg4+5eL^ScBlH>ub;1o7EeR|Xc_jDMZS{!Phof$&K<}2K2;{f${RiFg1IOTbOzllCT
z{lH&|w
z%?OY^E5)efGBrBmqQBOpGS2IHdY1JSNyRf$w*XKoLhAVVrjPd|*lSzGO{I;(E63k^
zVXVj5A6L?W{^BswIag9>2>AYp)$w_NaRwjg{Z$#u%25zqPzG<-*k>7Mg-#KCy{n9o
zLxL<_-bCK{T*VYw=9(;F4vQD5Q`B8zzQDMjNuW9>Q-T#{)5xb0Af5tG)69Sp
zxT<*sR+NWQNU{@rS5_FEl%&vsAOHAHcmZP1!MyO(TbQO2-qxe}zhNu?SKV&^7i8xD
z_p^~HRD5jXQquI>Olh5s!UWHP-yAkXmXdvXq`SMWv>naNo!Cy^hbBzUys=nSquOo-
zuz^en8>7n%D7;(6OQ1)WpwbY*1XLM-)=AK=Xuvm%#@I#vr?mrX)Y2NRmRZ|{8kDB&
z_Rgw@bT`z+F8lugzco%)3r$Ro8W_oFR0VG*9FCu5!jmY+Sgn4F$)9{DjUelYypG|K
zzCsP{lm_{W3f0@9(|!#!qS7##X#Ie8O+n
z*A$zfsaP~2h=W!ugoqCQP%bu?uWJoGd{+E^JJ#V;JXi5%3ipwF^tAu}VJWkyuGNKo
z8ob})G8t62FO1qZVc72{VzNSf)mlBY%Qpl1j(kVfYS2WM@A&kj{cYi_Jun+ov;u=A
zv*NSPXzNs~uu|6z`QY>>`iRf&Y#Nwnx}Ph2_yE2zj%7kjO-GX>7=K(c2vwTiR}i;N
z@Crk0_lvc&$ncsVmG6#j%fm_F6M!g1*&xhD?}4hk79ExP##^j(i%{|B%!tPt#T
zk_>&+L$7*eoU>h;^;c)Fs>?vczT*)1R5=tafuCJ^a}zCfHxa?CaR%2!Dc!R$zTEZ+d?R_IpO^MK@~n$hIxO$LBG1=pXf#HvM!_Zq(!Gr8!E%tFYWi-rDhJlJ#EJ!Pi~=r>9O^F-6+jjVz26b;F)hS;MQsV^hKAJ{_GS~wzH-Q3*o_!Ku}
zed0no@%{s#W7?e1vJr79gu7_m)_iThG){pZmVfFpIS25P0Yv2kYCO`$_PtVyHjKP5AVu|sTi-&oCWu$Pb4L-LN!E>x4@GnR^r2urgJ&OJ1he@q+T4mO&v!3>>?f3mv@eW`h_v){l{u<-(jkhWM0@(HWqfx#_8`>1#fyk=pnRswM2TwlLBYwb{%
zA7jkLH#~QHGL;tG|2U_d5pUGi=XbtlIkrJ1L~-T9@Y~U1$~v9{_UXGQ{;}&
zK5|mpjQk|Gx(TJCm*5@}g)}S3ca|0>R^Yk}uN)4Uv(X~A$?z|Vbnyjevh1;k9;C
zeG&%L^06ynvKXv0m)ojwJ?F=NW`Z@Jy5d#5{z8VM-4F-@IL4h~^A{Zu
zH$xv>n}UnY))Y&CUS*aQ$I=0d8DMwFTpp1p#Nrj6SLv%gY
zUg;#O?2C!gGN}-8^fUGs;mjL$b^Im+M^8R%b-EL73c=n=U))*hSwwD;d2a5#WV$#l
z)t9dgGWD`-s8yDuzhZ^b<=aR|Do|2dB}aA&zp8YZK(Fuv89>-LuA*V7j#1;
zN@7!d=><^9mc(yY%o9HFz-8ent4Mq)DP>_^IreK?(ic`DJ8yi
zgTfcQ_D70&M^g70E%%vCtK+DJ(|vKLC<6GlK4!lt_acNI6q=vminI
z9)9+rw*d5g;8BZZ`b}tiI)@)axkXmRjtYlDEVW?a#JG2>`PNp
z8ltlvYOEeBw&c<&+*RTZ-e+jYX`Gtymy?}`5W7mA84(;`j~>i0Pq=e8FFpl5RAC|M
zg^U?Awzq{9U1z2+h7;ualIUv6p$6+jpvX^x=2Dta$#PYJ-;^)+ycB>r=-YSddaNGWJ}ds|?PIY(N#AOqBzNFVTw`X60MjTxl8x3{RhyOA4TWc~=t7)UooLuUWCzq_LP%XjcQ!w{(K&>UD2K
zD7mzBq`sruO)Yw&hyAV{b#x38c6BiQN9fh)F;&|x_lY%V%Pj79JwvL{&^Wi`sir=e
zgVIs8$)EABC-BjtzQuco6ygb)j(UQ5x(*k9vu+o+cPz~8u!ZXG2^U=MAbs8YCf~vn
zM54MNInMomD#b-Joys|sDOUc(wk8>jm=5`}9DWU|E^f!<$*W%Z#p`U|uK4VeEOuRk
z=zksRuD(Qm6WGz$)CVdl?H_gcOt^7>B+CjU;42(s$4tudOaGi5pB`Z{oBHj-Ctt
zH3aO94so08gUWBMh11e`lzpp7|EMB1AB!S^XxwSafnciG1|JdqNJJnXUlE@h1Drko
zp+`^7gf*QDRKZODSgpbq_X8VeLj45q$s4Jl!egjP4zF``4Z-c5D=V{1EpBoAAq)xg
z-R?+{U6Evj6s!xBcgDDyWE_;B%L{IURD*%s1~h9?yBXPHrx*s_;yekud94|=*QqMQ
zavNghRb~tjszyxK+%!?rkgq;g7{4H?vp%~iiL%S+zj#ld46Memp7sdJYw-`#?7@rq
zaQ#OMZz69c&ag!JXzahC=>?|hLSA-0kXh>_cct&UM#c^Zxa}SKfMu`!S$rSrVig;=
zOsphgv6BZu)SfdcarqQYwE{c=np7rB_Bb