Compatibility of visualization_cam.py from MMSegmentation 1.x with MMSegmentation 0.x #3756

Open
libinpg opened this issue Aug 11, 2024 · 0 comments


libinpg commented Aug 11, 2024

Version: MMSegmentation 0.x

Description: I’ve been working on semantic segmentation tasks using MMSegmentation and found the visualization_cam.py script in version 1.x particularly useful. My current project, however, is based on MMSegmentation 0.x. Can this script be used directly with MMSegmentation 0.x, or are modifications required to adapt it to the older version?

Reproduction Steps:

1. Clone the MMSegmentation 1.x repository.
2. Copy the visualization_cam.py script into the tools/analysis_tools/ directory of MMSegmentation 0.x.
3. Run the script and observe the results.

This is my code in MMSegmentation 1.x:
```python
import sys
import os
import time

sys.path.append(os.path.abspath(os.path.join(os.getcwd(), ".")))
from PIL import Image
import albumentations as A
import torch
from albumentations.pytorch import ToTensorV2
import cv2
import numpy as np
from collections import OrderedDict
from typing import Dict, Iterable, Callable
from torch import nn, Tensor
from pprint import pprint
from torchvision.utils import make_grid
from torch.utils.tensorboard.writer import SummaryWriter
import matplotlib.pyplot as plt
import json
import argparse
from dataclasses import dataclass

from pytorch_grad_cam import GradCAM, GradCAMPlusPlus, LayerCAM, XGradCAM, EigenCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image
from mmseg.apis import init_model, inference_model

####################################################################################################

# Supported Grad-CAM type map
METHOD_MAP = {
    'gradcam': GradCAM,
    'gradcam++': GradCAMPlusPlus,
    'xgradcam': XGradCAM,
    'eigencam': EigenCAM,
    'eigengradcam': EigenGradCAM,
    'layercam': LayerCAM,
}

DEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
image_name = "test"
# IMAGE_FILE_PATH = os.path.join(image_file, image_name + ".png")
IMAGE_FILE_PATH = "test.png"
MEAN = [0.535, 0.520, 0.581]
STD = [0.149, 0.111, 0.104]

CONFIG = 'segformer_mit-b0_8xb2-160k_ade20k-512x512.py'
CHECKPOINT = 'iter_16000.pth'
PREVIEW_MODEL = True

TARGET_LAYERS = ["model.model.backbone.layer4"]  # edit TARGET_LAYERS in main(); the spot to change is marked there

METHOD = 'GradCAM'
SEM_CLASSES = ['1']
TARGET_CATEGORY = '1'
VIS_CAM_RESULTS = True
CAM_SAVE_PATH = "cam"
LIKE_VIT = True
PRINT_MODEL_PRED_SEG = False

# Recursively search the model's children for the backbone module
def find_backbone(model):
    for name, module in model.named_children():
        print(f"Checking {name}")
        if name == 'backbone':
            print("Found backbone!")
            return module
        else:
            found = find_backbone(module)
            if found:
                return found
    return None

def parse_args():
    parser = argparse.ArgumentParser(description='Visualize CAM')
    parser.add_argument('--img', default=IMAGE_FILE_PATH, help='Image file')
    parser.add_argument('--config', default=CONFIG, help='Config file')
    parser.add_argument('--checkpoint', default=CHECKPOINT, help='Checkpoint file')
    # parser.add_argument(
    #     '--target_layers',
    #     default=TARGET_LAYERS,
    #     nargs='+',
    #     type=str,
    #     help='The target layers to get CAM, if not set, the tool will '
    #     'specify the norm layer in the last block. Backbones '
    #     'implemented by users are recommended to manually specify'
    #     ' target layers in the command statement.')
    parser.add_argument(
        '--preview_model',
        default=PREVIEW_MODEL,
        help='To preview all the model layers')

    parser.add_argument(
        '--method',
        default=METHOD,
        help='Type of method to use, supports '
        f'{", ".join(list(METHOD_MAP.keys()))}.')

    parser.add_argument(
        '--sem_classes',
        default=SEM_CLASSES,
        nargs='+',
        type=int,
        help='All classes that the model predicts.')
    parser.add_argument(
        '--target_category',
        default=TARGET_CATEGORY,
        type=str,
        help='The target category to get CAM for; defaults to the result '
        'from the given model.')

    parser.add_argument(
        '--aug_mean',
        default=MEAN,
        nargs='+',
        type=float,
        help='Augmentation mean')

    parser.add_argument(
        '--aug_std',
        default=STD,
        nargs='+',
        type=float,
        help='Augmentation std')

    parser.add_argument(
        '--cam_save_path',
        default=CAM_SAVE_PATH,
        type=str,
        help='The path to save the visualized CAM image; default is not to save.')
    parser.add_argument(
        '--vis_cam_results',
        default=VIS_CAM_RESULTS)
    parser.add_argument('--device', default=DEVICE, help='Device to use')

    parser.add_argument(
        '--like_vision_transformer',
        default=LIKE_VIT,
        help='Whether the target model is a ViT-like network.')

    parser.add_argument(
        '--print_model_pred_seg',
        default=PRINT_MODEL_PRED_SEG,
        help='Print the model prediction for the given image')

    args = parser.parse_args()
    if args.method.lower() not in METHOD_MAP.keys():
        raise ValueError(f'invalid CAM type {args.method},'
                         f' supports {", ".join(list(METHOD_MAP.keys()))}.')

    return args

def make_input_tensor(image_file_path, mean, std, device):
    if not os.path.exists(image_file_path):
        raise FileNotFoundError(f"{image_file_path} does not exist!")
    img = Image.open(image_file_path)
    img_array = np.array(img)
    rgb_img = np.float32(img_array) / 255
    input_tensor = preprocess_image(rgb_img, mean=mean, std=std)
    if device == torch.device('cuda:0'):
        input_tensor = input_tensor.to(device)
        print(f"input_tensor has been moved to {device}")
    return input_tensor, rgb_img


def make_model(config_path, checkpoint_path, device):
    # Build the model from the config file and the checkpoint file
    model = init_model(config_path, checkpoint_path, device=device)
    print('Network set up: the trained weights were loaded successfully.')
    return model

from torch.nn import functional as F


class SegmentationModelOutputWrapper(torch.nn.Module):
    """Wrap the segmentor so it returns logits upsampled to the input size."""

    def __init__(self, model):
        super(SegmentationModelOutputWrapper, self).__init__()
        self.model = model

    def forward(self, x):
        print("x.shape", x.shape)
        out = F.interpolate(self.model(x), size=x.shape[-2:], mode='bilinear', align_corners=False)
        return out


class SemanticSegmentationTarget:
    """Sum the logits of the target category over the predicted mask."""

    def __init__(self, category, mask):
        self.category = category
        self.mask = torch.from_numpy(mask)
        if torch.cuda.is_available():
            self.mask = self.mask.cuda()

    def __call__(self, model_output):
        return (model_output[self.category, :, :] * self.mask).sum()


def reshape_transform_fc(in_tensor):
    # Reshape (B, N, C) token sequences back into (B, C, H, W) feature maps
    result = in_tensor.reshape(in_tensor.size(0),
                               int(np.sqrt(in_tensor.size(1))),
                               int(np.sqrt(in_tensor.size(1))),
                               in_tensor.size(2))
    result = result.transpose(2, 3).transpose(1, 2)
    return result

def main():
    args = parse_args()

    input_tensor, rgb_img = make_input_tensor(args.img, args.aug_mean, args.aug_std, device=args.device)

    cfg = args.config
    checkpoint = args.checkpoint
    model_mmseg = make_model(cfg, checkpoint, device=args.device)
    # model_mmseg = init_model(config_path, checkpoint_path, device=device)

    results = inference_model(model_mmseg, args.img)

    if args.print_model_pred_seg:
        # Print the inference result for the given image
        pprint(results)

    if args.preview_model:
        print('Model modules:')
        pprint([name for name, _ in model_mmseg.named_modules()])

    model = SegmentationModelOutputWrapper(model_mmseg)
    output = model(input_tensor)

    sem_classes = args.sem_classes
    sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}

    if len(sem_classes) == 1:
        output = torch.sigmoid(output).cpu()
        pred_mask = torch.where(output > 0.3, torch.ones_like(output), torch.zeros_like(output))
        pred_mask = pred_mask.detach().cpu().numpy()
    else:
        output = torch.nn.functional.softmax(output, dim=1).cpu()
        pred_mask = output[0, :, :, :].argmax(axis=0).detach().cpu().numpy()

    category = sem_class_to_idx[args.target_category]
    mask_float = np.float32(pred_mask == category)

    # reshape_transform = reshape_transform_fc if args.like_vision_transformer else None

    ####################################################################################################
    # Try to find the backbone in the model
    backbone = find_backbone(model)
    if backbone:
        print("Backbone found!")
    else:
        raise RuntimeError("Backbone not found; set target_layers manually")

    # Access the actual model object
    if hasattr(model, '_model'):
        actual_model = model._model
    elif hasattr(model, 'module'):
        actual_model = model.module
    else:
        actual_model = model

    # Pick the target layers, e.g. the last block of the Segformer backbone
    # (edit this line to target a different layer)
    target_layers = [backbone.layers[3][-1]]

    ####################################################################################################
    targets = [SemanticSegmentationTarget(category, mask_float)]
    model.to(DEVICE)
    GradCAM_Class = METHOD_MAP[args.method.lower()]
    with GradCAM_Class(model=model,
                       target_layers=target_layers,
                       reshape_transform=reshape_transform_fc if args.like_vision_transformer else None
                       ) as cam:
        grayscale_cam = cam(input_tensor=input_tensor, targets=targets)[0, :]
        cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)

    vir_image = Image.fromarray(cam_image)

    if args.vis_cam_results:
        vir_image.show()
    cam_save_path = f"{args.cam_save_path}/{os.path.basename(args.config).split('.')[0]}"
    if not os.path.exists(cam_save_path):
        os.makedirs(cam_save_path)
    vir_image.save(os.path.join(cam_save_path, f"{os.path.basename(args.img).split('.')[0]}.png"))


if __name__ == '__main__':
    main()
```
I used the same code in MMSegmentation 0.x with a few changes, such as updating `from mmseg.apis import init_model, inference_model` to `from mmseg.apis import init_segmentor, inference_segmentor`.
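
For reference, a minimal sketch of the renamed 0.x entry points as I understand them (based on the MMSegmentation 0.30 API; `inference_segmentor` returns a list with one segmentation map per image):

```python
# Minimal sketch of the renamed 0.x entry points (assuming MMSegmentation 0.30).
from mmseg.apis import init_segmentor, inference_segmentor

config = 'segformer_mit-b0_8xb2-160k_ade20k-512x512.py'  # reused from the script above
checkpoint = 'iter_16000.pth'
model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'test.png')  # list with one seg map per image
```
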
Expected Behavior: The visualization_cam.py script should run smoothly in the MMSegmentation 0.x environment and produce the expected visualization results.

Actual Behavior:
C:\Users\17905\AppData\Local\anaconda3\envs\mmsegmentationv0.x_pure\lib\site-packages\mmcv\__init__.py:20: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details.
warnings.warn(
input_tensor has been moved to cuda:0
c:\users\17905\desktop\mmsegmentation_0.x_pure\mmseg\models\decode_heads\decode_head.py:104: UserWarning: For binary segmentation, we suggest using `out_channels = 1` to define the output channels of segmentor, and use `threshold` to convert `seg_logist` into a prediction applying a threshold
warnings.warn('For binary segmentation, we suggest using'
c:\users\17905\desktop\mmsegmentation_0.x_pure\mmseg\models\losses\cross_entropy_loss.py:235: UserWarning: Default avg_non_ignore is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set avg_non_ignore=True.
warnings.warn(
load checkpoint from local path: C:\Users\17905\Desktop\mmsegmentation_0.x_pure\work_dirs\segformer_mit-b2_512x512_160k_ade20k\iter_100.pth
Network set up: the trained weights were loaded successfully.
Model modules:
['',
'backbone',
'backbone.layers',
'backbone.layers.0',
'backbone.layers.0.0',
'backbone.layers.0.0.projection',
'backbone.layers.0.0.norm',
'backbone.layers.0.1',
'backbone.layers.0.1.0',
'backbone.layers.0.1.0.norm1',
'backbone.layers.0.1.0.attn',
'backbone.layers.0.1.0.attn.attn',
'backbone.layers.0.1.0.attn.attn.out_proj',
'backbone.layers.0.1.0.attn.proj_drop',
'backbone.layers.0.1.0.attn.dropout_layer',
'backbone.layers.0.1.0.attn.sr',
'backbone.layers.0.1.0.attn.norm',
'backbone.layers.0.1.0.norm2',
'backbone.layers.0.1.0.ffn',
'backbone.layers.0.1.0.ffn.activate',
'backbone.layers.0.1.0.ffn.layers',
'backbone.layers.0.1.0.ffn.layers.0',
'backbone.layers.0.1.0.ffn.layers.1',
'backbone.layers.0.1.0.ffn.layers.3',
'backbone.layers.0.1.0.ffn.layers.4',
'backbone.layers.0.1.0.ffn.dropout_layer',
'backbone.layers.0.1.1',
'backbone.layers.0.1.1.norm1',
'backbone.layers.0.1.1.attn',
'backbone.layers.0.1.1.attn.attn',
'backbone.layers.0.1.1.attn.attn.out_proj',
'backbone.layers.0.1.1.attn.proj_drop',
'backbone.layers.0.1.1.attn.dropout_layer',
'backbone.layers.0.1.1.attn.sr',
'backbone.layers.0.1.1.attn.norm',
'backbone.layers.0.1.1.norm2',
'backbone.layers.0.1.1.ffn',
'backbone.layers.0.1.1.ffn.activate',
'backbone.layers.0.1.1.ffn.layers',
'backbone.layers.0.1.1.ffn.layers.0',
'backbone.layers.0.1.1.ffn.layers.1',
'backbone.layers.0.1.1.ffn.layers.3',
'backbone.layers.0.1.1.ffn.layers.4',
'backbone.layers.0.1.1.ffn.dropout_layer',
'backbone.layers.0.1.2',
'backbone.layers.0.1.2.norm1',
'backbone.layers.0.1.2.attn',
'backbone.layers.0.1.2.attn.attn',
'backbone.layers.0.1.2.attn.attn.out_proj',
'backbone.layers.0.1.2.attn.proj_drop',
'backbone.layers.0.1.2.attn.dropout_layer',
'backbone.layers.0.1.2.attn.sr',
'backbone.layers.0.1.2.attn.norm',
'backbone.layers.0.1.2.norm2',
'backbone.layers.0.1.2.ffn',
'backbone.layers.0.1.2.ffn.activate',
'backbone.layers.0.1.2.ffn.layers',
'backbone.layers.0.1.2.ffn.layers.0',
'backbone.layers.0.1.2.ffn.layers.1',
'backbone.layers.0.1.2.ffn.layers.3',
'backbone.layers.0.1.2.ffn.layers.4',
'backbone.layers.0.1.2.ffn.dropout_layer',
'backbone.layers.0.2',
'backbone.layers.1',
'backbone.layers.1.0',
'backbone.layers.1.0.projection',
'backbone.layers.1.0.norm',
'backbone.layers.1.1',
'backbone.layers.1.1.0',
'backbone.layers.1.1.0.norm1',
'backbone.layers.1.1.0.attn',
'backbone.layers.1.1.0.attn.attn',
'backbone.layers.1.1.0.attn.attn.out_proj',
'backbone.layers.1.1.0.attn.proj_drop',
'backbone.layers.1.1.0.attn.dropout_layer',
'backbone.layers.1.1.0.attn.sr',
'backbone.layers.1.1.0.attn.norm',
'backbone.layers.1.1.0.norm2',
'backbone.layers.1.1.0.ffn',
'backbone.layers.1.1.0.ffn.activate',
'backbone.layers.1.1.0.ffn.layers',
'backbone.layers.1.1.0.ffn.layers.0',
'backbone.layers.1.1.0.ffn.layers.1',
'backbone.layers.1.1.0.ffn.layers.3',
'backbone.layers.1.1.0.ffn.layers.4',
'backbone.layers.1.1.0.ffn.dropout_layer',
'backbone.layers.1.1.1',
'backbone.layers.1.1.1.norm1',
'backbone.layers.1.1.1.attn',
'backbone.layers.1.1.1.attn.attn',
'backbone.layers.1.1.1.attn.attn.out_proj',
'backbone.layers.1.1.1.attn.proj_drop',
'backbone.layers.1.1.1.attn.dropout_layer',
'backbone.layers.1.1.1.attn.sr',
'backbone.layers.1.1.1.attn.norm',
'backbone.layers.1.1.1.norm2',
'backbone.layers.1.1.1.ffn',
'backbone.layers.1.1.1.ffn.activate',
'backbone.layers.1.1.1.ffn.layers',
'backbone.layers.1.1.1.ffn.layers.0',
'backbone.layers.1.1.1.ffn.layers.1',
'backbone.layers.1.1.1.ffn.layers.3',
'backbone.layers.1.1.1.ffn.layers.4',
'backbone.layers.1.1.1.ffn.dropout_layer',
'backbone.layers.1.1.2',
'backbone.layers.1.1.2.norm1',
'backbone.layers.1.1.2.attn',
'backbone.layers.1.1.2.attn.attn',
'backbone.layers.1.1.2.attn.attn.out_proj',
'backbone.layers.1.1.2.attn.proj_drop',
'backbone.layers.1.1.2.attn.dropout_layer',
'backbone.layers.1.1.2.attn.sr',
'backbone.layers.1.1.2.attn.norm',
'backbone.layers.1.1.2.norm2',
'backbone.layers.1.1.2.ffn',
'backbone.layers.1.1.2.ffn.activate',
'backbone.layers.1.1.2.ffn.layers',
'backbone.layers.1.1.2.ffn.layers.0',
'backbone.layers.1.1.2.ffn.layers.1',
'backbone.layers.1.1.2.ffn.layers.3',
'backbone.layers.1.1.2.ffn.layers.4',
'backbone.layers.1.1.2.ffn.dropout_layer',
'backbone.layers.1.1.3',
'backbone.layers.1.1.3.norm1',
'backbone.layers.1.1.3.attn',
'backbone.layers.1.1.3.attn.attn',
'backbone.layers.1.1.3.attn.attn.out_proj',
'backbone.layers.1.1.3.attn.proj_drop',
'backbone.layers.1.1.3.attn.dropout_layer',
'backbone.layers.1.1.3.attn.sr',
'backbone.layers.1.1.3.attn.norm',
'backbone.layers.1.1.3.norm2',
'backbone.layers.1.1.3.ffn',
'backbone.layers.1.1.3.ffn.activate',
'backbone.layers.1.1.3.ffn.layers',
'backbone.layers.1.1.3.ffn.layers.0',
'backbone.layers.1.1.3.ffn.layers.1',
'backbone.layers.1.1.3.ffn.layers.3',
'backbone.layers.1.1.3.ffn.layers.4',
'backbone.layers.1.1.3.ffn.dropout_layer',
'backbone.layers.1.2',
'backbone.layers.2',
'backbone.layers.2.0',
'backbone.layers.2.0.projection',
'backbone.layers.2.0.norm',
'backbone.layers.2.1',
'backbone.layers.2.1.0',
'backbone.layers.2.1.0.norm1',
'backbone.layers.2.1.0.attn',
'backbone.layers.2.1.0.attn.attn',
'backbone.layers.2.1.0.attn.attn.out_proj',
'backbone.layers.2.1.0.attn.proj_drop',
'backbone.layers.2.1.0.attn.dropout_layer',
'backbone.layers.2.1.0.attn.sr',
'backbone.layers.2.1.0.attn.norm',
'backbone.layers.2.1.0.norm2',
'backbone.layers.2.1.0.ffn',
'backbone.layers.2.1.0.ffn.activate',
'backbone.layers.2.1.0.ffn.layers',
'backbone.layers.2.1.0.ffn.layers.0',
'backbone.layers.2.1.0.ffn.layers.1',
'backbone.layers.2.1.0.ffn.layers.3',
'backbone.layers.2.1.0.ffn.layers.4',
'backbone.layers.2.1.0.ffn.dropout_layer',
'backbone.layers.2.1.1',
'backbone.layers.2.1.1.norm1',
'backbone.layers.2.1.1.attn',
'backbone.layers.2.1.1.attn.attn',
'backbone.layers.2.1.1.attn.attn.out_proj',
'backbone.layers.2.1.1.attn.proj_drop',
'backbone.layers.2.1.1.attn.dropout_layer',
'backbone.layers.2.1.1.attn.sr',
'backbone.layers.2.1.1.attn.norm',
'backbone.layers.2.1.1.norm2',
'backbone.layers.2.1.1.ffn',
'backbone.layers.2.1.1.ffn.activate',
'backbone.layers.2.1.1.ffn.layers',
'backbone.layers.2.1.1.ffn.layers.0',
'backbone.layers.2.1.1.ffn.layers.1',
'backbone.layers.2.1.1.ffn.layers.3',
'backbone.layers.2.1.1.ffn.layers.4',
'backbone.layers.2.1.1.ffn.dropout_layer',
'backbone.layers.2.1.2',
'backbone.layers.2.1.2.norm1',
'backbone.layers.2.1.2.attn',
'backbone.layers.2.1.2.attn.attn',
'backbone.layers.2.1.2.attn.attn.out_proj',
'backbone.layers.2.1.2.attn.proj_drop',
'backbone.layers.2.1.2.attn.dropout_layer',
'backbone.layers.2.1.2.attn.sr',
'backbone.layers.2.1.2.attn.norm',
'backbone.layers.2.1.2.norm2',
'backbone.layers.2.1.2.ffn',
'backbone.layers.2.1.2.ffn.activate',
'backbone.layers.2.1.2.ffn.layers',
'backbone.layers.2.1.2.ffn.layers.0',
'backbone.layers.2.1.2.ffn.layers.1',
'backbone.layers.2.1.2.ffn.layers.3',
'backbone.layers.2.1.2.ffn.layers.4',
'backbone.layers.2.1.2.ffn.dropout_layer',
'backbone.layers.2.1.3',
'backbone.layers.2.1.3.norm1',
'backbone.layers.2.1.3.attn',
'backbone.layers.2.1.3.attn.attn',
'backbone.layers.2.1.3.attn.attn.out_proj',
'backbone.layers.2.1.3.attn.proj_drop',
'backbone.layers.2.1.3.attn.dropout_layer',
'backbone.layers.2.1.3.attn.sr',
'backbone.layers.2.1.3.attn.norm',
'backbone.layers.2.1.3.norm2',
'backbone.layers.2.1.3.ffn',
'backbone.layers.2.1.3.ffn.activate',
'backbone.layers.2.1.3.ffn.layers',
'backbone.layers.2.1.3.ffn.layers.0',
'backbone.layers.2.1.3.ffn.layers.1',
'backbone.layers.2.1.3.ffn.layers.3',
'backbone.layers.2.1.3.ffn.layers.4',
'backbone.layers.2.1.3.ffn.dropout_layer',
'backbone.layers.2.1.4',
'backbone.layers.2.1.4.norm1',
'backbone.layers.2.1.4.attn',
'backbone.layers.2.1.4.attn.attn',
'backbone.layers.2.1.4.attn.attn.out_proj',
'backbone.layers.2.1.4.attn.proj_drop',
'backbone.layers.2.1.4.attn.dropout_layer',
'backbone.layers.2.1.4.attn.sr',
'backbone.layers.2.1.4.attn.norm',
'backbone.layers.2.1.4.norm2',
'backbone.layers.2.1.4.ffn',
'backbone.layers.2.1.4.ffn.activate',
'backbone.layers.2.1.4.ffn.layers',
'backbone.layers.2.1.4.ffn.layers.0',
'backbone.layers.2.1.4.ffn.layers.1',
'backbone.layers.2.1.4.ffn.layers.3',
'backbone.layers.2.1.4.ffn.layers.4',
'backbone.layers.2.1.4.ffn.dropout_layer',
'backbone.layers.2.1.5',
'backbone.layers.2.1.5.norm1',
'backbone.layers.2.1.5.attn',
'backbone.layers.2.1.5.attn.attn',
'backbone.layers.2.1.5.attn.attn.out_proj',
'backbone.layers.2.1.5.attn.proj_drop',
'backbone.layers.2.1.5.attn.dropout_layer',
'backbone.layers.2.1.5.attn.sr',
'backbone.layers.2.1.5.attn.norm',
'backbone.layers.2.1.5.norm2',
'backbone.layers.2.1.5.ffn',
'backbone.layers.2.1.5.ffn.activate',
'backbone.layers.2.1.5.ffn.layers',
'backbone.layers.2.1.5.ffn.layers.0',
'backbone.layers.2.1.5.ffn.layers.1',
'backbone.layers.2.1.5.ffn.layers.3',
'backbone.layers.2.1.5.ffn.layers.4',
'backbone.layers.2.1.5.ffn.dropout_layer',
'backbone.layers.2.2',
'backbone.layers.3',
'backbone.layers.3.0',
'backbone.layers.3.0.projection',
'backbone.layers.3.0.norm',
'backbone.layers.3.1',
'backbone.layers.3.1.0',
'backbone.layers.3.1.0.norm1',
'backbone.layers.3.1.0.attn',
'backbone.layers.3.1.0.attn.attn',
'backbone.layers.3.1.0.attn.attn.out_proj',
'backbone.layers.3.1.0.attn.proj_drop',
'backbone.layers.3.1.0.attn.dropout_layer',
'backbone.layers.3.1.0.norm2',
'backbone.layers.3.1.0.ffn',
'backbone.layers.3.1.0.ffn.activate',
'backbone.layers.3.1.0.ffn.layers',
'backbone.layers.3.1.0.ffn.layers.0',
'backbone.layers.3.1.0.ffn.layers.1',
'backbone.layers.3.1.0.ffn.layers.3',
'backbone.layers.3.1.0.ffn.layers.4',
'backbone.layers.3.1.0.ffn.dropout_layer',
'backbone.layers.3.1.1',
'backbone.layers.3.1.1.norm1',
'backbone.layers.3.1.1.attn',
'backbone.layers.3.1.1.attn.attn',
'backbone.layers.3.1.1.attn.attn.out_proj',
'backbone.layers.3.1.1.attn.proj_drop',
'backbone.layers.3.1.1.attn.dropout_layer',
'backbone.layers.3.1.1.norm2',
'backbone.layers.3.1.1.ffn',
'backbone.layers.3.1.1.ffn.activate',
'backbone.layers.3.1.1.ffn.layers',
'backbone.layers.3.1.1.ffn.layers.0',
'backbone.layers.3.1.1.ffn.layers.1',
'backbone.layers.3.1.1.ffn.layers.3',
'backbone.layers.3.1.1.ffn.layers.4',
'backbone.layers.3.1.1.ffn.dropout_layer',
'backbone.layers.3.1.2',
'backbone.layers.3.1.2.norm1',
'backbone.layers.3.1.2.attn',
'backbone.layers.3.1.2.attn.attn',
'backbone.layers.3.1.2.attn.attn.out_proj',
'backbone.layers.3.1.2.attn.proj_drop',
'backbone.layers.3.1.2.attn.dropout_layer',
'backbone.layers.3.1.2.norm2',
'backbone.layers.3.1.2.ffn',
'backbone.layers.3.1.2.ffn.activate',
'backbone.layers.3.1.2.ffn.layers',
'backbone.layers.3.1.2.ffn.layers.0',
'backbone.layers.3.1.2.ffn.layers.1',
'backbone.layers.3.1.2.ffn.layers.3',
'backbone.layers.3.1.2.ffn.layers.4',
'backbone.layers.3.1.2.ffn.dropout_layer',
'backbone.layers.3.2',
'decode_head',
'decode_head.loss_decode',
'decode_head.conv_seg',
'decode_head.dropout',
'decode_head.convs',
'decode_head.convs.0',
'decode_head.convs.0.conv',
'decode_head.convs.0.bn',
'decode_head.convs.0.activate',
'decode_head.convs.1',
'decode_head.convs.1.conv',
'decode_head.convs.1.bn',
'decode_head.convs.1.activate',
'decode_head.convs.2',
'decode_head.convs.2.conv',
'decode_head.convs.2.bn',
'decode_head.convs.2.activate',
'decode_head.convs.3',
'decode_head.convs.3.conv',
'decode_head.convs.3.bn',
'decode_head.convs.3.activate',
'decode_head.fusion_conv',
'decode_head.fusion_conv.conv',
'decode_head.fusion_conv.bn',
'decode_head.fusion_conv.activate']
x.shape torch.Size([1, 3, 512, 512])
Traceback (most recent call last):
File "tools/chx-cam.py", line 627, in
main()
File "tools/chx-cam.py", line 565, in main
output = model(input_tensor)
File "C:\Users\17905\AppData\Local\anaconda3\envs\mmsegmentationv0.x_pure\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "tools/chx-cam.py", line 521, in forward
out = F.interpolate(self.model(x), size=x.shape[-2:], mode='bilinear', align_corners=False)
File "C:\Users\17905\AppData\Local\anaconda3\envs\mmsegmentationv0.x_pure\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\17905\AppData\Local\anaconda3\envs\mmsegmentationv0.x_pure\lib\site-packages\mmcv\runner\fp16_utils.py", line 119, in new_func
return old_func(*args, **kwargs)
TypeError: forward() missing 1 required positional argument: 'img_metas'
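
If I read the 0.x API correctly, this error occurs because `BaseSegmentor.forward(img, img_metas, return_loss=True, ...)` requires `img_metas` in 0.x, so the plain `self.model(x)` call inside `SegmentationModelOutputWrapper` no longer works. Below is a minimal, untested sketch of a 0.x-compatible wrapper; it assumes the segmentor is an `EncoderDecoder` and that its `encode_decode(img, img_metas)` only needs shape information from the metadata:

```python
# Untested sketch of a 0.x-compatible wrapper. Assumes an EncoderDecoder
# segmentor; encode_decode() bypasses the return_loss dispatch in forward().
import torch
from torch.nn import functional as F


class SegmentationModelOutputWrapperV0(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x):
        h, w = x.shape[-2:]
        # Stub metadata: only the shape fields should matter here.
        img_metas = [dict(img_shape=(h, w, 3), ori_shape=(h, w, 3),
                          pad_shape=(h, w, 3), flip=False)]
        out = self.model.encode_decode(x, img_metas)
        # encode_decode() already resizes to the input size; this interpolate
        # mirrors the 1.x wrapper and is effectively a no-op.
        return F.interpolate(out, size=(h, w), mode='bilinear',
                             align_corners=False)
```

(0.x also exposes `forward_dummy(img)`, which computes the logits without metadata and might work here as well.)
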

Environment:

OS: Windows 10
Python Version: 3.8.19
PyTorch Version: 1.13.1
CUDA Version: 11.7
MMCV Version: mmcv-full==1.7.2
MMSegmentation Version: 0.30.0
