
ImportError: cannot import name 'NumpyMetric' from 'pytorch_lightning.metrics.metric' #3

Open
1632325673 opened this issue Sep 30, 2021 · 7 comments


@1632325673

Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?

@chenjiachengzzz

> Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?

Hello, have you managed to solve this problem?

@chenjiachengzzz

> Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?

How did you preprocess the data?

@Aristot1e

> Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?

Have you solved this problem yet? I am running into the same issue. Thanks.

@chenjiachengzzz

> Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?
>
> Have you solved this problem yet? I am running into the same issue. Thanks.

You need to make appropriate changes in a couple of files to make this work.

In fastmri/mri_ixi_module_t2net.py, replace the existing code with the following:

At line 21:

from fastmri.evaluate import DistributedMetricSum
At lines 88-93:

self.NMSE = DistributedMetricSum()
self.SSIM = DistributedMetricSum()
self.PSNR = DistributedMetricSum()
self.ValLoss = DistributedMetricSum()
self.TestLoss = DistributedMetricSum()
self.TotExamples = DistributedMetricSum()
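These DistributedMetricSum objects accumulate a running sum that is reduced across distributed processes. A minimal single-process sketch of how they behave (assuming a pytorch_lightning version that provides the add_state/update/compute Metric API used in the file below; the values here are illustrative):

import torch
from fastmri.evaluate import DistributedMetricSum

nmse_sum = DistributedMetricSum()
for val in (0.12, 0.08, 0.10):          # per-batch NMSE values (made up)
    nmse_sum.update(torch.tensor(val))  # adds into the "quantity" state
print(nmse_sum.compute())               # tensor(0.3000); summed across ranks when distributed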
Replace the existing fastmri/evaluate.py with the following code:

"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""

import argparse
import pathlib
from argparse import ArgumentParser

import h5py
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.metrics.metric import Metric
from runstats import Statistics
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

import torch

import fastmri
from fastmri import evaluate

class MSE(Metric):
"""Calculates MSE and aggregates by summing across distr processes."""

def __init__(self, name="MSE", *args, **kwargs):
    super().__init__(name=name, *args, **kwargs)

def forward(self, gt, pred):
    return mse(gt, pred)

class NMSE(Metric):
"""Calculates NMSE and aggregates by summing across distr processes."""

def __init__(self, name="NMSE", *args, **kwargs):
    super().__init__(name=name, *args, **kwargs)

def forward(self, gt, pred):
    return nmse(gt, pred)

class PSNR(Metric):
"""Calculates PSNR and aggregates by summing across distr processes."""

def __init__(self, name="PSNR", *args, **kwargs):
    super().__init__(name=name, *args, **kwargs)

def forward(self, gt, pred):
    return psnr(gt, pred)

class SSIM(Metric):
"""Calculates SSIM and aggregates by summing across distr processes."""

def __init__(self, name="SSIM", *args, **kwargs):
    super().__init__(name=name, *args, **kwargs)

def forward(self, gt, pred, maxval=None):
    return ssim(gt, pred, maxval=maxval)

class DistributedMetricSum(pl.metrics.Metric):
def init(self, dist_sync_on_step=True):
super().init(dist_sync_on_step=dist_sync_on_step)

    self.add_state("quantity", default=torch.tensor(0.0), dist_reduce_fx="sum")

def update(self, batch: torch.Tensor):  # type: ignore
    self.quantity += batch

def compute(self):
    return self.quantity

def mse(gt, pred):
"""Compute Mean Squared Error (MSE)"""
return np.mean((gt - pred) ** 2)

def nmse(gt, pred):
"""Compute Normalized Mean Squared Error (NMSE)"""
return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2

def psnr(gt, pred):
"""Compute Peak Signal to Noise Ratio metric (PSNR)"""
return peak_signal_noise_ratio(gt, pred, data_range=gt.max())

def ssim(gt, pred, maxval=None):
"""Compute Structural Similarity Index Metric (SSIM)"""
maxval = gt.max() if maxval is None else maxval

ssim = 0
for slice_num in range(gt.shape[0]):
    ssim = ssim + structural_similarity(
        gt[slice_num], pred[slice_num], data_range=maxval
    )

ssim = ssim / gt.shape[0]

return ssim

METRIC_FUNCS = dict(MSE=mse, NMSE=nmse, PSNR=psnr, SSIM=ssim,)

class Metrics(object):
"""
Maintains running statistics for a given collection of metrics.
"""

def __init__(self, metric_funcs):
    """
    Args:
        metric_funcs (dict): A dict where the keys are metric names and the
            values are Python functions for evaluating that metric.
    """
    self.metrics = {metric: Statistics() for metric in metric_funcs}

def push(self, target, recons):
    for metric, func in METRIC_FUNCS.items():
        self.metrics[metric].push(func(target, recons))

def means(self):
    return {metric: stat.mean() for metric, stat in self.metrics.items()}

def stddevs(self):
    return {metric: stat.stddev() for metric, stat in self.metrics.items()}

def __repr__(self):
    means = self.means()
    stddevs = self.stddevs()
    metric_names = sorted(list(means))
    return " ".join(
        f"{name} = {means[name]:.4g} +/- {2 * stddevs[name]:.4g}"
        for name in metric_names
    )

def evaluate(args, recons_key):
metrics = Metrics(METRIC_FUNCS)

for tgt_file in args.target_path.iterdir():
    with h5py.File(tgt_file, "r") as target, h5py.File(
        args.predictions_path / tgt_file.name, "r"
    ) as recons:
        if args.acquisition and args.acquisition != target.attrs["acquisition"]:
            continue

        if args.acceleration and target.attrs["acceleration"] != args.acceleration:
            continue

        target = target[recons_key][()]
        recons = recons["reconstruction"][()]
        target = transforms.center_crop(
            target, (target.shape[-1], target.shape[-1])
        )
        recons = transforms.center_crop(
            recons, (target.shape[-1], target.shape[-1])
        )
        metrics.push(target, recons)

return metrics

if name == "main":
parser = ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--target-path",
type=pathlib.Path,
required=True,
help="Path to the ground truth data",
)
parser.add_argument(
"--predictions-path",
type=pathlib.Path,
required=True,
help="Path to reconstructions",
)
parser.add_argument(
"--challenge",
choices=["singlecoil", "multicoil"],
required=True,
help="Which challenge",
)
parser.add_argument("--acceleration", type=int, default=None)
print ('corpd')
parser.add_argument(
"--acquisition",
choices=[
"CORPD_FBK",
"CORPDFS_FBK",
"AXT1",
"AXT1PRE",
"AXT1POST",
"AXT2",
"AXFLAIR",
],
default=CORPD_FBK,
help="If set, only volumes of the specified acquisition type are used "
"for evaluation. By default, all volumes are included.",
)
args = parser.parse_args()

recons_key = (
    "reconstruction_rss" if args.challenge == "multicoil" else "reconstruction_esc"
)
metrics = evaluate(args, recons_key)
print(metrics)
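With those changes in place, the script can be run from the command line against a directory of ground-truth volumes and a directory of reconstructions; an example invocation (the paths are placeholders):

python fastmri/evaluate.py \
    --target-path /path/to/ground_truth \
    --predictions-path /path/to/reconstructions \
    --challenge singlecoil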

@Aristot1e

> Hello author, why am I still getting this error even though I am using the pytorch_lightning version you recommended?
>
> How did you preprocess the data?

Could you share how you processed the data? Do the .nii files from the IXI dataset need to be converted to h5 files? And what are the training and validation .mat files? Thanks.

@chunmeifeng (Owner)

Please use pytorch_lightning==0.8.1
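For anyone else hitting this, pinning the version is a one-liner (assuming a pip-based environment):

pip install pytorch_lightning==0.8.1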

@wdayang commented May 3, 2022

> Please use pytorch_lightning==0.8.1

Thanks, it works fine.
