I have an mlmodel that uses ROI pooling, which I adapted from here (the non-NN-layer version):

def forward(self, features, rois):
    # features: (batch, channels, H, W) feature map
    # rois: (num_rois, 5) tensor, each row [batch_idx, x1, y1, x2, y2] in image coordinates
    batch_size, num_channels, data_height, data_width = features.size()
    num_rois = rois.size()[0]
    outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

    for roi_ind, roi in enumerate(rois):
        batch_ind = int(roi[0].data[0])
        # map box corners onto the feature map: scale, then round to the nearest integer
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
            roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
        roi_width = max(roi_end_w - roi_start_w + 1, 1)
        roi_height = max(roi_end_h - roi_start_h + 1, 1)
        bin_size_w = float(roi_width) / float(self.pooled_width)
        bin_size_h = float(roi_height) / float(self.pooled_height)

        for ph in range(self.pooled_height):
            # bin boundaries: floor/ceil of the fractional bin edges, clipped to the feature map
            hstart = int(np.floor(ph * bin_size_h))
            hend = int(np.ceil((ph + 1) * bin_size_h))
            hstart = min(data_height, max(0, hstart + roi_start_h))
            hend = min(data_height, max(0, hend + roi_start_h))
            for pw in range(self.pooled_width):
                wstart = int(np.floor(pw * bin_size_w))
                wend = int(np.ceil((pw + 1) * bin_size_w))
                wstart = min(data_width, max(0, wstart + roi_start_w))
                wend = min(data_width, max(0, wend + roi_start_w))

                is_empty = (hend <= hstart) or (wend <= wstart)
                if is_empty:
                    outputs[roi_ind, :, ph, pw] = 0
                else:
                    # max-pool each bin over all channels at once
                    # (keepdim=True keeps the reduced dim so the second max can index dim 2 on PyTorch >= 0.2)
                    data = features[batch_ind]
                    outputs[roi_ind, :, ph, pw] = torch.max(
                        torch.max(data[:, hstart:hend, wstart:wend], 1, keepdim=True)[0], 2)[0].view(-1)

    return outputs
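
For reference, this loop version expects rois of shape (num_rois, 5), where column 0 is the batch index and columns 1-4 are (x1, y1, x2, y2) box corners in input-image coordinates; they are mapped onto the feature map by multiplying with spatial_scale and rounding. A small self-contained sketch of that coordinate mapping (the 1/16 stride and the box values are made-up examples, not from the question):

import numpy as np
import torch

spatial_scale = 1.0 / 16   # assumed backbone stride; use whatever the model actually has
rois = torch.Tensor([[0,  16, 16, 320, 240],    # [batch_idx, x1, y1, x2, y2]
                     [0, 100, 80, 400, 300]])

# same mapping as the loop version: scale, then round to the nearest integer
scaled = np.round(rois[:, 1:].numpy() * spatial_scale).astype(int)
print(scaled)   # -> [[ 1  1 20 15]
                #     [ 6  5 25 19]]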


I want to convert the PyTorch model to Caffe, so I need to convert the above into NN layers. I am using the following (adapted from here):

def forward(self, input, rois):
    output = []
    rois = rois.data.float()
    num_rois = rois.size(0)

    # scale box coordinates to feature-map coordinates
    rois[:,1:].mul_(self.spatial_scale)
    rois = rois.long()
    for i in range(num_rois):
        roi = rois[i]
        im_idx = roi[0]
        # crop the ROI from the feature map, then adaptively max-pool it to the fixed output size
        im = input.narrow(0, im_idx, 1)[..., roi[2]:(roi[4]+1), roi[1]:(roi[3]+1)]
        op = nn.functional.adaptive_max_pool2d(input=im, output_size=self.size)
        output.append(op)

    return torch.cat(tuple(output), dim=0)


The outputs returned by the two versions don't match, even though they should be doing the same thing. I seem to be at a dead end. Can anyone point out any obvious mistake I've made above?
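
One way to narrow this down is to check whether the pooling itself is equivalent once both versions see the same integer feature-map coordinates. The following is only my own sketch of such a check (the feature-map size, the ROI box, and the 7x7 output are made-up values, not from the question); it re-implements the floor/ceil binning of the loop version for a single cropped ROI and compares it against adaptive_max_pool2d:

import numpy as np
import torch
import torch.nn.functional as F

# Sanity check: for a single ROI whose feature-map coordinates are already
# integers, adaptive_max_pool2d over the cropped region uses the same
# floor/ceil binning as the manual loop, so the two outputs agree exactly.
torch.manual_seed(0)
feat = torch.randn(1, 4, 38, 50)          # (batch, channels, H, W)
x1, y1, x2, y2 = 3, 5, 27, 20             # ROI already in feature-map coordinates
ph, pw = 7, 7                             # pooled output size

crop = feat[0, :, y1:y2 + 1, x1:x2 + 1]
out_nn = F.adaptive_max_pool2d(crop, (ph, pw))

out_loop = torch.zeros(4, ph, pw)
bin_h = float(y2 - y1 + 1) / ph
bin_w = float(x2 - x1 + 1) / pw
for i in range(ph):
    hs, he = int(np.floor(i * bin_h)), int(np.ceil((i + 1) * bin_h))
    for j in range(pw):
        ws, we = int(np.floor(j * bin_w)), int(np.ceil((j + 1) * bin_w))
        out_loop[:, i, j] = crop[:, hs:he, ws:we].contiguous().view(4, -1).max(1)[0]

print(torch.equal(out_loop, out_nn))      # True

Since the pooling itself matches exactly, any difference between the two modules has to come from how the ROI coordinates are computed.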

Best answer

Found the problem -
the rois were being truncated after the multiplication with the spatial scale; round() has to be called before the cast to long, like so:

rois = rois.data.float()
num_rois = rois.size(0)

rois[:,1:].mul_(self.spatial_scale)
rois = rois.round().long() ## Check this here !!
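
The difference is truncation versus rounding: .long() truncates the scaled coordinates toward zero, while the loop version uses np.round, so any corner with a fractional part of roughly .5 or more ends up one cell off on the feature map. A quick illustration (the values are arbitrary):

import torch

coords = torch.Tensor([6.25, 18.75, 99.9])
print(coords.long())            # 6, 18, 99   -- .long() truncates
print(coords.round().long())    # 6, 19, 100  -- matches np.round in the loop version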


Hope this helps someone!

On "python - converting roi pooling in pytorch to an nn layer", a similar question was found on Stack Overflow: https://stackoverflow.com/questions/53157978/
