Accessing a specific layer in a pretrained model in PyTorch
I want to extract features from certain blocks of the TimeSformer model, and I also want to remove the last two layers.
import torch
from timesformer.models.vit import TimeSformer
model = TimeSformer(img_size=224, num_classes=400, num_frames=8, attention_type='divided_space_time', pretrained_model='/path/to/pretrained/model.pyth')
Printing the model gives:
TimeSformer(
  (model): VisionTransformer(
    (dropout): Dropout(p=0.0, inplace=False)
    (patch_embed): PatchEmbed(
      (proj): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
    )
    (pos_drop): Dropout(p=0.0, inplace=False)
    (time_drop): Dropout(p=0.0, inplace=False)
    (blocks): ModuleList(  # ************
      (0): Block(
        (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (temporal_attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_fc): Linear(in_features=768, out_features=768, bias=True)
        (drop_path): Identity()
        (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): Linear(in_features=768, out_features=3072, bias=True)
          (act): GELU()
          (fc2): Linear(in_features=3072, out_features=768, bias=True)
          (drop): Dropout(p=0.0, inplace=False)
        )
      )
      (1): Block(
        (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (temporal_attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_fc): Linear(in_features=768, out_features=768, bias=True)
        (drop_path): DropPath()
        (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): Linear(in_features=768, out_features=3072, bias=True)
          (act): GELU()
          (fc2): Linear(in_features=3072, out_features=768, bias=True)
          (drop): Dropout(p=0.0, inplace=False)
        )
      )
      ...
      (11): Block(
        (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (temporal_attn): Attention(
          (qkv): Linear(in_features=768, out_features=2304, bias=True)
          (proj): Linear(in_features=768, out_features=768, bias=True)
          (proj_drop): Dropout(p=0.0, inplace=False)
          (attn_drop): Dropout(p=0.0, inplace=False)
        )
        (temporal_fc): Linear(in_features=768, out_features=768, bias=True)
        (drop_path): DropPath()
        (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): Linear(in_features=768, out_features=3072, bias=True)
          (act): GELU()
          (fc2): Linear(in_features=3072, out_features=768, bias=True)
          (drop): Dropout(p=0.0, inplace=False)
        )
      )
    )
    (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)  **** I want to remove this layer ****
    (head): Linear(in_features=768, out_features=400, bias=True)  **** I want to remove this layer ****
  )
)
Specifically, I want to extract the outputs of the 4th, 8th, and 11th blocks of the model and remove the last two layers. How can I do this? I tried using TimeSformer.blocks[0], but it does not work.
Update:
I have a class, and I need to access the above-mentioned blocks of the TimeSformer as the output of this class. The input to this class is a 5D tensor. This is the unmodified code that I am using to extract the outputs of those blocks:
class Model(nn.Module):
    def __init__(self, pretrained=False):
        super(Model, self).__init__()
        self.model = TimeSformer(img_size=224, num_classes=400, num_frames=8, attention_type='divided_space_time',
                                 pretrained_model='/home/morteza/HD2S_Encoder/models/timesformer/timesformer/models/TimeSformer_divST_16x16_448_K400.pyth')

        self.activation = {}
        def get_activation(name):
            def hook(model, input, output):
                self.activation[name] = output.detach()
            return hook

        self.model.model.blocks[4].register_forward_hook(get_activation('block4'))
        self.model.model.blocks[8].register_forward_hook(get_activation('block8'))
        self.model.model.blocks[11].register_forward_hook(get_activation('block11'))

        block4_output = self.activation['block4']
        block8_output = self.activation['block8']
        block11_output = self.activation['block11']

    def forward(self, x, out_consp=False):
        features2, features3, features4 = self.model(x)
To extract the intermediate output of a specific layer, you can register a forward hook on it, as shown in the snippet below:
import torch
from timesformer.models.vit import TimeSformer
model = TimeSformer(img_size=224, num_classes=400, num_frames=8, attention_type='divided_space_time', pretrained_model='/path/to/pretrained/model.pyth')
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
model.model.blocks[4].register_forward_hook(get_activation('block4'))
model.model.blocks[8].register_forward_hook(get_activation('block8'))
model.model.blocks[11].register_forward_hook(get_activation('block11'))
x = torch.randn(1, 3, 8, 224, 224)  # TimeSformer expects a 5D input: (batch, channels, frames, height, width)
output = model(x)
block4_output = activation['block4']
block8_output = activation['block8']
block11_output = activation['block11']
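Applied to the Model class from the update, two fixes are needed: self.activation is only populated when a forward pass runs, so reading self.activation['block4'] inside __init__ raises a KeyError, and self.model(x) returns a single tensor rather than three values. A minimal sketch of a corrected version, assuming the same hook helper as above (the pretrained path is a placeholder):

import torch
import torch.nn as nn
from timesformer.models.vit import TimeSformer

class Model(nn.Module):
    def __init__(self, pretrained=False):
        super(Model, self).__init__()
        self.model = TimeSformer(img_size=224, num_classes=400, num_frames=8,
                                 attention_type='divided_space_time',
                                 pretrained_model='/path/to/pretrained/model.pyth')
        self.activation = {}

        def get_activation(name):
            def hook(model, input, output):
                self.activation[name] = output.detach()
            return hook

        # Register the hooks once; they fire on every subsequent forward pass.
        self.model.model.blocks[4].register_forward_hook(get_activation('block4'))
        self.model.model.blocks[8].register_forward_hook(get_activation('block8'))
        self.model.model.blocks[11].register_forward_hook(get_activation('block11'))

    def forward(self, x, out_consp=False):
        # x is a 5D tensor: (batch, channels, frames, height, width)
        _ = self.model(x)  # running the model fills self.activation
        return (self.activation['block4'],
                self.activation['block8'],
                self.activation['block11'])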
To remove the last two layers, you can replace them with Identity. Note that, per the printed structure above, norm and head live on the inner VisionTransformer (model.model), not on the TimeSformer wrapper itself:
model.model.norm = torch.nn.Identity()
model.model.head = torch.nn.Identity()
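With both layers replaced, a quick sanity check: the model should now return the features that previously fed the head rather than 400 class logits (the expected 768-dimensional shape is an assumption based on the printed architecture):

x = torch.randn(1, 3, 8, 224, 224)  # (batch, channels, frames, height, width)
output = model(x)
print(output.shape)  # expected: torch.Size([1, 768]) instead of torch.Size([1, 400])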