byobu/tmux is an enhanced wrapper around screen. Typing the `byobu` command directly creates a session or reattaches to an existing one; most other shortcuts are compatible with screen.
sudo apt install byobu
byobu new -s <session-name>
byobu kill-session -t <session-name>
byobu a -t <session-name>
byobu ls
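For example, with a session named `work` (the name is arbitrary):
byobu new -s work
byobu a -t work
byobu kill-session -t work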
| Shortcut | Usage |
| --- | --- |
| F2 | Create a new window |
| F3 | Move to the previous window |
| F4 | Move to the next window |
| F5 | Refresh all status notifications |
| F6 | Detach from the session and log out |
| F7 | Enter copy/scrollback mode |
| F8 | Rename the current window |
| F9 | Launch the Byobu configuration menu |
| F12 | Lock this terminal |
| Alt-PageUp | Scroll back through this window's history |
| Alt-PageDown | Scroll forward through this window's history |
| Shift-F1 | Display help |
| Shift-F2 | Split the screen horizontally |
| Shift-F3 | Move focus to the next split |
| Shift-F4 | Move focus to the previous split |
| Shift-F5 | Collapse all splits |
| Shift-F6 | Detach from the session, but do not log out |
| Shift-F11 | Maximize (zoom) the current split |
| Shift-F12 | Toggle all of Byobu's keybindings on or off |
| Ctrl-F2 | Split the screen vertically |
| Ctrl-F5 | Reconnect any SSH/GPG sockets or agents |
| Ctrl-F6 | Kill the current window |
To dump the scrollback buffer to a file:
1. Hit F7 to enter scrollback mode.
2. Press Space to start selecting, and g to scroll to the top of the buffer (thanks @GeorgeMarian).
3. Press Enter to copy (to byobu's clipboard, not a terminal/system one).
4. Run `cat > my-byobu-dump.txt` in the terminal.
5. Press Alt-Insert or Ctrl-A, ] to paste (again, from byobu's clipboard).
6. Press Ctrl-D to close the file.
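Since byobu's default backend is tmux, the scrollback can also be dumped non-interactively with tmux's capture-pane (a sketch; `-S -3000` starts 3000 lines back in history and may need adjusting, as may the target pane):
tmux capture-pane -p -S -3000 > my-byobu-dump.txt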
A small script to retry a flaky `git pull`/`git push` until it succeeds:
#!/bin/bash
# Usage: force_cmd.sh pull/push dir [max_tries]  (max_tries defaults to 10)
cd "$2" || exit 1
if [ -z "$3" ]; then
    max=10
else
    max=$3
fi
echo "max=$max"
num=0
# retry loop; git exits with code 128 on "unable to access" (network) errors
while [[ $num -le $max ]]; do
    if git "$1"; then
        exit 0
    fi
    num=$((num + 1))
    echo "$num"
done
exit 1
A more general variant that takes the retry limit and the git subcommand as arguments:
#!/bin/bash
# Usage:
#   alias fgit='bash ...force_git.sh'
#   fgit [max_tries] args...
# If the first argument is not a number, max_tries defaults to 10.
if ! [[ $1 =~ ^-?[0-9]+$ ]]; then
    max=10
    cmd=("$@")
else
    max=$1
    cmd=("${@:2}")
fi
echo "max=$max"
echo "cmd=git ${cmd[*]}"
num=0
# retry loop; git exits with code 128 on "unable to access" (network) errors
while [[ $num -le $max ]]; do
    if git "${cmd[@]}"; then
        exit 0
    fi
    num=$((num + 1))
    echo "$num"
done
exit 1
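For instance, `fgit 20 pull` keeps retrying `git pull` up to the given limit, while `fgit push` falls back to the default limit of 10.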
References on multi-GPU usage in PyTorch:
https://oldpan.me/archives/pytorch-to-use-multiple-gpus
https://zhuanlan.zhihu.com/p/86441879
https://zhuanlan.zhihu.com/p/234293510
https://zhuanlan.zhihu.com/p/145427849
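As a quick reference alongside those links, a minimal sketch of the single-process `nn.DataParallel` pattern (the toy model and batch shapes are placeholders; for real training the recommended approach is `DistributedDataParallel` with one process per GPU):

```python
import torch
import torch.nn as nn

# placeholder model; replace with the real network
model = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10))

if torch.cuda.device_count() > 1:
    # DataParallel splits each input batch across all visible GPUs
    model = nn.DataParallel(model)
if torch.cuda.is_available():
    model = model.cuda()

x = torch.randn(32, 128)
if torch.cuda.is_available():
    x = x.cuda()
out = model(x)    # scatter x, replicate model, gather outputs on GPU 0
print(out.shape)  # torch.Size([32, 10])
```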
Keywords: level set, watershed, point cloud, convex optimization, unsupervised, deformation
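Below: a configurable CNN encoder built on medtk, covering vanilla UNet/VNet conv stages as well as ResNet/ResNeXt/SE residual stages.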
# Copyright (c) 2020. The Medical Image Computing (MIC) Lab, 陶豪毅
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List
import torch
import torch.nn as nn
from medtk.model.nnModules import ComponentModule, BlockModule
from medtk.model.nd import MaxPoolNd
from medtk.model.blocks import ConvNormAct, VResConvNormAct, \
BasicBlockNd, BottleneckNd, \
SEBasicBlockNd, SEBottleneckNd
class ConvLayer(BlockModule):
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
super().__init__()
self.blocks = []
        for i in range(num_blocks):
            # each block is a double 3x3 conv; the first conv of block 0 carries the stride
            if i == 0:
                self.blocks.append(ConvNormAct(dim, in_channels, out_channels, kernel_size=3, padding=1, stride=stride))
            else:
                self.blocks.append(ConvNormAct(dim, out_channels, out_channels, kernel_size=3, padding=1))
            self.blocks.append(ConvNormAct(dim, out_channels, out_channels, kernel_size=3, padding=1))
for i, m in enumerate(self.blocks):
self.add_module(str(i), m)
def forward(self, x):
for layer in self.blocks:
x = layer(x)
return x
class VResConvLayer(BlockModule):
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
super().__init__()
self.blocks = []
for i in range(num_blocks):
if i == 0:
self.blocks.append(ConvNormAct(dim, in_channels, out_channels, kernel_size=3, padding=1, stride=stride))
else:
self.blocks.append(VResConvNormAct(dim, out_channels, out_channels, kernel_size=3, padding=1))
for i, m in enumerate(self.blocks):
self.add_module(str(i), m)
def forward(self, x):
for layer in self.blocks:
x = layer(x)
return x
class ResidualLayer(BlockModule):
BLOCK = BasicBlockNd
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
super(ResidualLayer, self).__init__()
downsample = None
if stride != 1 or in_channels != out_channels * self.BLOCK.expansion:
downsample = nn.Sequential(
self.build_conv(dim, in_channels, out_channels * self.BLOCK.expansion,
kernel_size=1, stride=stride, bias=False),
self.build_norm(dim, out_channels * self.BLOCK.expansion),
)
        self.blocks = nn.ModuleList([
            self.BLOCK(dim,
                       in_planes=in_channels,
                       planes=out_channels,
                       stride=stride,
                       dilation=dilation,
                       downsample=downsample,
                       groups=groups,
                       width_per_group=base_width)
        ])
        in_planes = out_channels * self.BLOCK.expansion
        for i in range(1, num_blocks):
            self.blocks.append(
                self.BLOCK(dim,
                           in_planes=in_planes,
                           planes=out_channels,
                           stride=1,
                           dilation=dilation,
                           groups=groups,
                           width_per_group=base_width))
    def forward(self, x):
        # self.blocks is an nn.ModuleList, so its members are already registered
        for layer in self.blocks:
            x = layer(x)
        return x
class ResidualBottleneckLayer(ResidualLayer):
BLOCK = BottleneckNd
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
out_channels = out_channels // self.BLOCK.expansion
super(ResidualBottleneckLayer, self).__init__(dim, in_channels, out_channels,
stride, num_blocks, groups, base_width, dilation)
class SEResidualLayer(ResidualLayer):
BLOCK = SEBasicBlockNd
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
super(SEResidualLayer, self).__init__(dim, in_channels, out_channels,
stride, num_blocks, groups, base_width, dilation)
class SEResidualBottleneckLayer(ResidualLayer):
BLOCK = SEBottleneckNd
def __init__(self, dim, in_channels, out_channels, stride, num_blocks, groups=1, base_width=64, dilation=1):
out_channels = out_channels // self.BLOCK.expansion
super(SEResidualBottleneckLayer, self).__init__(dim, in_channels, out_channels,
stride, num_blocks, groups, base_width, dilation)
class Encoder(ComponentModule):
"""
support list:
- Vanilla UNet
- ResNet
- ResNeXt
-
"""
    LAYERS = {
        'conv': (ConvLayer, 1),        # UNet, VNet
        'v_conv': (VResConvLayer, 1),  # VBNet
        'res': (ResidualLayer, 1),     # ResNet-18/34
        'b_res': (ResidualBottleneckLayer, 4),       # ResNet/ResNeXt with depth >= 50
        'se_res': (SEResidualLayer, 1),              # SEResNet-18/34
        'se_b_res': (SEResidualBottleneckLayer, 4),  # SEResNet/SEResNeXt with depth >= 50
    }
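    # Note: the second element of each LAYERS tuple is the block expansion factor.
    # Bottleneck layers divide the requested stage width by it before building
    # blocks (see ResidualBottleneckLayer), so features=(256, 512, 1024, 2048)
    # with 'b_res' yields bottleneck planes of 64/128/256/512, as in ResNet-50.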
def __init__(self,
dim: int,
in_channels: int,
features=(16, 32, 64, 128),
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
num_blocks=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
layer_type='conv',
groups=1,
width_per_group=64,
first_conv=(64, 7, 1),
downsample=False):
super(Encoder, self).__init__()
        assert isinstance(out_indices, (list, tuple)), \
            'out_indices must be a list/tuple but got a {}'.format(type(out_indices))
        assert max(out_indices) < len(strides), 'max out_index must be smaller than the number of stages'
assert len(strides) == len(num_blocks) == len(features)
assert layer_type in self.LAYERS.keys()
self.first_features, self.first_kernel, self.first_stride = first_conv
self.downsample = downsample
self.dim = dim
self.in_channels = in_channels
self.features = features
self.strides = strides
self.dilations = dilations
self.num_blocks = num_blocks
self.out_indices = out_indices
self.stages = len(self.strides)
self.groups = groups
self.width_per_group = width_per_group
self.layer_type = layer_type
self.layer, self.expansion = self.LAYERS[layer_type]
self.conv1 = self.build_conv(dim, self.in_channels,
self.first_features,
kernel_size=self.first_kernel,
stride=self.first_stride,
padding=self.first_kernel // 2,
bias='res' not in self.layer_type)
self.bn1 = self.build_norm(self.dim, self.first_features)
self.relu = self.build_act()
self.maxpool = MaxPoolNd(self.dim)(kernel_size=3, stride=2, padding=1)
self.layers = self.init_layers()
# self.init_weights()
def init_layers(self):
layers = nn.ModuleList()
in_planes = self.first_features
for i in range(self.stages):
layer_name = 'layer{}'.format(i + 1)
layer = self.layer(
self.dim,
in_planes,
self.features[i],
stride=self.strides[i],
num_blocks=self.num_blocks[i],
groups=self.groups,
base_width=self.width_per_group,
dilation=self.dilations[i])
in_planes = self.features[i]
self.add_module(layer_name, layer)
layers.append(layer)
return layers
def init_weights(self):
for m in self.modules():
if self.is_conv(self.dim, m):
nn.init.kaiming_normal_(m.weight, 1e-2)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif self.is_norm(self.dim, m):
nn.init.normal_(m.weight, 1.0, 0.02)
m.bias.data.zero_()
def forward(self, inputs):
x = self.conv1(inputs)
x = self.bn1(x)
x = self.relu(x)
if self.downsample:
x = self.maxpool(x)
outs = []
for i in range(self.stages):
layer_name = 'layer{}'.format(i + 1)
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return outs
if __name__ == "__main__":
def init_seed(SEED):
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
init_seed(666)
# UNet = Encoder(
# dim=2,
# in_channels=3,
# features=(32, 64, 128, 256),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(1, 1, 1, 1),
# out_indices=(0, 1, 2, 3),
# layer_type='conv'
# )
# model = UNet
# VNet = Encoder(
# dim=2,
# in_channels=3,
# features=(32, 64, 128, 256),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(1, 2, 3, 4),
# out_indices=(0, 1, 2, 3),
# layer_type='v_conv'
# )
# model = VNet
# TVNet = Encoder(
# dim=2,
# in_channels=3,
# features=(32, 64, 128, 256),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(2, 2, 2, 2),
# out_indices=(0, 1, 2, 3),
# layer_type='v_conv'
# )
# model = TVNet
# ResNet18 = Encoder(
# dim=2,
# in_channels=3,
# features=(64, 128, 256, 512),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(2, 2, 2, 2),
# out_indices=(0, 1, 2, 3),
# first_conv=(64, 7, 2),
# layer_type='res',
# downsample=True
# )
# model = ResNet18
# ResNet34 = Encoder(
# dim=2,
# in_channels=3,
# features=(64, 128, 256, 512),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(3, 4, 6, 3),
# out_indices=(0, 1, 2, 3),
# first_conv=(64, 7, 2),
# layer_type='res',
# downsample=True
# )
# model = ResNet34
# ResNet50 = Encoder(
# dim=2,
# in_channels=3,
# features=(256, 512, 1024, 2048),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(3, 4, 6, 3),
# out_indices=(0, 1, 2, 3),
# first_conv=(64, 7, 2),
# layer_type='b_res',
# downsample=True
# )
# model = ResNet50
# ResNeXt50_32x4 = Encoder(
# dim=2,
# in_channels=3,
# features=(256, 512, 1024, 2048),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(3, 4, 6, 3),
# out_indices=(0, 1, 2, 3),
# groups=32,
# width_per_group=4,
# first_conv=(64, 7, 2),
# layer_type='b_res',
# downsample=True
# )
# model = ResNeXt50_32x4
# SEResNet50 = Encoder(
# dim=2,
# in_channels=3,
# features=(256, 512, 1024, 2048),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(3, 4, 6, 3),
# out_indices=(0, 1, 2, 3),
# first_conv=(64, 7, 2),
# layer_type='se_b_res',
# downsample=True
# )
# model = SEResNet50
# DeeplungResNet18 = Encoder(
# dim=3,
# in_channels=1,
# features=(32, 64, 64, 64),
# strides=(1, 2, 2, 2),
# dilations=(1, 1, 1, 1),
# num_blocks=(2, 2, 3, 3),
# out_indices=(0, 1, 2, 3),
# first_conv=(24, 7, 2),
# layer_type='res',
# groups=32,
# width_per_group=4,
# )
# model = DeeplungResNet18
ResNet18 = Encoder(
dim=3,
in_channels=1,
features=(16, 32, 64, 128),
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
num_blocks=(2, 2, 2, 2),
out_indices=(0, 1, 2, 3),
first_conv=(64, 7, 2),
layer_type='res',
downsample=True
)
model = ResNet18
print(model)
model.print_model_params()
data = torch.ones((1, 1, 96, 96, 96))
outs = model(data)
for o in outs:
print(o.shape)
print(torch.sum(o))
from medtk.runner.checkpoint import load_checkpoint
# load_checkpoint(model, 'https://download.pytorch.org/models/resnet18-5c106cde.pth')
# load_checkpoint(model, 'https://download.pytorch.org/models/resnet34-333f7ec4.pth')
# load_checkpoint(model, 'https://download.pytorch.org/models/resnet50-19c8e357.pth')
# load_checkpoint(model, 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth')
# load_checkpoint(model, 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth')