An Economic Analysis of How Fertility Behavior Affects Domestic Violence in China and India (PDF)

A Wuhan University "outstanding master's thesis" PDF. Its CNKI download count has reached 12,927 and is still climbing; a bold guess says it is the highest download count since CNKI went online. CNKI charges for downloads, so this PDF was assembled from a Bilibili video and is shared here for everyone's enjoyment.

Disclaimer: for study and exchange only; please do not use it for any other purpose. If it infringes intellectual property rights, contact me for removal.

Page 2 of the main text cites a fabricated "Divorce Law", and page 4 dates the founding of New China to 1049. And an article like this gets indexed by CNKI!

torch: exporting the FashionMNIST dataset as images

The script below parses the raw IDX files that torchvision downloads and writes each training image out as a PNG, sorted into one folder per label.

import numpy as np
import struct
 
from PIL import Image
import os

path_home='./data/FashionMNIST/raw/'
data_file = path_home+'train-images-idx3-ubyte'
# The image file is 47,040,016 bytes: a 16-byte header followed by
# 60000*28*28 = 47,040,000 bytes of pixel data
data_file_size = 47040016
data_file_size = str(data_file_size - 16) + 'B'
 
data_buf = open(data_file, 'rb').read()
 
magic, numImages, numRows, numColumns = struct.unpack_from(
    '>IIII', data_buf, 0)
datas = struct.unpack_from(
    '>' + data_file_size, data_buf, struct.calcsize('>IIII'))
datas = np.array(datas).astype(np.uint8).reshape(
    numImages, 1, numRows, numColumns)
 
label_file = path_home+'train-labels-idx1-ubyte'
 
# The label file is 60,008 bytes: an 8-byte header followed by 60,000 one-byte labels
label_file_size = 60008
label_file_size = str(label_file_size - 8) + 'B'
 
label_buf = open(label_file, 'rb').read()
 
magic, numLabels = struct.unpack_from('>II', label_buf, 0)
labels = struct.unpack_from(
    '>' + label_file_size, label_buf, struct.calcsize('>II'))
labels = np.array(labels).astype(np.int64)
 
datas_root = 'mnist_train'
if not os.path.exists(datas_root):
    os.mkdir(datas_root)
 
for i in range(10):
    file_name = datas_root + os.sep + str(i)
    if not os.path.exists(file_name):
        os.mkdir(file_name)
 
for ii in range(numLabels):
    img = Image.fromarray(datas[ii, 0, 0:28, 0:28])
    label = labels[ii]
    file_name = datas_root + os.sep + str(label) + os.sep + \
        'mnist_train_' + str(ii) + '.png'
    img.save(file_name)
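
For what it's worth, the same header-then-payload parsing can be written more compactly with np.frombuffer. A minimal sketch under the same raw-file layout (paths as above; the function names are mine):

import numpy as np

def read_idx_images(path):
    # IDX3 header: magic, count, rows, cols as big-endian uint32
    buf = open(path, 'rb').read()
    n, rows, cols = np.frombuffer(buf, dtype='>u4', count=4)[1:]
    return np.frombuffer(buf, dtype=np.uint8, offset=16).reshape(n, rows, cols)

def read_idx_labels(path):
    # IDX1 header: magic, count as big-endian uint32
    buf = open(path, 'rb').read()
    return np.frombuffer(buf, dtype=np.uint8, offset=8)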

torch: custom dataset training demo

import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch import nn
import os
import pandas as pd
from torchvision.io import decode_image


device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
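# Note: torch.accelerator is a fairly recent PyTorch API (the exact minimum version
# is an assumption here); on older releases the classic fallback does the same job:
#   device = "cuda" if torch.cuda.is_available() else "cpu"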
#  Map a one-hot vector back to its class index (dot product with [0..9]);
#  one-hot targets are not strictly necessary in this demo
def arc_one_hot(x, classes=torch.tensor([0,1,2,3,4,5,6,7,8,9], dtype=torch.float).to(device)):
    return x @ classes
#  1. Create a custom dataset
class CustomImageDataset(Dataset):
    def __init__(self, annotations_file, img_dir, transform=None, target_transform=None):
        self.img_labels = pd.read_csv(annotations_file, header=None)  # set header=None, otherwise the first row is treated as a header and silently skipped
        self.img_dir = img_dir
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
        #print(img_path)
        image = decode_image(img_path).float().div(255)  # must be converted to float, otherwise training fails
        #print(image.shape)
        label = self.img_labels.iloc[idx, 1]
        #print(label)
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        # One-hot encode the label
        new_transform = Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
        label = new_transform(label)
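        # Equivalent alternative: torch.nn.functional.one_hot(torch.tensor(y), num_classes=10).float()
        # builds the same one-hot vector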
        return image, label
    

# check whether the csv has a header row
csv_path='/Users/mnist_test_cus_data/imglist_train.csv'
img_dir='/Users/mnist_test_cus_data/imgs_train/'
batch_size = 64
# Instantiate the custom dataset
mydataset = CustomImageDataset(annotations_file=csv_path, img_dir=img_dir, transform=None, target_transform=None)

# Load the data with a DataLoader
mydataloader = DataLoader(mydataset, batch_size, shuffle=True, num_workers=0)  # num_workers=4 raises an error on macOS
print(len(mydataloader))
print(len(mydataloader.dataset))
# print(mydataset[59999])
# print(mydataset[0][0])
# print(mydataset[0][1])
# exit()

# Download test data from open datasets.
test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
    target_transform = Lambda(lambda y: torch.zeros(10,dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# Iterate over the DataLoader
#for batch in mydataloader:
#    images, labels = batch
    #print(images.size(), labels.size())
    #print(images)
    #print(labels)

for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")

    # print(X)
    # print(y)
    # print(arc_one_hot(y.to(device)))

    break
print(len(mydataloader))
# exit()


#  2. Visualize the data
def showdata():
    labels_map = {
        0: "T-Shirt",
        1: "Trouser",
        2: "Pullover",
        3: "Dress",
        4: "Coat",
        5: "Sandal",
        6: "Shirt",
        7: "Sneaker",
        8: "Bag",
        9: "Ankle Boot",
    }
    figure = plt.figure(figsize=(8, 8))
    cols, rows = 3, 3
    last_img = None
    for i in range(1, cols * rows + 1):
        sample_idx = torch.randint(len(mydataset), size=(1,)).item()
        img, label = mydataset[sample_idx]
        figure.add_subplot(rows, cols, i)
        # reverse the one-hot encoding to recover the integer label
        label = arc_one_hot(label.to(device)).item()
        plt.title(labels_map[int(label)])
        plt.axis("off")
        last_img = img
        plt.imshow(img.squeeze(), cmap="gray")
    plt.show()
    print(last_img.shape)
    print('------')
    print(last_img.squeeze().shape)

    # Display image and label.
    train_features, train_labels = next(iter(mydataloader))
    print(f"Feature batch shape: {train_features.size()}")
    print(f"Labels batch shape: {train_labels.size()}")
    img = train_features[0].squeeze()
    label = train_labels[0]
    plt.imshow(img, cmap="gray")
    plt.show()
    print(f"Label: {label}")
    # exit()


# 3. Define the model

print(f"Using {device} device")

# Define model
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()  # flatten H×W into a single dimension
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork().to(device)
print(model)

# 4. Define the loss function and optimizer
loss_fn = nn.CrossEntropyLoss()  # cross-entropy
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# 5. Training
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    print("size="+str(size))
    model.train()  # enable training-mode behavior of BatchNorm and Dropout (dropout randomly zeroes neurons to curb overfitting; disabled at eval time)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        pred = model(X)
        # No reverse one-hot conversion is needed: CrossEntropyLoss accepts class
        # probabilities (one-hot vectors included) as targets, not just class indices.
        # y=arc_one_hot(y)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()         # compute gradients
        optimizer.step()        # update parameters using the gradients
        optimizer.zero_grad()   # reset gradients to zero

        if batch % 100 == 0:    # report every 100 batches
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
# 6. Evaluation
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            #print(y)
            # no reverse one-hot conversion needed for the loss
            #y=arc_one_hot(y)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            # Count correct predictions: comparing pred.argmax(1) with the integer
            # labels gives a bool tensor; casting to float and summing counts the
            # True entries, e.g. (xx == zz).type(torch.float).sum().item() -> 25.0
            yy = arc_one_hot(y)  # recover integer labels from the one-hot targets
            correct += (pred.argmax(1) == yy).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

#  7. Train and evaluate
def do_train():
    epochs = 5
    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        train(mydataloader, model, loss_fn, optimizer)
        test(test_dataloader, model, loss_fn)
    print("Done!")
    save_model()

# for var_name in model.state_dict():
#     print(var_name, "\t", model.state_dict()[var_name])

# for var_name in optimizer.state_dict():
#     print(var_name, "\t", optimizer.state_dict()[var_name])

#  8. Save the model
def save_model():
    path="model.pth"
    torch.save(model.state_dict(), path)
    print("Saved PyTorch Model State to "+path)
#  9. Load the model
def load_model():
    model = NeuralNetwork().to(device)
    model.load_state_dict(torch.load("model.pth", weights_only=True))
    return model
#  10. Test the model
def test_model():
    model = load_model()
    classes = [
        "T-shirt/top",
        "Trouser",
        "Pullover",
        "Dress",
        "Coat",
        "Sandal",
        "Shirt",
        "Sneaker",
        "Bag",
        "Ankle boot",
    ]

    model.eval()
    x, y = test_data[0][0], test_data[0][1]
    with torch.no_grad():
        x = x.to(device)
        pred = model(x)
        # reverse the one-hot target; tensors must be on the same device to compute
        y = arc_one_hot(y.to(device)).int()
        print(y)

        print(f"Predicted: {pred[0].argmax(0)}, Actual: {y}")
        predicted, actual = classes[pred[0].argmax(0)], classes[y]
        print(f'Predicted: "{predicted}", Actual: "{actual}"')

def do_test_model():
    test_model()  # test_model() loads the saved weights itself
def do_train_model():
    showdata()
    do_train()
    test_model()
def main():
    do_train_model()
    #do_test_model()
    

if __name__ == '__main__':
    main()
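
A side note on the comment in the training loop: since PyTorch 1.10, nn.CrossEntropyLoss accepts targets either as class indices or as class probabilities (one-hot vectors qualify), and both forms give the same loss. A minimal standalone sketch to verify:

import torch
from torch import nn

loss_fn = nn.CrossEntropyLoss()
logits = torch.randn(4, 10)                                  # fake batch of predictions
idx = torch.tensor([3, 7, 0, 9])                             # targets as class indices
onehot = nn.functional.one_hot(idx, num_classes=10).float()  # same targets, one-hot

print(loss_fn(logits, idx))     # both print the same value
print(loss_fn(logits, onehot))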

bash zsh awk: organizing files according to a list

Background

We have a pile of files in directory A and need to copy the ones named in list.txt into directory B; both A and list.txt contain many entries.

1. The file list

list.txt looks like this:

mnist_test_644.png
mnist_test_2180.png
mnist_test_122.png
mnist_test_2816.png

2. bash

b=`sed 'r/g' list.txt`
for i in $b; do
    cp -r "A/"$i "B/"
done

3. zsh

for i (${(s: :)$(<list.txt)}); do
    cp -r "A/"$i "B/"
done

Or (not recommended: if the lines in the txt contain trailing spaces this version breaks; the one above copes better):

fl=$(<list.txt)
for i (${(f)fl}); do
    cp -r "A/"$i "B/"
done

4. awk: build the commands and pipe them to sh

awk '{print "cp -r A/"$1" B/;"}' list.txt | sh
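
All of the variants above rely on whitespace word-splitting, so filenames containing spaces would break them. A more defensive sketch in plain POSIX shell, reading the list line by line (directory names A and B as in the examples above):

while IFS= read -r f; do
    cp -r "A/$f" "B/"
done < list.txt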

bash zsh awk: string splitting

Contents of fl.txt:
12
34
56
78


Bash

1. IFS defines the field separators; the default is space, tab, and newline

bash-3.2$ a="a b c d"
bash-3.2$ for i in $a;
> do
> echo $i","
> done
a,
b,
c,
d,

bash-3.2$ b=`sed 'r/g' fl.txt`
bash-3.2$ for i in $b; do echo $i","; done
12,
34,
56,
78,

a="a,b,c,d"
#换行符分割
IFS=$'\n'

bash-3.2$ a="a,b,c,d"
bash-3.2$ for i in $a;
> do 
> echo $i;
> done
a,b,c,d

Set the separator to a comma:
bash-3.2$ IFS=$','
bash-3.2$ for i in $a; do  echo $i; done
a
b
c
d

2. Building an array from a separator

bash-3.2$ aa="hello,shell,split,test"
bash-3.2$ array=(${aa//,/})    # deleting the commas collapses everything into one word
bash-3.2$ for i in ${array[@]}
> do
> echo $i
> done
helloshellsplittest

bash-3.2$ array=(${aa//,/ })   # replacing commas with spaces word-splits into array elements
bash-3.2$ for i in ${array[@]}; do echo $i; done
hello
shell
split
test

bash-3.2$ echo ${array[0]}
hello
bash-3.2$ echo ${array[1]}
shell
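
The unquoted expansions above are also subject to globbing. A safer bash idiom is to let read do the splitting, with IFS scoped to that one command; a sketch on the same string:

aa="hello,shell,split,test"
IFS=',' read -r -a array <<< "$aa"   # IFS=',' applies only to this read
printf '%s\n' "${array[@]}"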


Zsh

By default zsh does not word-split unquoted variables on space, tab, newline, or carriage return.

1、(f)按行分割

str=$(<fl.txt)

% echo $str
12 
34 
56 
78

for i (${(f)str}){
echo $i"#"
}
12#
34#
56#
78#

Note that inlining the command substitution does not work:
for i (${(f)$(<fl.txt)});
do
echo $i"#";
done
12 34 56 78#

Printing directly and printing through a variable behave differently:
echo $(<fl.txt)
12 34 56 78
aa=$(<fl.txt)
echo $aa
12
34
56
78


Use the (s:chr:) flag instead:
for i (${(s: :)$(<fl.txt)});
do 
echo $i",";
done


Or read the file with sed:
aa=`sed 'r/g' fl.txt`;
for i (${(f)aa});
do
echo $i",";
done



2. (s:chr:)

s='foo,bar,baz'
# plain (s:,:) works too; flags p, w, @ also appear, and any character can replace : as the delimiter
for i in ${(ps:,:)s}; do
echo "$i END"
done
foo END
bar END
baz END


awk

bash-3.2$ aa=`awk '{print $1}' fl.txt`
bash-3.2$ for i in $aa
> do
> echo $i
> done
12
34
56
78