新建文件夹.zip

  • milumilu1205
    了解作者
  • Python
    开发工具
  • 12KB
    文件大小
  • zip
    文件格式
  • 0
    收藏次数
  • 1 积分
    下载积分
  • 1
    下载次数
  • 2020-10-13 16:12
    上传日期
一些简单的Python编程,比如支持向量机,卷积神经网络,长短期记忆(LSTM)神经网络以及图片加工处理
新建文件夹.zip
  • 新建文件夹
  • SVM.py
    5.9KB
  • CNN.py
    7.2KB
  • picture.py
    3.8KB
  • proc_images.py
    2.8KB
  • 卷积.py
    5.1KB
  • LSTM1.py
    6.3KB
内容介绍
import numpy as np import os import torch from PIL import Image import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torch.utils.data as data from torchvision import datasets, transforms,models import matplotlib.pyplot as plt import time import copy #参数 batch_size=32 data_dir = "F:/大图片" num_classes = 38 lr = 0.01 momentum = 0.5 #在一定程度上解决局部最优,公式:v=mu*v-learning_rate*dw #w=w+v,dw为损失函数的一阶导数 num_epochs = 2 data_transforms = { "train": transforms.Compose([transforms.Grayscale (1), transforms.ToTensor() ,transforms.Normalize ((0.9851,),(0.0301,)) ]), "val": transforms.Compose([ transforms .Grayscale(1),transforms.ToTensor() ,transforms.Normalize((0.9851,), (0.0301,)) ])} image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ["train", "val"]} # data =[d[0].data.cpu().numpy() for d in image_datasets['train']] # # print(data) # print(np.std(data)) # print(np.mean(data)) dataloaders_dict ={x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=0 )for x in ["train", "val"]} device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # # img = next(iter(dataloaders_dict["val"]))[0] # # print(img.shape) # # #画图 # # unloader = transforms.ToPILImage() # reconvert into PIL image # # plt.ion() # # def imshow(tensor, title=None): # # image = tensor.cpu().clone() # we clone the tensor to not do changes on it # # image = image.squeeze(0) # remove the fake batch dimension # # image = unloader(image) # # plt.imshow(image,'gray') # # if title is not None: # # plt.title(title) # # plt.pause(0.001) # pause a bit so that plots are updated # # plt.ioff() # # plt.show() # # plt.figure() # # imshow(img[0], title='Image') #32*32 # class Net(nn.Module): # def __init__(self): # super(Net, self).__init__() # self.conv1 = nn.Conv2d(1, 20, 5, 1) # 28 * 28 -> (28+1-5) 24 * 24 # self.conv2 = nn.Conv2d(20, 50, 5, 1) # 20 * 20 # self.fc1 = 
nn.Linear(5*5*50, 500) # self.fc2 = nn.Linear(500, 8) # def forward(self, x): # x: 1 * 28 * 28 # x = F.relu(self.conv1(x)) # 20 * 24 * 24 # x = F.max_pool2d(x,2,2) # 12 * 12 # x = F.relu(self.conv2(x)) # 8 * 8 # x = F.max_pool2d(x,2,2) # 4 *4 # x = x.view(-1, 5*5*50) # reshape (5 * 2 * 10), view(5, 20) -> (5 * 20) # x = F.relu(self.fc1(x)) # x= self.fc2(x) # return x # return F.log_softmax(x, dim=1) # log probability train_losses=[] train_counter=[] test_loss=[] test_counter=[] #28*28 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) # 28 * 28 -> (28+1-5) 24 * 24 self.conv2 = nn.Conv2d(20, 50, 5, 1) # 20 * 20 self.fc1 = nn.Linear(4*4*50, 500) self.fc2 = nn.Linear(500, 38) def forward(self, x): # x: 1 * 28 * 28 x = F.relu(self.conv1(x)) # 20 * 24 * 24 x = F.max_pool2d(x,2,2) # 12 * 12 x = F.relu(self.conv2(x)) # 8 * 8 x = F.max_pool2d(x,2,2) # 4 *4 x = x.view(-1, 4*4*50) # reshape (5 * 2 * 10), view(5, 20) -> (5 * 20) x = F.relu(self.fc1(x)) x= self.fc2(x) # return x return F.log_softmax(x, dim=1) # log probability def train(model, device, train_loader, optimizer, epoch): model.train() for idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) pred = model(data) # batch_size * 10 loss = F.nll_loss(pred, target) # SGD optimizer.zero_grad()#梯度初始化为0 loss.backward()#反向传播求梯度 optimizer.step()#更新参数 if idx % 10 == 0: print("Train Epoch: {},[{}/{}], Loss: {}".format( epoch,idx*len(data) ,len(train_loader.dataset) ,loss.item())) train_losses.append(loss.item()) train_counter.append((idx*32)+(epoch-1)*len(train_loader.dataset )) def test(model, device, test_loader): model.eval() total_loss = 0. correct = 0. 
with torch.no_grad(): for idx, (data, target) in enumerate(test_loader): data, target = data.to(device), target.to(device) #print(target) output = model(data) # batch_size * 10 total_loss += F.nll_loss(output, target, reduction="sum").item()#将零维张量转化为float浮点数 pred = output.argmax(dim=1) # batch_size * 1 correct += pred.eq(target.view_as(pred)).sum().item() total_loss /= len(test_loader.dataset) acc = correct/len(test_loader.dataset) print(len(test_loader .dataset ) ) print("Test loss: {}, Accuracy: {}".format(total_loss, acc)) device=torch.device('cuda' if torch.cuda.is_available() else "cpu") model = Net().to(device) optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)#冲量 for epoch in range(1,num_epochs+1): train(model, device, dataloaders_dict['train'], optimizer, epoch) time_start=time.time() test(model, device, dataloaders_dict['val']) time_end=time.time() print('time:',time_end -time_start) flg=plt.figure() plt.plot(train_counter,train_losses ,color='blue') plt.xlabel ('number of training examples seen ') plt.ylabel ('train loss') plt.show() #torch.save(model.state_dict() , "jia1_cnn.pt") #画曲线图 # model = Net() # model.load_state_dict(torch.load('jia1_cnn.pt')) # model.eval() # batch_size=1 # loss=0.0 # correct=0.0 # data_transforms = transforms.Compose([transforms.Grayscale (1), transforms.ToTensor() # ,transforms .Normalize ((0.9549,),(0.0812,)) # ]) # data_dir='F:/ye' # test_data= datasets.ImageFolder(os.path.join(data_dir, 'test'),data_transforms ) # # data =[d[0].data.cpu().numpy() for d in test_data ] # # print(data)#28*28 两个数组因为两张图 # # print(np.mean(data)) # # print(np.std(data)) # dataloaders = torch.utils.data.DataLoader(dataset=test_data,batch_size= batch_size,shuffle= True ) # #print(next(iter(dataloaders)))#数据+标签(因为batch=1,所以为一个标签) # for idx, (data, target) in enumerate(dataloaders):#因为batch为1,所以一个一个训练 # #data, target = data.to(device), target.to(device) # print(idx) # print(data) # print(len(data)) # print(target) # 
ou=model(data) # loss+=F.nll_loss(ou, target,reduction='sum').item() # print(type(loss)) # print(ou) # pred = ou.argmax(dim=1)#出现八个标签的概率值,取最大的那个索引 # print(pred) # correct += pred.eq(target.view_as(pred)).sum().item() # print(correct)
评论
    相关推荐
    • python
      python
    • Python
      Python
    • 可爱的python
      CDay−2 完成核心功能 22 CDay−1 实用化中文 31 CDay0 时刻准备着!发布 41 CDay+1 优化!对自个儿的反省 46 CDay+2 界面!不应该是难事儿 54 CDay+3 优化!多线程 69 CDayN 基于 Python 的无尽探索 75
    • pythonProject22
      pythonProject22
    • 可爱的Python
      本书的内容主要来自CPyUG社区的邮件列表,由Python的行者根据自身经验组织而成,是为从来没有听说过Python的其他语言程序员准备的一份实用的导学性质的书。笔者试图将优化后的学习体验,通过故事的方式传达给读者,...
    • 可爱的Python
      《可爱的Python》的内容主要来自CPyUG社区的邮件列表,由Python的行者根据自身经验组织而成,是为从来没有听说过Python的其他语言程序员准备的一份实用的导学性质的书。笔者试图将优化后的学习体验,通过故事的方式...
    • python
      python
    • python撩妹
      python实现1、将你女票的名字,及示爱语言用爱心的形式表达出来,撩妹必备 2、用python画出小猪佩奇。3、用python画出叮当猫以及自定义的精美的落款。撩妹必备,超值的。
    • python python python python python
      python python python python pythonpython python python python python
    • Python
      Python