neural-networks-master.zip

  • Author: Q4_455570
  • Language: Python
  • File size: 12.1KB
  • File format: zip
  • Upload date: 2022-03-01 23:50
Python neural-network learning code. Research on Image Super-Resolution Technology Based on Deep Learning. Image super-resolution reconstruction refers to recovering a corresponding high-resolution image from a given low-resolution image by means of a specific algorithm. With the rise of artificial intelligence and the continuing development of deep learning, deep-learning-based super-resolution reconstruction has attracted growing attention and research. This work introduces two such techniques, SRCNN (Super-Resolution Convolutional Neural Network) and SRGAN (Super-Resolution Generative Adversarial Network), trains both models in the TensorFlow framework, and evaluates their results. Comparing the reconstructions the two models produce for low-resolution images leads to the following conclusions: both SRCNN and SRGAN reconstruct better than traditional SC (Sparse Coding); SRCNN emphasizes extracting local image features to achieve better overall reconstruction quality, while SRGAN emphasizes fine detail and texture, making the reconstructed image closer to the original.
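As a rough illustration of the SRCNN approach mentioned above, the snippet below is a minimal, hypothetical sketch of the classic three-layer SRCNN architecture (9-1-5 kernel sizes with 64 and 32 feature maps, following the original SRCNN paper) written with the TensorFlow Keras API. It is not the code in this archive, and it assumes the low-resolution input has already been bicubically upscaled to the target size.

import tensorflow as tf

def build_srcnn(channels=1):
    """Minimal SRCNN sketch: patch extraction, non-linear mapping, reconstruction.

    Assumes the low-resolution image was already upscaled to the target size
    (e.g. with bicubic interpolation), as in the original SRCNN formulation.
    """
    inputs = tf.keras.Input(shape=(None, None, channels))
    # Patch extraction and representation (9x9 kernels, 64 feature maps)
    x = tf.keras.layers.Conv2D(64, 9, padding="same", activation="relu")(inputs)
    # Non-linear mapping (1x1 kernels, 32 feature maps)
    x = tf.keras.layers.Conv2D(32, 1, padding="same", activation="relu")(x)
    # Reconstruction of the high-resolution image (5x5 kernels)
    outputs = tf.keras.layers.Conv2D(channels, 5, padding="same")(x)
    model = tf.keras.Model(inputs, outputs)
    # SRCNN is trained with a pixel-wise MSE loss against the ground truth
    model.compile(optimizer="adam", loss="mse")
    return model

model = build_srcnn(channels=1)
model.summary()

SRGAN differs mainly in that a generator of this kind is trained jointly against a discriminator using an adversarial plus perceptual loss, rather than pixel-wise MSE alone, which is why it recovers sharper textures.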
neural-networks-master.zip
  • neural-networks-master
  • art1.py
    6.1KB
  • README.rst
    560B
  • perceptron.py
    3.1KB
  • jordan.py
    4.4KB
  • voronoi.py
    2.1KB
  • mlp.py
    5.7KB
  • som.py
    4.8KB
  • elman.py
    4.3KB
  • neural_gas.py
    3.6KB
Content overview (art1.py)
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Adaptive Resonance Theory
# Copyright (C) 2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
# Reference: Grossberg, S. (1987)
#            Competitive learning: From interactive activation to
#            adaptive resonance, Cognitive Science, 11, 23-63
#
# Requirements: python 2.5 or above => http://www.python.org
#               numpy 1.0 or above  => http://numpy.scipy.org
# -----------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
import numpy as np


class ART:
    ''' ART class

    Usage example:
    --------------
    # Create an ART network with input of size 5 and 10 internal units
    >>> network = ART(5, 10, 0.5)
    '''

    def __init__(self, n=5, m=10, rho=.5):
        '''
        Create network with specified shape

        Parameters:
        -----------
        n : int
            Size of input
        m : int
            Maximum number of internal units
        rho : float
            Vigilance parameter
        '''
        # Comparison layer
        self.F1 = np.ones(n)
        # Recognition layer
        self.F2 = np.ones(m)
        # Feed-forward weights
        self.Wf = np.random.random((m, n))
        # Feed-back weights
        self.Wb = np.random.random((n, m))
        # Vigilance
        self.rho = rho
        # Number of active units in F2
        self.active = 0

    def learn(self, X):
        ''' Learn X '''

        # Compute F2 output and sort the active units by activation (I)
        self.F2[...] = np.dot(self.Wf, X)
        I = np.argsort(self.F2[:self.active].ravel())[::-1]

        for i in I:
            # Check if nearest memory is above the vigilance level
            d = (self.Wb[:, i] * X).sum() / X.sum()
            if d >= self.rho:
                # Learn data
                self.Wb[:, i] *= X
                self.Wf[i, :] = self.Wb[:, i] / (0.5 + self.Wb[:, i].sum())
                return self.Wb[:, i], i

        # No match found: increase the number of active units
        # and have the newly active unit learn the data
        if self.active < self.F2.size:
            i = self.active
            self.Wb[:, i] *= X
            self.Wf[i, :] = self.Wb[:, i] / (0.5 + self.Wb[:, i].sum())
            self.active += 1
            return self.Wb[:, i], i

        return None, None


# -----------------------------------------------------------------------------
if __name__ == '__main__':
    np.random.seed(1)

    # Example 1 : very simple data
    # -------------------------------------------------------------------------
    network = ART(5, 10, rho=0.5)
    data = ["   O ",
            "  O O",
            "    O",
            "  O O",
            "    O",
            "  O O",
            "    O",
            " OO O",
            " OO  ",
            " OO O",
            " OO  ",
            "OOO  ",
            "OO   ",
            "O    ",
            "OO   ",
            "OOO  ",
            "OOOO ",
            "OOOOO",
            "O    ",
            " O   ",
            "  O  ",
            "   O ",
            "    O",
            "  O O",
            " OO O",
            " OO  ",
            "OOO  ",
            "OO   ",
            "OOOO ",
            "OOOOO"]
    X = np.zeros(len(data[0]))
    for i in range(len(data)):
        for j in range(len(data[i])):
            X[j] = (data[i][j] == 'O')
        Z, k = network.learn(X)
        print("|%s|" % data[i], "-> class", k)

    # Example 2 : Learning letters
    # -------------------------------------------------------------------------
    def letter_to_array(letter):
        ''' Convert a letter to a numpy array '''
        shape = len(letter), len(letter[0])
        Z = np.zeros(shape, dtype=int)
        for row in range(Z.shape[0]):
            for column in range(Z.shape[1]):
                if letter[row][column] == '#':
                    Z[row][column] = 1
        return Z

    def print_letter(Z):
        ''' Print an array as if it was a letter '''
        for row in range(Z.shape[0]):
            for col in range(Z.shape[1]):
                if Z[row, col]:
                    print('#', end="")
                else:
                    print(' ', end="")
            print()

    A = letter_to_array([' #### ',
                         '#    #',
                         '#    #',
                         '######',
                         '#    #',
                         '#    #',
                         '#    #'])
    B = letter_to_array(['##### ',
                         '#    #',
                         '#    #',
                         '##### ',
                         '#    #',
                         '#    #',
                         '##### '])
    C = letter_to_array([' #### ',
                         '#    #',
                         '#     ',
                         '#     ',
                         '#     ',
                         '#    #',
                         ' #### '])
    D = letter_to_array(['##### ',
                         '#    #',
                         '#    #',
                         '#    #',
                         '#    #',
                         '#    #',
                         '##### '])
    E = letter_to_array(['######',
                         '#     ',
                         '#     ',
                         '####  ',
                         '#     ',
                         '#     ',
                         '######'])
    F = letter_to_array(['######',
                         '#     ',
                         '#     ',
                         '####  ',
                         '#     ',
                         '#     ',
                         '#     '])

    samples = [A, B, C, D, E, F]
    network = ART(6*7, 10, rho=0.15)
    for i in range(len(samples)):
        Z, k = network.learn(samples[i].ravel())
        print("%c" % (ord('A') + i), "-> class", k)
        print_letter(Z.reshape(7, 6))
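For readers who want to try the ART class outside its built-in demo, the short snippet below is a hypothetical usage sketch (the 5-bit pattern vectors are made up) showing how the vigilance parameter rho controls clustering granularity; in general, lower vigilance tends to group similar binary patterns into the same class, while higher vigilance splits them more finely. It assumes the file above is saved as art1.py, as in the archive listing.

import numpy as np
from art1 import ART  # art1.py as listed in the archive

np.random.seed(1)
# Hypothetical 5-bit patterns: two "left-heavy" and two "right-heavy" inputs
patterns = np.array([[1, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0],
                     [0, 0, 0, 1, 1],
                     [0, 0, 1, 1, 1]], dtype=float)

for rho in (0.3, 0.9):
    network = ART(n=5, m=10, rho=rho)
    # Collect the class index assigned to each pattern at this vigilance level
    classes = [network.learn(p)[1] for p in patterns]
    print("rho =", rho, "-> classes", classes)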