
In [50]:

#COMP386 V1 Receptive Fields Through Efficient Coding - HW2


#Carolina Cervantes and Pragna Bhatt
import sklearn.decomposition
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pylab as py
import PIL.Image

In [55]:

show_these = ['1.jpg','2.jpg','3.jpg'] # the data folder contains images numbered 1-13

# read in images and display


fig = py.figure(figsize=(8,14))
for (i,file) in enumerate(show_these):
    py.subplot(3,1,i+1)
    img = PIL.Image.open('data/'+file)
    py.imshow(img, cmap=py.cm.Greys_r)
    # images to grayscale
    image = np.asarray(img, 'double').transpose()
    image = image[0,:,:]
    # reshaping array to not include color for images
    print(image.shape)

(500, 250)
(815, 437)
(720, 480)
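The loop above keeps only the first color channel of the transposed array as a stand-in for grayscale. A minimal alternative sketch, assuming the same imports and data folder, using PIL's true luminance conversion instead:

# hedged sketch: luminance grayscale via PIL's 'L' mode
# (an alternative to keeping a single color channel; not the method used above)
gray = PIL.Image.open('data/1.jpg').convert('L')
gray = np.asarray(gray, 'double').transpose()  # same (width, height) layout as above
print(gray.shape)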
In [58]:
def collect_natural_patches(num_patches = 500000, patch_width = 16):
    """ collects image patches """

    max_tries = num_patches * 50
    image_width = 200

    img_first_patch = 0 # the first patch number accepted from an image
    img_first_try = 0   # the first attempt to take a patch from the image
    patch_cnt = 0       # number of collected patches
    try_cnt = 0         # number of attempted collected patches
    num_pixels = patch_width * patch_width

    patch_sample = np.zeros([patch_width,patch_width],float)
    patch = np.zeros([num_pixels,1],float)

    img_patches = np.zeros([num_pixels,num_patches],float)

    # load the first image sampled from
    nat_img_cnt = 1
    image = PIL.Image.open('data/' + str(nat_img_cnt) + '.jpg')
    image = np.asarray(image, 'double').transpose()
    image = image[0,:,:] # keep a single channel (added to solve array shape errors)
    print(image.shape)
    # normalizing the image
    image -= image.mean()
    image /= image.std()

    # collect the patches
    while patch_cnt < num_patches and try_cnt < max_tries:
        try_cnt += 1 # number of total patches attempted

        if (try_cnt - img_first_try) > max_tries/3 or \
           (patch_cnt - img_first_patch) > num_patches/3:
            # change the image sampled from
            nat_img_cnt += 1
            image = PIL.Image.open('data/' + str(nat_img_cnt) + '.jpg')
            image = np.asarray(image, 'double').transpose()
            image = image[0,:,:]
            # normalizing the image
            image -= image.mean()
            image /= image.std()

            img_first_patch = patch_cnt
            img_first_try = try_cnt

            # update on every switch of images
            print(int(100 * float(patch_cnt)/num_patches),' percent complete')

        px = np.random.randint(0,image_width - patch_width)
        py = np.random.randint(0,image_width - patch_width)

        patch_sample = image[px:px+patch_width,py:py+patch_width].copy()
        patch_std = patch_sample.std()

        if patch_std > 0.0: # > 0 to remove blank/uninteresting patches for speed
            # create the patch vector
            patch = np.reshape(patch_sample, num_pixels)
            patch = patch - np.mean(patch)
            img_patches[:,patch_cnt] = patch.copy()
            patch_cnt += 1

    return img_patches

patches_mat = collect_natural_patches(num_patches = 500000, patch_width = 16)


print('\nshape of the extracted image patch data:', patches_mat.shape)

#this function collects patches from the natural image files in my data folder
(500, 250)
33 percent complete
66 percent complete

shape of the extracted image patch data: (256, 500000)
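As an illustrative sanity check (an assumed extra step, not part of the assignment cells), the collector can be run with a much smaller patch budget to confirm its contract: each column of the result is one flattened, zero-mean patch.

# hedged sketch: quick check that returned patches are flattened and zero-mean
test_patches = collect_natural_patches(num_patches = 1000, patch_width = 16)
print(test_patches.shape)                         # expected: (256, 1000)
print(np.abs(test_patches.mean(axis = 0)).max())  # expected: close to 0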

In [59]:
def show_patches_mat(pre_patches, show_patch_num = 16, display=True):
    """ this function generates a 2D array to display image patches (assuming square patches) """

    patches = pre_patches

    tot_patches = patches.shape[1]
    data_dim = patches.shape[0]
    patch_width = int(np.round(np.sqrt(data_dim)))

    # extract show_patch_num patches
    disp_patch = np.zeros([data_dim, show_patch_num], float)
    for i in range(0,show_patch_num):
        patch_i = i * tot_patches // show_patch_num

        patch = patches[:,patch_i].copy()
        pmax = patch.max()
        pmin = patch.min()
        # rescale patch range from [min, max] to [0, 1]
        if pmax > pmin:
            patch = (patch - pmin) / (pmax - pmin)
        disp_patch[:,i] = patch.copy()

    bw = 5 # border width
    pw = patch_width

    patches_y = int(np.sqrt(show_patch_num))
    patches_x = int(np.ceil(float(show_patch_num) / patches_y))
    patch_img = disp_patch.max() * np.ones([(pw + bw) * patches_x - bw,
                                            patches_y * (pw + bw) - bw], float)
    for i in range(0,show_patch_num):
        # place patch i on the patches_x-by-patches_y grid
        # (index by patches_x so non-square patch counts also fit)
        x_i = i % patches_x
        y_i = i // patches_x

        reshaped = disp_patch[:,i].reshape((pw,pw))
        full_patch = np.zeros([pw, pw], float)
        full_patch[0:pw,:] = reshaped[:,:].copy()
        patch_img[x_i*(pw+bw):x_i*(pw+bw)+pw, y_i*(pw+bw):y_i*(pw+bw)+pw] = full_patch

    if display:
        py.bone()
        py.imshow(patch_img.T, interpolation='nearest')
        py.axis('off')
    return

show_patches_mat(patches_mat)
#this function shows the patches collected from the natural images

In [60]:
# finding PCA components

pcatemp = sklearn.decomposition.PCA(n_components=10)
pcafit = pcatemp.fit(np.transpose(patches_mat))

print('Quality of image representation using only about 4% as much data per patch',
      '\n(using only responses along these 10 components instead of the full 256 dimensions):',
      '\n',np.sum(100*pcafit.explained_variance_ratio_),'%')

print('\nPercentage of data explained by each filter:\n',100*pcafit.explained_variance_ratio_)

pca_comp = pcafit.components_
show_patches_mat(np.transpose(pca_comp), 10)

#this cell finds the 10 leading PCA components of the image patches


#the PCA components do not resemble receptive fields; PCA is often used as preprocessing (whitening) for ICA (see the sketch after this cell's output)

Quality of image representation using only about 4% as much data per patch
(using only responses along these 10 components instead of the full 256 dimensions):
 65.79012792923416 %

Percentage of data explained by each filter:
 [18.82967345 16.16512988  6.80062893  6.06072873  4.70328631  3.79098888
  2.69306368  2.34966458  2.22577359  2.1711899 ]
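As the comment above notes, PCA is commonly used as a whitening and dimensionality-reduction step before ICA. A minimal sketch of that pipeline, assuming the patches_mat and show_patches_mat defined in the cells above (illustrative only; the next cell fits FastICA directly on the raw patches):

# hedged sketch: PCA whitening as preprocessing for ICA
pca_white = sklearn.decomposition.PCA(n_components = 10, whiten = True)
patches_white = pca_white.fit_transform(np.transpose(patches_mat))  # (500000, 10), whitened
ica_white = sklearn.decomposition.FastICA(n_components = 10, whiten = False)
sources = ica_white.fit_transform(patches_white)                    # sparse responses per patch
# map the ICA unmixing filters back to pixel space (undoing the whitening scale) and display
filters_pix = ica_white.components_ @ (pca_white.components_ /
              np.sqrt(pca_white.explained_variance_)[:, np.newaxis])
show_patches_mat(np.transpose(filters_pix), 10)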

In [61]:
# finding ICA components

icatemp = sklearn.decomposition.FastICA(n_components=10) # note, sensitive to n_components


icafit = icatemp.fit(np.transpose(patches_mat))

ica_comp = icafit.components_
# print('shape of the ica component matrix: ',ica_comp.shape)

show_patches_mat(np.transpose(ica_comp), 10)
#this cell shows the 10 ICA components; unlike the PCA components, they resemble localized, oriented visual receptive fields
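A hedged follow-up worth noting (an assumption about what is useful to inspect, not part of the assignment): besides the unmixing filters in components_, FastICA also exposes the mixing matrix, whose columns are the basis functions that combine to generate the patches; the same helper can display them.

# hedged sketch: display the ICA basis functions (columns of the mixing matrix)
ica_basis = icafit.mixing_       # shape (256, 10): one basis function per column
show_patches_mat(ica_basis, 10)  # typically also localized and oriented, like the filters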
