"""
AIGame: connects OpenAI gym game to the model (V1-M1-RL)
Adapted from arm.py
Original Version: 2015jan28 by [email protected]
Modified Version: 2019oct1 by [email protected]
Modified 2019-2020 samn
"""
from pylab import concatenate, figure, show, ion, ioff, pause, xlabel, ylabel, plot, Circle, sqrt, arctan, arctan2, close
from copy import copy
from random import uniform, seed, sample, randint
from matplotlib import pyplot as plt
import random
import numpy as np
from skimage.transform import downscale_local_mean
import json
import gym
import sys
from gym import wrappers
from time import time
import os
import anim
from matplotlib import animation
env = gym.make('Pong-v0',frameskip=3)
env.reset()
totalNB_episodes = 1 # run for this number of episodes to test the game behavior
cNB_episodes = 0 # before the start, the number of episodes is set to 0
##GAME BEHAVIOR TO PLOT
allActions = [] # list of all actions, generated randomly
allProposedActions = [] # list of all proposed actions, computed so that if executed the racket follows the ball
allRewards = [] # rewards: +1 if a point is scored by the player, -1 if a point is lost by the player, and 0 otherwise
allHits = [] # 1 when the ball hits the racket of the player
countAll = 0 # total number of actions in a game
intaction = 5 # integrate this many actions together before returning reward information to model
actions = [] #list of actions provided to the game for play.
epCount = []
InputImages = []
last_obs = [] #make sure this does not introduce a bug
last_ball_dir = 0
possibleactions = [1,3,4] #{"UP": 4,"DOWN":3, "NOMOVE":1},
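# ---------------------------------------------------------------------------
# Illustrative sketch (not called anywhere in this script): the per-frame
# preprocessing that playGame performs inline below. It assumes a standard
# 210x160x3 Atari Pong observation; rows 34:194 crop out the scoreboard,
# leaving the 160x160 court. The helper name is hypothetical.
def _sketch_preprocess_frame (obs):
  """Crop, grayscale and 8x8-downscale one observation to a 20x20 image."""
  Image = obs[34:194,:,:] # drop the scoreboard rows, keep the 160x160 court
  gray = 0.2989*Image[:,:,0] + 0.5870*Image[:,:,1] + 0.1140*Image[:,:,2] # luminance weights
  gray_ds = downscale_local_mean(gray,(8,8)) # merge 8x8 pixel blocks: 160x160 -> 20x20
  return np.where(gray_ds>np.min(gray_ds)+1,255,gray_ds) # same thresholding as playGame uses
# ---------------------------------------------------------------------------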
################################
### PLAY GAME
###############################
def playGame (actions, epCount, InputImages, last_obs, last_ball_dir): # actions need to be generated from motor cortex
  global countAll, intaction
  rewards = []
  proposed_actions = []
  total_hits = []
  Images = []
  Ball_pos = []
  Racket_pos = []
  input_dim = int(np.sqrt(400))
  # previously we merged 2x2 pixels into 1 value; now we merge 8x8 pixels into 1 value,
  # so the original 160x160 pixels yield 20x20 values instead of the previously used 80x80
  dsum_Images = np.zeros(shape=(input_dim,input_dim))
  gray_Image = np.zeros(shape=(160,160))
  done = False
  dirSensitiveNeurons = np.zeros(shape=(10,10))
  count = 0
  for a in range(intaction):
    # for each action generated by the firing rate of the motor cortex, find the
    # suggested action by comparing the positions of the ball and the racket
    caction = actions[a] # action generated by the firing rate of the motor cortex
    if np.shape(last_obs)[0]>0: # if last_obs is not empty
      ImageCourt = last_obs[34:194,20:140,:] # rows 34:194 crop out the scoreboard; columns 20:140 are the court
      ImageAgent = last_obs[34:194,141:144,:] # columns 141:144 contain the player's racket
      sIC = np.sum(ImageCourt,2) # since the color of each object is uniform, sum r,g,b to get a single value
      sIA = np.sum(ImageAgent,2)
      pixelVal_Agent = np.amax(sIA) # find the pixel value representing the agent/racket
      pixelVal_Ball = np.amax(sIC) # find the pixel value representing the ball; the court is blackish
      sIA[sIA<pixelVal_Agent] = 0 # make a binary image of the agent/racket
      sIC[sIC<pixelVal_Ball] = 0 # make a binary image of the court
      Ball_inds = []
      for i in range(sIC.shape[0]):
        for j in range(sIC.shape[1]):
          if sIC[i,j]>0:
            Ball_inds.append([i,j])
      if sIC.shape[0]*sIC.shape[1]==np.shape(Ball_inds)[0]: # every pixel passed the threshold, i.e. there is no ball in the court
        ypos_Ball = -1
        xpos_Ball = -1
      else:
        ypos_Ball = np.median(Ball_inds,0)[0] # y position of the center of mass of the ball
        xpos_Ball = np.median(Ball_inds,0)[1] # x position of the center of mass of the ball
      Racket_inds = []
      for i in range(sIA.shape[0]):
        for j in range(sIA.shape[1]):
          if sIA[i,j]>0:
            Racket_inds.append([i,j])
      ypos_Racket = np.median(Racket_inds,0)[0] # y position of the center of mass of the racket
      xpos_Racket = np.median(Racket_inds,0)[1] # x position of the center of mass of the racket
      # Now we know the position of the racket relative to the ball, so we can suggest an action for
      # the racket so that it doesn't miss the ball. For the time being this is a simple rule based
      # only on the y position of the racket relative to the ball. The ball-not-visible case must be
      # checked first, otherwise ypos_Ball==-1 would be mistaken for a position comparison.
      if ypos_Ball==-1: # no proposed move can be made because the ball was not visible in the court
        proposed_action = -1 # no valid action guessed
      elif ypos_Racket>ypos_Ball: # if the racket is lower than the ball, the suggestion is to move up
        proposed_action = 4 # move up
      elif ypos_Racket<ypos_Ball: # if the racket is higher than the ball, the suggestion is to move down
        proposed_action = 3 # move down
      else: # ypos_Racket==ypos_Ball
        proposed_action = 1 # no move
      Images.append(np.sum(last_obs[34:194,:,:],2))
      Ball_pos.append([19+xpos_Ball,ypos_Ball])
      Racket_pos.append([140+xpos_Racket,ypos_Racket])
    else:
      proposed_action = -1 # if there is no last_obs
      ypos_Ball = -1 # if there is no last_obs, there is no ball position
      xpos_Ball = -1
    observation, reward, done, info = env.step(caction)
    # find the position of the ball after the action
    ## FROM HERE ON ----> Not tested
    ImageCourt2 = observation[34:194,20:140,:]
    sIC2 = np.sum(ImageCourt2,2) # since the color of each object is uniform, sum r,g,b to get a single value
    newpixelVal_Ball = np.amax(sIC2) # find the pixel value representing the ball; the court is blackish
    sIC2[sIC2<newpixelVal_Ball] = 0 # make a binary image of the court
    Ball2_inds = []
    for i in range(sIC2.shape[0]):
      for j in range(sIC2.shape[1]):
        if sIC2[i,j]>0:
          Ball2_inds.append([i,j])
    if sIC2.shape[0]*sIC2.shape[1]==np.shape(Ball2_inds)[0]: # if there is no ball in the court
      ypos_Ball2 = -1
      xpos_Ball2 = -1
    else:
      ypos_Ball2 = np.median(Ball2_inds,0)[0] # y position of the center of mass of the ball
      xpos_Ball2 = np.median(Ball2_inds,0)[1] # x position of the center of mass of the ball
    if xpos_Ball>0 and xpos_Ball2>0:
      if xpos_Ball2-xpos_Ball>0:
        ball_moves_towards_racket = 1 # use the proposed action for reward only when the ball moves towards the racket
        current_ball_dir = 1
      elif xpos_Ball2-xpos_Ball<0:
        ball_moves_towards_racket = 0
        current_ball_dir = -1
      else:
        ball_moves_towards_racket = 0
        current_ball_dir = 0 # direction can't be determined, probably because the ball didn't move in the x direction
    else:
      ball_moves_towards_racket = 0
      current_ball_dir = 0 # direction can't be determined because either the current or the last ball position is outside the court
    ball_hits_racket = 0
    if last_ball_dir==0 or current_ball_dir==0: # no way to find out whether the ball hit the racket,
      ball_hits_racket = 0 # so assume it didn't -- a weak assumption
    else:
      if last_ball_dir==1 and current_ball_dir==-1 and reward==0:
        # the ball was moving towards the racket, is now moving away, and no point was lost
        ball_hits_racket = 1
    last_ball_dir = current_ball_dir
    total_hits.append(ball_hits_racket) # at most one hit should be possible in 5 moves, so an error is printed below if the sum exceeds 1
    ## TILL HERE ---- not tested
    env.render()
    last_obs = observation # the current observation will be used as last_obs for the next action
    if done:
      env.reset()
      last_obs = [] # when the game ends and a new game starts, there is no last observation
      done = False
    rewards.append(reward)
    proposed_actions.append(proposed_action)
    Image = observation[34:194,:,:] # keep only the 160 court rows; the scoreboard rows are cropped out
    for i in range(160):
      for j in range(160):
        gray_Image[i][j] = 0.2989*Image[i][j][0] + 0.5870*Image[i][j][1] + 0.1140*Image[i][j][2] # standard luminance weights
    gray_ds = downscale_local_mean(gray_Image,(8,8))
    gray_ds = np.where(gray_ds>np.min(gray_ds)+1,255,gray_ds) # different thresholding
    if count==0:
      i0 = 0.6*gray_ds
      count = count+1
    elif count==1:
      i1 = 0.7*gray_ds
      count = count+1
    elif count==2:
      i2 = 0.8*gray_ds
      count = count+1
    elif count==3:
      i3 = 0.9*gray_ds
      count = count+1
    else:
      i4 = 1.0*gray_ds
      count = 0
    countAll = countAll+1
  # combine the 5 scaled frames; more recent frames get larger weights so motion is encoded in intensity
  dsum_Images = np.maximum(i0,i1)
  dsum_Images = np.maximum(dsum_Images,i2)
  dsum_Images = np.maximum(dsum_Images,i3)
  dsum_Images = np.maximum(dsum_Images,i4)
  # compute directions of motion for every other pixel
  bkgPixel = np.amin(dsum_Images)
  for dSNeuron_x in range(10):
    Rx = 2*dSNeuron_x
    if Rx==0:
      Rxs = [Rx,Rx+1,Rx+2]
    elif Rx==1:
      Rxs = [Rx-1,Rx,Rx+1,Rx+2]
    elif Rx==((2*10)-2):
      Rxs = [Rx-2,Rx-1,Rx,Rx+1]
    else:
      Rxs = [Rx-2,Rx-1,Rx,Rx+1,Rx+2]
    for dSNeuron_y in range(10):
      Ry = 2*dSNeuron_y
      if Ry==0:
        Rys = [Ry,Ry+1,Ry+2]
      elif Ry==1:
        Rys = [Ry-1,Ry,Ry+1,Ry+2]
      elif Ry==((2*10)-2):
        Rys = [Ry-2,Ry-1,Ry,Ry+1]
      else:
        Rys = [Ry-2,Ry-1,Ry,Ry+1,Ry+2]
      FOV = np.zeros(shape=(len(Rxs),len(Rys)))
      for xinds in range(len(Rxs)):
        for yinds in range(len(Rys)):
          FOV[xinds,yinds] = dsum_Images[Rxs[xinds],Rys[yinds]]
      max_value = np.amax(FOV)
      max_ind = np.where(FOV==max_value)
      bkg_inds = np.where(FOV==bkgPixel)
      if len(bkg_inds[0])>0:
        for yinds in range(len(bkg_inds[0])):
          ix = bkg_inds[0][yinds]
          iy = bkg_inds[1][yinds]
          FOV[ix,iy] = 1000 # mask out background pixels so they can't be picked as the minimum
      min_value = np.amin(FOV)
      min_ind = np.where(FOV==min_value)
      mL = min(len(max_ind[0]),len(min_ind[0])) # use as many index pairs as both extrema provide
      dir1 = [max_ind[0][range(mL)]-min_ind[0][range(mL)],max_ind[1][range(mL)]-min_ind[1][range(mL)]] # direction of object motion in a field of view over the last 5 frames/observations
      dir2 = [np.median(dir1[1]),-1*np.median(dir1[0])] # flip y because indexing starts from the top left
      dirMain = [1,0] # reference direction for 0 degrees; the first entry is for rows, the second for columns
      ndir2 = dir2 / np.linalg.norm(dir2)
      ndirMain = dirMain / np.linalg.norm(dirMain)
      theta = np.degrees(np.arccos(np.dot(ndir2,ndirMain))) # if theta is nan, no movement is detected
      if dir2[1]<0:
        theta = 360-theta
      dirSensitiveNeurons[dSNeuron_x,dSNeuron_y] = theta
      if not np.isnan(theta):
        print('Theta for FOV ',FOV,' is: ', theta)
  print('Computed angles:', dirSensitiveNeurons)
  InputImages.append(dsum_Images)
  fr_Images = 40/(1+np.exp((np.multiply(-1,dsum_Images)+123)/25)) # sigmoid mapping pixel intensity to firing rate
  fr_Images = np.subtract(fr_Images,7.722) # baseline firing-rate subtraction; without it all excitatory neurons fire at ~5Hz
  firing_rates = np.reshape(fr_Images,400) # 400 for 20*20
  if done: # done==1 means one episode of the game ended, so the environment needs to be reset
    epCount.append(countAll)
    env.reset()
    env.frameskip = 3
    countAll = 0 # should count also get set to 0?
  if np.sum(total_hits)>1:
    print('ERROR COMPUTING NUMBER OF HITS')
  return rewards, epCount, InputImages, last_obs, proposed_actions, last_ball_dir, total_hits, Racket_pos, Ball_pos, Images, dirSensitiveNeurons
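# ---------------------------------------------------------------------------
# Illustrative sketch (not called anywhere in this script): the angle rule the
# direction-sensitive neurons apply above. Given a motion vector in image
# coordinates (dy rows, dx columns), the angle against the 0-degree reference
# [1,0] is the arccos of the normalized dot product, flipped into [180,360)
# when the vector points downward. The helper name is hypothetical.
def _sketch_motion_angle (dy, dx):
  """Return the motion angle in degrees, or nan if there is no motion."""
  dir2 = np.array([dx,-dy]) # flip y because image indexing starts at the top left
  norm = np.linalg.norm(dir2)
  if norm==0:
    return np.nan # no movement detected
  ndir2 = dir2 / norm
  theta = np.degrees(np.arccos(np.dot(ndir2,[1.0,0.0])))
  if dir2[1]<0: # the vector points downward in plot coordinates
    theta = 360-theta
  return theta
# e.g. _sketch_motion_angle(-1,0) -> 90.0 (the object moved one row up)
# ---------------------------------------------------------------------------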
fig = plt.figure(figsize=(12,8))
gs = fig.add_gridspec(5,4)
f_ax1 = fig.add_subplot(gs[0:2,0]) #for 5-image input
f_ax2 = fig.add_subplot(gs[0:2,1]) #for single image
f_axa = fig.add_subplot(gs[0:2,2]) #for direction selectivity
f_ax3 = fig.add_subplot(gs[2,0:2]) #display executed/proposed actions
f_ax3a = fig.add_subplot(gs[2,2:4]) #display
f_ax4 = fig.add_subplot(gs[3,0:2])
f_ax4a = fig.add_subplot(gs[3,2:4])
f_ax5 = fig.add_subplot(gs[4,0])
f_ax5a = fig.add_subplot(gs[4,1])
f_ax5b = fig.add_subplot(gs[4,2])
#f_ax5c = fig.add_subplot(gs[4,3])
cbaxes = fig.add_axes([0.75, 0.62, 0.01, 0.24])
tinds = 0
maxtstr = len(str(100000))
cumRewardActions = []
cumPunishingActions = []
#while cNB_episodes<totalNB_episodes: #play game while number of current episode is less than total number of episodes.
while tinds<50000:
  print('Actions executed:',tinds)
  actions = []
  for _ in range(5): # choose 5 actions and pass them to playGame
    action = possibleactions[random.randint(0,2)] # pick a random action from 1, 3 and 4
    actions.append(action)
  rewards, epCount, InputImages, last_obs, proposed_actions, last_ball_dir, total_hits, Racket_pos, Ball_pos, Images, dirSensitiveNeurons = playGame(actions, epCount, InputImages, last_obs, last_ball_dir)
  allepisodes_totalHits = []
  allepisodes_totalMissHits = []
  allepisodes_totalPoints = []
  allepisodes_rewardingActions = []
  allepisodes_punishingActions = []
  if len(epCount)==0: # the first episode is still running
    totalHits_cepisode = np.sum(allHits) # when the racket hits the ball
    totalMissHits_cepisode = np.sum(np.where(np.array(allRewards)==-1,1,0)) # when the racket misses the ball and a point is lost
    totalPoints_cepisode = np.sum(np.where(np.array(allRewards)==1,1,0)) # when a point is scored
    A1_cepisode = np.subtract(allActions,allProposedActions)
    rewardingActions_cepisode = np.sum(np.where(A1_cepisode==0,1,0))
    punishingActions_cepisode = np.sum(np.where((A1_cepisode>0) | (A1_cepisode<0),1,0))
    allepisodes_totalHits.append(totalHits_cepisode)
    allepisodes_totalMissHits.append(totalMissHits_cepisode)
    allepisodes_totalPoints.append(totalPoints_cepisode)
    allepisodes_rewardingActions.append(rewardingActions_cepisode)
    allepisodes_punishingActions.append(punishingActions_cepisode)
  else:
    nbEpisodes = len(epCount)
    beg_ep_action = 0
    for ep in range(nbEpisodes):
      cEpActions = epCount[ep]
      end_ep_action = beg_ep_action + cEpActions - 1
      totalHits_cepisode = np.sum(allHits[beg_ep_action:end_ep_action]) # when the racket hits the ball
      totalMissHits_cepisode = np.sum(np.where(np.array(allRewards[beg_ep_action:end_ep_action])==-1,1,0)) # when the racket misses the ball and a point is lost
      totalPoints_cepisode = np.sum(np.where(np.array(allRewards[beg_ep_action:end_ep_action])==1,1,0)) # when a point is scored
      A1_cepisode = np.subtract(allActions[beg_ep_action:end_ep_action],allProposedActions[beg_ep_action:end_ep_action])
      beg_ep_action = end_ep_action + 1
      rewardingActions_cepisode = np.sum(np.where(A1_cepisode==0,1,0))
      punishingActions_cepisode = np.sum(np.where((A1_cepisode>0) | (A1_cepisode<0),1,0))
      allepisodes_totalHits.append(totalHits_cepisode)
      allepisodes_totalMissHits.append(totalMissHits_cepisode)
      allepisodes_totalPoints.append(totalPoints_cepisode)
      allepisodes_rewardingActions.append(rewardingActions_cepisode)
      allepisodes_punishingActions.append(punishingActions_cepisode)
    if len(allHits)>end_ep_action: # stats for the partial episode after the last complete one
      totalHits_cepisode = np.sum(allHits[beg_ep_action:len(allHits)])
      totalMissHits_cepisode = np.sum(np.where(np.array(allRewards[beg_ep_action:len(allRewards)])==-1,1,0))
      totalPoints_cepisode = np.sum(np.where(np.array(allRewards[beg_ep_action:len(allRewards)])==1,1,0))
      A1_cepisode = np.subtract(allActions[beg_ep_action:len(allActions)],allProposedActions[beg_ep_action:len(allActions)])
      rewardingActions_cepisode = np.sum(np.where(A1_cepisode==0,1,0))
      punishingActions_cepisode = np.sum(np.where((A1_cepisode>0) | (A1_cepisode<0),1,0))
      allepisodes_totalHits.append(totalHits_cepisode)
      allepisodes_totalMissHits.append(totalMissHits_cepisode)
      allepisodes_totalPoints.append(totalPoints_cepisode)
      allepisodes_rewardingActions.append(rewardingActions_cepisode)
      allepisodes_punishingActions.append(punishingActions_cepisode)
  f_ax5.cla()
  f_ax5.plot(allepisodes_rewardingActions,'o-',markersize=5,markerfacecolor='r',markeredgecolor='r')
  f_ax5.plot(allepisodes_punishingActions,'s-',markersize=5,markerfacecolor='b',markeredgecolor='b')
  f_ax5.set_ylabel('Follow|Not')
  f_ax5.set_xlabel('episodes')
  f_ax5.legend(('Follow','Not Follow'),loc='upper right')
  f_ax5a.cla()
  f_ax5a.plot(allepisodes_totalHits,'o',markersize=5,markerfacecolor='r',markeredgecolor='r')
  f_ax5a.plot(allepisodes_totalMissHits,'s',markersize=3,markerfacecolor='k',markeredgecolor='k')
  f_ax5a.set_xlabel('episodes')
  f_ax5a.legend(('Hits','Miss'),loc='upper right')
  f_ax5b.cla()
  f_ax5b.plot(allepisodes_totalPoints,'o-',markersize=6,markerfacecolor='g',markeredgecolor='g')
  f_ax5b.set_xlabel('episodes')
  f_ax5b.legend(('Scores',),loc='upper right') # trailing comma makes this a tuple; a bare string would be iterated character by character
  #f_ax5a.set_ylabel('Hits')
  for action in actions:
    allActions.append(action)
  for pactions in proposed_actions: # also record proposed actions
    allProposedActions.append(pactions)
  for reward in rewards:
    allRewards.append(reward)
  for hits in total_hits:
    allHits.append(hits)
  f_ax1.cla()
  f_ax1.imshow(InputImages[-1])
  f_ax1.set_title('Input Images [t-5,t]')
  f_axa.cla()
  fa = f_axa.imshow(dirSensitiveNeurons,origin='upper',vmin=0,vmax=359,cmap='Dark2')
  f_axa.set_xlim((-0.5,9.5))
  f_axa.set_ylim((9.5,-0.5))
  f_axa.set_xticks(ticks=[0,2,4,6,8])
  #f_axa.set_yticks(ticks=[0,2,4,6,8])
  f_axa.set_title('direction angles [t-5,t]')
  c1 = plt.colorbar(fa,cax=cbaxes)
  c1.set_ticks([22,67,112,157,202,247,292,337])
  c1.set_ticklabels(['E','NE','N','NW','W','SW','S','SE'])
  cumHits = np.cumsum(allHits) # cumulative hits evolving with time
  missHits = np.where(np.array(allRewards)==-1,1,0) # if a reward is -1 replace it with 1, else with 0
  cumMissHits = np.cumsum(missHits)
  A1 = np.subtract(allActions,allProposedActions)
  tpnts = range(5,len(A1)+5,5)
  rewardingActions = np.sum(np.where(A1==0,1,0))
  punishingActions = np.sum(np.where((A1>0) | (A1<0),1,0))
  totalActs = rewardingActions + punishingActions
  print('Total Actions',totalActs)
  cumRewardActions.append(rewardingActions/totalActs)
  cumPunishingActions.append(punishingActions/totalActs)
  f_ax3.plot(allActions,linestyle="None",marker=2,markersize=6,markerfacecolor="None",markeredgecolor='r')
  f_ax3.plot(allProposedActions,linestyle="None",marker=3,markersize=6,markerfacecolor="None",markeredgecolor='b')
  f_ax3.set_yticks(ticks=[1,3,4])
  f_ax3.set_yticklabels(labels=['No action','Down','Up'])
  f_ax3.set_ylim((0.5,4.5))
  f_ax3.legend(('Executed','Proposed'),loc='upper left')
  f_ax3a.cla()
  f_ax3a.plot(tpnts,np.array(cumRewardActions),'o-',markersize=5,markerfacecolor='r',markeredgecolor='r')
  f_ax3a.plot(tpnts,np.array(cumPunishingActions),'s-',markersize=5,markerfacecolor='b',markeredgecolor='b')
  f_ax3a.legend(('Rewarding actions','Punishing Actions'),loc='upper left')
  f_ax4.cla()
  f_ax4.plot(allRewards,'o-',markerfacecolor="None",markeredgecolor='g')
  f_ax4.legend(('Rewards',))
  f_ax4a.cla()
  f_ax4a.plot(cumHits,marker='o',markersize=5,markerfacecolor='r',markeredgecolor='r')
  f_ax4a.plot(cumMissHits,marker='s',markersize=3,markerfacecolor='k',markeredgecolor='k')
  f_ax4a.legend(('Cumm. Hits','Cumm. Miss'),loc='upper left')
  #plt.pause(1)
  f_ax2.cla()
  for nbi in range(np.shape(Racket_pos)[0]):
    f_ax2.imshow(Images[nbi])
    if Ball_pos[nbi][0]>18: # to account for the offset of the court
      f_ax2.plot(Racket_pos[nbi][0],Racket_pos[nbi][1],'o',markersize=5,markerfacecolor="None",markeredgecolor='r')
      f_ax2.plot(Ball_pos[nbi][0],Ball_pos[nbi][1],'o',markersize=5,markerfacecolor="None",markeredgecolor='b')
  f_ax2.set_title('last obs')
  #plt.pause(0.1)
  ctstrl = len(str(tinds))
  tpre = ''
  for ttt in range(maxtstr-ctstrl): # zero-pad the frame index so the saved filenames sort correctly
    tpre = tpre+'0'
  fn = tpre+str(tinds)+'.png'
  fnimg = '/tmp/'+fn
  plt.savefig(fnimg)
  #lfnimage.append(fnimg)
  tinds = tinds+1
anim.savemp4('/tmp/*.png','data/randGameBehavior.mp4',10)
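# ---------------------------------------------------------------------------
# Illustrative sketch (not called above): the hit-detection heuristic playGame
# applies. A hit is inferred when the ball's x direction flips from towards
# the racket (+1) to away from it (-1) without a point being lost. The helper
# name is hypothetical.
def _sketch_ball_hit (last_ball_dir, current_ball_dir, reward):
  """Return 1 if the direction reversal implies the racket hit the ball."""
  if last_ball_dir==0 or current_ball_dir==0:
    return 0 # direction unknown, so assume no hit (a weak assumption)
  if last_ball_dir==1 and current_ball_dir==-1 and reward==0:
    return 1 # the ball bounced back without a point being lost
  return 0
# ---------------------------------------------------------------------------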