├── model_lstm.jpg
├── matlab_m
│   ├── classfile.m
│   ├── demo.m
│   ├── savetomat.m
│   ├── read_skeleton_file.m
│   ├── show_skeleton_on_RGB_frames.m
│   ├── show_skeleton_on_IR_frames.m
│   └── show_skeleton_on_depthmaps.m
├── lstm_py
│   ├── mtop1.py
│   ├── evaluate.py
│   └── main.py
├── README.md
└── keras
    └── main.py
/model_lstm.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mingsjtu/Human-Action-Recognition-from-Skeleton-Data/HEAD/model_lstm.jpg
--------------------------------------------------------------------------------
/matlab_m/classfile.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mingsjtu/Human-Action-Recognition-from-Skeleton-Data/HEAD/matlab_m/classfile.m
--------------------------------------------------------------------------------
/matlab_m/demo.m:
--------------------------------------------------------------------------------
1 | clear;clc;
2 | for t=60:61
3 | fileFolder=['D:\research\ntuRGB\ske_f\',num2str(t),'\'];
4 | dirOutput=dir(fullfile(fileFolder,'*.skeleton'));
5 | savepath=['D:\research\ntuRGB\mat_f\',num2str(t),'\'];
6 | fileNames={dirOutput.name};
7 | LengthFiles = length(fileNames)
8 | for n = 864:LengthFiles;
9 | % if(exist([savepath,num2str(n),'-points.mat'],'file') ==2)
10 | % continue;
11 | % else
12 | fileName=char(fileNames(n))
13 | savetomat(fileFolder,fileName,n,savepath);
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/matlab_m/savetomat.m:
--------------------------------------------------------------------------------
1 | function savetomat(myfilefolder,filename,n,savepath)
2 |
3 | body1 = read_skeleton_file([myfilefolder,filename])
4 | num=length(body1)
5 | %%fprintf('x: %f \n',body1(1).bodies.joints(1).x)
6 | % global x
7 | % global y
8 | % % global z
9 | % body1.bodies(1).joints.x==(num,25)
10 | x=zeros(num,25);
11 | for i=1:num
12 | for j=1:25
13 | x(i,j)=body1(i).bodies(1).joints(j).x;
14 | end
15 | end
16 |
17 | y=zeros(num,25);
18 |
19 | for i=1:num
20 | for j=1:25
21 | y(i,j)=body1(i).bodies(1).joints(j).y;
22 | end
23 | end
24 | z=zeros(num,25);
25 | for i=1:num
26 | for j=1:25
27 | z(i,j)=body1(i).bodies(1).joints(j).z;
28 | end
29 | end
30 | filename
31 | myend=findstr(filename,'.')
32 | myend=myend(1)
33 | chr=[savepath,filename(1:myend-1),'.mat']
34 | chr
35 | save ( chr,'x','y','z');
36 | end
--------------------------------------------------------------------------------
/lstm_py/mtop1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.io
3 | from PIL import Image
4 | import cv2
5 | import os,os.path,shutil
6 | import re
7 |
8 | from scipy.interpolate import interp1d
9 |
10 | ## saved file size (3,60,25)
11 | timestep_size=60
12 |
13 |
14 | def find_martrix_min_value(data_matrix):
15 |     '''
16 |     Function: find the minimum value of the matrix
17 |     '''
18 |     new_data=[]
19 |     for i in range(len(data_matrix)):
20 |         new_data.append(min(data_matrix[i]))
21 |     print('data_matrix minimum value:', min(new_data))
22 |     return min(new_data)
23 |
24 |
25 | def find_martrix_max_value(data_matrix):
26 |     '''
27 |     Function: find the maximum value of the matrix
28 |     '''
29 |     new_data=[]
30 |     for i in range(len(data_matrix)):
31 |         new_data.append(max(data_matrix[i]))
32 |     print('data_matrix maximum value:', max(new_data))
33 |     return max(new_data)
34 | # get the positions of all joints
35 | # transform a .mat file (all joints) into the wanted points relative to a reference joint and save as .npy
36 | def mtop(filename, savepath):
37 |     point = scipy.io.loadmat(filename)  # read the .mat file
38 |     #point=np.load("whole1.npz")
39 |     wx=point['x']  ## x coordinates of all joints
40 |     wy=point['y']
41 |     wz=point['z']
42 |     w=np.vstack((wx,wy,wz)).reshape(3,-1,25)  # shape (3, num_frames, 25)
43 |     center=w[:,:,0]
44 |     center=center.repeat(25)
45 |     center=center.reshape(3,-1,25)
46 |     #print(center)
47 |     w=w-center
48 |     if w.shape[1]>60 :
49 |         file_new=filename[filename.find('S'):filename.find('.mat')]
50 |         #print(file_new)
51 |         np.save(savepath+file_new,w)
52 |
53 | def eachFile(folder):
54 |     allFile = os.listdir(folder)
55 |     fileNames = []
56 |     for file in allFile:
57 |         fullPath = os.path.join(folder, file)
58 |         fileNames.append(fullPath)
59 |     return fileNames
60 |
61 |
62 | # main part
63 | for i in range(60,61):
64 |     srcFolder='./mat_f/'+str(i)
65 |     savepath='./CV_40/'
66 |     fileNames =eachFile(srcFolder)
67 |     for fileName in fileNames:
68 |         print(fileName)
69 |         #print(int(fileName.find('C')))
70 |         if(int(fileName[fileName.find('C')+1:fileName.find('C')+4])==1):
71 |             savepath='./CV_40/test/'
72 |         else:
73 |             savepath='./CV_40/train/'
74 |         mtop(fileName,savepath)
75 |
76 | srcFolder='./CV/train'
77 | fileNames =eachFile(srcFolder)
78 | trainpath='./CS/train/'
79 | testpath='./CS/test/'
80 | for fileName in fileNames:
81 |     subject=int(fileName[fileName.find('S')+1:fileName.find('S')+4])
82 |     a=[1, 2, 4, 5, 8, 9, 13, 14, 15,
83 |        16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38]
84 |     if subject in a:
85 |         newname=trainpath+fileName[fileName.find('S'):]
86 |
87 |     else:
88 |         newname=testpath+fileName[fileName.find('S'):]
89 |     shutil.copyfile(fileName,newname)
90 |
91 |
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/matlab_m/read_skeleton_file.m:
--------------------------------------------------------------------------------
1 |
2 | function bodyinfo = read_skeleton_file(filename)
3 | % Reads a .skeleton file from the "NTU RGB+D 3D Action Recognition Dataset".
4 | %
5 | % Argument:
6 | %   filename: full address and filename of the .skeleton file.
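%   Returns: bodyinfo, a 1 x framecount struct array; bodyinfo(f).bodies(b).joints(j) holds the 3D position (x,y,z), the 2D depth/IR and RGB pixel coordinates, the orientation quaternion, and the tracking state of joint j of body b in frame f.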
7 | % 8 | % For further information please refer to: 9 | % NTU RGB+D dataset's webpage: 10 | % http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp 11 | % NTU RGB+D dataset's github page: 12 | % https://github.com/shahroudy/NTURGB-D 13 | % CVPR 2016 paper: 14 | % Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang, 15 | % "NTU RGB+D: A Large Scale Dataset for 3D Human Activity Analysis", 16 | % in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016 17 | % 18 | % For more details about the provided data, please refer to: 19 | % https://msdn.microsoft.com/en-us/library/dn799271.aspx 20 | % https://msdn.microsoft.com/en-us/library/dn782037.aspx 21 | 22 | fileid = fopen(filename); 23 | framecount = fscanf(fileid,'%d',1); % no of the recorded frames 24 | 25 | bodyinfo=[]; % to store multiple skeletons per frame 26 | 27 | for f=1:framecount 28 | bodycount = fscanf(fileid,'%d',1); % no of observerd skeletons in current frame 29 | for b=1:bodycount 30 | clear body; 31 | body.bodyID = fscanf(fileid,'%ld',1); % tracking id of the skeleton 32 | arrayint = fscanf(fileid,'%d',6); % read 6 integers 33 | body.clipedEdges = arrayint(1); 34 | body.handLeftConfidence = arrayint(2); 35 | body.handLeftState = arrayint(3); 36 | body.handRightConfidence = arrayint(4); 37 | body.handRightState = arrayint(5); 38 | body.isResticted = arrayint(6); 39 | lean = fscanf(fileid,'%f',2); 40 | body.leanX = lean(1); 41 | body.leanY = lean(2); 42 | body.trackingState = fscanf(fileid,'%d',1); 43 | 44 | body.jointCount = fscanf(fileid,'%d',1); % no of joints (25) 45 | joints=[]; 46 | for j=1:body.jointCount 47 | jointinfo = fscanf(fileid,'%f',11); 48 | joint=[]; 49 | 50 | % 3D location of the joint j 51 | joint.x = jointinfo(1); 52 | joint.y = jointinfo(2); 53 | joint.z = jointinfo(3); 54 | 55 | % 2D location of the joint j in corresponding depth/IR frame 56 | joint.depthX = jointinfo(4); 57 | joint.depthY = jointinfo(5); 58 | 59 | % 2D location of the joint j in corresponding RGB frame 60 | joint.colorX = jointinfo(6); 61 | joint.colorY = jointinfo(7); 62 | 63 | % The orientation of the joint j 64 | joint.orientationW = jointinfo(8); 65 | joint.orientationX = jointinfo(9); 66 | joint.orientationY = jointinfo(10); 67 | joint.orientationZ = jointinfo(11); 68 | 69 | % The tracking state of the joint j 70 | joint.trackingState = fscanf(fileid,'%d',1); 71 | 72 | body.joints(j)=joint; 73 | end 74 | bodyinfo(f).bodies(b)=body; 75 | end 76 | end 77 | fclose(fileid); 78 | end -------------------------------------------------------------------------------- /matlab_m/show_skeleton_on_RGB_frames.m: -------------------------------------------------------------------------------- 1 | function []=show_skeleton_on_RGB_frames(... 2 | skeletonfilename,rgbfilename,outputvideofilename) 3 | % Draws the skeleton data on RGB frames. 4 | % 5 | % Argrument: 6 | % skeletonfilename: full adress and filename of the .skeleton file. 7 | % rgbfilename: corresponding RGB video file 8 | % outputvideofilename (optional): the filename for output video file. 
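%   Example call (file names are hypothetical): show_skeleton_on_RGB_frames('S001C001P001R001A001.skeleton','S001C001P001R001A001_rgb.avi','S001C001P001R001A001_skeleton.avi')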
9 | % 10 | % For further information please refer to: 11 | % NTU RGB+D dataset's webpage: 12 | % http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp 13 | % NTU RGB+D dataset's github page: 14 | % https://github.com/shahroudy/NTURGB-D 15 | % CVPR 2016 paper: 16 | % Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang, 17 | % "NTU RGB+D: A Large Scale Dataset for 3D Human Activity Analysis", 18 | % in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016 19 | 20 | bodyinfo = read_skeleton_file(skeletonfilename); 21 | 22 | videofile = VideoReader(rgbfilename); 23 | rgbvid = read(videofile); 24 | 25 | if nargin>2 % if the output video file is given 26 | writerObj = VideoWriter(outputvideofilename); 27 | open(writerObj); 28 | end 29 | 30 | % in the skeleton structure, each joint is connected to some other joint: 31 | connecting_joint = ... 32 | [2 1 21 3 21 5 6 7 21 9 10 11 1 13 14 15 1 17 18 19 2 8 8 12 12]; 33 | 34 | % reapeat this for every frame 35 | for f=1:numel(bodyinfo) 36 | try 37 | imrgb = rgbvid(:,:,:,f); 38 | 39 | % for all the detected skeletons in the current frame: 40 | for b=1:numel(bodyinfo(f).bodies) 41 | % for all the 25 joints within each skeleton: 42 | for j=1:25 43 | try 44 | % use red color for drawing joint connections 45 | rv=255; 46 | gv=0; 47 | bv=0; 48 | 49 | k = connecting_joint(j); 50 | 51 | joint = bodyinfo(f).bodies(b).joints(j); 52 | dx = joint.colorX; 53 | dy = joint.colorY; 54 | joint2 = bodyinfo(f).bodies(b).joints(k); 55 | dx2 = joint2.colorX; 56 | dy2 = joint2.colorY; 57 | 58 | xdist=abs(dx-dx2); 59 | ydist=abs(dy-dy2); 60 | 61 | % locate the pixels of the connecting line between the 62 | % two joints 63 | if xdist>ydist 64 | xrange = [dx:sign(dx2-dx):dx2]; 65 | yrange = [dy:sign(dy2-dy)*abs((dy2-dy)/(dx2-dx)):dy2]; 66 | else 67 | yrange = [dy:sign(dy2-dy):dy2]; 68 | xrange = [dx:sign(dx2-dx)*abs((dx2-dx)/(dy2-dy)):dx2]; 69 | end 70 | % draw the line! 71 | for i=1:numel(xrange) 72 | dx = int32(round(xrange(i))); 73 | dy = int32(round(yrange(i))); 74 | imrgb(dy-3:dy+3,dx-3:dx+3,1)=rv; 75 | imrgb(dy-3:dy+3,dx-3:dx+3,2)=gv; 76 | imrgb(dy-3:dy+3,dx-3:dx+3,3)=bv; 77 | end 78 | 79 | joint = bodyinfo(f).bodies(b).joints(j); 80 | dx = int32(round(joint.colorX)); 81 | dy = int32(round(joint.colorY)); 82 | 83 | % use green color to draw joints 84 | rv=0; 85 | gv=255; 86 | bv=0; 87 | imrgb(dy-7:dy+7,dx-7:dx+7,1)=rv; 88 | imrgb(dy-7:dy+7,dx-7:dx+7,2)=gv; 89 | imrgb(dy-7:dy+7,dx-7:dx+7,3)=bv; 90 | catch err1 91 | disp(err1); 92 | end 93 | end 94 | end 95 | imrgb = imrgb(1:1080,1:1920,:); 96 | imshow(imrgb); 97 | writeVideo(writerObj,imrgb); 98 | pause(0.001); 99 | catch err2 100 | disp(err2); 101 | end 102 | end 103 | close(writerObj); 104 | end 105 | -------------------------------------------------------------------------------- /matlab_m/show_skeleton_on_IR_frames.m: -------------------------------------------------------------------------------- 1 | function []=show_skeleton_on_IR_frames(... 2 | skeletonfilename,irfilename,outputvideofilename) 3 | % Draws the skeleton data on IR frames. 4 | % 5 | % Argrument: 6 | % skeletonfilename: full adress and filename of the .skeleton file. 7 | % irfilename: corresponding IR video file 8 | % outputvideofilename (optional): the filename for output video file. 
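%   Note: joints are drawn at their depthX/depthY coordinates, since the IR frames share the depth camera's 512x424 resolution (see the crop to imir(1:424,1:512,:) below).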
9 | % 10 | % For further information please refer to: 11 | % NTU RGB+D dataset's webpage: 12 | % http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp 13 | % NTU RGB+D dataset's github page: 14 | % https://github.com/shahroudy/NTURGB-D 15 | % CVPR 2016 paper: 16 | % Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang, 17 | % "NTU RGB+D: A Large Scale Dataset for 3D Human Activity Analysis", 18 | % in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016 19 | 20 | bodyinfo = read_skeleton_file(skeletonfilename); 21 | 22 | videofile = VideoReader(irfilename); 23 | irvid = read(videofile); 24 | 25 | if nargin>2 % if the output video file is given 26 | writerObj = VideoWriter(outputvideofilename); 27 | open(writerObj); 28 | end 29 | 30 | % in the skeleton structure, each joint is connected to some other joint: 31 | connecting_joint = ... 32 | [2 1 21 3 21 5 6 7 21 9 10 11 1 13 14 15 1 17 18 19 2 8 8 12 12]; 33 | 34 | % reapeat this for every frame 35 | for f=1:numel(bodyinfo) 36 | try 37 | imir = irvid(:,:,:,f); 38 | 39 | % for all the detected skeletons in the current frame: 40 | for b=1:numel(bodyinfo(f).bodies) 41 | % for all the 25 joints within each skeleton: 42 | for j=1:25 43 | try 44 | % use red color for drawing joint connections 45 | rv=255; 46 | gv=0; 47 | bv=0; 48 | 49 | k = connecting_joint(j); 50 | 51 | joint = bodyinfo(f).bodies(b).joints(j); 52 | dx = joint.depthX; 53 | dy = joint.depthY; 54 | joint2 = bodyinfo(f).bodies(b).joints(k); 55 | dx2 = joint2.depthX; 56 | dy2 = joint2.depthY; 57 | 58 | xdist=abs(dx-dx2); 59 | ydist=abs(dy-dy2); 60 | 61 | % locate the pixels of the connecting line between the 62 | % two joints 63 | if xdist>ydist 64 | xrange = [dx:sign(dx2-dx):dx2]; 65 | yrange = [dy:sign(dy2-dy)*abs((dy2-dy)/(dx2-dx)):dy2]; 66 | else 67 | yrange = [dy:sign(dy2-dy):dy2]; 68 | xrange = [dx:sign(dx2-dx)*abs((dx2-dx)/(dy2-dy)):dx2]; 69 | end 70 | % draw the line! 71 | for i=1:numel(xrange) 72 | dx = int32(round(xrange(i))); 73 | dy = int32(round(yrange(i))); 74 | imir(dy-3:dy+3,dx-3:dx+3,1)=rv; 75 | imir(dy-3:dy+3,dx-3:dx+3,2)=gv; 76 | imir(dy-3:dy+3,dx-3:dx+3,3)=bv; 77 | end 78 | 79 | joint = bodyinfo(f).bodies(b).joints(j); 80 | dx = int32(round(joint.depthX)); 81 | dy = int32(round(joint.depthY)); 82 | 83 | % use green color to draw joints 84 | rv=0; 85 | gv=255; 86 | bv=0; 87 | imir(dy-3:dy+3,dx-3:dx+3,1)=rv; 88 | imir(dy-3:dy+3,dx-3:dx+3,2)=gv; 89 | imir(dy-3:dy+3,dx-3:dx+3,3)=bv; 90 | catch err1 91 | disp(err1); 92 | end 93 | end 94 | end 95 | imir = imir(1:424,1:512,:); 96 | imshow(imir); 97 | if nargin>2 98 | writeVideo(writerObj,imir); 99 | end 100 | pause(0.003); 101 | catch err2 102 | disp(err2); 103 | end 104 | end 105 | 106 | if nargin>2 107 | close(writerObj); 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /matlab_m/show_skeleton_on_depthmaps.m: -------------------------------------------------------------------------------- 1 | function []=show_skeleton_on_depthmaps(... 2 | skeletonfilename,depthmapsfolder,outputvideofilename) 3 | % Draws the skeleton data on depthmaps. 4 | % 5 | % Argrument: 6 | % skeletonfilename: full adress and filename of the .skeleton file. 7 | % depthmapsfolder: corresponding depthmaps folder (full or masked) 8 | % outputvideofilename (optional): the filename for output video file. 
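%   Depth frames are expected to be named Depth-00000001.png, Depth-00000002.png, ... inside depthmapsfolder (see the sprintf('Depth-%08d.png',f) call below).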
9 | % 10 | % For further information please refer to: 11 | % NTU RGB+D dataset's webpage: 12 | % http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp 13 | % NTU RGB+D dataset's github page: 14 | % https://github.com/shahroudy/NTURGB-D 15 | % CVPR 2016 paper: 16 | % Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang, 17 | % "NTU RGB+D: A Large Scale Dataset for 3D Human Activity Analysis", 18 | % in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016 19 | 20 | bodyinfo = read_skeleton_file(skeletonfilename); 21 | 22 | if nargin>2 % if the output video file is given 23 | writerObj = VideoWriter(outputvideofilename); 24 | open(writerObj); 25 | end 26 | 27 | % in the skeleton structure, each joint is connected to some other joint: 28 | connecting_joint = ... 29 | [2 1 21 3 21 5 6 7 21 9 10 11 1 13 14 15 1 17 18 19 2 8 8 12 12]; 30 | 31 | % reapeat this for every frame 32 | for f=1:numel(bodyinfo) 33 | try 34 | imdepth = imread(fullfile(depthmapsfolder,sprintf('Depth-%08d.png',f))); 35 | % depth maps are in millimeters we need to scale them to [0,255] 36 | % for visualization: 37 | imdepth = double(imdepth)/5000; 38 | imdepth(imdepth>1)=1; 39 | imdepth = uint8(255*imdepth); 40 | 41 | % make it a 3-channel color image... 42 | depthframe = repmat(imdepth,1,1,3); 43 | 44 | % for all the detected skeletons in the current frame: 45 | for b=1:numel(bodyinfo(f).bodies) 46 | % for all the 25 joints within each skeleton: 47 | for j=1:25 48 | try 49 | % use red color for drawing joint connections 50 | rv=255; 51 | gv=0; 52 | bv=0; 53 | 54 | k = connecting_joint(j); 55 | 56 | joint = bodyinfo(f).bodies(b).joints(j); 57 | dx = joint.depthX; 58 | dy = joint.depthY; 59 | joint2 = bodyinfo(f).bodies(b).joints(k); 60 | dx2 = joint2.depthX; 61 | dy2 = joint2.depthY; 62 | 63 | xdist=abs(dx-dx2); 64 | ydist=abs(dy-dy2); 65 | 66 | % locate the pixels of the connecting line between the 67 | % two joints 68 | if xdist>ydist 69 | xrange = [dx:sign(dx2-dx):dx2]; 70 | yrange = [dy:sign(dy2-dy)*abs((dy2-dy)/(dx2-dx)):dy2]; 71 | else 72 | yrange = [dy:sign(dy2-dy):dy2]; 73 | xrange = [dx:sign(dx2-dx)*abs((dx2-dx)/(dy2-dy)):dx2]; 74 | end 75 | % draw the line! 76 | for i=1:numel(xrange) 77 | dx = int32(round(xrange(i))); 78 | dy = int32(round(yrange(i))); 79 | depthframe(dy-1:dy+1,dx-1:dx+1,1)=rv; 80 | depthframe(dy-1:dy+1,dx-1:dx+1,2)=gv; 81 | depthframe(dy-1:dy+1,dx-1:dx+1,3)=bv; 82 | end 83 | 84 | joint = bodyinfo(f).bodies(b).joints(j); 85 | dx = int32(round(joint.depthX)); 86 | dy = int32(round(joint.depthY)); 87 | 88 | % use green color to draw joints 89 | rv=0; 90 | gv=255; 91 | bv=0; 92 | depthframe(dy-3:dy+3,dx-3:dx+3,1)=rv; 93 | depthframe(dy-3:dy+3,dx-3:dx+3,2)=gv; 94 | depthframe(dy-3:dy+3,dx-3:dx+3,3)=bv; 95 | catch err1 96 | disp(err1); 97 | end 98 | end 99 | end 100 | 101 | % cut the extra boundaries if any! 
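% (the depth frames are 512x424 pixels, so keep only rows 1:424 and columns 1:512)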
102 | depthframe = depthframe(1:424,1:512,:);
103 |
104 | imshow(depthframe);
105 | if nargin>2
106 | writeVideo(writerObj,depthframe);
107 | end
108 | pause(0.01);
109 | catch err2
110 | disp(err2);
111 | end
112 | end
113 |
114 | if nargin>2
115 | close(writerObj);
116 | end
117 | end
118 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Human-Action-Recognition-from-Skeleton-Data
2 |
3 | A simple but high-accuracy LSTM for human action recognition
4 |
5 | # Code structure
6 |
7 | - [`matlab_m/`](matlab_m/): converts the given dataset (NTU RGB+D) to the format you need, from "*.skeleton" to "*.mat"
8 |
9 |   - [`demo.m`](matlab_m/demo.m): an example of transforming the dataset with the given functions. You should set "fileFolder", "dirOutput", and "savepath" to your own paths.
10 |   - [`classfile.m`](matlab_m/classfile.m): divides the "*.mat" files according to their class.
11 |   - [`read_skeleton_file.m`](matlab_m/read_skeleton_file.m): a function to read the skeleton files (provided by the NTU RGB+D dataset)
12 |   - [`savetomat.m`](matlab_m/savetomat.m): a function to save the skeleton data from the skeleton files to .mat files
13 |   - [`show_skeleton_on_depthmaps.m`](matlab_m/show_skeleton_on_depthmaps.m): a function to draw the skeleton on the depth maps (thanks to the NTU RGB+D dataset)
14 |   - [`show_skeleton_on_IR_frames.m`](matlab_m/show_skeleton_on_IR_frames.m): a function to draw the skeleton on the IR frames (thanks to the NTU RGB+D dataset)
15 |   - [`show_skeleton_on_RGB_frames.m`](matlab_m/show_skeleton_on_RGB_frames.m): a function to draw the skeleton on the RGB frames (thanks to the NTU RGB+D dataset)
16 | - [`lstm_py/`](lstm_py/): the training and test Python scripts using the TensorFlow library.
17 |
18 |   - [`main.py`](lstm_py/main.py): the training script using TensorFlow.
19 |   - [`evaluate.py`](lstm_py/evaluate.py): the test script to evaluate your model's performance.
20 |   - [`mtop1.py`](lstm_py/mtop1.py): transforms the skeleton files from "*.mat" to "*.npy" for the Python scripts. You can also use it to separate the train and test sets.
21 |   - [`model_lstm/`](lstm_py/model_lstm): a pre-trained LSTM model.
22 | - [`keras/`](keras): the training and test script using the Keras library.
23 |
24 |   - [`main.py`](keras/main.py): the training and test script using Keras. You should set "train_file" and "test_file" to your own paths.
25 |
26 | # Requirements
27 |
28 | - The code has only been tested on Linux (Ubuntu 16.04)
29 |
30 | - Python 3 (Anaconda 3.6.3 specifically) with numpy and matplotlib
31 |
32 | - TensorFlow
33 |
34 | - Keras
35 |
36 | - MATLAB
37 |
38 | # Model structure
39 |
40 | ![model](model_lstm.jpg)
41 |
42 | # To prepare the data given by NTU RGB+D
43 |
44 | ## Using MATLAB (from "*.skeleton" to "*.mat")
45 |
46 | In file [`demo.m`](matlab_m/demo.m)
47 |
48 | ```matlab
49 | fileFolder=['D:\research\ntuRGB\ske_f\',num2str(t),'\'];  % use your own dataset path
50 | savepath=['D:\research\ntuRGB\mat_f\',num2str(t),'\'];  % use your own save path
51 | ```
52 |
53 | In file [`classfile.m`](matlab_m/classfile.m)
54 |
55 | ```matlab
56 | SOURCE_PATH_t =[ 'D:\research\ntuRGB\mat_f\',num2str(i),'\'];  % use your own "*.mat" files path
57 | DST_PATH_t1 = [ 'D:\research\ntuRGB\mat_f\',num2str(i),'\test'];  % use your own test set save path
58 | DST_PATH_t2 = [ 'D:\research\ntuRGB\mat_f\',num2str(i),'\train'];  % use your own train set save path
59 | ```
60 |
61 | ```bash
62 | matlab -nodisplay -r "demo; exit"
63 | matlab -nodisplay -r "classfile; exit"
64 | ```
65 |
66 | ## Using Python (from "*.mat" to "*.npy")
67 |
68 | In file [`mtop1.py`](lstm_py/mtop1.py)
69 |
70 | ```python
71 | trainpath='./CS/train/'  # set your train data folder here ("*.mat" files)
72 | testpath='./CS/test/'  # set your test data folder here ("*.mat" files)
73 | ```
74 |
75 | # Using TensorFlow
76 |
77 | ## To train
78 |
79 | In file [`main.py`](lstm_py/main.py)
80 |
81 | ```python
82 | train_file='CV_20/train'  # set your train data folder here
83 | test_file='CV_20/test'  # set your test data folder here
84 | model_file="model/my-model.meta"  # set your model file here
85 | model_path="model/"  # set your model folder here
86 | ```
87 |
88 |
89 | ```bash
90 | python lstm_py/main.py
91 | ```
92 |
93 | - You will get your own model saved in "model/".
94 |
95 | ## To test
96 |
97 | In file [`evaluate.py`](lstm_py/evaluate.py)
98 |
99 | ```python
100 | train_file='CV_20/train'  # set your train data folder here
101 | test_file='CV_20/test'  # set your test data folder here
102 | model_file="model/my-model.meta"  # set your model file here
103 | model_path="model/"  # set your model folder here
104 | ```
105 |
106 |
107 | ```bash
108 | python lstm_py/evaluate.py
109 | ```
110 |
111 | # Using Keras
112 |
113 | ## To train and test
114 |
115 | In file [`main.py`](keras/main.py)
116 |
117 | ```python
118 |
119 | train_file='CV_20/train'  # set your train data folder here
120 | test_file='CV_20/test'  # set your test data folder here
121 | model_file="model/my-model.meta"  # set your model file here
122 | model_path="model/"  # set your model folder here
123 | ```
124 |
125 | ```bash
126 | python keras/main.py
127 | ```
--------------------------------------------------------------------------------
/keras/main.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import numpy as np
3 | import os
4 | import sys
5 | import random
6 | import matplotlib.pyplot as plt
7 | from keras.preprocessing.text import Tokenizer
8 | from keras.preprocessing.sequence import pad_sequences
9 | from keras.utils.np_utils import to_categorical
10 | from
keras.models import Sequential,load_model 11 | from keras.layers import Embedding, LSTM, Dense, Activation,Dropout 12 | from keras.optimizers import SGD,Adam 13 | 14 | 15 | 16 | train_file='CS/train' 17 | test_file='CS/test' 18 | 19 | num_ske = 25 20 | hidden_size = 100 # hidden layer num 21 | layer_num = 4 # LSTM layer num 22 | class_num = 60 # last class number 23 | cell_type = "lstm" # lstm or block_lstm 24 | NUM_SKE=25 25 | batch_size =128 # tf.int32, batch_size = 128 26 | timestep_size=150 27 | learning_rate=1e-3 28 | epoch_size=1000 29 | N=10 30 | 31 | def eachFile(folder): 32 | allFile = os.listdir(folder) 33 | fileNames = [] 34 | for file in allFile: 35 | 36 | fileNames.append(file) 37 | return fileNames 38 | 39 | def getlist(filefolder): 40 | print(filefolder+' load start') 41 | fileNames=eachFile(filefolder) 42 | num=len(fileNames) 43 | #print(len(fileNames)) 44 | listp=[[]for i in range (num)] 45 | i=0 46 | classname=[]#list can only use .append 47 | 48 | for fileName in fileNames: 49 | p=np.load(filefolder+'/'+fileName) 50 | #p.astype(np.float32) 51 | listp[i].append(p) 52 | classname.append(fileName[-5]) #filename example='S001C001P001R001A001.mat' 53 | 54 | #print(listp[0][0].shape) (3,103,25) 55 | i=i+1 56 | return listp,classname 57 | 58 | def get_all(listp,classname): 59 | #print(type(listp[1])[0]) 60 | imdata=np.zeros((len(listp),timestep_size, 61 | num_ske*3 62 | ),dtype=float) 63 | currentlen=np.zeros((len(listp) 64 | )) 65 | imlabel=np.zeros((len(listp), 66 | class_num),dtype=float) 67 | j=0 68 | i=0 69 | print('len',len(listp)) 70 | for i in range (len(listp)-1): 71 | tmp=np.array(listp[i][0]) 72 | tmp=np.swapaxes(tmp,1,2)#(timestep_size,3,num_ske) 73 | tmp=np.reshape(tmp,[-1,3*num_ske])#(timestep_size,3*num_ske,) 74 | tmp_len=int(tmp.shape[0]) 75 | currentlen[i]=tmp_len 76 | if tmp_len
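
For reference, here is a minimal sketch of the stacked-LSTM classifier that the README's model-structure section refers to, built only from the hyper-parameters declared at the top of `keras/main.py` (`hidden_size=100`, `layer_num=4`, `class_num=60`, `timestep_size=150`, 25 joints x 3 coordinates per frame). The dropout rate and the exact layer arrangement are illustrative assumptions, not necessarily the author's exact network:

```python
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from keras.optimizers import Adam

def build_model(timestep_size=150, num_ske=25, hidden_size=100,
                layer_num=4, class_num=60, learning_rate=1e-3):
    # Input: one clip of shape (timestep_size, 3*num_ske), i.e. the x/y/z of
    # 25 joints per frame, zero-padded or truncated to timestep_size frames.
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=True,
                   input_shape=(timestep_size, 3 * num_ske)))
    for _ in range(layer_num - 2):
        model.add(LSTM(hidden_size, return_sequences=True))
    model.add(LSTM(hidden_size))        # last LSTM layer outputs a single vector
    model.add(Dropout(0.5))             # assumed dropout rate
    model.add(Dense(class_num, activation='softmax'))  # 60 action classes
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=['accuracy'])
    return model
```

Training would then follow the usual Keras pattern on the arrays produced by `get_all` above, e.g. `model.fit(imdata, imlabel, batch_size=batch_size, epochs=epoch_size)`.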