Product Features
...
Machine Learning Models
Model Types
Machine Learning Classification
1min
Machine learning classification uses models created in TensorFlow; see Upload a Model (docid: 2yyfqysfpmvkg6z8bb2xb) for more information. The TensorFlow Images processor feeds images to an already created TensorFlow model. The TensorFlow processor feeds time-series data to an already created TensorFlow model.

This use case is a customized classification version of a CNN classification model from the TensorFlow website:

```python
# cnn model
from numpy import mean
from numpy import std
from numpy import dstack
from pandas import read_csv
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

# load a single file as a numpy array
def load_file(filepath):
    dataframe = read_csv(filepath, header=None, delim_whitespace=True)
    return dataframe.values

# load a list of files and return as a 3d numpy array
def load_group(filenames, prefix=''):
    loaded = list()
    for name in filenames:
        data = load_file(prefix + name)
        loaded.append(data)
    # stack group so that features are the 3rd dimension
    loaded = dstack(loaded)
    return loaded

# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
    filepath = prefix + group + '/Inertial Signals/'
    # load all 9 files as a single array
    filenames = list()
    # total acceleration
    filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
    # body acceleration
    filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
    # body gyroscope
    filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
    # load input data
    X = load_group(filenames, filepath)
    # load class output
    y = load_file(prefix + group + '/y_' + group + '.txt')
    return X, y

# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
    # load all train
    trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/')
    print(trainX.shape, trainy.shape)
    # load all test
    testX, testy = load_dataset_group('test', prefix + 'HARDataset/')
    print(testX.shape, testy.shape)
    # zero-offset class values
    trainy = trainy - 1
    testy = testy - 1
    # one hot encode y
    trainy = tf.keras.utils.to_categorical(trainy)
    testy = tf.keras.utils.to_categorical(testy)
    print(trainX.shape, trainy.shape, testX.shape, testy.shape)
    return trainX, trainy, testX, testy

# standardize data
def scale_data(trainX, testX, standardize):
    # remove overlap
    cut = int(trainX.shape[1] / 2)
    longX = trainX[:, -cut:, :]
    # flatten windows
    longX = longX.reshape((longX.shape[0] * longX.shape[1], longX.shape[2]))
    # flatten train and test
    flatTrainX = trainX.reshape((trainX.shape[0] * trainX.shape[1], trainX.shape[2]))
    flatTestX = testX.reshape((testX.shape[0] * testX.shape[1], testX.shape[2]))
    # standardize
    if standardize:
        s = StandardScaler()
        # fit on training data
        s.fit(longX)
        # apply to training and test data
        longX = s.transform(longX)
        flatTrainX = s.transform(flatTrainX)
        flatTestX = s.transform(flatTestX)
    # reshape
    flatTrainX = flatTrainX.reshape((trainX.shape))
    flatTestX = flatTestX.reshape((testX.shape))
    return flatTrainX, flatTestX

# fit and evaluate a model
def evaluate_model(trainX, trainy, testX, testy, param, n_filters, kernal_size):
    verbose, epochs, batch_size = 0, 10, 32
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    # scale data
    trainX, testX = scale_data(trainX, testX, param)
    model = keras.Sequential()
    model.add(tf.keras.layers.Conv1D(filters=n_filters, kernel_size=kernal_size, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(tf.keras.layers.Conv1D(filters=n_filters, kernel_size=kernal_size, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(100, activation='relu'))
    model.add(tf.keras.layers.Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # evaluate model
    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy, model

# summarize scores
def summarize_results(scores, params):
    print(scores, params)
    # summarize mean and standard deviation
    for i in range(len(scores)):
        m, s = mean(scores[i]), std(scores[i])
        print('Param=%s: %.3f%% (+/-%.3f)' % (params[i], m, s))
    # boxplot of scores
    # plt.boxplot(scores, labels=params)
    # plt.savefig('exp_cnn_standardize.png')

# run an experiment
def run_experiment(params, repeats=1):
    # load data
    trainX, trainy, testX, testy = load_dataset()
    # test each parameter
    all_scores = list()
    for p in params:
        # repeat experiment
        scores = list()
        model = keras.Sequential()
        for r in range(repeats):
            score, model = evaluate_model(trainX, trainy, testX, testy, p, n_filters=64, kernal_size=3)
            score = score * 100.0
            model.summary()
            # if p:
            #     model.save("/motionmodel/")
            yy = model.predict(trainX)
            print(np.round(yy, 3))
            print(testy)
            print('>p=%s #%d: %.3f' % (p, r + 1, score))
            scores.append(score)
        all_scores.append(scores)
    # summarize results
    summarize_results(all_scores, params)

### run the experiment
n_params = [False, True]
run_experiment(n_params)
# plt.show()
```