I’m trying to hold a persistent connection to the Particle Cloud so that my backend receives updates whenever the P1s I have in the field publish events. Everything works great at first, but after a few days the connection is lost and I stop receiving anything, even though the server is still functioning normally. I tried to implement this example, but it is still failing:
https://github.com/spark/sparkjs/blob/master/examples/node/get-event-stream-forever.js
I am wondering if something is wrong with the code below, or if I’m missing another piece. Thanks!
var mysql = require('mysql');
var cluster = require('cluster');
var os = require('os');
var bunyan = require('bunyan');
var spark = require('spark');
var async = require('async');
var net = require('net');
var config = require('./api/config.js');
var log = bunyan.createLogger({ name: 'services' });
var pool = mysql.createPoolCluster();
if (cluster.isMaster) {
  pool.add(config.dbPoolCreds);

  var numWorkers = os.cpus().length;
  log.info('Master cluster setting up ' + numWorkers + ' workers...');

  for (var i = 0; i < numWorkers; i++) {
    cluster.fork();
  }

  spark.login(config.particleLogin, function(err, body) {
    if (err) {
      log.error(err);
      return;
    }
    log.info({ spark: body });
  });

  var openStream = function() {
    var req = spark.getEventStream(false, 'mine', function(data) {
      log.warn(data);
      // Fan the event out to every live worker.
      for (var id in cluster.workers) {
        cluster.workers[id].send(data);
      }
    });

    req.on('end', function() {
      log.warn('Particle server ended! re-opening in 3 seconds...');
      setTimeout(openStream, 3 * 1000);
    });
  };

  // Open the stream only once the cloud session is established.
  spark.on('login', function() {
    openStream();
  });

  cluster.on('online', function(worker) {
    log.info('Worker ' + worker.process.pid + ' is online');
  });

  cluster.on('exit', function(worker, code, signal) {
    log.info('Worker ' + worker.process.pid + ' died with code: ' + code + ', and signal: ' + signal);
    log.info('Starting a new worker');
    cluster.fork();
  });
} else {
  pool.add(config.dbPoolCreds);

  // Workers just log whatever the master forwards, for now.
  process.on('message', function(data) {
    if (!data) {
      return;
    }
    log.warn(data);
  });
}
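In case it helps: my current theory is that the stream sometimes dies without ever emitting 'end' (e.g. a half-open TCP connection after a network blip), so the re-open handler above never fires. Below is a minimal sketch of the direction I’m considering instead: an 'error' listener plus an idle watchdog that forces a reconnect when no events arrive for a while. I’m assuming the object returned by getEventStream is the underlying HTTP request, so that 'error' and abort() are available on it; I haven’t verified that against the sparkjs source.

// Sketch only -- reuses spark, cluster, and log from the file above.
var RECONNECT_DELAY_MS = 3 * 1000;
var IDLE_TIMEOUT_MS = 5 * 60 * 1000; // my P1s publish at least every few minutes

function openStreamForever() {
  var reconnecting = false;
  var idleTimer = null;

  var req = spark.getEventStream(false, 'mine', function(data) {
    resetIdleTimer(); // any event proves the stream is still alive
    for (var id in cluster.workers) {
      cluster.workers[id].send(data);
    }
  });

  function reconnect(reason) {
    if (reconnecting) { return; } // abort() below may also fire 'end'/'error'
    reconnecting = true;
    clearTimeout(idleTimer);
    log.warn('Stream down (' + reason + '); re-opening shortly...');
    setTimeout(openStreamForever, RECONNECT_DELAY_MS);
  }

  function resetIdleTimer() {
    clearTimeout(idleTimer);
    idleTimer = setTimeout(function() {
      // No events for too long: assume a half-open connection. No 'end'
      // will ever fire in that case, so force the reconnect ourselves.
      req.abort(); // assumption: req is the underlying http.ClientRequest
      reconnect('idle timeout');
    }, IDLE_TIMEOUT_MS);
  }

  req.on('end', function() {
    reconnect('server ended stream');
  });

  // Assumption: socket errors are re-emitted on the returned request.
  req.on('error', function(err) {
    reconnect('socket error: ' + err.message);
  });

  resetIdleTimer();
}

Does that look like the right approach, or is there a supported way to get the same behavior out of sparkjs directly?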