I have a search feature implemented on top of fs.
When I receive an input string from the client, I split the file data line by line; but as you can see in server.log,
pulling data line by line misses data from an event that spans multiple lines. For example, the first event is two lines long, so my search only returns: [2017-03-22T20:25:04Z]|zldv6658|info|bmid: n/a|infra.actorRouter|Adding event to queue: { queue: 'd-email',
and misses the second line of that event. How can I get the complete event — perhaps based on the timestamp?
searchService.js
// Search every log file of the selected environment, one file at a time.
// Each file is streamed line-by-line via `split`; matching events are
// collected into `results` and handed to `callback` when all files are done.
async.eachSeries(filesData.logFiles, function(logfile, done) {
  console.log('SearchEnv in eachSeries', filesData.searchEnv);

  // Last line seen that did NOT match — the potential head of a
  // multi-line event, so continuation lines can be stitched back on.
  let prevLine;

  fs.createReadStream('./logs/' + filesData.searchEnv + '/' + logfile.filename)
    .pipe(split())
    .on('data', function(line) {
      if (line.toLowerCase().indexOf(searchStr.toLowerCase()) != -1) parseLog(line, prevLine);
      else prevLine = line;
    })
    // BUG FIX: done() must fire only after the whole file has been
    // streamed. Calling it synchronously (as before) let eachSeries —
    // and the final callback below — run before any data was read,
    // so `results` was sent back empty.
    .on('end', function() {
      done();
    })
    .on('error', done);

  // Reconstruct a complete event and collect it if its timestamp lies
  // inside the [searchStartDate, searchEndDate] window.
  function parseLog(line, prev) {
    // Continuation lines carry no '|' delimiter — prepend the previous
    // line to rebuild the full event (rudimentary check).
    if (line.indexOf('|') === -1) line = prev + line;

    // First '|'-separated field is the "[ISO-8601]" timestamp; strip brackets.
    var messageDateInfo = line.split('|')[0].replace(/[\[\]']+/g, '');
    messageDateInfo = new Date(messageDateInfo).getTime();
    searchStartDate = new Date(searchStartDate).getTime();
    searchEndDate = new Date(searchEndDate).getTime();

    if (messageDateInfo - searchStartDate > 0 && searchEndDate - messageDateInfo > 0) {
      results.push({
        filename: logfile.filename,
        value: line
      });
    }
  }
}, function(err) {
  if (err) {
    console.log('error', err);
  }
  // Side-effect-only iteration: forEach, not map (no mapped array is
  // used), and log the actual result instead of a constant string.
  results.forEach(function(result) {
    console.log('result', result);
  });
  // Send back results and reset per-search state.
  callback(results);
  results = [];
  logFiles = null;
});
}
server.log
[2017-03-22T20:25:04Z]|zldv6658|info|bmid: n/a|infra.actorRouter|Adding event to queue: { queue: 'd-email',
msgId: '7eec01e9-6395-4fee-b44f-f09a40e56978' }
[2017-03-22T20:25:04Z]|zldv6658|info|bmid: n/a|infra.templateActor|Filter match for actor/rule (d-email/email_service) with msgId: 7eec01e9-6395-4fee-b44f-f09a40e56978
[2017-03-22T20:25:04Z]|zldv6658|info|bmid: 7eec01e9-6395-4fee-b44f-f09a40e56978|mailDispatcher|Received mail event. msgId=7eec01e9-6395-4fee-b44f-f09a40e56978
[2017-03-22T20:25:04Z]|zldv6658|info|bmid: n/a|mailDispatcher|Mail event with msgId 7eec01e9-6395-4fee-b44f-f09a40e56978 successful: 3 messages delivered
[2017-03-22T20:25:05Z]|zldv6658|verbose|bmid: n/a|routes.event|Received Event from IP (::ffff:130.9.137.139): 74609753-143b-4e06-845c-9a5721575c19
{"event":{"header":{"eventSource":"AOTSvTM","timestamp":1481966987000,"eventType":"http://aotsvtm.eventing.att.com/SendEscalationsEvent/V1","entityId":"ENTITYID_1"}
You can use the split
module (similarly to how I demonstrated in my other answer to your very similar question) together with the fs
module.
// `split` turns the raw byte stream into one 'data' event per line.
fs.createReadStream(file)
  .pipe(split())
  .on('data', (line) => {
    // line is a complete, newline-free line of the file
  });
See the docs: https://www.npmjs.com/package/split
If your log actually has multiline events, you could just keep the previous line(s) in memory while parsing. Also, don't just load the whole thing at once in memory. Use streams to reduce the strain on your machine.
// Remember the last non-matching line so a continuation line (one
// without the '|' field delimiters) can be glued back onto the event
// it belongs to.
let prevLine;
fs.createReadStream(file)
  .pipe(split())
  .on('data', function (line) {
    // Matching lines are parsed; everything else becomes the potential
    // head of a multi-line event.
    if (line.toLowerCase().indexOf(searchStr.toLowerCase()) != -1) parseLog(line, prevLine);
    else prevLine = line;
  });
// NOTE: the stray extra `});` that followed this snippet was removed —
// it made the example syntactically invalid.
/**
 * Reconstruct a complete log event from a (possibly continuation) line.
 *
 * A continuation line is detected by the absence of the '|' field
 * delimiter; in that case the previous line is prepended.
 *
 * @param {string} line - the current line from the stream
 * @param {string} prev - the last line seen before this one
 * @returns {string} the full event line, so callers can parse/filter it
 *                   (previously the reconstructed line was discarded)
 */
function parseLog(line, prev) {
  // Very rudimentary check: real event lines always contain '|'.
  if (line.indexOf('|') === -1) line = prev + line;
  // Parse as you were doing — and return the result for the caller.
  return line;
}
As a rule of thumb for the future, log files are much easier to manage when built with single-line JSON (one JSON object per line).
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.