Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion build_inside_container.sh
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ autoreconf --install
# FLags to print compiler warnings
DEBUG_CFLAGS="-Wall -Werror -Wextra"

export CFLAGS=" ${DEBUG_CFLAGS} -I${INSTALL_DIR}/include/rtmessage -I${INSTALL_DIR}/include/msgpack -I${INSTALL_DIR}/include/rbus -I${INSTALL_DIR}/include -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include -I/usr/local/include -DFEATURE_SUPPORT_WEBCONFIG -DRDK_LOGGER"
export CFLAGS=" ${DEBUG_CFLAGS} -I${INSTALL_DIR}/include/rtmessage -I${INSTALL_DIR}/include/msgpack -I${INSTALL_DIR}/include/rbus -I${INSTALL_DIR}/include -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include -I/usr/local/include -DFEATURE_SUPPORT_WEBCONFIG -DRDK_LOGGER -DPERSIST_LOG_MON_REF"

export LDFLAGS="-L/usr/lib/x86_64-linux-gnu -lglib-2.0"

Expand Down
139 changes: 82 additions & 57 deletions source/dcautil/dca.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <limits.h>

#include <sys/sendfile.h>
#include <cjson/cJSON.h>

#include "dcautil.h"
Expand Down Expand Up @@ -98,14 +98,15 @@ static const char *strnstr(const char *haystack, const char *needle, size_t len)

for (size_t i = 0; i + needle_len <= len; i++)
{
if (memcmp(haystack + i, needle, needle_len) == 0)
{
return haystack + i;
}
if (haystack[i] == '\0')
{
break;
}
if (memcmp(haystack + i, needle, needle_len) == 0)
{
return haystack + i;
}

}
return NULL;
}
Expand Down Expand Up @@ -144,10 +145,7 @@ int processTopPattern(char* profileName, Vector* topMarkerList, Vector* out_gre
size_t var = 0;
size_t vCount = Vector_Size(topMarkerList);
T2Debug("topMarkerList for profile %s is of count = %lu \n", profileName, (unsigned long )vCount);
// Get logfile -> seek value map associated with the profile

// We are getting the exec count directly from the profileExecCounter parameter
//int profileExecCounter = gsProfile->execCounter;
char* filename = NULL;

for (var = 0; var < vCount; ++var)
Expand All @@ -159,6 +157,10 @@ int processTopPattern(char* profileName, Vector* topMarkerList, Vector* out_gre
}
int tmp_skip_interval, is_skip_param;
tmp_skip_interval = grepMarkerObj->skipFreq;
if(tmp_skip_interval <= 0)
{
tmp_skip_interval = 0;
}
is_skip_param = (profileExecCounter % (tmp_skip_interval + 1) == 0) ? 0 : 1;
if (is_skip_param != 0)
{
Expand Down Expand Up @@ -193,6 +195,10 @@ int processTopPattern(char* profileName, Vector* topMarkerList, Vector* out_gre
// If the skip frequency is set, skip the marker processing for this interval
int tmp_skip_interval, is_skip_param;
tmp_skip_interval = grepMarkerObj->skipFreq;
if(tmp_skip_interval <= 0)
{
tmp_skip_interval = 0;
}
is_skip_param = (profileExecCounter % (tmp_skip_interval + 1) == 0) ? 0 : 1;
if (is_skip_param != 0)
{
Expand Down Expand Up @@ -509,7 +515,6 @@ static char* getAbsolutePatternMatch(FileDescriptor* fileDescriptor, const char*
static int processPatternWithOptimizedFunction(const GrepMarker* marker, Vector* out_grepResultList, FileDescriptor* filedescriptor)
{
// Sanitize the input

const char* memmmapped_data_cf = filedescriptor->cfaddr;
if (!marker || !out_grepResultList || !memmmapped_data_cf)
{
Expand Down Expand Up @@ -547,7 +552,6 @@ static int processPatternWithOptimizedFunction(const GrepMarker* marker, Vector*
{
// Get the last occurrence of the pattern in the memory-mapped data
last_found = getAbsolutePatternMatch(filedescriptor, pattern);
// TODO : If trimParameter is true, trim the pattern before adding to the result list
if (last_found)
{
// If a match is found, process it accordingly
Expand Down Expand Up @@ -598,7 +602,6 @@ static int getLogFileDescriptor(GrepSeekProfile* gsProfile, const char* logPath,
return -1;
}

// Calculate the file size
struct stat sb;
if (fstat(fd, &sb) == -1)
{
Expand All @@ -607,7 +610,6 @@ static int getLogFileDescriptor(GrepSeekProfile* gsProfile, const char* logPath,
return -1;
}

// Check if the file size is 0
if (sb.st_size == 0)
{
T2Error("The size of the logfile is 0 for %s\n", logFile);
Expand Down Expand Up @@ -665,7 +667,6 @@ static int getRotatedLogFileDescriptor(const char* logPath, const char* logFile)
return -1;
}

// Calculate the file size
struct stat rb;
if (fstat(rd, &rb) == -1)
{
Expand All @@ -692,10 +693,12 @@ static void freeFileDescriptor(FileDescriptor* fileDescriptor)
if(fileDescriptor->baseAddr)
{
munmap(fileDescriptor->baseAddr, fileDescriptor->cf_file_size);
fileDescriptor->baseAddr = NULL;
}
if(fileDescriptor->rotatedAddr)
{
munmap(fileDescriptor->rotatedAddr, fileDescriptor->rf_file_size);
fileDescriptor->rotatedAddr = NULL;
}
fileDescriptor->cfaddr = NULL;
fileDescriptor->rfaddr = NULL;
Expand Down Expand Up @@ -736,7 +739,7 @@ static FileDescriptor* getFileDeltaInMemMapAndSearch(const int fd, const off_t s
off_t offset_in_page_size_multiple ;
unsigned int bytes_ignored = 0, bytes_ignored_main = 0, bytes_ignored_rotated = 0;
// Find the nearest multiple of page size
if (seek_value > 0)
if (seek_value > 0 && PAGESIZE > 0)
{
offset_in_page_size_multiple = (seek_value / PAGESIZE) * PAGESIZE;
bytes_ignored = seek_value - offset_in_page_size_multiple;
Expand All @@ -746,64 +749,88 @@ static FileDescriptor* getFileDeltaInMemMapAndSearch(const int fd, const off_t s
offset_in_page_size_multiple = 0;
bytes_ignored = 0;
}
//create a tmp file for main file fd
char tmp_fdmain[] = "/tmp/dca_tmpfile_fdmainXXXXXX";
int tmp_fd = mkstemp(tmp_fdmain);
if (tmp_fd == -1)
{
T2Error("Failed to create temp file: %s\n", strerror(errno));
return NULL;
}
unlink(tmp_fdmain);
off_t offset = 0;
ssize_t sent = sendfile(tmp_fd, fd, &offset, sb.st_size);
if (sent != sb.st_size)
{
T2Error("sendfile failed: %s\n", strerror(errno));
close(tmp_fd);
return NULL;
}

if(seek_value > sb.st_size || check_rotated == true)
{
int rd = getRotatedLogFileDescriptor(logPath, logFile);
if (rd == -1)
if (rd != -1 && fstat(rd, &rb) == 0 && rb.st_size > 0)
{
T2Error("Error opening rotated file. Start search in current file\n");
T2Debug("File size rounded to nearest page size used for offset read: %jd bytes\n", (intmax_t)offset_in_page_size_multiple);
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, offset_in_page_size_multiple);
bytes_ignored_main = bytes_ignored;
}
else
{
int fs = 0;
fs = fstat(rd, &rb);
if(fs == -1)
char tmp_fdrotated[] = "/tmp/dca_tmpfile_fdrotatedXXXXXX";
int tmp_rd = mkstemp(tmp_fdrotated);
if (tmp_rd == -1)
{
T2Error("Error getting file size\n");
close(rd);
T2Error("Failed to create temp file: %s\n", strerror(errno));
return NULL;
}
else
unlink(tmp_fdrotated);
offset = 0;
sent = sendfile(tmp_rd, rd, &offset, rb.st_size);
if (sent != rb.st_size)
{
if(rb.st_size == 0)
{
T2Error("The Size of the logfile is 0\n");
close(rd);
}
T2Error("sendfile failed: %s\n", strerror(errno));
close(tmp_rd);
return NULL;
}

if(rb.st_size > 0)
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, tmp_fd, 0);
addrrf = mmap(NULL, rb.st_size, PROT_READ, MAP_PRIVATE, tmp_rd, offset_in_page_size_multiple);
bytes_ignored_rotated = bytes_ignored;
close(rd);
close(tmp_rd);
rd = -1;
}
else
{
T2Error("Error opening rotated file. Start search in current file\n");
T2Debug("File size rounded to nearest page size used for offset read: %jd bytes\n", (intmax_t)offset_in_page_size_multiple);
if(seek_value < sb.st_size)
{
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
addrrf = mmap(NULL, rb.st_size, PROT_READ, MAP_PRIVATE, rd, offset_in_page_size_multiple);
bytes_ignored_rotated = bytes_ignored;
if(rd != -1)
{
close(rd);
rd = -1;
}
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, tmp_fd, offset_in_page_size_multiple);
bytes_ignored_main = bytes_ignored;
}


if(rb.st_size == 0 && fs == -1)
else
{
T2Debug("No contents in rotated log file. File size rounded to nearest page size used for offset read: %jd bytes\n", (intmax_t)offset_in_page_size_multiple);
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, offset_in_page_size_multiple);
bytes_ignored_main = bytes_ignored;
T2Debug("Log file got rotated. Ignoring invalid mapping\n");
close(tmp_fd);
close(fd);
return NULL;
}
}
}
else
{
T2Info("File size rounded to nearest page size used for offset read: %jd bytes\n", (intmax_t)offset_in_page_size_multiple);
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, offset_in_page_size_multiple);
bytes_ignored_main = bytes_ignored;
addrrf = NULL; // No rotated file in this case
T2Debug("File size rounded to nearest page size used for offset read: %jd bytes\n", (intmax_t)offset_in_page_size_multiple);
if(seek_value < sb.st_size)
{
addrcf = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, tmp_fd, offset_in_page_size_multiple);
bytes_ignored_main = bytes_ignored;
}
else
{
T2Debug("Log file got rotated. Ignoring invalid mapping\n");
close(tmp_fd);
close(fd);
return NULL;
}
addrrf = NULL;
}

close(tmp_fd);
close(fd);

if (addrcf == MAP_FAILED)
Expand Down Expand Up @@ -871,12 +898,10 @@ static int parseMarkerListOptimized(GrepSeekProfile *gsProfile, Vector * ip_vMar
}

char *prevfile = NULL;
//GrepSeekProfile* gsProfile = NULL;

size_t var = 0;
size_t vCount = Vector_Size(ip_vMarkerList);

// Get logfile -> seek value map associated with the profile
//gsProfile = (GrepSeekProfile *) getLogSeekMapForProfile(profileName);
if(NULL == gsProfile)
{
T2Error("%s Unable to retrieve/create logSeekMap for profile \n", __FUNCTION__);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -226,3 +226,65 @@ Scenario: Multiprofile with TriggerConditions
Given the telemetry daemon is already running
When a multiprofile is configured with TriggerConditions
Then Multiprofile should be accepted and report should be generated whenever trigger condition is triggered

Scenario: Check for HASH value matches of profile to avoid duplicate processing
Given a multiprofile is running
When another multiprofile with same name and hash is configured
Then the configuration will be ignored

Scenario: Support for subscribing to TR181 Parameter value change
Given a datamodel marker is configured as method subscribe
When the tr181 parameter value changes
Then the value change will be sent as an event to the telemetry daemon

Scenario: Data harvesting from previous logs folder for report profiles with log file search markers
Given the device has logs from the previous session in the PreviousLogs folder
When a profile goes through log files for report generation
Then the log files in PreviousLogs folder will also be grepped for log lines

Scenario: Capability to support multiple split markers for the same log line
When two split markers are configured for the same log line in a file
Then both the markers will be reported

Scenario: Include data from data source Tr181 parameters as Accumulate
Given a datamodel marker is configured as method subscribe and use accumulate
When the tr181 parameter value changes multiple times within the reporting interval
Then all the changes will be reported with values

Scenario: Report sending over HTTP protocol
Given a profile is configured with report sending protocol as HTTP along with the respective endpoint
Then the report will be sent to the configured endpoint

Scenario: Caching of upload failed reports
Given a JSON report is attempted to be sent via the configured method
When the attempt to send the report fails
Then the report will be cached to be sent later along with the next report

Scenario: Report sending with protocol set as RBUS_METHOD in report profiles.
Given a profile is configured with report sending protocol as RBUS_METHOD along with the respective datamodel
Then the report will be delivered to the respective datamodel

Scenario: Report generation for profiles with log grep markers during log file rotation scenarios
Given a grep marker is configured
When the respective log file reaches a certain limit and has been rotated
Then the content of the rotated log file is also grepped for the search string

Scenario: Event accumulate with and without timestamp in report profiles for event markers and datamodel.
Given an event marker or tr181 marker with subscribe is configured with reportTimeStamp
When the event is sent to the telemetry daemon
Then the telemetry report will have the time the event was received as timestamp

Scenario: Forced on demand reporting outside the regular reporting intervals.
Given a single profile or a multiprofile is running
When kill signal 29 is sent to the telemetry daemon
Then a report will be generated immediately for all the running profiles

Scenario: Stress testing of interaction with rbus interface to check for any deadlocks or rbus timeouts.
Given telemetry is running and an event marker is configured
When the configured event markers is sent in large numbers without any interval
Then all the events should be captured and the telemetry daemon should not crash

Scenario: profile persistence
Given a multiprofile is expired
When the telemetry is restarted
Then the profile will be enabled after restart
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
####################################################################################
# If not stated otherwise in this file or this component's Licenses
# following copyright and licenses apply:
#
# Copyright 2024 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################


Feature: Telemetry Single profile configuration and report generation

Scenario: Single profile configuration with event marker and use as accumulate
Given the telemetry daemon is already running
When a single profile is configured with event marker and use as accumulate
Then generated report should contain the values for all occurrences of the marker

Scenario: Capability to support multiple split markers for the same log line
When two split markers are configured for the same log line in a file
Then both the markers will be reported

Scenario: Caching of upload failed reports
Given a JSON report is attempted to be sent via the configured method
When the attempt to send the report fails
Then the report will be cached to be sent later along with the next report

Scenario: Data harvesting from previous logs folder for report profiles with log file search markers
Given the device has logs from the previous session in the PreviousLogs folder
When a profile goes through log files for report generation
Then the log files in PreviousLogs folder will also be grepped for log lines
Loading